Hi Sai,

After looking at your follow-up patches, I feel it's better to rename OVS_MCAST_EVENT_ENTRIES to OVS_MCAST_EVENT_TYPE, and OVS_MCAST_EVENT_ENTRIES_MAX to OVS_MCAST_EVENT_TYPES_MAX. At first glance, "event_entries_max" seems to suggest the maximum number of events in a queue, which is not the case; it is really the number of supported mcast event types.
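To make the suggestion concrete, here is a rough sketch of what I have in mind for the enum in DpInternal.h. Only the tag and the _MAX macro are renamed; the members stay as they are, and the spelling of the internal __..._MAX sentinel is just my guess at how you would carry the rename through:

    /* Supported mcast event groups */
    enum OVS_MCAST_EVENT_TYPE {
        OVS_MCAST_VPORT_EVENT,
        OVS_MCAST_CT_EVENT,
        __OVS_MCAST_EVENT_TYPES_MAX   /* internal sentinel; name is only a suggestion */
    };
    #define OVS_MCAST_EVENT_TYPES_MAX \
        (__OVS_MCAST_EVENT_TYPES_MAX - OVS_MCAST_VPORT_EVENT)

Any place in the follow-up patches that sizes arrays or loops using OVS_MCAST_EVENT_ENTRIES_MAX would then read OVS_MCAST_EVENT_TYPES_MAX instead, which I think is easier to follow.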
Best regards,
Yin Lin

On Wed, Jul 13, 2016 at 4:38 PM, Sairam Venugopal <vsai...@vmware.com> wrote:
> Define new Conntrack events (new and delete) and add support for
> subscribing to these events. Parse out OVS_NL_ATTR_MCAST_GRP and store it
> as part of OVS_EVENT_SUBSCRIBE structure.
>
> Signed-off-by: Sairam Venugopal <vsai...@vmware.com>
> ---
>  datapath-windows/ovsext/Datapath.c   | 21 ++++++++++++++++++---
>  datapath-windows/ovsext/Datapath.h   |  3 ++-
>  datapath-windows/ovsext/DpInternal.h | 15 +++++++++++++++
>  3 files changed, 35 insertions(+), 4 deletions(-)
>
> diff --git a/datapath-windows/ovsext/Datapath.c b/datapath-windows/ovsext/Datapath.c
> index e4d6ab1..a5a0b35 100644
> --- a/datapath-windows/ovsext/Datapath.c
> +++ b/datapath-windows/ovsext/Datapath.c
> @@ -1268,11 +1268,12 @@ OvsSubscribeEventCmdHandler(POVS_USER_PARAMS_CONTEXT usrParamsCtx,
>      OVS_EVENT_SUBSCRIBE request;
>      BOOLEAN rc;
>      UINT8 join;
> +    UINT32 mcastGrp;
>      PNL_ATTR attrs[2];
>      const NL_POLICY policy[] = {
>          [OVS_NL_ATTR_MCAST_GRP] = {.type = NL_A_U32 },
>          [OVS_NL_ATTR_MCAST_JOIN] = {.type = NL_A_U8 },
> -    };
> +    };
>
>      UNREFERENCED_PARAMETER(replyLen);
>
> @@ -1288,11 +1289,25 @@ OvsSubscribeEventCmdHandler(POVS_USER_PARAMS_CONTEXT usrParamsCtx,
>          goto done;
>      }
>
> -    /* XXX Ignore the MC group for now */
> +    mcastGrp = NlAttrGetU32(attrs[OVS_NL_ATTR_MCAST_GRP]);
>      join = NlAttrGetU8(attrs[OVS_NL_ATTR_MCAST_JOIN]);
>      request.dpNo = msgIn->ovsHdr.dp_ifindex;
>      request.subscribe = join;
> -    request.mask = OVS_EVENT_MASK_ALL;
> +    request.mcastGrp = mcastGrp;
> +    request.protocol = instance->protocol;
> +    request.mask = 0;
> +
> +    /* We currently support Vport and CT related events */
> +    if (instance->protocol == NETLINK_GENERIC) {
> +        request.mask = OVS_EVENT_MASK_ALL;
> +    } else if (instance->protocol == NETLINK_NETFILTER) {
> +        if (mcastGrp == NFNLGRP_CONNTRACK_NEW) {
> +            request.mask = OVS_EVENT_CT_NEW;
> +        }
> +        if (mcastGrp == NFNLGRP_CONNTRACK_DESTROY) {
> +            request.mask = OVS_EVENT_CT_DELETE;
> +        }
> +    }
>
>      status = OvsSubscribeEventIoctl(instance->fileObject, &request,
>                                      sizeof request);
> diff --git a/datapath-windows/ovsext/Datapath.h b/datapath-windows/ovsext/Datapath.h
> index 2b41d82..57b483a 100644
> --- a/datapath-windows/ovsext/Datapath.h
> +++ b/datapath-windows/ovsext/Datapath.h
> @@ -51,7 +51,8 @@ typedef struct _OVS_OPEN_INSTANCE {
>      PVOID eventQueue;
>      POVS_USER_PACKET_QUEUE packetQueue;
>      UINT32 pid;
> -    UINT32 protocol;  /* Refers to NETLINK Family (eg. NETLINK_GENERIC)*/
> +    UINT32 protocol;   /* Refers to NETLINK Family (eg. NETLINK_GENERIC)*/
> +    UINT32 mcastMask;  /* Mask of subscribed Mcast Groups */
>
>      struct {
>          POVS_MESSAGE ovsMsg;    /* OVS message passed during dump start. */
> diff --git a/datapath-windows/ovsext/DpInternal.h b/datapath-windows/ovsext/DpInternal.h
> index 8abe61d..9641bf6 100644
> --- a/datapath-windows/ovsext/DpInternal.h
> +++ b/datapath-windows/ovsext/DpInternal.h
> @@ -310,6 +310,8 @@ typedef struct _OVS_EVENT_SUBSCRIBE {
>      uint32_t dpNo;
>      uint32_t subscribe;
>      uint32_t mask;
> +    uint32_t mcastGrp;
> +    uint32_t protocol;
>  } OVS_EVENT_SUBSCRIBE, *POVS_EVENT_SUBSCRIBE;
>
>  typedef struct _OVS_EVENT_POLL {
> @@ -327,6 +329,19 @@ enum {
>      OVS_EVENT_MASK_ALL = 0x3f,
>  };
>
> +enum {
> +    OVS_EVENT_CT_NEW = ((uint32_t)0x1 << 0),
> +    OVS_EVENT_CT_DELETE = ((uint32_t)0x1 << 1),
> +    OVS_EVENT_CT_MASK_ALL = 0x3
> +};
> +
> +/* Supported mcast event groups */
> +enum OVS_MCAST_EVENT_ENTRIES {
> +    OVS_MCAST_VPORT_EVENT,
> +    OVS_MCAST_CT_EVENT,
> +    __OVS_MCAST_EVENT_ENTRIES_MAX
> +};
> +#define OVS_MCAST_EVENT_ENTRIES_MAX (__OVS_MCAST_EVENT_ENTRIES_MAX - OVS_MCAST_VPORT_EVENT)
>
>  typedef struct _OVS_VPORT_EVENT_ENTRY {
>      UINT32 portNo;
> --
> 2.9.0.windows.1
>
_______________________________________________
dev mailing list
dev@openvswitch.org
http://openvswitch.org/mailman/listinfo/dev