Qevents are attach points for TC blocks: filters attached to a qevent's
block are executed when the corresponding "interesting event" takes place
in a qdisc. The data to keep and the functions to invoke to maintain a
qevent will be largely the same across qevents. Therefore introduce
sched-wide helpers for qevent management.
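
For illustration, here is a minimal sketch of how a qdisc might wire a
qevent up at init time. The qdisc name "foo", the TCA_FOO_* attributes,
foo_policy and the binder type are hypothetical placeholders, not part
of this patch:

static int foo_init(struct Qdisc *sch, struct nlattr *opt,
                    struct netlink_ext_ack *extack)
{
        struct foo_sched_data *q = qdisc_priv(sch);
        struct nlattr *tb[TCA_FOO_MAX + 1];
        int err;

        err = nla_parse_nested(tb, TCA_FOO_MAX, opt, foo_policy, extack);
        if (err < 0)
                return err;

        /* Bind a block to the qevent if the user gave a block index;
         * a missing attribute leaves the qevent disabled.
         */
        return tcf_qevent_init(&q->qe, sch,
                               FLOW_BLOCK_BINDER_TYPE_FOO_EVENT,
                               tb[TCA_FOO_QEVENT_BLOCK], extack);
}

Here struct foo_sched_data embeds a struct tcf_qevent as the member qe.
The matching cleanup is a call to tcf_qevent_destroy() from the qdisc's
destroy callback.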

Currently, similarly to the ingress and egress blocks of the clsact
pseudo-qdisc, block attachment cannot be changed after the qdisc is
created. To enforce that, add a helper tcf_qevent_validate_change(),
which verifies that the block index attribute is either absent, or, if
present, that its value matches the current one (i.e. there is no
material change).
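
For example, a change() callback might use it like this (again with
hypothetical foo placeholders):

static int foo_change(struct Qdisc *sch, struct nlattr *opt,
                      struct netlink_ext_ack *extack)
{
        struct foo_sched_data *q = qdisc_priv(sch);
        struct nlattr *tb[TCA_FOO_MAX + 1];
        int err;

        err = nla_parse_nested(tb, TCA_FOO_MAX, opt, foo_policy, extack);
        if (err < 0)
                return err;

        /* Reject attempts to attach a block after the fact, or to
         * replace the block that is already attached.
         */
        err = tcf_qevent_validate_change(&q->qe, tb[TCA_FOO_QEVENT_BLOCK],
                                         extack);
        if (err)
                return err;

        /* ... apply the rest of the configuration ... */
        return 0;
}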

The function tcf_qevent_handle() should be invoked when the qdisc hits
the "interesting event" corresponding to the given block. It passes the
skb through the block's filters and translates their verdict into an
enqueue result, consuming the skb when a filter drops, steals or
redirects it.

Signed-off-by: Petr Machata <pe...@mellanox.com>
---
 include/net/pkt_cls.h |  48 +++++++++++++++++++
 net/sched/cls_api.c   | 107 ++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 155 insertions(+)

diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h
index ed65619cbc47..efb20e3c2c98 100644
--- a/include/net/pkt_cls.h
+++ b/include/net/pkt_cls.h
@@ -32,6 +32,13 @@ struct tcf_block_ext_info {
        u32 block_index;
 };
 
+struct tcf_qevent {
+       int                     attr_name;
+       struct tcf_block        *block;
+       struct tcf_block_ext_info info;
+       struct tcf_proto __rcu *filter_chain;
+};
+
 struct tcf_block_cb;
 bool tcf_queue_work(struct rcu_work *rwork, work_func_t func);
 
@@ -552,6 +559,47 @@ int tc_setup_cb_reoffload(struct tcf_block *block, struct tcf_proto *tp,
                          void *cb_priv, u32 *flags, unsigned int *in_hw_count);
 unsigned int tcf_exts_num_actions(struct tcf_exts *exts);
 
+#ifdef CONFIG_NET_CLS_ACT
+int tcf_qevent_init(struct tcf_qevent *qe, struct Qdisc *sch,
+                   enum flow_block_binder_type binder_type,
+                   struct nlattr *block_index_attr,
+                   struct netlink_ext_ack *extack);
+void tcf_qevent_destroy(struct tcf_qevent *qe, struct Qdisc *sch);
+int tcf_qevent_validate_change(struct tcf_qevent *qe,
+                              struct nlattr *block_index_attr,
+                              struct netlink_ext_ack *extack);
+struct sk_buff *tcf_qevent_handle(struct tcf_qevent *qe, struct Qdisc *sch,
+                                 struct sk_buff *skb, struct sk_buff **to_free,
+                                 int *ret);
+#else
+static inline int tcf_qevent_init(struct tcf_qevent *qe, struct Qdisc *sch,
+                                 enum flow_block_binder_type binder_type,
+                                 struct nlattr *block_index_attr,
+                                 struct netlink_ext_ack *extack)
+{
+       return 0;
+}
+
+static inline void tcf_qevent_destroy(struct tcf_qevent *qe, struct Qdisc *sch)
+{
+}
+
+static inline int tcf_qevent_validate_change(struct tcf_qevent *qe,
+                                            struct nlattr *block_index_attr,
+                                            struct netlink_ext_ack *extack)
+{
+       return 0;
+}
+
+static inline struct sk_buff *
+tcf_qevent_handle(struct tcf_qevent *qe, struct Qdisc *sch,
+                 struct sk_buff *skb, struct sk_buff **to_free,
+                 int *ret)
+{
+       return skb;
+}
+#endif
+
 struct tc_cls_u32_knode {
        struct tcf_exts *exts;
        struct tcf_result *res;
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index 752d608f4442..f95a5eee9279 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -3801,6 +3801,113 @@ unsigned int tcf_exts_num_actions(struct tcf_exts *exts)
 }
 EXPORT_SYMBOL(tcf_exts_num_actions);
 
+static int tcf_qevent_parse_block_index(struct nlattr *block_index_attr,
+                                       u32 *p_block_index,
+                                       struct netlink_ext_ack *extack)
+{
+       *p_block_index = nla_get_u32(block_index_attr);
+       if (!*p_block_index) {
+               NL_SET_ERR_MSG(extack, "Block number may not be zero");
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+int tcf_qevent_init(struct tcf_qevent *qe, struct Qdisc *sch,
+                   enum flow_block_binder_type binder_type,
+                   struct nlattr *block_index_attr,
+                   struct netlink_ext_ack *extack)
+{
+       u32 block_index;
+       int err;
+
+       if (!block_index_attr)
+               return 0;
+
+       err = tcf_qevent_parse_block_index(block_index_attr, &block_index,
+                                          extack);
+       if (err)
+               return err;
+
+       if (!block_index)
+               return 0;
+
+       qe->info.binder_type = binder_type;
+       qe->info.chain_head_change = tcf_chain_head_change_dflt;
+       qe->info.chain_head_change_priv = &qe->filter_chain;
+       qe->info.block_index = block_index;
+
+       return tcf_block_get_ext(&qe->block, sch, &qe->info, extack);
+}
+EXPORT_SYMBOL(tcf_qevent_init);
+
+void tcf_qevent_destroy(struct tcf_qevent *qe, struct Qdisc *sch)
+{
+       if (qe->info.block_index)
+               tcf_block_put_ext(qe->block, sch, &qe->info);
+}
+EXPORT_SYMBOL(tcf_qevent_destroy);
+
+int tcf_qevent_validate_change(struct tcf_qevent *qe,
+                              struct nlattr *block_index_attr,
+                              struct netlink_ext_ack *extack)
+{
+       u32 block_index;
+       int err;
+
+       if (!block_index_attr)
+               return 0;
+
+       err = tcf_qevent_parse_block_index(block_index_attr, &block_index,
+                                          extack);
+       if (err)
+               return err;
+
+       /* Bounce newly-configured block or change in block. */
+       if (block_index != qe->info.block_index) {
+               NL_SET_ERR_MSG(extack, "Change of blocks is not supported");
+               return -EINVAL;
+       }
+
+       return 0;
+}
+EXPORT_SYMBOL(tcf_qevent_validate_change);
+
+struct sk_buff *tcf_qevent_handle(struct tcf_qevent *qe, struct Qdisc *sch,
+                                 struct sk_buff *skb, struct sk_buff **to_free,
+                                 int *ret)
+{
+       struct tcf_result cl_res;
+       struct tcf_proto *fl;
+
+       if (!qe->info.block_index)
+               return skb;
+
+       fl = rcu_dereference_bh(qe->filter_chain);
+
+       switch (tcf_classify(skb, fl, &cl_res, false)) {
+       case TC_ACT_SHOT:
+               qdisc_qstats_drop(sch);
+               __qdisc_drop(skb, to_free);
+               *ret = __NET_XMIT_BYPASS;
+               return NULL;
+       case TC_ACT_STOLEN:
+       case TC_ACT_QUEUED:
+       case TC_ACT_TRAP:
+               __qdisc_drop(skb, to_free);
+               *ret = __NET_XMIT_STOLEN;
+               return NULL;
+       case TC_ACT_REDIRECT:
+               skb_do_redirect(skb);
+               *ret = __NET_XMIT_STOLEN;
+               return NULL;
+       }
+
+       return skb;
+}
+EXPORT_SYMBOL(tcf_qevent_handle);
+
 static __net_init int tcf_net_init(struct net *net)
 {
        struct tcf_net *tn = net_generic(net, tcf_net_id);
-- 
2.20.1
