unsigned int refcnt; /* usage count */
struct tc_stats stats; /* generic statistics */
+ spinlock_t *stats_lock;
unsigned int level; /* class level in hierarchy */
struct tcf_proto *filter_list; /* filter list */
unsigned int filter_cnt; /* filter count */
#ifdef CONFIG_NET_ESTIMATOR
if (tca[TCA_RATE-1]) {
qdisc_kill_estimator(&cl->stats);
- qdisc_new_estimator(&cl->stats, tca[TCA_RATE-1]);
+ qdisc_new_estimator(&cl->stats, cl->stats_lock,
+ tca[TCA_RATE-1]);
}
#endif
return 0;
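
For orientation: the call sites in this hunk imply that the estimator helpers now take the lock guarding the counters as an explicit argument. A sketch of the assumed prototypes, inferred only from the calls above (the authoritative declarations live in the core qdisc code):

    /* Assumed shapes, inferred from the call sites in this patch. */
    int  qdisc_new_estimator(struct tc_stats *stats, spinlock_t *stats_lock,
                             struct rtattr *opt);
    void qdisc_kill_estimator(struct tc_stats *stats);
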
cl->qdisc = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops);
if (cl->qdisc == NULL)
cl->qdisc = &noop_qdisc;
- cl->stats.lock = &sch->dev->queue_lock;
+ cl->stats_lock = &sch->dev->queue_lock;
INIT_LIST_HEAD(&cl->children);
INIT_LIST_HEAD(&cl->actlist);
#ifdef CONFIG_NET_ESTIMATOR
if (tca[TCA_RATE-1])
- qdisc_new_estimator(&cl->stats, tca[TCA_RATE-1]);
+ qdisc_new_estimator(&cl->stats, cl->stats_lock,
+ tca[TCA_RATE-1]);
#endif
*arg = (unsigned long)cl;
return 0;
}
static struct hfsc_class *
-hfsc_classify(struct sk_buff *skb, struct Qdisc *sch)
+hfsc_classify(struct sk_buff *skb, struct Qdisc *sch, int *qres)
{
struct hfsc_sched *q = (struct hfsc_sched *)sch->data;
struct hfsc_class *cl;
tcf = q->root.filter_list;
while (tcf && (result = tc_classify(skb, tcf, &res)) >= 0) {
+#ifdef CONFIG_NET_CLS_ACT
+ int terminal = 0;
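+ /* Map the classifier/action verdict: TC_ACT_SHOT reports a drop via
+  * *qres; TC_ACT_QUEUED and TC_ACT_STOLEN end classification with
+  * *qres left at its initial value; every terminal verdict frees the
+  * skb below and returns no class. */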
+ switch (result) {
+ case TC_ACT_SHOT:
+ *qres = NET_XMIT_DROP;
+ terminal = 1;
+ break;
+ case TC_ACT_QUEUED:
+ case TC_ACT_STOLEN:
+ terminal = 1;
+ break;
+ case TC_ACT_RECLASSIFY:
+ case TC_ACT_OK:
+ case TC_ACT_UNSPEC:
+ default:
+ break;
+ }
+
+ if (terminal) {
+ kfree_skb(skb);
+ return NULL;
+ }
+#else
#ifdef CONFIG_NET_CLS_POLICE
if (result == TC_POLICE_SHOT)
return NULL;
+#endif
#endif
if ((cl = (struct hfsc_class *)res.class) == NULL) {
if ((cl = hfsc_find_class(res.classid, sch)) == NULL)
hfsc_dump_stats(struct sk_buff *skb, struct hfsc_class *cl)
{
cl->stats.qlen = cl->qdisc->q.qlen;
- if (qdisc_copy_stats(skb, &cl->stats) < 0)
+ if (qdisc_copy_stats(skb, &cl->stats, cl->stats_lock) < 0)
goto rtattr_failure;
return skb->len;
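
Likewise, qdisc_copy_stats() is assumed to take the lock protecting the stats it snapshots; a sketch inferred from the call above and its negative-return error check:

    /* Assumed shape, inferred from the call site in this patch. */
    int qdisc_copy_stats(struct sk_buff *skb, struct tc_stats *stats,
                         spinlock_t *stats_lock);
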
qopt = RTA_DATA(opt);
memset(q, 0, sizeof(struct hfsc_sched));
- sch->stats.lock = &sch->dev->queue_lock;
+ sch->stats_lock = &sch->dev->queue_lock;
q->defcls = qopt->defcls;
for (i = 0; i < HFSC_HSIZE; i++)
q->root.qdisc = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops);
if (q->root.qdisc == NULL)
q->root.qdisc = &noop_qdisc;
- q->root.stats.lock = &sch->dev->queue_lock;
+ q->root.stats_lock = &sch->dev->queue_lock;
INIT_LIST_HEAD(&q->root.children);
INIT_LIST_HEAD(&q->root.actlist);
RTA_PUT(skb, TCA_OPTIONS, sizeof(qopt), &qopt);
sch->stats.qlen = sch->q.qlen;
- if (qdisc_copy_stats(skb, &sch->stats) < 0)
+ if (qdisc_copy_stats(skb, &sch->stats, sch->stats_lock) < 0)
goto rtattr_failure;
return skb->len;
static int
hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
- struct hfsc_class *cl = hfsc_classify(skb, sch);
+ int ret = NET_XMIT_SUCCESS;
+ struct hfsc_class *cl = hfsc_classify(skb, sch, &ret);
unsigned int len = skb->len;
int err;
+
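+ /* With CLS_ACT, a terminal action verdict makes hfsc_classify() free
+  * the skb and return no class; ret then carries the verdict, and only
+  * TC_ACT_SHOT is accounted as a drop. */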
+#ifdef CONFIG_NET_CLS_ACT
+ if (cl == NULL) {
+ if (ret == NET_XMIT_DROP) {
+ sch->stats.drops++;
+ }
+ return ret;
+ }
+#else
if (cl == NULL) {
kfree_skb(skb);
sch->stats.drops++;
return NET_XMIT_DROP;
}
+#endif
err = cl->qdisc->enqueue(skb, cl->qdisc);
if (unlikely(err != NET_XMIT_SUCCESS)) {