patch-2.3.6 linux/net/sched/sch_cbq.c
- Lines: 156
- Date: Wed Jun 9 14:45:37 1999
- Orig file: v2.3.5/linux/net/sched/sch_cbq.c
- Orig date: Sat Apr 24 17:51:48 1999
diff -u --recursive --new-file v2.3.5/linux/net/sched/sch_cbq.c linux/net/sched/sch_cbq.c
@@ -1417,6 +1417,7 @@
q->link.ewma_log = TC_CBQ_DEF_EWMA;
q->link.avpkt = q->link.allot/2;
q->link.minidle = -0x7FFFFFFF;
+ q->link.stats.lock = &sch->dev->queue_lock;
init_timer(&q->wd_timer);
q->wd_timer.data = (unsigned long)sch;
@@ -1558,6 +1559,16 @@
return 0;
}
+int cbq_copy_xstats(struct sk_buff *skb, struct tc_cbq_xstats *st)
+{
+ RTA_PUT(skb, TCA_XSTATS, sizeof(*st), st);
+ return 0;
+
+rtattr_failure:
+ return -1;
+}
+
+
static int cbq_dump(struct Qdisc *sch, struct sk_buff *skb)
{
struct cbq_sched_data *q = (struct cbq_sched_data*)sch->data;
@@ -1569,8 +1580,13 @@
if (cbq_dump_attr(skb, &q->link) < 0)
goto rtattr_failure;
rta->rta_len = skb->tail - b;
+ spin_lock_bh(&sch->dev->queue_lock);
q->link.xstats.avgidle = q->link.avgidle;
- RTA_PUT(skb, TCA_XSTATS, sizeof(q->link.xstats), &q->link.xstats);
+ if (cbq_copy_xstats(skb, &q->link.xstats)) {
+ spin_unlock_bh(&sch->dev->queue_lock);
+ goto rtattr_failure;
+ }
+ spin_unlock_bh(&sch->dev->queue_lock);
return skb->len;
rtattr_failure:
@@ -1600,12 +1616,19 @@
goto rtattr_failure;
rta->rta_len = skb->tail - b;
cl->stats.qlen = cl->q->q.qlen;
- RTA_PUT(skb, TCA_STATS, sizeof(cl->stats), &cl->stats);
+ if (qdisc_copy_stats(skb, &cl->stats))
+ goto rtattr_failure;
+ spin_lock_bh(&sch->dev->queue_lock);
cl->xstats.avgidle = cl->avgidle;
cl->xstats.undertime = 0;
if (!PSCHED_IS_PASTPERFECT(cl->undertime))
cl->xstats.undertime = PSCHED_TDIFF(cl->undertime, q->now);
- RTA_PUT(skb, TCA_XSTATS, sizeof(cl->xstats), &cl->xstats);
+ q->link.xstats.avgidle = q->link.avgidle;
+ if (cbq_copy_xstats(skb, &cl->xstats)) {
+ spin_unlock_bh(&sch->dev->queue_lock);
+ goto rtattr_failure;
+ }
+ spin_unlock_bh(&sch->dev->queue_lock);
return skb->len;
@@ -1631,8 +1654,11 @@
new->reshape_fail = cbq_reshape_fail;
#endif
}
- if ((*old = xchg(&cl->q, new)) != NULL)
- qdisc_reset(*old);
+ sch_tree_lock(sch);
+ *old = cl->q;
+ cl->q = new;
+ qdisc_reset(*old);
+ sch_tree_unlock(sch);
return 0;
}
@@ -1710,16 +1736,16 @@
struct cbq_sched_data *q = (struct cbq_sched_data *)sch->data;
struct cbq_class *cl = (struct cbq_class*)arg;
- start_bh_atomic();
if (--cl->refcnt == 0) {
#ifdef CONFIG_NET_CLS_POLICE
+ spin_lock_bh(&sch->dev->queue_lock);
if (q->rx_class == cl)
q->rx_class = NULL;
+ spin_unlock_bh(&sch->dev->queue_lock);
#endif
+
cbq_destroy_class(cl);
}
- end_bh_atomic();
- return;
}
static int
@@ -1780,7 +1806,7 @@
}
/* Change class parameters */
- start_bh_atomic();
+ sch_tree_lock(sch);
if (cl->next_alive != NULL)
cbq_deactivate_class(cl);
@@ -1812,7 +1838,7 @@
if (cl->q->q.qlen)
cbq_activate_class(cl);
- end_bh_atomic();
+ sch_tree_unlock(sch);
#ifdef CONFIG_NET_ESTIMATOR
if (tca[TCA_RATE-1]) {
@@ -1878,8 +1904,9 @@
cl->allot = parent->allot;
cl->quantum = cl->allot;
cl->weight = cl->R_tab->rate.rate;
+ cl->stats.lock = &sch->dev->queue_lock;
- start_bh_atomic();
+ sch_tree_lock(sch);
cbq_link_class(cl);
cl->borrow = cl->tparent;
if (cl->tparent != &q->link)
@@ -1903,7 +1930,7 @@
#endif
if (tb[TCA_CBQ_FOPT-1])
cbq_set_fopt(cl, RTA_DATA(tb[TCA_CBQ_FOPT-1]));
- end_bh_atomic();
+ sch_tree_unlock(sch);
#ifdef CONFIG_NET_ESTIMATOR
if (tca[TCA_RATE-1])
@@ -1926,7 +1953,7 @@
if (cl->filters || cl->children || cl == &q->link)
return -EBUSY;
- start_bh_atomic();
+ sch_tree_lock(sch);
if (cl->next_alive)
cbq_deactivate_class(cl);
@@ -1948,11 +1975,10 @@
cbq_sync_defmap(cl);
cbq_rmprio(q, cl);
+ sch_tree_unlock(sch);
if (--cl->refcnt == 0)
cbq_destroy_class(cl);
-
- end_bh_atomic();
return 0;
}
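The common thread in these hunks is the retirement of the global
start_bh_atomic()/end_bh_atomic() bracketing in favour of two
finer-grained primitives: sch_tree_lock()/sch_tree_unlock() around
changes to the qdisc tree, and spin_lock_bh(&sch->dev->queue_lock)
around statistics snapshots (which is why the patch also points
q->link.stats.lock and cl->stats.lock at that spinlock). A minimal
sketch of the resulting pattern, built from identifiers that appear in
the hunks above; the function itself is illustrative and not part of
the patch:

/* Hypothetical illustration of the locking convention this patch
 * adopts; not code from the patch itself. */
static int example_change_class(struct Qdisc *sch, struct cbq_class *cl)
{
	/* sch_tree_lock() excludes the dequeue path while the class
	 * tree is reconfigured, replacing start_bh_atomic(). */
	sch_tree_lock(sch);
	if (cl->next_alive != NULL)
		cbq_deactivate_class(cl);
	/* ... modify class parameters here ... */
	if (cl->q->q.qlen)
		cbq_activate_class(cl);
	sch_tree_unlock(sch);
	return 0;
}

The dump paths take only the lighter spin_lock_bh() on the device
queue_lock, since they merely need a consistent snapshot of the
counters that the transmit path updates under that same lock.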