    NET_XMIT_POLICED - dropped by police.
    Expected action: backoff or error to real-time apps.
 
    Auxiliary routines:
 
-   ---requeue
+   ---peek
 
-   requeues once dequeued packet. It is used for non-standard or
-   just buggy devices, which can defer output even if netif_queue_stopped()=0.
+   like dequeue but without removing a packet from the queue
 
    ---reset
 
    returns qdisc to initial state: purge all buffers, clear all
    timers, counters (except for statistics) etc.
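The ->peek() contract described above (return the packet that the next dequeue() would hand out, but leave it on the queue) is what replaces the old requeue path. Below is a minimal user-space sketch of that contract, assuming a toy array-backed FIFO; toy_fifo, toy_peek and toy_dequeue are made-up names for illustration, not kernel API.

#include <stdio.h>

/* Toy FIFO standing in for a qdisc; purely illustrative. */
struct toy_fifo {
	int buf[8];
	int head, tail;
};

/* "peek": report what the next dequeue would return, leave it queued. */
static int *toy_peek(struct toy_fifo *q)
{
	return q->head == q->tail ? NULL : &q->buf[q->head];
}

/* "dequeue": actually remove the head packet. */
static int *toy_dequeue(struct toy_fifo *q)
{
	return q->head == q->tail ? NULL : &q->buf[q->head++];
}

int main(void)
{
	struct toy_fifo q = { .buf = { 10, 20, 30 }, .head = 0, .tail = 3 };

	/* peek() may be called repeatedly; the queue is untouched... */
	printf("peek: %d, peek again: %d\n", *toy_peek(&q), *toy_peek(&q));
	/* ...and dequeue() then returns exactly the peeked packet. */
	printf("dequeue: %d, next peek: %d\n", *toy_dequeue(&q), *toy_peek(&q));
	return 0;
}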
@@ -145,12 +144,18 @@
 		if (!strcmp(qops->id, q->id))
 			goto out;
 
 	if (qops->enqueue == NULL)
 		qops->enqueue = noop_qdisc_ops.enqueue;
-	if (qops->requeue == NULL)
-		qops->requeue = noop_qdisc_ops.requeue;
+	if (qops->peek == NULL) {
+		if (qops->dequeue == NULL) {
+			qops->peek = noop_qdisc_ops.peek;
+		} else {
+			rc = -EINVAL;
+			goto out;
+		}
+	}
 	if (qops->dequeue == NULL)
 		qops->dequeue = noop_qdisc_ops.dequeue;
 
 	qops->next = NULL;
 	*qp = qops;
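The registration hunk above fills in noop defaults and, in the newer version, enforces an invariant: an ops table may leave ->peek() unset only if ->dequeue() is unset as well (both then fall back to the noop implementations); supplying ->dequeue() without ->peek() is rejected with -EINVAL. A small user-space model of just that check, using made-up toy_ops/noop_ops/toy_register names rather than the kernel's Qdisc_ops and registration path:

#include <stdio.h>
#include <errno.h>

/* Toy stand-in for Qdisc_ops: only the hooks the check cares about. */
struct toy_ops {
	const char *id;
	int (*enqueue)(void);
	int (*dequeue)(void);
	int (*peek)(void);
};

static int noop_hook(void) { return 0; }

/* Defaults used when a hook is left NULL, mirroring the role of noop_qdisc_ops. */
static const struct toy_ops noop_ops = {
	.id = "noop", .enqueue = noop_hook, .dequeue = noop_hook, .peek = noop_hook,
};

/* Model of the default-filling/validation step in the new register path. */
static int toy_register(struct toy_ops *ops)
{
	if (ops->enqueue == NULL)
		ops->enqueue = noop_ops.enqueue;
	if (ops->peek == NULL) {
		if (ops->dequeue == NULL)
			ops->peek = noop_ops.peek;   /* neither hook given: both default */
		else
			return -EINVAL;              /* real dequeue but no peek: reject */
	}
	if (ops->dequeue == NULL)
		ops->dequeue = noop_ops.dequeue;
	return 0;
}

int main(void)
{
	struct toy_ops ok  = { .id = "empty" };                       /* no hooks at all */
	struct toy_ops bad = { .id = "half", .dequeue = noop_hook };  /* dequeue only    */

	printf("%s -> %d\n", ok.id, toy_register(&ok));    /* 0 */
	printf("%s -> %d\n", bad.id, toy_register(&bad));  /* -EINVAL */
	return 0;
}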
@@ -182,11 +187,11 @@
 
 /* We know handle. Find qdisc among all qdisc's attached to device
    (root qdisc, all its children, children of children etc.)
  */
 
-struct Qdisc *qdisc_match_from_root(struct Qdisc *root, u32 handle)
+static struct Qdisc *qdisc_match_from_root(struct Qdisc *root, u32 handle)
 {
 	struct Qdisc *q;
 
 	if (!(root->flags & TCQ_F_BUILTIN) &&
 	    root->handle == handle)
@@ -197,56 +202,39 @@
 			return q;
 	}
 	return NULL;
 }
 
-/*
- * This lock is needed until some qdiscs stop calling qdisc_tree_decrease_qlen()
- * without rtnl_lock(); currently hfsc_dequeue(), netem_dequeue(), tbf_dequeue()
- */
-static DEFINE_SPINLOCK(qdisc_list_lock);
-
 static void qdisc_list_add(struct Qdisc *q)
 {
-	if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS)) {
-		spin_lock_bh(&qdisc_list_lock);
+	if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS))
 		list_add_tail(&q->list, &qdisc_root_sleeping(q)->list);
-		spin_unlock_bh(&qdisc_list_lock);
-	}
 }
 
 void qdisc_list_del(struct Qdisc *q)
 {
-	if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS)) {
-		spin_lock_bh(&qdisc_list_lock);
+	if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS))
 		list_del(&q->list);
-		spin_unlock_bh(&qdisc_list_lock);
-	}
 }
 EXPORT_SYMBOL(qdisc_list_del);
 
 struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle)
 {
 	unsigned int i;
 	struct Qdisc *q;
 
-	spin_lock_bh(&qdisc_list_lock);
-
 	for (i = 0; i < dev->num_tx_queues; i++) {
 		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
 		struct Qdisc *txq_root = txq->qdisc_sleeping;
 
 		q = qdisc_match_from_root(txq_root, handle);
 		if (q)
-			goto unlock;
+			goto out;
 	}
 
 	q = qdisc_match_from_root(dev->rx_queue.qdisc_sleeping, handle);
-
-unlock:
-	spin_unlock_bh(&qdisc_list_lock);
-
+out:
 	return q;
 }
 
 static struct Qdisc *qdisc_leaf(struct Qdisc *p, u32 classid)
 {
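qdisc_lookup() above scans the sleeping root qdisc of every TX queue (plus the ingress root) and lets qdisc_match_from_root() match the handle against the root and everything attached below it. A self-contained user-space sketch of that root-plus-descendants match, using a made-up toy_qdisc tree with fixed child slots instead of the kernel's list-based walk:

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

/* Toy qdisc node: a 32-bit handle plus child links, standing in for the
 * root qdisc / child qdisc hierarchy that qdisc_match_from_root() walks. */
struct toy_qdisc {
	uint32_t handle;
	struct toy_qdisc *children[4];
};

/* Match the root itself, then all children, children of children, etc.,
 * mirroring the comment above qdisc_match_from_root(). */
static struct toy_qdisc *match_from_root(struct toy_qdisc *root, uint32_t handle)
{
	size_t i;
	struct toy_qdisc *q;

	if (root == NULL)
		return NULL;
	if (root->handle == handle)
		return root;
	for (i = 0; i < 4; i++) {
		q = match_from_root(root->children[i], handle);
		if (q)
			return q;
	}
	return NULL;
}

int main(void)
{
	struct toy_qdisc leaf  = { .handle = 0x00030000 };
	struct toy_qdisc inner = { .handle = 0x00020000, .children = { &leaf } };
	struct toy_qdisc root  = { .handle = 0x00010000, .children = { &inner } };

	struct toy_qdisc *q = match_from_root(&root, 0x00030000);
	printf("found handle %#x\n", q ? (unsigned)q->handle : 0u);
	return 0;
}

With qdisc_list_lock removed in the newer version, these walks rely on rtnl_lock() alone; per the deleted comment, the spinlock was only needed while hfsc_dequeue(), netem_dequeue() and tbf_dequeue() still called qdisc_tree_decrease_qlen() without RTNL held.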
@@ -460,11 +448,10 @@
 {
 	struct qdisc_watchdog *wd = container_of(timer, struct qdisc_watchdog,
 						 timer);
 
 	wd->qdisc->flags &= ~TCQ_F_THROTTLED;
-	smp_wmb();
 	__netif_schedule(qdisc_root(wd->qdisc));
 
 	return HRTIMER_NORESTART;
 }
 
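The hunk above is the qdisc watchdog's hrtimer callback: when the timer fires it clears TCQ_F_THROTTLED and reschedules the root qdisc so dequeueing can resume, and the newer version drops the smp_wmb() between those two steps. A rough user-space model of the throttle/unthrottle cycle, with made-up toy_* names, no real timers and no memory barriers:

#include <stdio.h>
#include <stdbool.h>

/* Toy stand-in for a rate-limiting qdisc guarded by a watchdog. */
struct toy_qdisc {
	bool throttled;   /* models the TCQ_F_THROTTLED flag */
	int  backlog;     /* packets waiting to be sent      */
};

/* Dequeue path: while throttled, refuse to hand out packets. */
static int toy_dequeue(struct toy_qdisc *q)
{
	if (q->throttled || q->backlog == 0)
		return 0;   /* nothing may be sent right now */
	q->backlog--;
	return 1;           /* one packet handed to the driver */
}

/* Watchdog firing: clear the throttle flag, then kick the transmit
 * path again (the kernel does the kick with __netif_schedule()). */
static void toy_watchdog_fire(struct toy_qdisc *q)
{
	q->throttled = false;
	while (toy_dequeue(q))
		;           /* drain what is now allowed to go out */
}

int main(void)
{
	struct toy_qdisc q = { .throttled = true, .backlog = 3 };

	printf("while throttled: dequeue -> %d\n", toy_dequeue(&q));
	toy_watchdog_fire(&q);
	printf("after watchdog: backlog = %d\n", q.backlog);
	return 0;
}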
@@ -890,13 +877,16 @@
 
 	qdisc_put_stab(sch->stab);
 	sch->stab = stab;
 
 	if (tca[TCA_RATE])
+		/* NB: ignores errors from replace_estimator
+		   because change can't be undone. */
 		gen_replace_estimator(&sch->bstats, &sch->rate_est,
 				      qdisc_root_sleeping_lock(sch),
 				      tca[TCA_RATE]);
+
 	return 0;
 }
 
 struct check_loop_arg
 {