2 * Copyright (c) 1998-2002,2010 Luigi Rizzo, Universita` di Pisa
3 * Portions Copyright (c) 2000 Akamba Corp.
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28 #include <sys/cdefs.h>
29 __FBSDID("$FreeBSD: user/luigi/ipfw3-head/sys/netinet/ipfw/ip_dummynet.c 203340 2010-02-01 12:06:37Z luigi $");
32 * Configuration and internal object management for dummynet.
35 #include "opt_inet6.h"
37 #include <sys/param.h>
38 #include <sys/systm.h>
39 #include <sys/malloc.h>
41 #include <sys/kernel.h>
43 #include <sys/module.h>
46 #include <sys/rwlock.h>
47 #include <sys/socket.h>
48 #include <sys/socketvar.h>
50 #include <sys/taskqueue.h>
51 #include <net/if.h> /* IFNAMSIZ, struct ifaddr, ifq head, lock.h mutex.h */
52 #include <netinet/in.h>
53 #include <netinet/ip_var.h> /* ip_output(), IP_FORWARDING */
54 #include <netinet/ip_fw.h>
55 #include <netinet/ip_dummynet.h>
57 #include <netinet/ipfw/ip_fw_private.h>
58 #include <netinet/ipfw/dn_heap.h>
59 #include <netinet/ipfw/ip_dn_private.h>
60 #include <netinet/ipfw/dn_sched.h>
62 /* which objects to copy */
63 #define DN_C_LINK 0x01
65 #define DN_C_FLOW 0x04
67 #define DN_C_QUEUE 0x10
69 /* we use this argument in case of a schk_new */
75 /*---- callout hooks. ----*/
76 static struct callout dn_timeout;
77 static struct task dn_task;
78 static struct taskqueue *dn_tq = NULL;
80 /* dummynet and ipfw_tick can't be static in Windows */
85 (void)arg; /* UNUSED */
86 taskqueue_enqueue(dn_tq, &dn_task);
92 callout_reset_on(&dn_timeout, 1, dummynet, NULL, 0);
94 /*----- end of callout hooks -----*/
96 /* Return a scheduler descriptor given the type or name. */
97 static struct dn_alg *
98 find_sched_type(int type, char *name)
102 SLIST_FOREACH(d, &dn_cfg.schedlist, next) {
103 if (d->type == type || (name && !strcasecmp(d->name, name)))
106 return NULL; /* not found */
110 ipdn_bound_var(int *v, int dflt, int lo, int hi, const char *msg)
113 const char *op = NULL;
121 } else if (oldv > hi) {
127 printf("%s %s to %d (was %d)\n", op, msg, *v, oldv);
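/*
 * Illustrative sketch (not part of the build): ipdn_bound_var() bounds an
 * integer to [lo, hi], using 'dflt' as a fallback for out-of-range values,
 * and logs the adjustment when 'msg' is non-NULL. A typical call, matching
 * the one in config_fs() below ('qsize' here is an illustrative local):
 *
 *	int qsize = 0;		// not set by the user
 *	ipdn_bound_var(&qsize, 50, 1, dn_cfg.slot_limit, "queue slot size");
 *	// qsize now holds a value in [1, dn_cfg.slot_limit]
 */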
131 /*---- flow_id mask, hash and compare functions ---*/
133 * The flow_id includes the 5-tuple, the queue/pipe number
134 * which we store in the extra area in host order,
135 * and for ipv6 also the flow_id6.
136 * XXX see if we want the tos byte (can store in 'flags')
138 static struct ipfw_flow_id *
139 flow_id_mask(struct ipfw_flow_id *mask, struct ipfw_flow_id *id)
141 int is_v6 = IS_IP6_FLOW_ID(id);
143 id->dst_port &= mask->dst_port;
144 id->src_port &= mask->src_port;
145 id->proto &= mask->proto;
146 id->extra &= mask->extra;
148 APPLY_MASK(&id->dst_ip6, &mask->dst_ip6);
149 APPLY_MASK(&id->src_ip6, &mask->src_ip6);
150 id->flow_id6 &= mask->flow_id6;
152 id->dst_ip &= mask->dst_ip;
153 id->src_ip &= mask->src_ip;
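/*
 * Usage sketch (mirrors what ipdn_q_find() and ipdn_si_find() below do):
 * masking is done in place, so a caller that must preserve the original
 * flow_id works on a private copy before hashing it.
 *
 *	struct ipfw_flow_id masked = *id;	// keep the original intact
 *	flow_id_mask(&fs->fsk_mask, &masked);	// AND each field with the mask
 *	// the hash table layer then reduces flow_id_hash(&masked)
 *	// modulo the number of buckets to pick a slot.
 */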
158 /* computes an OR of two masks, result in dst and also returned */
159 static struct ipfw_flow_id *
160 flow_id_or(struct ipfw_flow_id *src, struct ipfw_flow_id *dst)
162 int is_v6 = IS_IP6_FLOW_ID(dst);
164 dst->dst_port |= src->dst_port;
165 dst->src_port |= src->src_port;
166 dst->proto |= src->proto;
167 dst->extra |= src->extra;
169 #define OR_MASK(_d, _s) \
170 (_d)->__u6_addr.__u6_addr32[0] |= (_s)->__u6_addr.__u6_addr32[0]; \
171 (_d)->__u6_addr.__u6_addr32[1] |= (_s)->__u6_addr.__u6_addr32[1]; \
172 (_d)->__u6_addr.__u6_addr32[2] |= (_s)->__u6_addr.__u6_addr32[2]; \
173 (_d)->__u6_addr.__u6_addr32[3] |= (_s)->__u6_addr.__u6_addr32[3];
174 OR_MASK(&dst->dst_ip6, &src->dst_ip6);
175 OR_MASK(&dst->src_ip6, &src->src_ip6);
177 dst->flow_id6 |= src->flow_id6;
179 dst->dst_ip |= src->dst_ip;
180 dst->src_ip |= src->src_ip;
186 nonzero_mask(struct ipfw_flow_id *m)
188 if (m->dst_port || m->src_port || m->proto || m->extra)
190 if (IS_IP6_FLOW_ID(m)) {
192 m->dst_ip6.__u6_addr.__u6_addr32[0] ||
193 m->dst_ip6.__u6_addr.__u6_addr32[1] ||
194 m->dst_ip6.__u6_addr.__u6_addr32[2] ||
195 m->dst_ip6.__u6_addr.__u6_addr32[3] ||
196 m->src_ip6.__u6_addr.__u6_addr32[0] ||
197 m->src_ip6.__u6_addr.__u6_addr32[1] ||
198 m->src_ip6.__u6_addr.__u6_addr32[2] ||
199 m->src_ip6.__u6_addr.__u6_addr32[3] ||
202 return m->dst_ip || m->src_ip;
206 /* XXX we may want a better hash function */
208 flow_id_hash(struct ipfw_flow_id *id)
212 if (IS_IP6_FLOW_ID(id)) {
213 uint32_t *d = (uint32_t *)&id->dst_ip6;
214 uint32_t *s = (uint32_t *)&id->src_ip6;
215 i = (d[0] ) ^ (d[1]) ^
217 (d[0] >> 15) ^ (d[1] >> 15) ^
218 (d[2] >> 15) ^ (d[3] >> 15) ^
219 (s[0] << 1) ^ (s[1] << 1) ^
220 (s[2] << 1) ^ (s[3] << 1) ^
221 (s[0] << 16) ^ (s[1] << 16) ^
222 (s[2] << 16) ^ (s[3] << 16) ^
223 (id->dst_port << 1) ^ (id->src_port) ^
225 (id->proto ) ^ (id->flow_id6);
227 i = (id->dst_ip) ^ (id->dst_ip >> 15) ^
228 (id->src_ip << 1) ^ (id->src_ip >> 16) ^
230 (id->dst_port << 1) ^ (id->src_port) ^ (id->proto);
235 /* Like bcmp, returns 0 if ids match, 1 otherwise. */
237 flow_id_cmp(struct ipfw_flow_id *id1, struct ipfw_flow_id *id2)
239 int is_v6 = IS_IP6_FLOW_ID(id1);
242 if (IS_IP6_FLOW_ID(id2))
243 return 1; /* different address families */
245 return (id1->dst_ip == id2->dst_ip &&
246 id1->src_ip == id2->src_ip &&
247 id1->dst_port == id2->dst_port &&
248 id1->src_port == id2->src_port &&
249 id1->proto == id2->proto &&
250 id1->extra == id2->extra) ? 0 : 1;
254 !bcmp(&id1->dst_ip6,&id2->dst_ip6, sizeof(id1->dst_ip6)) &&
255 !bcmp(&id1->src_ip6,&id2->src_ip6, sizeof(id1->src_ip6)) &&
256 id1->dst_port == id2->dst_port &&
257 id1->src_port == id2->src_port &&
258 id1->proto == id2->proto &&
259 id1->extra == id2->extra &&
260 id1->flow_id6 == id2->flow_id6) ? 0 : 1;
262 /*--------- end of flow-id mask, hash and compare ---------*/
264 /*--- support functions for the qht hashtable ----
265 * Entries are hashed by flow-id
268 q_hash(uintptr_t key, int flags, void *arg)
270 /* compute the hash slot from the flow id */
271 struct ipfw_flow_id *id = (flags & DNHT_KEY_IS_OBJ) ?
272 &((struct dn_queue *)key)->ni.fid :
273 (struct ipfw_flow_id *)key;
275 return flow_id_hash(id);
279 q_match(void *obj, uintptr_t key, int flags, void *arg)
281 struct dn_queue *o = (struct dn_queue *)obj;
282 struct ipfw_flow_id *id2;
284 if (flags & DNHT_KEY_IS_OBJ) {
285 /* compare pointers */
286 id2 = &((struct dn_queue *)key)->ni.fid;
288 id2 = (struct ipfw_flow_id *)key;
290 return (0 == flow_id_cmp(&o->ni.fid, id2));
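/*
 * Wiring sketch: q_hash/q_match/q_new are the three callbacks handed to the
 * generic hash table code. This is what ipdn_q_find() below does for
 * flowsets that use a mask:
 *
 *	fs->qht = dn_ht_init(NULL, fs->fs.buckets,
 *		offsetof(struct dn_queue, q_next),
 *		q_hash, q_match, q_new);
 *	...
 *	q = dn_ht_find(fs->qht, (uintptr_t)&masked_id, DNHT_INSERT, &template);
 *
 * Passing DNHT_INSERT makes a failed lookup call q_new(), so queues are
 * created on demand for each new (masked) flow.
 */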
294 * create a new queue instance for the given 'key'.
297 q_new(uintptr_t key, int flags, void *arg)
299 struct dn_queue *q, *template = arg;
300 struct dn_fsk *fs = template->fs;
301 int size = sizeof(*q) + fs->sched->fp->q_datalen;
303 q = malloc(size, M_DUMMYNET, M_NOWAIT | M_ZERO);
305 D("no memory for new queue");
309 set_oid(&q->ni.oid, DN_QUEUE, size);
310 if (fs->fs.flags & DN_QHT_HASH)
311 q->ni.fid = *(struct ipfw_flow_id *)key;
313 q->_si = ipdn_si_find(q->fs->sched, &(template->ni.fid));
314 if (q->_si == NULL) {
315 D("no memory for new si");
316 free (q, M_DUMMYNET);
322 if (fs->sched->fp->new_queue)
323 fs->sched->fp->new_queue(q);
324 dn_cfg.queue_count++;
330 * Notify schedulers that a queue is going away.
331 * If (flags & DN_DESTROY), also free the packets.
332 * The version for callbacks is called q_delete_cb().
333 * Returns 1 if the queue is NOT deleted (usually when
334 * the drain routine tries to delete a queue that a scheduler
335 * instance needs), 0 otherwise.
336 * NOTE: flag DN_DEL_SAFE means that the queue should be
337 * deleted only if the scheduler no longer needs it
340 dn_delete_queue(struct dn_queue *q, int flags)
342 struct dn_fsk *fs = q->fs;
344 // D("fs %p si %p\n", fs, q->_si);
345 /* notify the parent scheduler that the queue is going away */
346 if (fs && fs->sched->fp->free_queue)
347 if (fs->sched->fp->free_queue(q, flags & DN_DEL_SAFE) == 1)
348 return 1; /* queue NOT deleted */
351 if (flags & DN_DESTROY) {
353 dn_free_pkts(q->mq.head);
356 bzero(q, sizeof(*q)); // safety
358 dn_cfg.queue_count--;
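/*
 * Flag usage sketch: the drain code near the end of this file calls
 *
 *	dn_delete_queue(q, DN_DESTROY | DN_DEL_SAFE);
 *
 * so pending packets are freed (DN_DESTROY) but the queue itself is kept
 * if the scheduler instance still needs it (DN_DEL_SAFE); in that case the
 * function returns 1 and the caller leaves the queue alone.
 */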
364 q_delete_cb(void *q, void *arg)
366 int flags = (int)(uintptr_t)arg;
367 dn_delete_queue(q, flags);
368 return (flags & DN_DESTROY) ? DNHT_SCAN_DEL : 0;
372 * calls dn_delete_queue/q_delete_cb on all queues,
373 * which notifies the parent scheduler and possibly drains packets.
374 * flags & DN_DESTROY: drains queues and destroy qht;
377 qht_delete(struct dn_fsk *fs, int flags)
379 ND("fs %d start flags %d qht %p",
380 fs->fs.fs_nr, flags, fs->qht);
383 if (fs->fs.flags & DN_QHT_HASH) {
384 dn_ht_scan(fs->qht, q_delete_cb, (void *)(uintptr_t)flags);
385 if (flags & DN_DESTROY) {
386 dn_ht_free(fs->qht, 0);
390 dn_delete_queue((struct dn_queue *)(fs->qht), flags);
391 if (flags & DN_DESTROY)
397 * Find and possibly create the queue for a MULTIQUEUE scheduler.
398 * We never call it for !MULTIQUEUE (the queue is in the sch_inst).
401 ipdn_q_find(struct dn_fsk *fs, struct ipfw_flow_id *id)
403 struct dn_queue template;
407 if (fs->fs.flags & DN_QHT_HASH) {
408 struct ipfw_flow_id masked_id;
409 if (fs->qht == NULL) {
410 fs->qht = dn_ht_init(NULL, fs->fs.buckets,
411 offsetof(struct dn_queue, q_next),
412 q_hash, q_match, q_new);
417 flow_id_mask(&fs->fsk_mask, &masked_id);
418 return dn_ht_find(fs->qht, (uintptr_t)&masked_id,
419 DNHT_INSERT, &template);
422 fs->qht = q_new(0, 0, &template);
423 return (struct dn_queue *)fs->qht;
426 /*--- end of queue hash table ---*/
428 /*--- support functions for the sch_inst hashtable ----
430 * These are hashed by flow-id
433 si_hash(uintptr_t key, int flags, void *arg)
435 /* compute the hash slot from the flow id */
436 struct ipfw_flow_id *id = (flags & DNHT_KEY_IS_OBJ) ?
437 &((struct dn_sch_inst *)key)->ni.fid :
438 (struct ipfw_flow_id *)key;
440 return flow_id_hash(id);
444 si_match(void *obj, uintptr_t key, int flags, void *arg)
446 struct dn_sch_inst *o = obj;
447 struct ipfw_flow_id *id2;
449 id2 = (flags & DNHT_KEY_IS_OBJ) ?
450 &((struct dn_sch_inst *)key)->ni.fid :
451 (struct ipfw_flow_id *)key;
452 return flow_id_cmp(&o->ni.fid, id2) == 0;
455 static int si_reset_credit(void *_si, void *arg); // XXX si_new use this
458 * create a new instance for the given 'key'.
459 * Allocate memory for the instance, delay line and scheduler private data.
462 si_new(uintptr_t key, int flags, void *arg)
464 struct dn_schk *s = arg;
465 struct dn_sch_inst *si;
466 int l = sizeof(*si) + s->fp->si_datalen;
468 si = malloc(l, M_DUMMYNET, M_NOWAIT | M_ZERO);
472 /* Set length only for the part passed up to userland. */
473 set_oid(&si->ni.oid, DN_SCH_I, sizeof(struct dn_flow));
474 set_oid(&(si->dline.oid), DN_DELAY_LINE,
475 sizeof(struct delay_line));
476 /* mark si and dline as outside the event queue */
477 si->ni.oid.id = si->dline.oid.id = -1;
482 if (s->fp->new_sched && s->fp->new_sched(si)) {
483 D("new_sched error");
486 if (s->sch.flags & DN_HAVE_MASK)
487 si->ni.fid = *(struct ipfw_flow_id *)key;
489 si_reset_credit(si, NULL);
496 bzero(si, sizeof(*si)); // safety
497 free(si, M_DUMMYNET);
503 * Callback from siht to delete all scheduler instances. Remove
504 * si and delay line from the system heap, destroy all queues.
505 * We assume that all flowsets have been notified and do not
506 * point to us anymore.
509 si_destroy(void *_si, void *arg)
511 struct dn_sch_inst *si = _si;
512 struct dn_schk *s = si->sched;
513 struct delay_line *dl = &si->dline;
515 if (dl->oid.subtype) /* remove delay line from event heap */
516 heap_extract(&dn_cfg.evheap, dl);
517 if (si->ni.length == 0)
519 dn_free_pkts(dl->mq.head); /* drain delay line */
520 if (si->kflags & DN_ACTIVE) /* remove si from event heap */
521 heap_extract(&dn_cfg.evheap, si);
522 if (s->fp->free_sched)
523 s->fp->free_sched(si);
524 bzero(si, sizeof(*si)); /* safety */
525 free(si, M_DUMMYNET);
527 return DNHT_SCAN_DEL;
531 * Find the scheduler instance for this packet. If we need to apply
532 * a mask, do it on a local copy of the flow_id to preserve the original.
533 * Assume siht is always initialized if we have a mask.
536 ipdn_si_find(struct dn_schk *s, struct ipfw_flow_id *id)
539 if (s->sch.flags & DN_HAVE_MASK) {
540 struct ipfw_flow_id id_t = *id;
541 flow_id_mask(&s->sch.sched_mask, &id_t);
542 return dn_ht_find(s->siht, (uintptr_t)&id_t,
546 s->siht = si_new(0, 0, s);
547 return (struct dn_sch_inst *)s->siht;
550 /* callback to flush credit for the scheduler instance */
552 si_reset_credit(void *_si, void *arg)
554 struct dn_sch_inst *si = _si;
555 struct dn_link *p = &si->sched->link;
557 si->idle_time = dn_cfg.curr_time;
558 si->credit = p->burst + (dn_cfg.io_fast ? p->bandwidth : 0);
563 schk_reset_credit(struct dn_schk *s)
565 if (s->sch.flags & DN_HAVE_MASK)
566 dn_ht_scan(s->siht, si_reset_credit, NULL);
568 si_reset_credit(s->siht, NULL);
570 /*---- end of sch_inst hashtable ---------------------*/
572 /*-------------------------------------------------------
573 * flowset hash (fshash) support. Entries are hashed by fs_nr.
574 * New allocations are put in the fsunlinked list, from which
575 * they are removed when they point to a specific scheduler.
578 fsk_hash(uintptr_t key, int flags, void *arg)
580 uint32_t i = !(flags & DNHT_KEY_IS_OBJ) ? key :
581 ((struct dn_fsk *)key)->fs.fs_nr;
583 return ( (i>>8)^(i>>4)^i );
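/*
 * Worked example (illustrative): for fs_nr = 0x3e8 (1000) the hash is
 *	(0x3e8 >> 8) ^ (0x3e8 >> 4) ^ 0x3e8 = 0x3 ^ 0x3e ^ 0x3e8 = 0x3d5
 * and the hash table layer reduces this value modulo the bucket count.
 */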
587 fsk_match(void *obj, uintptr_t key, int flags, void *arg)
589 struct dn_fsk *fs = obj;
590 int i = !(flags & DNHT_KEY_IS_OBJ) ? key :
591 ((struct dn_fsk *)key)->fs.fs_nr;
593 return (fs->fs.fs_nr == i);
597 fsk_new(uintptr_t key, int flags, void *arg)
601 fs = malloc(sizeof(*fs), M_DUMMYNET, M_NOWAIT | M_ZERO);
603 set_oid(&fs->fs.oid, DN_FS, sizeof(fs->fs));
605 fs->drain_bucket = 0;
606 SLIST_INSERT_HEAD(&dn_cfg.fsu, fs, sch_chain);
612 * detach flowset from its current scheduler. Flags as follows:
613 * DN_DETACH removes from the fsk_list
614 * DN_DESTROY deletes individual queues
615 * DN_DELETE_FS destroys the flowset (otherwise goes in unlinked).
618 fsk_detach(struct dn_fsk *fs, int flags)
620 if (flags & DN_DELETE_FS)
622 ND("fs %d from sched %d flags %s %s %s",
623 fs->fs.fs_nr, fs->fs.sched_nr,
624 (flags & DN_DELETE_FS) ? "DEL_FS":"",
625 (flags & DN_DESTROY) ? "DEL":"",
626 (flags & DN_DETACH) ? "DET":"");
627 if (flags & DN_DETACH) { /* detach from the list */
628 struct dn_fsk_head *h;
629 h = fs->sched ? &fs->sched->fsk_list : &dn_cfg.fsu;
630 SLIST_REMOVE(h, fs, dn_fsk, sch_chain);
632 /* Free the RED parameters, they will be recomputed on
633 * subsequent attach if needed.
636 free(fs->w_q_lookup, M_DUMMYNET);
637 fs->w_q_lookup = NULL;
638 qht_delete(fs, flags);
639 if (fs->sched && fs->sched->fp->free_fsk)
640 fs->sched->fp->free_fsk(fs);
642 if (flags & DN_DELETE_FS) {
643 bzero(fs, sizeof(*fs)); /* safety */
644 free(fs, M_DUMMYNET);
647 SLIST_INSERT_HEAD(&dn_cfg.fsu, fs, sch_chain);
652 * Detach or destroy all flowsets in a list.
653 * flags specifies what to do:
654 * DN_DESTROY: flush all queues
655 * DN_DELETE_FS: DN_DESTROY + destroy flowset
656 * DN_DELETE_FS implies DN_DESTROY
659 fsk_detach_list(struct dn_fsk_head *h, int flags)
662 int n = 0; /* only for stats */
664 ND("head %p flags %x", h, flags);
665 while ((fs = SLIST_FIRST(h))) {
666 SLIST_REMOVE_HEAD(h, sch_chain);
668 fsk_detach(fs, flags);
670 ND("done %d flowsets", n);
674 * called on 'queue X delete' -- removes the flowset from fshash,
675 * deletes all queues for the flowset, and removes the flowset.
678 delete_fs(int i, int locked)
685 fs = dn_ht_find(dn_cfg.fshash, i, DNHT_REMOVE, NULL);
686 if (dn_ht_entries(dn_cfg.fshash) == 0) {
687 dn_ht_free(dn_cfg.fshash, 0);
688 dn_cfg.fshash = NULL;
690 ND("fs %d found %p", i, fs);
692 fsk_detach(fs, DN_DETACH | DN_DELETE_FS);
701 /*----- end of flowset hashtable support -------------*/
703 /*------------------------------------------------------------
704 * Scheduler hash. When searching by index we pass sched_nr,
705 * otherwise we pass struct dn_sch * which is the first field in
706 * struct dn_schk so we can cast between the two. We use this trick
707 * during the create phase (but it should be fixed).
710 schk_hash(uintptr_t key, int flags, void *_arg)
712 uint32_t i = !(flags & DNHT_KEY_IS_OBJ) ? key :
713 ((struct dn_schk *)key)->sch.sched_nr;
714 return ( (i>>8)^(i>>4)^i );
718 schk_match(void *obj, uintptr_t key, int flags, void *_arg)
720 struct dn_schk *s = (struct dn_schk *)obj;
721 int i = !(flags & DNHT_KEY_IS_OBJ) ? key :
722 ((struct dn_schk *)key)->sch.sched_nr;
723 return (s->sch.sched_nr == i);
727 * Create the entry and initialize it with the sched hash if needed.
728 * Leave s->fp unset so we can tell whether a dn_ht_find() returns
729 * a new object or a previously existing one.
732 schk_new(uintptr_t key, int flags, void *arg)
734 struct schk_new_arg *a = arg;
736 int l = sizeof(*s) +a->fp->schk_datalen;
738 s = malloc(l, M_DUMMYNET, M_NOWAIT | M_ZERO);
741 set_oid(&s->link.oid, DN_LINK, sizeof(s->link));
742 s->sch = *a->sch; // copy initial values
743 s->link.link_nr = s->sch.sched_nr;
744 SLIST_INIT(&s->fsk_list);
745 /* initialize the hash table or create the single instance */
746 s->fp = a->fp; /* si_new needs this */
748 if (s->sch.flags & DN_HAVE_MASK) {
749 s->siht = dn_ht_init(NULL, s->sch.buckets,
750 offsetof(struct dn_sch_inst, si_next),
751 si_hash, si_match, si_new);
752 if (s->siht == NULL) {
757 s->fp = NULL; /* mark as a new scheduler */
763 * Callback for sched delete. Notify all attached flowsets to
764 * detach from the scheduler, destroy the internal flowset, and
765 * all instances. The scheduler goes away too.
766 * arg is 0 (only detach flowsets and destroy instances)
767 * DN_DESTROY (detach & delete queues, delete schk)
768 * or DN_DELETE_FS (delete queues and flowsets, delete schk)
771 schk_delete_cb(void *obj, void *arg)
773 struct dn_schk *s = obj;
776 ND("sched %d arg %s%s",
778 a&DN_DESTROY ? "DEL ":"",
779 a&DN_DELETE_FS ? "DEL_FS":"");
781 fsk_detach_list(&s->fsk_list, arg ? DN_DESTROY : 0);
782 /* no more flowset pointing to us now */
783 if (s->sch.flags & DN_HAVE_MASK) {
784 dn_ht_scan(s->siht, si_destroy, NULL);
785 dn_ht_free(s->siht, 0);
788 si_destroy(s->siht, NULL);
790 free(s->profile, M_DUMMYNET);
796 bzero(s, sizeof(*s)); // safety
797 free(obj, M_DUMMYNET);
799 return DNHT_SCAN_DEL;
803 * called on a 'sched X delete' command. Deletes a single scheduler.
804 * This is done by removing from the schedhash, unlinking all
805 * flowsets and deleting their traffic.
812 s = dn_ht_find(dn_cfg.schedhash, i, DNHT_REMOVE, NULL);
813 if (dn_ht_entries(dn_cfg.schedhash) == 0) {
814 dn_ht_free(dn_cfg.schedhash, 0);
815 dn_cfg.schedhash = NULL;
820 delete_fs(i + DN_MAX_ID, 1); /* first delete internal fs */
821 /* then detach flowsets, delete traffic */
822 schk_delete_cb(s, (void*)(uintptr_t)DN_DESTROY);
825 /*--- end of schk hashtable support ---*/
828 copy_obj(char **start, char *end, void *_o, const char *msg, int i)
830 struct dn_id *o = _o;
831 int have = end - *start;
833 if (have < o->len || o->len == 0 || o->type == 0) {
834 D("(WARN) type %d %s %d have %d need %d",
835 o->type, msg, i, have, o->len);
838 ND("type %d %s %d len %d", o->type, msg, i, o->len);
839 bcopy(_o, *start, o->len);
840 if (o->type == DN_LINK) {
841 /* Adjust burst parameter for link */
842 struct dn_link *l = (struct dn_link *)*start;
843 l->burst = div64(l->burst, 8 * hz);
844 } else if (o->type == DN_SCH) {
845 /* Set id->id to the number of instances */
846 struct dn_schk *s = _o;
847 struct dn_id *id = (struct dn_id *)(*start);
848 id->id = (s->sch.flags & DN_HAVE_MASK) ?
849 dn_ht_entries(s->siht) : (s->siht ? 1 : 0);
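/*
 * Cursor sketch: the copy_* helpers share one pattern -- 'start' is a
 * moving write pointer into the userland buffer and 'end' marks its limit;
 * copy_obj() returns nonzero when the object does not fit and otherwise is
 * expected to advance the cursor by the object length. Illustrative use
 * ('p', 'lim', 'buf' and 'bufsize' are assumptions, not names from this file):
 *
 *	char *p = buf, *lim = buf + bufsize;
 *	if (copy_obj(&p, lim, &s->link, "link", n))
 *		return DNHT_SCAN_END;	// out of space, stop the scan
 */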
855 /* Specific function to copy a queue.
856 * Copies only the user-visible part of a queue (which is in
857 * a struct dn_flow), and sets len accordingly.
860 copy_obj_q(char **start, char *end, void *_o, const char *msg, int i)
862 struct dn_id *o = _o;
863 int have = end - *start;
864 int len = sizeof(struct dn_flow); /* see above comment */
866 if (have < len || o->len == 0 || o->type != DN_QUEUE) {
867 D("ERROR type %d %s %d have %d need %d",
868 o->type, msg, i, have, len);
871 ND("type %d %s %d len %d", o->type, msg, i, len);
872 bcopy(_o, *start, len);
873 ((struct dn_id*)(*start))->len = len;
879 copy_q_cb(void *obj, void *arg)
881 struct dn_queue *q = obj;
882 struct copy_args *a = arg;
883 struct dn_flow *ni = (struct dn_flow *)(*a->start);
884 if (copy_obj_q(a->start, a->end, &q->ni, "queue", -1))
885 return DNHT_SCAN_END;
886 ni->oid.type = DN_FLOW; /* override the DN_QUEUE */
887 ni->oid.id = si_hash((uintptr_t)&ni->fid, 0, NULL);
892 copy_q(struct copy_args *a, struct dn_fsk *fs, int flags)
896 if (fs->fs.flags & DN_QHT_HASH)
897 dn_ht_scan(fs->qht, copy_q_cb, a);
899 copy_q_cb(fs->qht, a);
904 * This routine only copies the initial part of a profile ? XXX
905 * XXX marta: I think this routine is called to print a summary
906 * of the pipe configuration and does not need to show the
907 * profile samples list.
910 copy_profile(struct copy_args *a, struct dn_profile *p)
912 int have = a->end - *a->start;
913 /* XXX here we check for max length */
914 int profile_len = sizeof(struct dn_profile);
918 if (have < profile_len) {
919 D("error have %d need %d", have, profile_len);
922 bcopy(p, *a->start, profile_len);
923 ((struct dn_id *)(*a->start))->len = profile_len;
924 *a->start += profile_len;
929 copy_flowset(struct copy_args *a, struct dn_fsk *fs, int flags)
931 struct dn_fs *ufs = (struct dn_fs *)(*a->start);
934 ND("flowset %d", fs->fs.fs_nr);
935 if (copy_obj(a->start, a->end, &fs->fs, "flowset", fs->fs.fs_nr))
936 return DNHT_SCAN_END;
937 ufs->oid.id = (fs->fs.flags & DN_QHT_HASH) ?
938 dn_ht_entries(fs->qht) : (fs->qht ? 1 : 0);
939 if (flags) { /* copy queues */
946 copy_si_cb(void *obj, void *arg)
948 struct dn_sch_inst *si = obj;
949 struct copy_args *a = arg;
950 struct dn_flow *ni = (struct dn_flow *)(*a->start);
951 if (copy_obj(a->start, a->end, &si->ni, "inst",
952 si->sched->sch.sched_nr))
953 return DNHT_SCAN_END;
954 ni->oid.type = DN_FLOW; /* override the DN_SCH_I */
955 ni->oid.id = si_hash((uintptr_t)si, DNHT_KEY_IS_OBJ, NULL);
960 copy_si(struct copy_args *a, struct dn_schk *s, int flags)
962 if (s->sch.flags & DN_HAVE_MASK)
963 dn_ht_scan(s->siht, copy_si_cb, a);
965 copy_si_cb(s->siht, a);
970 * compute a list of children of a scheduler and copy up
973 copy_fsk_list(struct copy_args *a, struct dn_schk *s, int flags)
979 int n = 0, space = sizeof(*o);
980 SLIST_FOREACH(fs, &s->fsk_list, sch_chain) {
981 if (fs->fs.fs_nr < DN_MAX_ID)
984 space += n * sizeof(uint32_t);
985 DX(3, "sched %d has %d flowsets", s->sch.sched_nr, n);
986 if (a->end - *(a->start) < space)
987 return DNHT_SCAN_END;
988 o = (struct dn_id *)(*(a->start));
992 p = (uint32_t *)(o+1);
993 SLIST_FOREACH(fs, &s->fsk_list, sch_chain)
994 if (fs->fs.fs_nr < DN_MAX_ID)
1000 copy_data_helper(void *_o, void *_arg)
1002 struct copy_args *a = _arg;
1003 uint32_t *r = a->extra->r; /* start of first range */
1004 uint32_t *lim; /* first invalid pointer */
1007 lim = (uint32_t *)((char *)(a->extra) + a->extra->o.len);
1009 if (a->type == DN_LINK || a->type == DN_SCH) {
1010 /* pipe|sched show, we receive a dn_schk */
1011 struct dn_schk *s = _o;
1013 n = s->sch.sched_nr;
1014 if (a->type == DN_SCH && n >= DN_MAX_ID)
1015 return 0; /* not a scheduler */
1016 if (a->type == DN_LINK && n <= DN_MAX_ID)
1017 return 0; /* not a pipe */
1019 /* see if the object is within one of our ranges */
1020 for (;r < lim; r += 2) {
1021 if (n < r[0] || n > r[1])
1023 /* Found a valid entry, copy and we are done */
1024 if (a->flags & DN_C_LINK) {
1025 if (copy_obj(a->start, a->end,
1026 &s->link, "link", n))
1027 return DNHT_SCAN_END;
1028 if (copy_profile(a, s->profile))
1029 return DNHT_SCAN_END;
1030 if (copy_flowset(a, s->fs, 0))
1031 return DNHT_SCAN_END;
1033 if (a->flags & DN_C_SCH) {
1034 if (copy_obj(a->start, a->end,
1035 &s->sch, "sched", n))
1036 return DNHT_SCAN_END;
1037 /* list all attached flowsets */
1038 if (copy_fsk_list(a, s, 0))
1039 return DNHT_SCAN_END;
1041 if (a->flags & DN_C_FLOW)
1045 } else if (a->type == DN_FS) {
1046 /* queue show, skip internal flowsets */
1047 struct dn_fsk *fs = _o;
1052 /* see if the object is within one of our ranges */
1053 for (;r < lim; r += 2) {
1054 if (n < r[0] || n > r[1])
1056 if (copy_flowset(a, fs, 0))
1057 return DNHT_SCAN_END;
1059 break; /* we are done */
1065 static inline struct dn_schk *
1066 locate_scheduler(int i)
1068 return dn_ht_find(dn_cfg.schedhash, i, 0, NULL);
1072 * RED parameters are in fixed-point arithmetic.
1075 config_red(struct dn_fsk *fs)
1077 int64_t s, idle, weight, w0;
1080 fs->w_q = fs->fs.w_q;
1081 fs->max_p = fs->fs.max_p;
1083 /* Doing stuff that was in userland */
1084 i = fs->sched->link.bandwidth;
1086 hz * dn_cfg.red_avg_pkt_size * 8 * SCALE(1) / i;
1088 idle = div64((s * 3) , fs->w_q); /* s, fs->w_q scaled; idle not scaled */
1089 fs->lookup_step = div64(idle , dn_cfg.red_lookup_depth);
1090 /* fs->lookup_step not scaled, */
1091 if (!fs->lookup_step)
1092 fs->lookup_step = 1;
1093 w0 = weight = SCALE(1) - fs->w_q; //fs->w_q scaled
1095 for (t = fs->lookup_step; t > 1; --t)
1096 weight = SCALE_MUL(weight, w0);
1097 fs->lookup_weight = (int)(weight); // scaled
1099 /* Now doing stuff that was in kerneland */
1100 fs->min_th = SCALE(fs->fs.min_th);
1101 fs->max_th = SCALE(fs->fs.max_th);
1103 fs->c_1 = fs->max_p / (fs->fs.max_th - fs->fs.min_th);
1104 fs->c_2 = SCALE_MUL(fs->c_1, SCALE(fs->fs.min_th));
1106 if (fs->fs.flags & DN_IS_GENTLE_RED) {
1107 fs->c_3 = (SCALE(1) - fs->max_p) / fs->fs.max_th;
1108 fs->c_4 = SCALE(1) - 2 * fs->max_p;
1111 /* If the lookup table already exists, free it and create it again. */
1112 if (fs->w_q_lookup) {
1113 free(fs->w_q_lookup, M_DUMMYNET);
1114 fs->w_q_lookup = NULL;
1116 if (dn_cfg.red_lookup_depth == 0) {
1117 printf("\ndummynet: net.inet.ip.dummynet.red_lookup_depth"
1119 fs->fs.flags &= ~DN_IS_RED;
1120 fs->fs.flags &= ~DN_IS_GENTLE_RED;
1123 fs->lookup_depth = dn_cfg.red_lookup_depth;
1124 fs->w_q_lookup = (u_int *)malloc(fs->lookup_depth * sizeof(int),
1125 M_DUMMYNET, M_NOWAIT);
1126 if (fs->w_q_lookup == NULL) {
1127 printf("dummynet: sorry, cannot allocate red lookup table\n");
1128 fs->fs.flags &= ~DN_IS_RED;
1129 fs->fs.flags &= ~DN_IS_GENTLE_RED;
1133 /* Fill the lookup table with (1 - w_q)^x */
1134 fs->w_q_lookup[0] = SCALE(1) - fs->w_q;
1136 for (i = 1; i < fs->lookup_depth; i++)
1138 SCALE_MUL(fs->w_q_lookup[i - 1], fs->lookup_weight);
1140 if (dn_cfg.red_avg_pkt_size < 1)
1141 dn_cfg.red_avg_pkt_size = 512;
1142 fs->avg_pkt_size = dn_cfg.red_avg_pkt_size;
1143 if (dn_cfg.red_max_pkt_size < 1)
1144 dn_cfg.red_max_pkt_size = 1500;
1145 fs->max_pkt_size = dn_cfg.red_max_pkt_size;
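/*
 * Fixed-point sketch (assumes SCALE() is a 16-bit left shift as defined in
 * ip_dn_private.h): w_q arrives from userland already scaled, so a weight
 * of 0.002 is stored as roughly 0.002 * 2^16 = 131, and
 *	SCALE(1) - fs->w_q = 65536 - 131 = 65405	// ~0.998, scaled
 * SCALE_MUL(a, b) multiplies two scaled values and shifts the product back
 * down, keeping every intermediate result in the same fixed-point format.
 */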
1150 /* Scan all flowset attached to this scheduler and update red */
1152 update_red(struct dn_schk *s)
1155 SLIST_FOREACH(fs, &s->fsk_list, sch_chain) {
1156 if (fs && (fs->fs.flags & DN_IS_RED))
1161 /* attach flowset to scheduler s, possibly requeue */
1163 fsk_attach(struct dn_fsk *fs, struct dn_schk *s)
1165 ND("remove fs %d from fsunlinked, link to sched %d",
1166 fs->fs.fs_nr, s->sch.sched_nr);
1167 SLIST_REMOVE(&dn_cfg.fsu, fs, dn_fsk, sch_chain);
1169 SLIST_INSERT_HEAD(&s->fsk_list, fs, sch_chain);
1172 /* XXX compute fsk_mask */
1173 fs->fsk_mask = fs->fs.flow_mask;
1174 if (fs->sched->sch.flags & DN_HAVE_MASK)
1175 flow_id_or(&fs->sched->sch.sched_mask, &fs->fsk_mask);
1178 * we must drain qht according to the old
1179 * type, and reinsert according to the new one.
1180 * The requeue is complex -- in general we need to
1181 * reclassify every single packet.
1182 * For the time being, let's hope qht is never set
1183 * when we reach this point.
1185 D("XXX TODO requeue from fs %d to sch %d",
1186 fs->fs.fs_nr, s->sch.sched_nr);
1189 /* set the new type for qht */
1190 if (nonzero_mask(&fs->fsk_mask))
1191 fs->fs.flags |= DN_QHT_HASH;
1193 fs->fs.flags &= ~DN_QHT_HASH;
1195 /* XXX config_red() can fail... */
1196 if (fs->fs.flags & DN_IS_RED)
1200 /* update all flowsets which may refer to this scheduler */
1202 update_fs(struct dn_schk *s)
1204 struct dn_fsk *fs, *tmp;
1206 SLIST_FOREACH_SAFE(fs, &dn_cfg.fsu, sch_chain, tmp) {
1207 if (s->sch.sched_nr != fs->fs.sched_nr) {
1208 D("fs %d for sch %d not %d still unlinked",
1209 fs->fs.fs_nr, fs->fs.sched_nr,
1218 * Configuration -- to preserve backward compatibility we use
1219 * the following scheme (N is 65536)
1220 *      NUMBER         SCHED    LINK     FLOWSET
1221 *     1 ..  N-1       (1)WFQ   (2)WFQ   (3)queue
1222 *     N+1 .. 2N-1     (4)FIFO  (5)FIFO  (6)FIFO for sched 1..N-1
1223 *    2N+1 .. 3N-1     --       --       (7)FIFO for sched N+1..2N-1
1225 * "pipe i config" configures #1, #2 and #3
1226 * "sched i config" configures #1 and possibly #6
1227 * "queue i config" configures #3
1228 * #1 is configured with 'pipe i config' or 'sched i config'
1229 * #2 is configured with 'pipe i config'; 'sched i config' creates
1230 * it if it does not already exist
1231 * #3 is configured with 'queue i config'
1232 * #4 is automatically configured after #1, can only be FIFO
1233 * #5 is automatically configured after #2
1234 * #6 is automatically created when #1 is !MULTIQUEUE,
1235 * and can be updated.
1236 * #7 is automatically configured after #2
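 *
 * Worked example (illustrative, N = 65536): "pipe 10 config" acts on
 * scheduler #10, link #10 and flowset #10, while the automatically created
 * FIFO companions (#4/#5 above) use number 10 + N = 65546; likewise the
 * internal FIFO flowset needed by a !MULTIQUEUE scheduler i is numbered
 * i + N (see fs.fs_nr = i + DN_MAX_ID in config_sched() below).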
1240 * configure a link (and its FIFO instance)
1243 config_link(struct dn_link *p, struct dn_id *arg)
1247 if (p->oid.len != sizeof(*p)) {
1248 D("invalid pipe len %d", p->oid.len);
1252 if (i <= 0 || i >= DN_MAX_ID)
1255 * The config program passes parameters as follows:
1256 * bw = bits/second (0 means no limits),
1257 * delay = ms (must be translated into ticks),
1258 * qsize = slots or bytes.
1261 p->delay = (p->delay * hz) / 1000;
1262 /* Scale burst size: bytes -> bits * hz */
1266 /* do it twice, base link and FIFO link */
1267 for (; i < 2*DN_MAX_ID; i += DN_MAX_ID) {
1268 struct dn_schk *s = locate_scheduler(i);
1271 D("sched %d not found", i);
1274 /* remove profile if exists */
1276 free(s->profile, M_DUMMYNET);
1279 /* copy all parameters */
1280 s->link.oid = p->oid;
1281 s->link.link_nr = i;
1282 s->link.delay = p->delay;
1283 if (s->link.bandwidth != p->bandwidth) {
1284 /* XXX bandwidth changes, need to update red params */
1285 s->link.bandwidth = p->bandwidth;
1288 s->link.burst = p->burst;
1289 schk_reset_credit(s);
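/*
 * Unit-conversion sketch for the code above: assuming hz = 1000, a userland
 * "delay 20" (ms) becomes
 *	p->delay = (20 * hz) / 1000 = 20 ticks
 * and the burst value is kept internally scaled by 8 * hz (bytes ->
 * bits * hz), which is why copy_obj() divides it back by 8 * hz before
 * exporting the link to userland.
 */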
1297 * configure a flowset. Can be called from inside with locked=1.
1299 static struct dn_fsk *
1300 config_fs(struct dn_fs *nfs, struct dn_id *arg, int locked)
1305 if (nfs->oid.len != sizeof(*nfs)) {
1306 D("invalid flowset len %d", nfs->oid.len);
1310 if (i <= 0 || i >= 3*DN_MAX_ID)
1312 ND("flowset %d", i);
1313 /* XXX other sanity checks */
1314 if (nfs->flags & DN_QSIZE_BYTES) {
1315 ipdn_bound_var(&nfs->qsize, 16384,
1316 1500, dn_cfg.byte_limit, NULL); // "queue byte size");
1318 ipdn_bound_var(&nfs->qsize, 50,
1319 1, dn_cfg.slot_limit, NULL); // "queue slot size");
1321 if (nfs->flags & DN_HAVE_MASK) {
1322 /* make sure we have some buckets */
1323 ipdn_bound_var((int *)&nfs->buckets, dn_cfg.hash_size,
1324 1, dn_cfg.max_hash_size, "flowset buckets");
1326 nfs->buckets = 1; /* we only need 1 */
1330 if (dn_cfg.fshash == NULL)
1331 dn_cfg.fshash = dn_ht_init(NULL, dn_cfg.hash_size,
1332 offsetof(struct dn_fsk, fsk_next),
1333 fsk_hash, fsk_match, fsk_new);
1334 do { /* exit with break when done */
1336 int flags = nfs->sched_nr ? DNHT_INSERT : 0;
1338 int oldc = dn_cfg.fsk_count;
1339 fs = dn_ht_find(dn_cfg.fshash, i, flags, NULL);
1341 D("missing sched for flowset %d", i);
1344 /* grab some defaults from the existing one */
1345 if (nfs->sched_nr == 0) /* reuse */
1346 nfs->sched_nr = fs->fs.sched_nr;
1347 for (j = 0; j < sizeof(nfs->par)/sizeof(nfs->par[0]); j++) {
1348 if (nfs->par[j] == -1) /* reuse */
1349 nfs->par[j] = fs->fs.par[j];
1351 if (bcmp(&fs->fs, nfs, sizeof(*nfs)) == 0) {
1352 ND("flowset %d unchanged", i);
1353 break; /* no change, nothing to do */
1355 if (oldc != dn_cfg.fsk_count) /* new item */
1357 s = locate_scheduler(nfs->sched_nr);
1358 /* detach from old scheduler if needed, preserving
1359 * queues if we need to reattach. Then update the
1360 * configuration, and possibly attach to the new sched.
1362 DX(2, "fs %d changed sched %d@%p to %d@%p",
1364 fs->fs.sched_nr, fs->sched, nfs->sched_nr, s);
1366 int flags = s ? DN_DETACH : (DN_DETACH | DN_DESTROY);
1367 flags |= DN_DESTROY; /* XXX temporary */
1368 fsk_detach(fs, flags);
1370 fs->fs = *nfs; /* copy configuration */
1380 * config/reconfig a scheduler and its FIFO variant.
1381 * For !MULTIQUEUE schedulers, also set up the flowset.
1383 * On reconfigurations (detected because s->fp is set),
1384 * detach existing flowsets preserving traffic, preserve link,
1385 * and delete the old scheduler creating a new one.
1388 config_sched(struct dn_sch *_nsch, struct dn_id *arg)
1391 struct schk_new_arg a; /* argument for schk_new */
1393 struct dn_link p; /* copy of oldlink */
1394 struct dn_profile *pf = NULL; /* copy of old link profile */
1395 /* Used to preserve the mask parameter */
1396 struct ipfw_flow_id new_mask;
1397 int new_buckets = 0;
1403 if (a.sch->oid.len != sizeof(*a.sch)) {
1404 D("bad sched len %d", a.sch->oid.len);
1407 i = a.sch->sched_nr;
1408 if (i <= 0 || i >= DN_MAX_ID)
1410 /* make sure we have some buckets */
1411 if (a.sch->flags & DN_HAVE_MASK)
1412 ipdn_bound_var((int *)&a.sch->buckets, dn_cfg.hash_size,
1413 1, dn_cfg.max_hash_size, "sched buckets");
1414 /* XXX other sanity checks */
1415 bzero(&p, sizeof(p));
1417 pipe_cmd = a.sch->flags & DN_PIPE_CMD;
1418 a.sch->flags &= ~DN_PIPE_CMD; //XXX do it even if is not set?
1420 /* Copy mask parameter */
1421 new_mask = a.sch->sched_mask;
1422 new_buckets = a.sch->buckets;
1423 new_flags = a.sch->flags;
1426 if (dn_cfg.schedhash == NULL)
1427 dn_cfg.schedhash = dn_ht_init(NULL, dn_cfg.hash_size,
1428 offsetof(struct dn_schk, schk_next),
1429 schk_hash, schk_match, schk_new);
1430 again: /* run twice, for wfq and fifo */
1432 * lookup the type. If not supplied, use the previous one
1433 * or default to WF2Q+. Otherwise, return an error.
1436 a.fp = find_sched_type(a.sch->oid.subtype, a.sch->name);
1438 /* found. Lookup or create entry */
1439 s = dn_ht_find(dn_cfg.schedhash, i, DNHT_INSERT, &a);
1440 } else if (a.sch->oid.subtype == 0 && !a.sch->name[0]) {
1441 /* No type. search existing s* or retry with WF2Q+ */
1442 s = dn_ht_find(dn_cfg.schedhash, i, 0, &a);
1445 /* Scheduler exists, skip to FIFO scheduler
1446 * if command was pipe config...
1451 /* New scheduler, create a wf2q+ with no mask
1452 * if command was pipe config...
1455 /* clear mask parameter */
1456 bzero(&a.sch->sched_mask, sizeof(new_mask));
1458 a.sch->flags &= ~DN_HAVE_MASK;
1460 a.sch->oid.subtype = DN_SCHED_WF2QP;
1464 D("invalid scheduler type %d %s",
1465 a.sch->oid.subtype, a.sch->name);
1469 /* normalize name and subtype */
1470 a.sch->oid.subtype = a.fp->type;
1471 bzero(a.sch->name, sizeof(a.sch->name));
1472 strlcpy(a.sch->name, a.fp->name, sizeof(a.sch->name));
1474 D("cannot allocate scheduler %d", i);
1477 /* restore existing link if any */
1480 if (!pf || pf->link_nr != p.link_nr) { /* no saved value */
1481 s->profile = NULL; /* XXX maybe not needed */
1483 size_t pf_size = sizeof(struct dn_profile) +
1484 s->profile->samples_no * sizeof(int);
1486 s->profile = malloc(pf_size,
1487 M_DUMMYNET, M_NOWAIT | M_ZERO);
1488 if (s->profile == NULL) {
1489 D("cannot allocate profile");
1492 bcopy(pf, s->profile, pf_size);
1496 if (s->fp == NULL) {
1497 DX(2, "sched %d new type %s", i, a.fp->name);
1498 } else if (s->fp != a.fp ||
1499 bcmp(a.sch, &s->sch, sizeof(*a.sch)) ) {
1500 /* already existing. */
1501 DX(2, "sched %d type changed from %s to %s",
1502 i, s->fp->name, a.fp->name);
1503 DX(4, " type/sub %d/%d -> %d/%d",
1504 s->sch.oid.type, s->sch.oid.subtype,
1505 a.sch->oid.type, a.sch->oid.subtype);
1506 if (s->link.link_nr == 0)
1507 D("XXX WARNING link 0 for sched %d", i);
1508 p = s->link; /* preserve link */
1509 if (s->profile) {/* preserve profile */
1511 pf = malloc(sizeof(*pf),
1512 M_DUMMYNET, M_NOWAIT | M_ZERO);
1513 if (pf) /* XXX should issue a warning otherwise */
1514 bcopy(s->profile, pf, sizeof(*pf));
1516 /* remove from the hash */
1517 dn_ht_find(dn_cfg.schedhash, i, DNHT_REMOVE, NULL);
1518 /* Detach flowsets, preserve queues. */
1519 // schk_delete_cb(s, NULL);
1520 // XXX temporarily, kill queues
1521 schk_delete_cb(s, (void *)DN_DESTROY);
1524 DX(4, "sched %d unchanged type %s", i, a.fp->name);
1526 /* complete initialization */
1530 // XXX schk_reset_credit(s);
1531 /* create the internal flowset if needed,
1532 * trying to reuse existing ones if available
1534 if (!(s->fp->flags & DN_MULTIQUEUE) && !s->fs) {
1535 s->fs = dn_ht_find(dn_cfg.fshash, i, 0, NULL);
1538 bzero(&fs, sizeof(fs));
1539 set_oid(&fs.oid, DN_FS, sizeof(fs));
1540 fs.fs_nr = i + DN_MAX_ID;
1542 s->fs = config_fs(&fs, NULL, 1 /* locked */);
1545 schk_delete_cb(s, (void *)DN_DESTROY);
1546 D("error creating internal fs for %d", i);
1550 /* call init function after the flowset is created */
1555 if (i < DN_MAX_ID) { /* now configure the FIFO instance */
1558 /* Restore mask parameter for FIFO */
1559 a.sch->sched_mask = new_mask;
1560 a.sch->buckets = new_buckets;
1561 a.sch->flags = new_flags;
1563 /* sched config shouldn't modify the FIFO scheduler */
1564 if (dn_ht_find(dn_cfg.schedhash, i, 0, &a) != NULL) {
1565 /* FIFO already exist, don't touch it */
1566 err = 0; /* and this is not an error */
1570 a.sch->sched_nr = i;
1571 a.sch->oid.subtype = DN_SCHED_FIFO;
1572 bzero(a.sch->name, sizeof(a.sch->name));
1579 free(pf, M_DUMMYNET);
1584 * attach a profile to a link
1587 config_profile(struct dn_profile *pf, struct dn_id *arg)
1590 int i, olen, err = 0;
1592 if (pf->oid.len < sizeof(*pf)) {
1593 D("short profile len %d", pf->oid.len);
1597 if (i <= 0 || i >= DN_MAX_ID)
1599 /* XXX other sanity checks */
1601 for (; i < 2*DN_MAX_ID; i += DN_MAX_ID) {
1602 s = locate_scheduler(i);
1610 * If we had a profile and the new one does not fit,
1611 * or it is deleted, then we need to free memory.
1613 if (s->profile && (pf->samples_no == 0 ||
1614 s->profile->oid.len < pf->oid.len)) {
1615 free(s->profile, M_DUMMYNET);
1618 if (pf->samples_no == 0)
1621 * new profile, possibly allocate memory
1624 if (s->profile == NULL)
1625 s->profile = malloc(pf->oid.len,
1626 M_DUMMYNET, M_NOWAIT | M_ZERO);
1627 if (s->profile == NULL) {
1628 D("no memory for profile %d", i);
1632 /* preserve larger length XXX double check */
1633 olen = s->profile->oid.len;
1634 if (olen < pf->oid.len)
1636 bcopy(pf, s->profile, pf->oid.len);
1637 s->profile->oid.len = olen;
1644 * Delete all objects:
1647 dummynet_flush(void)
1650 /* delete all schedulers and related links/queues/flowsets */
1651 dn_ht_scan(dn_cfg.schedhash, schk_delete_cb,
1652 (void *)(uintptr_t)DN_DELETE_FS);
1653 /* delete all remaining (unlinked) flowsets */
1654 DX(4, "still %d unlinked fs", dn_cfg.fsk_count);
1655 dn_ht_free(dn_cfg.fshash, DNHT_REMOVE);
1656 fsk_detach_list(&dn_cfg.fsu, DN_DELETE_FS);
1658 dn_ht_free(dn_cfg.schedhash, DNHT_REMOVE);
1659 /* Reinitialize system heap... */
1660 heap_init(&dn_cfg.evheap, 16, offsetof(struct dn_id, id));
1664 * Main handler for configuration. We are guaranteed to be called
1665 * with an oid which is at least a dn_id.
1666 * - the first object is the command (config, delete, flush, ...)
1667 * - config_link must be issued after the corresponding config_sched
1668 * - parameters (DN_TXT) for an object must precede the object
1669 * processed on a config_sched.
1672 do_config(void *p, int l)
1674 struct dn_id *next, *o;
1675 int err = 0, err2 = 0;
1676 struct dn_id *arg = NULL;
1680 if (o->id != DN_API_VERSION) {
1681 D("invalid api version got %d need %d",
1682 o->id, DN_API_VERSION);
1685 for (; l >= sizeof(*o); o = next) {
1686 struct dn_id *prev = arg;
1687 if (o->len < sizeof(*o) || l < o->len) {
1688 D("bad len o->len %d len %d", o->len, l);
1693 next = (struct dn_id *)((char *)o + o->len);
1697 D("cmd %d not implemented", o->type);
1700 #ifdef EMULATE_SYSCTL
1701 /* sysctl emulation.
1702 * if we recognize the command, jump to the correct
1703 * handler and return
1706 err = kesysctl_emu_set(p, l);
1710 case DN_CMD_CONFIG: /* simply a header */
1714 /* the argument is in the first uintptr_t after o */
1715 a = (uintptr_t *)(o+1);
1716 if (o->len < sizeof(*o) + sizeof(*a)) {
1720 switch (o->subtype) {
1722 /* delete base and derived schedulers */
1724 err = delete_schk(*a);
1725 err2 = delete_schk(*a + DN_MAX_ID);
1732 D("invalid delete type %d",
1738 err = (*a <1 || *a >= DN_MAX_ID) ?
1739 EINVAL : delete_fs(*a, 0) ;
1749 case DN_TEXT: /* store argument for the next block */
1754 err = config_link((struct dn_link *)o, arg);
1757 err = config_profile((struct dn_profile *)o, arg);
1760 err = config_sched((struct dn_sch *)o, arg);
1763 err = (NULL==config_fs((struct dn_fs *)o, arg, 0));
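/*
 * Request layout sketch (illustrative; the field values shown are
 * assumptions): userland sends one blob of chained struct dn_id objects
 * and do_config() walks it with next = (char *)o + o->len. A minimal
 * "configure a pipe" request is laid out roughly as:
 *
 *	struct dn_id  hdr;	// type DN_CMD_CONFIG, id = DN_API_VERSION
 *	struct dn_sch sch;	// scheduler, handled by config_sched()
 *	struct dn_link p;	// link, must follow its scheduler
 *	struct dn_fs  fs;	// flowset, handled by config_fs()
 *
 * with each oid.len set to the size of its own object.
 */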
1775 compute_space(struct dn_id *cmd, struct copy_args *a)
1777 int x = 0, need = 0;
1778 int profile_size = sizeof(struct dn_profile);
1780 /* NOTE about compute space:
1781 * NP = dn_cfg.schk_count
1782 * NSI = dn_cfg.si_count
1783 * NF = dn_cfg.fsk_count
1784 * NQ = dn_cfg.queue_count
 * - ipfw pipe show
1786 * (NP/2)*(dn_link + dn_sch + dn_id + dn_fs) only half scheduler
1787 * link, scheduler template, flowset
1788 * integrated in scheduler and header
1790 * (NSI)*(dn_flow) all scheduler instances (includes
1791 * the queue instance)
 * - ipfw sched show
1793 * (NP/2)*(dn_link + dn_sch + dn_id + dn_fs) only half scheduler
1794 * link, scheduler template, flowset
1795 * integrated in scheduler and header
1797 * (NSI * dn_flow) all scheduler instances
1798 * (NF * sizeof(uint32_t)) space for flowset list linked to scheduler
1799 * (NQ * dn_queue) all queues [XXX for now not listed]
 * - ipfw queue show
1801 * (NF * dn_fs) all flowsets
1802 * (NQ * dn_queue) all queues
1804 switch (cmd->subtype) {
1807 /* XXX where do LINK and SCH differ ? */
1808 /* 'ipfw sched show' could list all queues associated with
1809 * a scheduler. For now this feature is disabled
1811 case DN_LINK: /* pipe show */
1812 x = DN_C_LINK | DN_C_SCH | DN_C_FLOW;
1813 need += dn_cfg.schk_count *
1814 (sizeof(struct dn_fs) + profile_size) / 2;
1815 need += dn_cfg.fsk_count * sizeof(uint32_t);
1817 case DN_SCH: /* sched show */
1818 need += dn_cfg.schk_count *
1819 (sizeof(struct dn_fs) + profile_size) / 2;
1820 need += dn_cfg.fsk_count * sizeof(uint32_t);
1821 x = DN_C_SCH | DN_C_LINK | DN_C_FLOW;
1823 case DN_FS: /* queue show */
1824 x = DN_C_FS | DN_C_QUEUE;
1826 case DN_GET_COMPAT: /* compatibility mode */
1827 need = dn_compat_calc_size();
1832 need += dn_cfg.schk_count * sizeof(struct dn_sch) / 2;
1833 /* NOTE also, each fs might be attached to a sched */
1834 need += dn_cfg.schk_count * sizeof(struct dn_id) / 2;
1837 need += dn_cfg.fsk_count * sizeof(struct dn_fs);
1838 if (x & DN_C_LINK) {
1839 need += dn_cfg.schk_count * sizeof(struct dn_link) / 2;
1842 * When exporting a queue to userland, only pass up the
1843 * struct dn_flow, which is the only visible part.
1847 need += dn_cfg.queue_count * sizeof(struct dn_flow);
1849 need += dn_cfg.si_count * (sizeof(struct dn_flow));
1854 * If compat != NULL dummynet_get is called in compatibility mode.
1855 * *compat will be the pointer to the buffer to pass to ipfw
1858 dummynet_get(struct sockopt *sopt, void **compat)
1860 int have, i, need, error;
1861 char *start = NULL, *buf;
1862 size_t sopt_valsize;
1865 struct copy_range r;
1866 int l = sizeof(struct dn_id);
1868 bzero(&a, sizeof(a));
1869 bzero(&r, sizeof(r));
1871 /* save and restore original sopt_valsize around copyin */
1872 sopt_valsize = sopt->sopt_valsize;
1877 /* copy at least an oid, and possibly a full object */
1878 error = sooptcopyin(sopt, cmd, sizeof(r), sizeof(*cmd));
1879 sopt->sopt_valsize = sopt_valsize;
1883 #ifdef EMULATE_SYSCTL
1884 /* sysctl emulation. */
1885 if (cmd->type == DN_SYSCTL_GET)
1886 return kesysctl_emu_get(sopt);
1888 if (l > sizeof(r)) {
1889 /* request larger than default, allocate buffer */
1890 cmd = malloc(l, M_DUMMYNET, M_WAITOK);
1891 error = sooptcopyin(sopt, cmd, l, l);
1892 sopt->sopt_valsize = sopt_valsize;
1896 } else { /* compatibility */
1898 cmd->type = DN_CMD_GET;
1899 cmd->len = sizeof(struct dn_id);
1900 cmd->subtype = DN_GET_COMPAT;
1901 // cmd->id = sopt_valsize;
1902 D("compatibility mode");
1904 a.extra = (struct copy_range *)cmd;
1905 if (cmd->len == sizeof(*cmd)) { /* no range, create a default */
1906 uint32_t *rp = (uint32_t *)(cmd + 1);
1907 cmd->len += 2* sizeof(uint32_t);
1909 rp[1] = DN_MAX_ID - 1;
1910 if (cmd->subtype == DN_LINK) {
1915 /* Count space (under lock) and allocate (outside lock).
1916 * Exit with lock held if we manage to get enough buffer.
1917 * Try a few times then give up.
1919 for (have = 0, i = 0; i < 10; i++) {
1921 need = compute_space(cmd, &a);
1923 /* if there is a range, ignore value from compute_space() */
1924 if (l > sizeof(*cmd))
1925 need = sopt_valsize - sizeof(*cmd);
1932 need += sizeof(*cmd);
1939 free(start, M_DUMMYNET);
1941 if (need > sopt_valsize)
1945 start = malloc(have, M_DUMMYNET, M_WAITOK | M_ZERO);
1948 if (start == NULL) {
1953 error = sooptcopyout(sopt, cmd, sizeof(*cmd));
1957 ND("have %d:%d sched %d, %d:%d links %d, %d:%d flowsets %d, "
1958 "%d:%d si %d, %d:%d queues %d",
1959 dn_cfg.schk_count, sizeof(struct dn_sch), DN_SCH,
1960 dn_cfg.schk_count, sizeof(struct dn_link), DN_LINK,
1961 dn_cfg.fsk_count, sizeof(struct dn_fs), DN_FS,
1962 dn_cfg.si_count, sizeof(struct dn_flow), DN_SCH_I,
1963 dn_cfg.queue_count, sizeof(struct dn_queue), DN_QUEUE);
1964 sopt->sopt_valsize = sopt_valsize;
1965 a.type = cmd->subtype;
1967 if (compat == NULL) {
1968 bcopy(cmd, start, sizeof(*cmd));
1969 ((struct dn_id*)(start))->len = sizeof(struct dn_id);
1970 buf = start + sizeof(*cmd);
1974 a.end = start + have;
1975 /* start copying other objects */
1977 a.type = DN_COMPAT_PIPE;
1978 dn_ht_scan(dn_cfg.schedhash, copy_data_helper_compat, &a);
1979 a.type = DN_COMPAT_QUEUE;
1980 dn_ht_scan(dn_cfg.fshash, copy_data_helper_compat, &a);
1981 } else if (a.type == DN_FS) {
1982 dn_ht_scan(dn_cfg.fshash, copy_data_helper, &a);
1984 dn_ht_scan(dn_cfg.schedhash, copy_data_helper, &a);
1990 sopt->sopt_valsize = buf - start;
1991 /* free() is done by ip_dummynet_compat() */
1992 start = NULL; //XXX hack
1994 error = sooptcopyout(sopt, start, buf - start);
1997 if (cmd && cmd != &r.o)
1998 free(cmd, M_DUMMYNET);
2000 free(start, M_DUMMYNET);
2005 * Functions to drain idle objects -- see dummynet_task() for some notes
2007 /* Callback called on scheduler instance to delete it if idle */
2009 drain_scheduler_cb(void *_si, void *_arg)
2011 struct dn_sch_inst *si = _si;
2015 if ( (*arg++) > dn_cfg.expire_object_examined)
2016 return DNHT_SCAN_END;
2018 if ((si->kflags & DN_ACTIVE) || si->dline.mq.head != NULL)
2022 * if the scheduler is multiqueue, q_count also reflects empty
2023 * queues that point to si, so we need to check si->q_count to
2024 * tell whether we can remove the instance.
2026 if (si->ni.length == 0) {
2027 /* si was marked as idle:
2028 * remove it or increment idle_si_wait counter
2030 empty = (si->sched->fp->flags & DN_MULTIQUEUE) ?
2031 (si->q_count == 0) : 1;
2033 (si->idle_time < dn_cfg.curr_time - dn_cfg.object_idle_tick))
2034 return si_destroy(si, NULL);
2036 dn_cfg.idle_si_wait++;
2041 /* Callback called on scheduler to check if it has instances */
2043 drain_scheduler_sch_cb(void *_s, void *_arg)
2045 struct dn_schk *s = _s;
2048 if (s->sch.flags & DN_HAVE_MASK) {
2049 dn_ht_scan_bucket(s->siht, &s->drain_bucket,
2050 drain_scheduler_cb, _arg);
2053 if (drain_scheduler_cb(s->siht, _arg) == DNHT_SCAN_DEL)
2057 return ( (*arg++) > dn_cfg.expire_object_examined) ? DNHT_SCAN_END : 0;
2060 /* Called every tick, try to delete a 'bucket' of scheduler */
2062 dn_drain_scheduler(void)
2066 dn_ht_scan_bucket(dn_cfg.schedhash, (int *)&dn_cfg.drain_sch,
2067 drain_scheduler_sch_cb, &arg);
2070 /* Callback called on queue to delete if it is idle */
2072 drain_queue_cb(void *_q, void *_arg)
2074 struct dn_queue *q = _q;
2077 if ( (*arg++) > dn_cfg.expire_object_examined)
2078 return DNHT_SCAN_END;
2080 if (q->ni.length == 0) {
2081 if (q->q_time < dn_cfg.curr_time - dn_cfg.object_idle_tick) {
2082 if (dn_delete_queue(q, DN_DESTROY | DN_DEL_SAFE) == 0)
2083 return DNHT_SCAN_DEL; /* queue is deleted */
2085 dn_cfg.idle_queue_wait++;
2088 return 0; /* queue isn't deleted */
2091 /* Callback called on flowset used to check if it has queues */
2093 drain_queue_fs_cb(void *_fs, void *_arg)
2095 struct dn_fsk *fs = _fs;
2098 if (fs->fs.flags & DN_QHT_HASH) {
2099 /* Flowset has a hash table for queues */
2100 dn_ht_scan_bucket(fs->qht, &fs->drain_bucket,
2101 drain_queue_cb, _arg);
2103 /* No hash table for this flowset, null the pointer
2104 * if the queue is deleted
2107 if (drain_queue_cb(fs->qht, _arg) == DNHT_SCAN_DEL)
2111 return ( (*arg++) > dn_cfg.expire_object_examined) ? DNHT_SCAN_END : 0;
2114 /* Called every tick, try to delete a 'bucket' of queue */
2116 dn_drain_queue(void)
2120 /* scan a bucket of flowset */
2121 dn_ht_scan_bucket(dn_cfg.fshash, (int *)&dn_cfg.drain_fs,
2122 drain_queue_fs_cb, &arg);
2126 * Handler for the various dummynet socket options
2129 ip_dn_ctl(struct sockopt *sopt)
2134 error = priv_check(sopt->sopt_td, PRIV_NETINET_DUMMYNET);
2138 /* Disallow sets in really-really secure mode. */
2139 if (sopt->sopt_dir == SOPT_SET) {
2140 error = securelevel_ge(sopt->sopt_td->td_ucred, 3);
2145 switch (sopt->sopt_name) {
2147 D("dummynet: unknown option %d", sopt->sopt_name);
2151 case IP_DUMMYNET_FLUSH:
2152 case IP_DUMMYNET_CONFIGURE:
2153 case IP_DUMMYNET_DEL: /* remove a pipe or queue */
2154 case IP_DUMMYNET_GET:
2155 D("dummynet: compat option %d", sopt->sopt_name);
2156 error = ip_dummynet_compat(sopt);
2160 if (sopt->sopt_dir == SOPT_GET) {
2161 error = dummynet_get(sopt, NULL);
2164 l = sopt->sopt_valsize;
2165 if (l < sizeof(struct dn_id) || l > 12000) {
2166 D("argument len %d invalid", l);
2169 p = malloc(l, M_TEMP, M_WAITOK); // XXX can it fail ?
2170 error = sooptcopyin(sopt, p, l, l);
2173 error = do_config(p, l);
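/*
 * Userland sketch (assumes the IP_DUMMYNET3 socket option used by ipfw(8);
 * error handling omitted, 'buf' and 'len' are illustrative):
 *
 *	int s = socket(AF_INET, SOCK_RAW, IPPROTO_RAW);
 *	// SET: ship a chained dn_id blob, which ends up in do_config()
 *	setsockopt(s, IPPROTO_IP, IP_DUMMYNET3, buf, len);
 *	// GET: retrieve schedulers/links/flowsets via dummynet_get()
 *	socklen_t olen = len;
 *	getsockopt(s, IPPROTO_IP, IP_DUMMYNET3, buf, &olen);
 */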
2187 if (dn_cfg.init_done)
2189 printf("DUMMYNET %p with IPv6 initialized (100409)\n", curvnet);
2190 dn_cfg.init_done = 1;
2191 /* Set defaults here. MSVC does not accept initializers,
2192 * and this is also useful for vimages
2195 dn_cfg.slot_limit = 100; /* Foot shooting limit for queues. */
2196 dn_cfg.byte_limit = 1024 * 1024;
2199 /* RED parameters */
2200 dn_cfg.red_lookup_depth = 256; /* default lookup table depth */
2201 dn_cfg.red_avg_pkt_size = 512; /* default medium packet size */
2202 dn_cfg.red_max_pkt_size = 1500; /* default max packet size */
2205 dn_cfg.max_hash_size = 1024; /* max in the hash tables */
2207 if (dn_cfg.hash_size == 0) /* XXX or <= 0 ? */
2208 dn_cfg.hash_size = 64; /* default hash size */
2210 /* hash tables for schedulers and flowsets are created
2211 * when the first scheduler/flowset is inserted.
2212 * This is done to allow the right hash_size value to be used.
2213 * When the last object is deleted, the table is destroyed,
2214 * so a new hash_size value can be used.
2215 * XXX rehash is not supported for now
2217 dn_cfg.schedhash = NULL;
2218 dn_cfg.fshash = NULL;
2219 /* bucket index to drain object */
2220 dn_cfg.drain_fs = 0;
2221 dn_cfg.drain_sch = 0;
2223 if (dn_cfg.expire_object == 0)
2224 dn_cfg.expire_object = 50;
2225 if (dn_cfg.object_idle_tick == 0)
2226 dn_cfg.object_idle_tick = 1000;
2227 if (dn_cfg.expire_object_examined == 0)
2228 dn_cfg.expire_object_examined = 10;
2229 if (dn_cfg.drain_ratio == 0)
2230 dn_cfg.drain_ratio = 1;
2232 // XXX what if we don't have a tsc ?
2234 dn_cfg.cycle_task_new = dn_cfg.cycle_task_old = readTSC();
2236 heap_init(&dn_cfg.evheap, 16, offsetof(struct dn_id, id));
2237 SLIST_INIT(&dn_cfg.fsu);
2238 SLIST_INIT(&dn_cfg.schedlist);
2242 TASK_INIT(&dn_task, 0, dummynet_task, curvnet);
2243 dn_tq = taskqueue_create_fast("dummynet", M_NOWAIT,
2244 taskqueue_thread_enqueue, &dn_tq);
2245 taskqueue_start_threads(&dn_tq, 1, PI_NET, "dummynet");
2247 callout_init(&dn_timeout, CALLOUT_MPSAFE);
2248 callout_reset_on(&dn_timeout, 1, dummynet, NULL, 0);
2250 /* Initialize curr_time adjustment mechanics. */
2251 getmicrouptime(&dn_cfg.prev_t);
2256 ip_dn_destroy(int last)
2258 callout_drain(&dn_timeout);
2262 ND("removing last instance\n");
2263 ip_dn_ctl_ptr = NULL;
2264 ip_dn_io_ptr = NULL;
2269 taskqueue_drain(dn_tq, &dn_task);
2270 taskqueue_free(dn_tq);
2272 dn_ht_free(dn_cfg.schedhash, 0);
2273 dn_ht_free(dn_cfg.fshash, 0);
2274 heap_free(&dn_cfg.evheap);
2278 #endif /* KLD_MODULE */
2281 dummynet_modevent(module_t mod, int type, void *data)
2284 if (type == MOD_LOAD) {
2286 printf("DUMMYNET already loaded\n");
2290 ip_dn_ctl_ptr = ip_dn_ctl;
2291 ip_dn_io_ptr = dummynet_io;
2293 } else if (type == MOD_UNLOAD) {
2294 #if !defined(KLD_MODULE)
2295 printf("dummynet statically compiled, cannot unload\n");
2298 ip_dn_destroy(1 /* last */);
2305 /* modevent helpers for the modules */
2307 load_dn_sched(struct dn_alg *d)
2312 return 1; /* error */
2313 ip_dn_init(); /* just in case, we need the lock */
2315 /* Check that the mandatory functions exist */
2316 if (d->enqueue == NULL || d->dequeue == NULL) {
2317 D("missing enqueue or dequeue for %s", d->name);
2321 /* Check whether the scheduler already exists */
2323 SLIST_FOREACH(s, &dn_cfg.schedlist, next) {
2324 if (strcmp(s->name, d->name) == 0) {
2325 D("%s already loaded", d->name);
2326 break; /* scheduler already exists */
2330 SLIST_INSERT_HEAD(&dn_cfg.schedlist, d, next);
2332 D("dn_sched %s %sloaded", d->name, s ? "not ":"");
2337 unload_dn_sched(struct dn_alg *s)
2339 struct dn_alg *tmp, *r;
2342 ND("called for %s", s->name);
2345 SLIST_FOREACH_SAFE(r, &dn_cfg.schedlist, next, tmp) {
2346 if (strcmp(s->name, r->name) != 0)
2348 ND("ref_count = %d", r->ref_count);
2349 err = (r->ref_count != 0) ? EBUSY : 0;
2351 SLIST_REMOVE(&dn_cfg.schedlist, r, dn_alg, next);
2355 D("dn_sched %s %sunloaded", s->name, err ? "not ":"");
2360 dn_sched_modevent(module_t mod, int cmd, void *arg)
2362 struct dn_alg *sch = arg;
2364 if (cmd == MOD_LOAD)
2365 return load_dn_sched(sch);
2366 else if (cmd == MOD_UNLOAD)
2367 return unload_dn_sched(sch);
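/*
 * Registration sketch (assumes the DECLARE_DNSCHED_MODULE() helper and the
 * _SI() initializer macro from dn_sched.h): an external scheduler such as
 * dn_sched_fifo.c fills in a struct dn_alg and routes MOD_LOAD/MOD_UNLOAD
 * through the handler above, which calls load_dn_sched()/unload_dn_sched():
 *
 *	static struct dn_alg fifo_desc = {
 *		_SI( .type = )    DN_SCHED_FIFO,
 *		_SI( .name = )    "FIFO",
 *		_SI( .enqueue = ) fifo_enqueue,
 *		_SI( .dequeue = ) fifo_dequeue,
 *		// ... remaining callbacks may be left NULL ...
 *	};
 *	DECLARE_DNSCHED_MODULE(dn_fifo, &fifo_desc);
 */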
2372 static moduledata_t dummynet_mod = {
2373 "dummynet", dummynet_modevent, NULL
2376 #define DN_SI_SUB SI_SUB_PROTO_IFATTACHDOMAIN
2377 #define DN_MODEV_ORD (SI_ORDER_ANY - 128) /* after ipfw */
2378 DECLARE_MODULE(dummynet, dummynet_mod, DN_SI_SUB, DN_MODEV_ORD);
2379 MODULE_DEPEND(dummynet, ipfw, 2, 2, 2);
2380 MODULE_VERSION(dummynet, 3);
2383 * Starting up. Done in order after dummynet_modevent() has been called.
2384 * VNET_SYSINIT is also called for each existing vnet and each new vnet.
2386 //VNET_SYSINIT(vnet_dn_init, DN_SI_SUB, DN_MODEV_ORD+2, ip_dn_init, NULL);
2389 * Shutdown handlers close up shop. These are done in REVERSE ORDER, but still
2390 * after dummynet_modevent() has been called. Not called on reboot.
2391 * VNET_SYSUNINIT is also called for each exiting vnet as it exits,
2392 * or when the module is unloaded.
2394 //VNET_SYSUNINIT(vnet_dn_uninit, DN_SI_SUB, DN_MODEV_ORD+2, ip_dn_destroy, NULL);