/*-
 * Copyright (c) 2010 Luigi Rizzo, Riccardo Panicucci, Universita` di Pisa
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Dummynet portions related to packet handling.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD: user/luigi/ipfw3-head/sys/netinet/ipfw/ip_dn_io.c 203321 2010-01-31 21:39:25Z luigi $");

#include "opt_inet6.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/rwlock.h>
#include <sys/socket.h>
#include <sys/sysctl.h>

#include <net/if.h>	/* IFNAMSIZ, struct ifaddr, ifq head, lock.h mutex.h */
#include <net/netisr.h>

#include <netinet/in.h>
#include <netinet/ip.h>		/* ip_len, ip_off */
#include <netinet/ip_var.h>	/* ip_output(), IP_FORWARDING */
#include <netinet/ip_fw.h>
#include <netinet/ipfw/ip_fw_private.h>
#include <netinet/ipfw/dn_heap.h>
#include <netinet/ip_dummynet.h>
#include <netinet/ipfw/ip_dn_private.h>
#include <netinet/ipfw/dn_sched.h>

#include <netinet/if_ether.h>	/* various ether_* routines */

#include <netinet/ip6.h>	/* for ip6_input, ip6_output prototypes */
#include <netinet6/ip6_var.h>
/*
 * We keep a private variable for the simulation time, but we could
 * probably use an existing one ("softticks" in sys/kern/kern_timeout.c)
 * instead of dn_cfg.curr_time.
 */

struct dn_parms dn_cfg;
//VNET_DEFINE(struct dn_parms, _base_dn_cfg);
static long tick_last;		/* Last tick duration (usec). */
static long tick_delta;		/* Last vs standard tick diff (usec). */
static long tick_delta_sum;	/* Accumulated tick difference (usec). */
static long tick_adjustment;	/* Tick adjustments done. */
static long tick_lost;		/* Number of lost (coalesced) ticks. */
/* Adjusted vs non-adjusted curr_time difference (ticks). */
static long tick_diff;

static unsigned long io_pkt;
static unsigned long io_pkt_fast;
static unsigned long io_pkt_drop;
/*
 * We use a heap to store entities for which we have pending timer events.
 * The heap is checked at every tick, and all entities with expired events
 * are extracted.
 */

MALLOC_DEFINE(M_DUMMYNET, "dummynet", "dummynet heap");

extern void (*bridge_dn_p)(struct mbuf *, struct ifnet *);
SYSCTL_DECL(_net_inet);
SYSCTL_DECL(_net_inet_ip);
SYSCTL_NODE(_net_inet_ip, OID_AUTO, dummynet, CTLFLAG_RW, 0, "Dummynet");

/* wrapper to pass dn_cfg fields to SYSCTL_* */
//#define DC(x)	(&(VNET_NAME(_base_dn_cfg).x))
#define DC(x)	(&(dn_cfg.x))
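/*
 * For example, DC(hash_size) expands to &(dn_cfg.hash_size), so each
 * entry below exposes a field of the global dn_cfg through sysctl.
 */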
SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, hash_size,
    CTLFLAG_RW, DC(hash_size), 0, "Default hash table size");
SYSCTL_LONG(_net_inet_ip_dummynet, OID_AUTO, pipe_slot_limit,
    CTLFLAG_RW, DC(slot_limit), 0,
    "Upper limit in slots for pipe queue.");
SYSCTL_LONG(_net_inet_ip_dummynet, OID_AUTO, pipe_byte_limit,
    CTLFLAG_RW, DC(byte_limit), 0,
    "Upper limit in bytes for pipe queue.");
SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, io_fast,
    CTLFLAG_RW, DC(io_fast), 0, "Enable fast dummynet io.");
SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, debug,
    CTLFLAG_RW, DC(debug), 0, "Dummynet debug level");

SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, red_lookup_depth,
    CTLFLAG_RD, DC(red_lookup_depth), 0, "Depth of RED lookup table");
SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, red_avg_pkt_size,
    CTLFLAG_RD, DC(red_avg_pkt_size), 0, "RED medium packet size");
SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, red_max_pkt_size,
    CTLFLAG_RD, DC(red_max_pkt_size), 0, "RED max packet size");
/* time adjustment */
SYSCTL_LONG(_net_inet_ip_dummynet, OID_AUTO, tick_delta,
    CTLFLAG_RD, &tick_delta, 0, "Last vs standard tick difference (usec).");
SYSCTL_LONG(_net_inet_ip_dummynet, OID_AUTO, tick_delta_sum,
    CTLFLAG_RD, &tick_delta_sum, 0, "Accumulated tick difference (usec).");
SYSCTL_LONG(_net_inet_ip_dummynet, OID_AUTO, tick_adjustment,
    CTLFLAG_RD, &tick_adjustment, 0, "Tick adjustments done.");
SYSCTL_LONG(_net_inet_ip_dummynet, OID_AUTO, tick_diff,
    CTLFLAG_RD, &tick_diff, 0,
    "Adjusted vs non-adjusted curr_time difference (ticks).");
SYSCTL_LONG(_net_inet_ip_dummynet, OID_AUTO, tick_lost,
    CTLFLAG_RD, &tick_lost, 0,
    "Number of ticks coalesced by dummynet taskqueue.");
/* Drain parameters */
SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, expire,
    CTLFLAG_RW, DC(expire), 0, "Expire empty queues/pipes");
SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, expire_cycle,
    CTLFLAG_RD, DC(expire_cycle), 0, "Expire cycle for queues/pipes");
SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, expire_object,
    CTLFLAG_RW, DC(expire_object), 0,
    "Minimum number of objects before starting the drain routine");
SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, object_idle_tick,
    CTLFLAG_RD, DC(object_idle_tick), 0,
    "Time (in ticks) to consider an object as idle");
SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, drain_ratio,
    CTLFLAG_RD, DC(drain_ratio), 0,
    "% of dummynet_task() dedicated to the drain routine");
SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, schk_count,
    CTLFLAG_RD, DC(schk_count), 0, "Number of schedulers");
SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, si_count,
    CTLFLAG_RD, DC(si_count), 0, "Number of scheduler instances");
SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, fsk_count,
    CTLFLAG_RD, DC(fsk_count), 0, "Number of flowsets");
SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, queue_count,
    CTLFLAG_RD, DC(queue_count), 0, "Number of queues");
SYSCTL_ULONG(_net_inet_ip_dummynet, OID_AUTO, io_pkt,
    CTLFLAG_RD, &io_pkt, 0,
    "Number of packets passed to dummynet.");
SYSCTL_ULONG(_net_inet_ip_dummynet, OID_AUTO, io_pkt_fast,
    CTLFLAG_RD, &io_pkt_fast, 0,
    "Number of packets that bypassed the dummynet scheduler.");
SYSCTL_ULONG(_net_inet_ip_dummynet, OID_AUTO, io_pkt_drop,
    CTLFLAG_RD, &io_pkt_drop, 0,
    "Number of packets dropped by dummynet.");
static void dummynet_send(struct mbuf *);
/*
 * Packets processed by dummynet have an mbuf tag associated with
 * them that carries their dummynet state.
 * Outside dummynet, only the 'rule' field is relevant, and it must
 * be at the beginning of the structure.
 */
struct dn_pkt_tag {
	struct ipfw_rule_ref rule;	/* matching rule		*/

	/* second part, dummynet specific */
	int dn_dir;			/* action when packet comes out.*/
					/* see ip_fw_private.h		*/
	uint64_t output_time;		/* when the pkt is due for delivery */
	struct ifnet *ifp;		/* interface, for ip_output	*/
	struct _ip6dn_args ip6opt;	/* XXX ipv6 options		*/
};
/*
 * Return the mbuf tag holding the dummynet state (it should
 * be the first one on the list).
 */
static struct dn_pkt_tag *
dn_tag_get(struct mbuf *m)
{
	struct m_tag *mtag = m_tag_first(m);

	KASSERT(mtag != NULL &&
	    mtag->m_tag_cookie == MTAG_ABI_COMPAT &&
	    mtag->m_tag_id == PACKET_TAG_DUMMYNET,
	    ("packet on dummynet queue w/o dummynet tag!"));
	return (struct dn_pkt_tag *)(mtag + 1);
}
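/*
 * Note: m_tag payloads are laid out immediately after the struct m_tag
 * header, so '(mtag + 1)' above is the address of the dn_pkt_tag itself.
 */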
static inline void
mq_append(struct mq *q, struct mbuf *m)
{
	if (q->head == NULL)
		q->head = m;
	else
		q->tail->m_nextpkt = m;
	q->tail = m;
	m->m_nextpkt = NULL;
}
/*
 * Dispose of a list of packets. Use a function so that, if we need
 * to do more work, this is a central point to do it.
 */
void dn_free_pkts(struct mbuf *mnext)
{
	struct mbuf *m;

	while ((m = mnext) != NULL) {
		mnext = m->m_nextpkt;
		FREE_PKT(m);
	}
}
static int
red_drops (struct dn_queue *q, int len)
{
	/*
	 * RED calculates the average queue size (avg) using a low-pass
	 * filter with an exponentially weighted (w_q) moving average:
	 * 	avg <- (1-w_q) * avg + w_q * q_size
	 * where q_size is the queue length (measured in bytes or packets).
	 *
	 * If q_size == 0, we compute the idle time for the link, and set
	 *	avg = (1 - w_q)^(idle/s)
	 * where s is the time needed for transmitting a medium-sized packet.
	 *
	 * Now, if avg < min_th the packet is enqueued.
	 * If avg > max_th the packet is dropped. Otherwise, the packet is
	 * dropped with a probability which is a function of avg.
	 */
	int64_t p_b = 0;
	struct dn_fsk *fs = q->fs;

	/* Queue in bytes or packets? */
	uint32_t q_size = (fs->fs.flags & DN_QSIZE_BYTES) ?
	    q->ni.len_bytes : q->ni.length;

	/* Average queue size estimation. */
	if (q_size != 0) {
		/* Queue is not empty, avg <- avg + (q_size - avg) * w_q */
		int diff = SCALE(q_size) - q->avg;
		int64_t v = SCALE_MUL((int64_t)diff, (int64_t)fs->w_q);

		q->avg += (int)v;
	} else {
		/*
		 * Queue is empty, compute for how long the queue has been
		 * empty and use a lookup table for computing
		 * (1 - w_q)^(idle_time/s), where s is the time to send a
		 * medium-sized packet.
		 */
		u_int t = div64((dn_cfg.curr_time - q->q_time),
		    fs->lookup_step);

		q->avg = (t < fs->lookup_depth) ?
		    SCALE_MUL(q->avg, fs->w_q_lookup[t]) : 0;
	}

	/* Should i drop? */
	if (q->avg < fs->min_th) {
		q->count = -1;
		return (0);	/* accept packet */
	}
	if (q->avg >= fs->max_th) {	/* average queue >= max threshold */
		if (fs->fs.flags & DN_IS_GENTLE_RED) {
			/*
			 * According to Gentle-RED, if avg is greater than
			 * max_th the packet is dropped with a probability
			 *	p_b = c_3 * avg - c_4
			 * where
			 *	c_3 = (1 - max_p) / max_th
			 *	c_4 = 1 - 2 * max_p
			 */
			p_b = SCALE_MUL((int64_t)fs->c_3, (int64_t)q->avg) -
			    fs->c_4;
		} else {
			q->count = -1;
			return (1);
		}
	} else if (q->avg > fs->min_th) {
		/*
		 * We compute p_b using the linear dropping function
		 *	p_b = c_1 * avg - c_2
		 * where
		 *	c_1 = max_p / (max_th - min_th)
		 *	c_2 = max_p * min_th / (max_th - min_th)
		 */
		p_b = SCALE_MUL((int64_t)fs->c_1, (int64_t)q->avg) - fs->c_2;
	}

	if (fs->fs.flags & DN_QSIZE_BYTES)
		p_b = div64((p_b * len), fs->max_pkt_size);
	if (++q->count == 0)
		q->random = random() & 0xffff;
	else {
		/*
		 * q->count counts packets arrived since last drop, so a
		 * greater value of q->count means a greater packet drop
		 * probability.
		 */
		if (SCALE_MUL(p_b, SCALE((int64_t)q->count)) > q->random) {
			q->count = 0;
			/* After a drop we calculate a new random value. */
			q->random = random() & 0xffff;
			return (1);	/* drop */
		}
	}
	/* End of RED algorithm. */

	return (0);	/* accept */
}
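/*
 * Worked example for the linear branch above (illustrative numbers,
 * not taken from the code): with max_p = 0.1, min_th = 5 and
 * max_th = 15 (in packets), c_1 = 0.1/10 and c_2 = 0.1*5/10, so at
 * avg = 10 the base drop probability is p_b = 0.05; it is then
 * amplified by q->count, the packets accepted since the last drop.
 */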
/*
 * Enqueue a packet in q, subject to space and queue management policy
 * (whose parameters are in q->fs).
 * Update stats for the queue and the scheduler.
 * Return 0 on success, 1 on drop. The packet is consumed anyway.
 */
static int
dn_enqueue(struct dn_queue *q, struct mbuf* m, int drop)
{
	struct dn_fs *f;
	struct dn_flow *ni;	/* stats for scheduler instance */
	uint64_t len;

	if (q->fs == NULL || q->_si == NULL) {
		printf("%s fs %p si %p, dropping\n",
		    __FUNCTION__, q->fs, q->_si);
		FREE_PKT(m);
		return 1;
	}
	f = &(q->fs->fs);
	ni = &q->_si->ni;
	len = m->m_pkthdr.len;
	/* Update statistics, then check reasons to drop pkt. */
	q->ni.tot_bytes += len;
	q->ni.tot_pkts++;
	ni->tot_bytes += len;
	ni->tot_pkts++;
	if (drop)
		goto drop;
	if (f->plr && random() < f->plr)
		goto drop;
	if (f->flags & DN_IS_RED && red_drops(q, m->m_pkthdr.len))
		goto drop;
	if (f->flags & DN_QSIZE_BYTES) {
		if (q->ni.len_bytes > f->qsize)
			goto drop;
	} else if (q->ni.length >= f->qsize) {
		goto drop;
	}
	mq_append(&q->mq, m);
	if (q->ni.length == 0) {	/* queue was idle */
		dn_cfg.idle_queue--;
		if (ni->length == 0)	/* scheduler was idle */
			dn_cfg.idle_si--;
	}
	q->ni.length++;
	q->ni.len_bytes += len;
	ni->length++;
	ni->len_bytes += len;
	return 0;

drop:
	io_pkt_drop++;
	q->ni.drops++;
	ni->drops++;
	FREE_PKT(m);
	return 1;
}
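/*
 * Note the asymmetry in the limit checks above: both are done before
 * accounting for the new packet, so a byte-limited queue may exceed
 * f->qsize by up to one packet, while a slot-limited queue ('>=')
 * never holds more than f->qsize packets.
 */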
/*
 * Fetch packets from the delay line which are due now. If there are
 * leftover packets, reinsert the delay line into the heap.
 * Runs under the scheduler lock.
 */
static void
transmit_event(struct mq *q, struct delay_line *dline, uint64_t now)
{
	struct mbuf *m;
	struct dn_pkt_tag *pkt = NULL;

	dline->oid.subtype = 0;	/* not in heap */
	while ((m = dline->mq.head) != NULL) {
		pkt = dn_tag_get(m);
		if (!DN_KEY_LEQ(pkt->output_time, now))
			break;
		dline->mq.head = m->m_nextpkt;
		mq_append(q, m);
	}
	if (m != NULL) {
		dline->oid.subtype = 1;	/* in heap */
		heap_insert(&dn_cfg.evheap, pkt->output_time, dline);
	}
}
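/*
 * Note: the delay line is reinserted in the heap keyed on the
 * output_time of its first leftover packet, so the next event for
 * this line fires exactly when that packet becomes due.
 */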
/*
 * Convert the additional MAC overheads/delays into an equivalent
 * number of bits for the given data rate. The samples are
 * in milliseconds so we need to divide by 1000.
 */
static uint64_t
extra_bits(struct mbuf *m, struct dn_schk *s)
{
	int index;
	uint64_t bits;
	struct dn_profile *pf = s->profile;

	if (!pf || pf->samples_no == 0)
		return 0;
	index = random() % pf->samples_no;
	bits = div64((uint64_t)pf->samples[index] * s->link.bandwidth, 1000);
	if (index >= pf->loss_level) {
		struct dn_pkt_tag *dt = dn_tag_get(m);
		if (dt)
			dt->dn_dir = DIR_DROP;
	}
	return bits;
}
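/*
 * Illustrative numbers: a 2 ms sample on a 10 Mbit/s link charges
 * 2 * 10000000 / 1000 = 20000 extra bits to the packet.
 */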
/*
 * Send traffic from a scheduler instance due by 'now'.
 * Return a pointer to the head of the queue.
 */
static struct mbuf *
serve_sched(struct mq *q, struct dn_sch_inst *si, uint64_t now)
{
	struct mq def_q;
	struct dn_schk *s = si->sched;
	struct mbuf *m = NULL;
	int delay_line_idle = (si->dline.mq.head == NULL);
	int done, bw;

	if (q == NULL) {
		q = &def_q;
		q->head = NULL;
	}

	bw = s->link.bandwidth;
	si->kflags &= ~DN_ACTIVE;

	if (bw > 0)
		si->credit += (now - si->sched_time) * bw;
	else
		si->credit = 0;
	si->sched_time = now;
	done = 0;
	while (si->credit >= 0 && (m = s->fp->dequeue(si)) != NULL) {
		uint64_t len_scaled;
		/*
		 * Some schedulers might want the scheduler to be woken up
		 * again later. To support this, the dequeue callback
		 * returns an mbuf with len < 0; this results in a new
		 * wakeup of the scheduler instance in -m->m_pkthdr.len
		 * ticks.
		 */
		if (m->m_pkthdr.len < 0) {
			si->kflags |= DN_ACTIVE;
			heap_insert(&dn_cfg.evheap, now - m->m_pkthdr.len, si);
			if (delay_line_idle && done)
				transmit_event(q, &si->dline, now);
			return NULL;
		}

		/* a regular mbuf received */
		done++;
		if (bw == 0)
			printf("bw is null\n");
		len_scaled = (bw == 0) ? 0 : hz *
		    (m->m_pkthdr.len * 8 + extra_bits(m, s));
		si->credit -= len_scaled;
		/* Move packet into the delay line. */
		dn_tag_get(m)->output_time = dn_cfg.curr_time + s->link.delay;
		mq_append(&si->dline.mq, m);
	}

	/*
	 * If credit >= 0 the instance is idle, mark time.
	 * Otherwise put back in the heap, and adjust the output
	 * time of the last inserted packet, m, which was too early.
	 */
	if (si->credit >= 0) {
		si->idle_time = now;
	} else {
		uint64_t t;
		KASSERT(bw > 0, ("bw=0 and credit<0 ?"));
		t = div64(bw - 1 - si->credit, bw);
		if (m)
			dn_tag_get(m)->output_time += t;
		si->kflags |= DN_ACTIVE;
		heap_insert(&dn_cfg.evheap, now + t, si);
	}
	if (delay_line_idle && done)
		transmit_event(q, &si->dline, now);
	return q->head;
}
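/*
 * Note on serve_sched() above: t = div64(bw - 1 - si->credit, bw),
 * with credit < 0, is the integer ceiling of (-credit)/bw, i.e. the
 * minimum number of ticks needed to repay the credit deficit before
 * the last (too early) packet can leave.
 */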
/*
 * Support function to read the TSC (or equivalent). We use this
 * high-resolution timer to adapt the amount of work done for
 * expiring the clock.
 * Supports Linux and FreeBSD, on both the i386 and amd64 platforms,
 * as well as the OpenWRT mips architecture.
 *
 * On SMP, no special work is needed on:
 * - Linux 2.6, where timers always run on the same cpu that added them. See
 *   http://book.opensourceproject.org.cn/kernel/kernel3rd/opensource/0596005652/understandlk-chp-6-sect-5.html
 * - FreeBSD 8, which has a new callout_reset_on() to specify the cpu
 *   on which the timer must run;
 * - Windows, which runs dummynet_task() on cpu0.
 * Special care is needed on:
 * - Linux 2.4, which does not guarantee to run a timer on the same cpu
 *   every time.
 */
#ifdef HAVE_TSC
uint64_t
readTSC(void)
{
	uint64_t a = 0;

#ifdef __linux__
	/* Linux and OpenWRT have a macro to read the tsc for i386 and
	 * amd64.
	 * OpenWRT has patched the kernel to allow the use of the tsc on
	 * mips and other platforms.
	 * rdtscll() is a macro defined in include/asm-xxx/msr.h,
	 * where xxx is the architecture (x86, mips).
	 */
	rdtscll(a);
#elif defined(_WIN32)
	/* Microsoft recommends the use of KeQueryPerformanceCounter()
	 * instead of rdtsc().
	 */
	KeQueryPerformanceCounter((PLARGE_INTEGER)&a); //XXX not tested!
#elif defined(__FreeBSD__)
	/* FreeBSD (i386/amd64) has a rdtsc() macro defined in
	 * machine/cpufunc.h; use it instead of explicit assembly.
	 */
	a = rdtsc();
#endif
	return (a);
}
#endif /* HAVE_TSC */
#ifdef HAVE_TSC
/*
 * Compute the average task period.
 * We could do something more complex, possibly.
 */
static void
do_update_cycle(void)
{
	uint64_t tmp = readTSC();
#if defined(LINUX_24) && defined(CONFIG_SMP)
	/* On LINUX24 and SMP, we have no guarantee about which cpu runs
	 * the timer callbacks. If the difference between the new and the
	 * old value is negative, we assume that the values come from
	 * different cpus, so we adjust 'new' accordingly.
	 */
	if (tmp <= dn_cfg.cycle_task_new)
		dn_cfg.cycle_task_new = tmp - dn_cfg.cycle_task;
#endif /* LINUX_24 && CONFIG_SMP */
	dn_cfg.cycle_task_old = dn_cfg.cycle_task_new;
	dn_cfg.cycle_task_new = tmp;
	dn_cfg.cycle_task = dn_cfg.cycle_task_new - dn_cfg.cycle_task_old;

	/* Update the average:
	 *	avg <- ((2^N * avg) + new - avg) / 2^N
	 * N == 4 seems to be a good compromise between reacting to clock
	 * changes and filtering 'spurious' cycle_task values.
	 */
#define DN_N 4
	dn_cfg.cycle_task_avg = (dn_cfg.cycle_task_avg << DN_N) +
	    dn_cfg.cycle_task - dn_cfg.cycle_task_avg;
	dn_cfg.cycle_task_avg = dn_cfg.cycle_task_avg >> DN_N;
}
#endif /* HAVE_TSC */
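/*
 * With DN_N == 4 the two statements above compute
 *	avg <- avg + (cycle_task - avg) / 16
 * i.e. an exponentially weighted moving average using only shifts
 * and adds.
 */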
	if (!dn_cfg.expire || ++dn_cfg.expire_cycle < dn_cfg.expire)
		return;

	/* It's time to check whether the drain routines should be called. */
	dn_cfg.expire_cycle = 0;

	dn_cfg.idle_queue_wait = 0;
	dn_cfg.idle_si_wait = 0;
	/* Do a drain cycle even if there isn't time to do it. */
#ifdef HAVE_TSC
	dt_max = dn_cfg.cycle_task_avg * dn_cfg.drain_ratio;
#endif
	for (;;) {
		if (dn_cfg.idle_queue > dn_cfg.expire_object &&
		    dn_cfg.idle_queue_wait < dn_cfg.idle_queue) {
			dn_drain_queue();
		}

		if (dn_cfg.idle_si > dn_cfg.expire_object &&
		    dn_cfg.idle_si_wait < dn_cfg.idle_si) {
			dn_drain_scheduler();
		}

#ifndef HAVE_TSC
		/* If the TSC does not exist, do only one drain cycle and
		 * exit. */
		break;
#else
		/* Exit when we have consumed all the available time. */
		if (((readTSC() - dn_cfg.cycle_task_new) * 100 > dt_max))
			break;
#endif /* HAVE_TSC */
	}
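/*
 * Note: dn_cfg.drain_ratio is a percentage, so the elapsed TSC count
 * is multiplied by 100 before being compared against
 * dt_max = cycle_task_avg * drain_ratio; draining stops once it has
 * used about drain_ratio percent of an average task period.
 */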
/*
 * The timer handler for dummynet. Time is computed in ticks, but
 * the code is tolerant to the actual rate at which this is called.
 * Once complete, the function reschedules itself for the next tick.
 */
void
dummynet_task(void *context, int pending)
{
	struct timeval t;
	struct mq q = { NULL, NULL };	/* queue to accumulate results */

	CURVNET_SET((struct vnet *)context);

	do_update_cycle();	/* compute avg. tick duration */

	DN_BH_WLOCK();

	/* Update number of lost (coalesced) ticks. */
	tick_lost += pending - 1;

	getmicrouptime(&t);
	/* Last tick duration (usec). */
	tick_last = (t.tv_sec - dn_cfg.prev_t.tv_sec) * 1000000 +
	    (t.tv_usec - dn_cfg.prev_t.tv_usec);
	/* Last tick vs standard tick difference (usec). */
	tick_delta = (tick_last * hz - 1000000) / hz;
	/* Accumulated tick difference (usec). */
	tick_delta_sum += tick_delta;

	dn_cfg.prev_t = t;

	/*
	 * Adjust curr_time if the accumulated tick difference is
	 * greater than the 'standard' tick. Since curr_time should
	 * be monotonically increasing, we do positive adjustments
	 * as required, and throttle curr_time in case of negative
	 * adjustment.
	 */
	dn_cfg.curr_time++;
	if (tick_delta_sum - tick >= 0) {
		int diff = tick_delta_sum / tick;

		dn_cfg.curr_time += diff;
		tick_diff += diff;
		tick_delta_sum %= tick;
		tick_adjustment++;
	} else if (tick_delta_sum + tick <= 0) {
		dn_cfg.curr_time--;
		tick_diff--;
		tick_delta_sum += tick;
		tick_adjustment++;
	}

	/* serve pending events, accumulate in q */
	for (;;) {
		struct dn_id *p;	/* generic parameter to handler */

		if (dn_cfg.evheap.elements == 0 ||
		    DN_KEY_LT(dn_cfg.curr_time, HEAP_TOP(&dn_cfg.evheap)->key))
			break;
		p = HEAP_TOP(&dn_cfg.evheap)->object;
		heap_extract(&dn_cfg.evheap, NULL);

		if (p->type == DN_SCH_I) {
			serve_sched(&q, (struct dn_sch_inst *)p,
			    dn_cfg.curr_time);
		} else {	/* extracted a delay line */
			transmit_event(&q, (struct delay_line *)p,
			    dn_cfg.curr_time);
		}
	}

	DN_BH_WUNLOCK();
	dn_reschedule();
	if (q.head != NULL)
		dummynet_send(q.head);
	CURVNET_RESTORE();
}
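/*
 * Example of the tick adjustment in dummynet_task() above: with
 * hz = 1000 the standard tick is 1000 usec. If the last tick measured
 * 1250 usec, tick_delta is (1250 * 1000 - 1000000) / 1000 = 250 usec;
 * once tick_delta_sum reaches one standard tick, curr_time advances
 * by one extra tick.
 */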
/*
 * Forward a chain of packets to the proper destination.
 * This runs outside the dummynet lock.
 */
static void
dummynet_send(struct mbuf *m)
{
	struct mbuf *n;

	for (; m != NULL; m = n) {
		struct ifnet *ifp = NULL;	/* gcc 3.4.6 complains */
		struct m_tag *tag;
		int dst;

		n = m->m_nextpkt;
		m->m_nextpkt = NULL;
		tag = m_tag_first(m);
		if (tag == NULL) {	/* should not happen */
			dst = DIR_DROP;
		} else {
			struct dn_pkt_tag *pkt = dn_tag_get(m);
			/* extract the dummynet info, rename the tag
			 * to carry reinject info.
			 */
			dst = pkt->dn_dir;
			ifp = pkt->ifp;
			tag->m_tag_cookie = MTAG_IPFW_RULE;
			tag->m_tag_id = 0;
		}

		switch (dst) {
		case DIR_OUT:
			SET_HOST_IPLEN(mtod(m, struct ip *));
			ip_output(m, NULL, NULL, IP_FORWARDING, NULL, NULL);
			break;
		case DIR_IN:
			/* put header in network format for ip_input() */
			//SET_NET_IPLEN(mtod(m, struct ip *));
			netisr_dispatch(NETISR_IP, m);
			break;
#ifdef INET6
		case DIR_IN | PROTO_IPV6:
			netisr_dispatch(NETISR_IPV6, m);
			break;
		case DIR_OUT | PROTO_IPV6:
			SET_HOST_IPLEN(mtod(m, struct ip *));
			ip6_output(m, NULL, NULL, IPV6_FORWARDING,
			    NULL, NULL, NULL);
			break;
#endif
		case DIR_FWD | PROTO_IFB:	/* DN_TO_IFB_FWD: */
			if (bridge_dn_p != NULL)
				((*bridge_dn_p)(m, ifp));
			else
				printf("dummynet: if_bridge not loaded\n");
			break;
		case DIR_IN | PROTO_LAYER2:	/* DN_TO_ETH_DEMUX: */
			/*
			 * The Ethernet code assumes the Ethernet header is
			 * contiguous in the first mbuf header.
			 * Ensure this is true.
			 */
			if (m->m_len < ETHER_HDR_LEN &&
			    (m = m_pullup(m, ETHER_HDR_LEN)) == NULL) {
				printf("dummynet/ether: pullup failed, "
				    "dropping packet\n");
				break;
			}
			ether_demux(m->m_pkthdr.rcvif, m);
			break;
		case DIR_OUT | PROTO_LAYER2:	/* N_TO_ETH_OUT: */
			ether_output_frame(ifp, m);
			break;
		case DIR_DROP:
			/* drop the packet after some time */
			FREE_PKT(m);
			break;
		default:
			printf("dummynet: bad switch %d!\n", dst);
			FREE_PKT(m);
			break;
		}
	}
}
static int
tag_mbuf(struct mbuf *m, int dir, struct ip_fw_args *fwa)
{
	struct dn_pkt_tag *dt;
	struct m_tag *mtag;

	mtag = m_tag_get(PACKET_TAG_DUMMYNET,
	    sizeof(*dt), M_NOWAIT | M_ZERO);
	if (mtag == NULL)
		return 1;		/* Cannot allocate packet header. */
	m_tag_prepend(m, mtag);		/* Attach to mbuf chain. */
	dt = (struct dn_pkt_tag *)(mtag + 1);
	dt->rule = fwa->rule;
	dt->rule.info &= IPFW_ONEPASS;	/* only keep this info */
	dt->dn_dir = dir;
	dt->ifp = fwa->oif;
	/* dt->output_time is updated as we move through */
	dt->output_time = dn_cfg.curr_time;
	return 0;
}
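/*
 * Because the tag is prepended it is the first one on the list, which
 * is exactly what dn_tag_get() asserts when the packet comes back.
 */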
/*
 * Dummynet hook for packets.
 * We use the argument to locate the flowset fs and the sched_set sch
 * associated to it. Then we apply flow_mask and sched_mask to
 * determine the queue and scheduler instances.
 *
 * dir		where to send the packet after dummynet.
 * *m0		the mbuf with the packet
 * ifp		the 'ifp' parameter from the caller.
 *		NULL in ip_input, destination interface in ip_output.
 */
int
dummynet_io(struct mbuf **m0, int dir, struct ip_fw_args *fwa)
{
	struct mbuf *m = *m0;
	struct dn_fsk *fs = NULL;
	struct dn_sch_inst *si;
	struct dn_queue *q = NULL;	/* default */

	int fs_id = (fwa->rule.info & IPFW_INFO_MASK) +
	    ((fwa->rule.info & IPFW_IS_PIPE) ? 2*DN_MAX_ID : 0);
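	/*
	 * Note: flowsets and pipes share the same hash; an ipfw 'pipe'
	 * is looked up at offset 2*DN_MAX_ID so its numbering cannot
	 * collide with that of plain queues/flowsets.
	 */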
	DN_BH_WLOCK();
	io_pkt++;
	/* we could actually tag outside the lock, but who cares... */
	if (tag_mbuf(m, dir, fwa))
		goto dropit;	/* Cannot allocate packet header. */
	if (dn_cfg.busy) {
		/* If the upper half is busy doing something expensive,
		 * let's queue the packet and move forward.
		 */
		mq_append(&dn_cfg.pending, m);
		m = *m0 = NULL;	/* consumed */
		goto done;	/* already active, nothing to do */
	}
	/* XXX locate_flowset could be optimised with a direct ref. */
	fs = dn_ht_find(dn_cfg.fshash, fs_id, 0, NULL);
	if (fs == NULL)
		goto dropit;	/* This queue/pipe does not exist! */
	if (fs->sched == NULL)	/* should not happen */
		goto dropit;
	/*
	 * If the scheduler supports multiple queues, find the right one
	 * (otherwise it will be ignored by enqueue).
	 */
	if (fs->sched->fp->flags & DN_MULTIQUEUE) {
		q = ipdn_q_find(fs, &(fwa->f_id));
		if (q == NULL)
			goto dropit;
	}
	/* The scheduler instance lookup is done only for new queues;
	 * the q_new() callback creates the scheduler instance for
	 * queues already in use.
	 */
	si = ipdn_si_find(fs->sched, &(fwa->f_id));
	if (si == NULL)
		goto dropit;

	if (fs->sched->fp->enqueue(si, q, m)) {
		/* packet was dropped by enqueue() */
		m = *m0 = NULL;
		goto dropit;
	}

	if (si->kflags & DN_ACTIVE) {
		m = *m0 = NULL;	/* consumed */
		goto done;	/* already active, nothing to do */
	}

	/* compute the initial allowance */
	if (si->idle_time < dn_cfg.curr_time) {
		/* Do this only on the first packet on an idle pipe */
		struct dn_link *p = &fs->sched->link;

		si->sched_time = dn_cfg.curr_time;
		si->credit = dn_cfg.io_fast ? p->bandwidth : 0;
		if (p->burst) {
			uint64_t burst =
			    (dn_cfg.curr_time - si->idle_time) * p->bandwidth;
			if (burst > p->burst)
				burst = p->burst;
			si->credit += burst;
		}
	}
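	/*
	 * The allowance above implements the 'burst' option: an
	 * instance that has been idle accumulates up to p->burst bits
	 * of credit, so the first packets after an idle period may
	 * leave back-to-back before rate enforcement resumes.
	 */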
	/* pass through scheduler and delay line */
	m = serve_sched(NULL, si, dn_cfg.curr_time);

	/* optimization -- pass it back to ipfw for immediate send */
	/* XXX Don't call dummynet_send() if the scheduler returned the
	 * packet just enqueued. This avoids a lock order reversal.
	 */
	if (/*dn_cfg.io_fast &&*/ m == *m0 && (dir & PROTO_LAYER2) == 0) {
		/* fast io, rename the tag to carry reinject info. */
		struct m_tag *tag = m_tag_first(m);

		tag->m_tag_cookie = MTAG_IPFW_RULE;
		tag->m_tag_id = 0;
		io_pkt_fast++;
		if (m->m_nextpkt != NULL) {
			printf("dummynet: fast io: pkt chain detected!\n");
			m->m_nextpkt = NULL;
		}
		m = NULL;
	} else {
		*m0 = NULL;
	}
done:
	DN_BH_WUNLOCK();
	if (m)
		dummynet_send(m);
	return 0;

dropit:
	io_pkt_drop++;
	DN_BH_WUNLOCK();
	if (m)
		FREE_PKT(m);
	*m0 = NULL;
	return (fs && (fs->fs.flags & DN_NOERROR)) ? 0 : ENOBUFS;
}