void
dn_reschedule(void)
{
- callout_reset(&dn_timeout, 1, dummynet, NULL);
+ callout_reset_on(&dn_timeout, 1, dummynet, NULL, 0);
}
/*----- end of callout hooks -----*/
return 1; /* different address families */
return (id1->dst_ip == id2->dst_ip &&
- id1->src_ip == id2->src_ip &&
- id1->dst_port == id2->dst_port &&
- id1->src_port == id2->src_port &&
- id1->proto == id2->proto &&
+ id1->src_ip == id2->src_ip &&
+ id1->dst_port == id2->dst_port &&
+ id1->src_port == id2->src_port &&
+ id1->proto == id2->proto &&
id1->extra == id2->extra) ? 0 : 1;
}
/* the ipv6 case */
if (fs->fs.flags & DN_QHT_HASH)
q->ni.fid = *(struct ipfw_flow_id *)key;
q->fs = fs;
- q->_si = template->_si;
+ q->_si = ipdn_si_find(q->fs->sched, &(template->ni.fid));
+ if (q->_si == NULL) {
+ D("no memory for new si");
+ free(q, M_DUMMYNET);
+ return NULL;
+ }
+
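+ /* account for the new queue on its scheduler instance */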
q->_si->q_count++;
if (fs->sched->fp->new_queue)
fs->sched->fp->new_queue(q);
dn_cfg.queue_count++;
+ dn_cfg.idle_queue++;
return q;
}
* Notify schedulers that a queue is going away.
* If (flags & DN_DESTROY), also free the packets.
* The version for callbacks is called q_delete_cb().
+ * Returns 1 if the queue is NOT deleted (usually when
+ * the drain routine tries to delete a queue that a scheduler
+ * instance still needs), 0 otherwise.
+ * NOTE: the DN_DEL_SAFE flag means that the queue should be
+ * deleted only if the scheduler no longer needs it.
*/
-static void
+static int
dn_delete_queue(struct dn_queue *q, int flags)
{
struct dn_fsk *fs = q->fs;
// D("fs %p si %p\n", fs, q->_si);
/* notify the parent scheduler that the queue is going away */
if (fs && fs->sched->fp->free_queue)
- fs->sched->fp->free_queue(q);
+ if (fs->sched->fp->free_queue(q, flags & DN_DEL_SAFE) == 1)
+ return 1; /* queue NOT deleted */
q->_si->q_count--;
q->_si = NULL;
if (flags & DN_DESTROY) {
if (q->mq.head)
dn_free_pkts(q->mq.head);
+ else
+ dn_cfg.idle_queue--;
bzero(q, sizeof(*q)); // safety
free(q, M_DUMMYNET);
dn_cfg.queue_count--;
}
+ return 0;
}
static int
* We never call it for !MULTIQUEUE (the queue is in the sch_inst).
*/
struct dn_queue *
-ipdn_q_find(struct dn_fsk *fs, struct dn_sch_inst *si,
- struct ipfw_flow_id *id)
+ipdn_q_find(struct dn_fsk *fs, struct ipfw_flow_id *id)
{
struct dn_queue template;
- template._si = si;
template.fs = fs;
if (fs->fs.flags & DN_QHT_HASH) {
return flow_id_cmp(&o->ni.fid, id2) == 0;
}
+static int si_reset_credit(void *_si, void *arg); // XXX si_new uses this
+
/*
* create a new instance for the given 'key'
* Allocate memory for instance, delay line and scheduler private data.
si = malloc(l, M_DUMMYNET, M_NOWAIT | M_ZERO);
if (si == NULL)
goto error;
+
/* Set length only for the part passed up to userland. */
set_oid(&si->ni.oid, DN_SCH_I, sizeof(struct dn_flow));
set_oid(&(si->dline.oid), DN_DELAY_LINE,
if (s->sch.flags & DN_HAVE_MASK)
si->ni.fid = *(struct ipfw_flow_id *)key;
+ si_reset_credit(si, NULL);
dn_cfg.si_count++;
+ dn_cfg.idle_si++;
return si;
error:
if (dl->oid.subtype) /* remove delay line from event heap */
heap_extract(&dn_cfg.evheap, dl);
+ if (si->ni.length == 0)
+ dn_cfg.idle_si--;
dn_free_pkts(dl->mq.head); /* drain delay line */
if (si->kflags & DN_ACTIVE) /* remove si from event heap */
heap_extract(&dn_cfg.evheap, si);
struct dn_sch_inst *si = _si;
struct dn_link *p = &si->sched->link;
+ si->idle_time = dn_cfg.curr_time;
si->credit = p->burst + (dn_cfg.io_fast ? p->bandwidth : 0);
return 0;
}
h = fs->sched ? &fs->sched->fsk_list : &dn_cfg.fsu;
SLIST_REMOVE(h, fs, dn_fsk, sch_chain);
}
- /* Free the RED parameters, they will be recomputed on
+ /* Free the RED parameters, they will be recomputed on
* subsequent attach if needed.
*/
if (fs->w_q_lookup)
if (!locked)
DN_BH_WLOCK();
fs = dn_ht_find(dn_cfg.fshash, i, DNHT_REMOVE, NULL);
+ if (dn_ht_entries(dn_cfg.fshash) == 0) {
+ dn_ht_free(dn_cfg.fshash, 0);
+ dn_cfg.fshash = NULL;
+ }
ND("fs %d found %p", i, fs);
if (fs) {
fsk_detach(fs, DN_DETACH | DN_DELETE_FS);
#endif
fsk_detach_list(&s->fsk_list, arg ? DN_DESTROY : 0);
/* no more flowset pointing to us now */
- if (s->sch.flags & DN_HAVE_MASK)
+ if (s->sch.flags & DN_HAVE_MASK) {
dn_ht_scan(s->siht, si_destroy, NULL);
+ dn_ht_free(s->siht, 0);
+ }
else if (s->siht)
si_destroy(s->siht, NULL);
if (s->profile) {
struct dn_schk *s;
s = dn_ht_find(dn_cfg.schedhash, i, DNHT_REMOVE, NULL);
+ if (dn_ht_entries(dn_cfg.schedhash) == 0) {
+ dn_ht_free(dn_cfg.schedhash, 0);
+ dn_cfg.schedhash = NULL;
+ }
ND("%d %p", i, s);
if (!s)
return EINVAL;
/*
* This routine only copies the initial part of a profile ? XXX
+ * XXX marta: I think this routine is called to print a summary
+ * of the pipe configuration and does not need to show the
+ * profile samples list.
*/
static int
copy_profile(struct copy_args *a, struct dn_profile *p)
{
int have = a->end - *a->start;
/* XXX here we check for max length */
- int profile_len = sizeof(struct dn_profile) -
- ED_MAX_SAMPLES_NO*sizeof(int);
+ int profile_len = sizeof(struct dn_profile);
if (p == NULL)
return 0;
return 0; /* not a pipe */
/* see if the object is within one of our ranges */
- for (;r < lim; r+=2) {
+ for (;r < lim; r += 2) {
if (n < r[0] || n > r[1])
continue;
/* Found a valid entry, copy and we are done */
- if (a->flags & DN_C_LINK) {
- if (copy_obj(a->start, a->end,
+ if (a->flags & DN_C_LINK) {
+ if (copy_obj(a->start, a->end,
&s->link, "link", n))
- return DNHT_SCAN_END;
- if (copy_profile(a, s->profile))
- return DNHT_SCAN_END;
- if (copy_flowset(a, s->fs, 0))
- return DNHT_SCAN_END;
- }
- if (a->flags & DN_C_SCH) {
- if (copy_obj(a->start, a->end,
+ return DNHT_SCAN_END;
+ if (copy_profile(a, s->profile))
+ return DNHT_SCAN_END;
+ if (copy_flowset(a, s->fs, 0))
+ return DNHT_SCAN_END;
+ }
+ if (a->flags & DN_C_SCH) {
+ if (copy_obj(a->start, a->end,
&s->sch, "sched", n))
- return DNHT_SCAN_END;
- /* list all attached flowsets */
- if (copy_fsk_list(a, s, 0))
- return DNHT_SCAN_END;
- }
+ return DNHT_SCAN_END;
+ /* list all attached flowsets */
+ if (copy_fsk_list(a, s, 0))
+ return DNHT_SCAN_END;
+ }
if (a->flags & DN_C_FLOW)
- copy_si(a, s, 0);
+ copy_si(a, s, 0);
break;
}
} else if (a->type == DN_FS) {
if (n >= DN_MAX_ID)
return 0;
/* see if the object is within one of our ranges */
- for (;r < lim; r+=2) {
+ for (;r < lim; r += 2) {
if (n < r[0] || n > r[1])
continue;
- if (copy_flowset(a, fs, 0))
- return DNHT_SCAN_END;
- copy_q(a, fs, 0);
+ if (copy_flowset(a, fs, 0))
+ return DNHT_SCAN_END;
+ copy_q(a, fs, 0);
break; /* we are done */
- }
}
+ }
return 0;
}
}
if (!locked)
DN_BH_WLOCK();
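+ /* the flowset hash table is created lazily, on the first flowset insertion */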
+ if (dn_cfg.fshash == NULL)
+ dn_cfg.fshash = dn_ht_init(NULL, dn_cfg.hash_size,
+ offsetof(struct dn_fsk, fsk_next),
+ fsk_hash, fsk_match, fsk_new);
do { /* exit with break when done */
struct dn_schk *s;
int flags = nfs->sched_nr ? DNHT_INSERT : 0;
new_flags = a.sch->flags;
}
DN_BH_WLOCK();
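+ /* likewise, the scheduler hash table is created on first use */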
+ if (dn_cfg.schedhash == NULL)
+ dn_cfg.schedhash = dn_ht_init(NULL, dn_cfg.hash_size,
+ offsetof(struct dn_schk, schk_next),
+ schk_hash, schk_match, schk_new);
again: /* run twice, for wfq and fifo */
/*
* lookup the type. If not supplied, use the previous one
if (!pf || pf->link_nr != p.link_nr) { /* no saved value */
s->profile = NULL; /* XXX maybe not needed */
} else {
- s->profile = malloc(sizeof(struct dn_profile),
+ size_t pf_size = sizeof(struct dn_profile) +
+ pf->samples_no * sizeof(int);
+
+ s->profile = malloc(pf_size,
M_DUMMYNET, M_NOWAIT | M_ZERO);
if (s->profile == NULL) {
D("cannot allocate profile");
goto error; //XXX
}
- bcopy(pf, s->profile, sizeof(*pf));
+ bcopy(pf, s->profile, pf_size);
}
}
p.link_nr = 0;
bcopy(pf, s->profile, pf->oid.len);
s->profile->oid.len = olen;
}
+
DN_BH_WUNLOCK();
return err;
}
DX(4, "still %d unlinked fs", dn_cfg.fsk_count);
dn_ht_free(dn_cfg.fshash, DNHT_REMOVE);
fsk_detach_list(&dn_cfg.fsu, DN_DELETE_FS);
+
+ dn_ht_free(dn_cfg.schedhash, DNHT_REMOVE);
/* Reinitialize system heap... */
heap_init(&dn_cfg.evheap, 16, offsetof(struct dn_id, id));
}
default:
D("cmd %d not implemented", o->type);
break;
+
#ifdef EMULATE_SYSCTL
/* sysctl emulation.
* if we recognize the command, jump to the correct
* handler and return
*/
case DN_SYSCTL_SET:
- err = kesysctl_emu_set(p,l);
+ err = kesysctl_emu_set(p, l);
return err;
#endif
+
case DN_CMD_CONFIG: /* simply a header */
break;
compute_space(struct dn_id *cmd, struct copy_args *a)
{
int x = 0, need = 0;
- int profile_size = sizeof(struct dn_profile) -
- ED_MAX_SAMPLES_NO*sizeof(int);
+ int profile_size = sizeof(struct dn_profile);
/* NOTE about compute space:
* NP = dn_cfg.schk_count
}
need += sizeof(*cmd);
cmd->id = need;
- if (have >= need)
+ if (have >= need) /* got enough space, leave the loop with the lock held */
break;
DN_BH_WUNLOCK();
} else {
error = sooptcopyout(sopt, cmd, sizeof(*cmd));
}
+ /* not enough memory, release the lock and give up */
+ /* XXX marta: here we hold the lock */
goto done;
}
ND("have %d:%d sched %d, %d:%d links %d, %d:%d flowsets %d, "
free(cmd, M_DUMMYNET);
if (start)
free(start, M_DUMMYNET);
+
return error;
}
+/*
+ * Functions to drain idle objects -- see dummynet_task() for some notes
+ */
/* Callback called on scheduler instance to delete it if idle */
static int
-drain_scheduler_cb(void *_si, void *arg)
+drain_scheduler_cb(void *_si, void *_arg)
{
struct dn_sch_inst *si = _si;
+ int *arg = _arg;
+ int empty;
+
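+ /* *arg counts the objects examined in this pass; stop once
+ * expire_object_examined have been seen, to bound the work per tick
+ */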
+ if ((*arg)++ > dn_cfg.expire_object_examined)
+ return DNHT_SCAN_END;
if ((si->kflags & DN_ACTIVE) || si->dline.mq.head != NULL)
return 0;
- if (si->sched->fp->flags & DN_MULTIQUEUE) {
- if (si->q_count == 0)
- return si_destroy(si, NULL);
- else
- return 0;
- } else { /* !DN_MULTIQUEUE */
- if ((si+1)->ni.length == 0)
- return si_destroy(si, NULL);
+ /*
+ * if the scheduler is multiqueue, q_count also reflects empty
+ * queues that point to si, so we need to check si->q_count to
+ * tell whether we can remove the instance.
+ */
+ if (si->ni.length == 0) {
+ /* si was marked as idle:
+ * remove it or increment idle_si_wait counter
+ */
+ empty = (si->sched->fp->flags & DN_MULTIQUEUE) ?
+ (si->q_count == 0) : 1;
+ if (empty &&
+ (si->idle_time < dn_cfg.curr_time - dn_cfg.object_idle_tick))
+ return si_destroy(si, NULL);
else
- return 0;
+ dn_cfg.idle_si_wait++;
}
- return 0; /* unreachable */
+ return 0;
}
/* Callback called on scheduler to check if it has instances */
static int
-drain_scheduler_sch_cb(void *_s, void *arg)
+drain_scheduler_sch_cb(void *_s, void *_arg)
{
struct dn_schk *s = _s;
+ int *arg = _arg;
if (s->sch.flags & DN_HAVE_MASK) {
dn_ht_scan_bucket(s->siht, &s->drain_bucket,
- drain_scheduler_cb, NULL);
- s->drain_bucket++;
+ drain_scheduler_cb, _arg);
} else {
if (s->siht) {
- if (drain_scheduler_cb(s->siht, NULL) == DNHT_SCAN_DEL)
+ if (drain_scheduler_cb(s->siht, _arg) == DNHT_SCAN_DEL)
s->siht = NULL;
}
}
- return 0;
+ return ((*arg)++ > dn_cfg.expire_object_examined) ? DNHT_SCAN_END : 0;
}
/* Called every tick, try to delete a 'bucket' of scheduler */
void
dn_drain_scheduler(void)
{
+ int arg = 0;
+
dn_ht_scan_bucket(dn_cfg.schedhash, &dn_cfg.drain_sch,
- drain_scheduler_sch_cb, NULL);
- dn_cfg.drain_sch++;
+ drain_scheduler_sch_cb, &arg);
}
/* Callback called on queue to delete if it is idle */
static int
-drain_queue_cb(void *_q, void *arg)
+drain_queue_cb(void *_q, void *_arg)
{
struct dn_queue *q = _q;
+ int *arg = _arg;
+
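+ /* same per-pass bound on examined objects as in drain_scheduler_cb */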
+ if ((*arg)++ > dn_cfg.expire_object_examined)
+ return DNHT_SCAN_END;
if (q->ni.length == 0) {
- dn_delete_queue(q, DN_DESTROY);
- return DNHT_SCAN_DEL; /* queue is deleted */
+ if (q->q_time < dn_cfg.curr_time - dn_cfg.object_idle_tick) {
+ if (dn_delete_queue(q, DN_DESTROY | DN_DEL_SAFE) == 0)
+ return DNHT_SCAN_DEL; /* queue is deleted */
+ } else
+ dn_cfg.idle_queue_wait++;
}
return 0; /* queue isn't deleted */
/* Callback called on flowset used to check if it has queues */
static int
-drain_queue_fs_cb(void *_fs, void *arg)
+drain_queue_fs_cb(void *_fs, void *_arg)
{
struct dn_fsk *fs = _fs;
+ int *arg = _arg;
if (fs->fs.flags & DN_QHT_HASH) {
/* Flowset has a hash table for queues */
dn_ht_scan_bucket(fs->qht, &fs->drain_bucket,
- drain_queue_cb, NULL);
- fs->drain_bucket++;
+ drain_queue_cb, _arg);
} else {
/* No hash table for this flowset, null the pointer
* if the queue is deleted
*/
if (fs->qht) {
- if (drain_queue_cb(fs->qht, NULL) == DNHT_SCAN_DEL)
+ if (drain_queue_cb(fs->qht, _arg) == DNHT_SCAN_DEL)
fs->qht = NULL;
}
}
- return 0;
+ return ((*arg)++ > dn_cfg.expire_object_examined) ? DNHT_SCAN_END : 0;
}
/* Called every tick, try to delete a 'bucket' of queue */
void
dn_drain_queue(void)
{
+ int arg = 0;
+
/* scan a bucket of flowset */
dn_ht_scan_bucket(dn_cfg.fshash, &dn_cfg.drain_fs,
- drain_queue_fs_cb, NULL);
- dn_cfg.drain_fs++;
+ drain_queue_fs_cb, &arg);
}
/*
static void
ip_dn_init(void)
{
- static int init_done = 0;
-
- if (init_done)
+ if (dn_cfg.init_done)
return;
- init_done = 1;
- if (bootverbose)
- printf("DUMMYNET with IPv6 initialized (100131)\n");
-
+ printf("DUMMYNET %p with IPv6 initialized (100409)\n", curvnet);
+ dn_cfg.init_done = 1;
/* Set defaults here. MSVC does not accept initializers,
* and this is also useful for vimages
*/
/* hash tables */
dn_cfg.max_hash_size = 1024; /* max in the hash tables */
- dn_cfg.hash_size = 64; /* default hash size */
- /* create hash tables for schedulers and flowsets.
- * In both we search by key and by pointer.
- */
- dn_cfg.schedhash = dn_ht_init(NULL, dn_cfg.hash_size,
- offsetof(struct dn_schk, schk_next),
- schk_hash, schk_match, schk_new);
- dn_cfg.fshash = dn_ht_init(NULL, dn_cfg.hash_size,
- offsetof(struct dn_fsk, fsk_next),
- fsk_hash, fsk_match, fsk_new);
+ if (dn_cfg.hash_size == 0) /* XXX or <= 0 ? */
+ dn_cfg.hash_size = 64; /* default hash size */
+ /* hash tables for schedulers and flowsets are created
+ * when the first scheduler/flowset is inserted.
+ * This is done to allow using the right hash_size value.
+ * When the last object is deleted, the table is destroyed,
+ * so a new hash_size value can be used.
+ * XXX rehash is not supported for now
+ */
+ dn_cfg.schedhash = NULL;
+ dn_cfg.fshash = NULL;
/* bucket index to drain object */
dn_cfg.drain_fs = 0;
dn_cfg.drain_sch = 0;
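+ /* drain defaults: examine at most expire_object_examined objects per
+ * pass, and reclaim only objects idle longer than object_idle_tick ticks
+ */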
+ if (dn_cfg.expire_object == 0)
+ dn_cfg.expire_object = 50;
+ if (dn_cfg.object_idle_tick == 0)
+ dn_cfg.object_idle_tick = 1000;
+ if (dn_cfg.expire_object_examined == 0)
+ dn_cfg.expire_object_examined = 10;
+ if (dn_cfg.drain_ratio == 0)
+ dn_cfg.drain_ratio = 1;
+
+ // XXX what if we don't have a TSC?
+#ifdef HAVE_TSC
+ dn_cfg.cycle_task_new = dn_cfg.cycle_task_old = readTSC();
+#endif
heap_init(&dn_cfg.evheap, 16, offsetof(struct dn_id, id));
SLIST_INIT(&dn_cfg.fsu);
SLIST_INIT(&dn_cfg.schedlist);
DN_LOCK_INIT();
- ip_dn_ctl_ptr = ip_dn_ctl;
- ip_dn_io_ptr = dummynet_io;
- TASK_INIT(&dn_task, 0, dummynet_task, NULL);
+ TASK_INIT(&dn_task, 0, dummynet_task, curvnet);
dn_tq = taskqueue_create_fast("dummynet", M_NOWAIT,
taskqueue_thread_enqueue, &dn_tq);
taskqueue_start_threads(&dn_tq, 1, PI_NET, "dummynet");
callout_init(&dn_timeout, CALLOUT_MPSAFE);
- callout_reset(&dn_timeout, 1, dummynet, NULL);
+ callout_reset_on(&dn_timeout, 1, dummynet, NULL, 0);
/* Initialize curr_time adjustment mechanics. */
getmicrouptime(&dn_cfg.prev_t);
#ifdef KLD_MODULE
static void
-ip_dn_destroy(void)
+ip_dn_destroy(int last)
{
callout_drain(&dn_timeout);
DN_BH_WLOCK();
- ip_dn_ctl_ptr = NULL;
- ip_dn_io_ptr = NULL;
+ if (last) {
+ printf("%s removing last instance\n", __FUNCTION__);
+ ip_dn_ctl_ptr = NULL;
+ ip_dn_io_ptr = NULL;
+ }
dummynet_flush();
DN_BH_WUNLOCK();
return EEXIST ;
}
ip_dn_init();
+ ip_dn_ctl_ptr = ip_dn_ctl;
+ ip_dn_io_ptr = dummynet_io;
return 0;
} else if (type == MOD_UNLOAD) {
#if !defined(KLD_MODULE)
printf("dummynet statically compiled, cannot unload\n");
return EINVAL ;
#else
- ip_dn_destroy();
+ ip_dn_destroy(1 /* last */);
return 0;
#endif
} else
"dummynet", dummynet_modevent, NULL
};
-DECLARE_MODULE(dummynet, dummynet_mod,
- SI_SUB_PROTO_IFATTACHDOMAIN, SI_ORDER_ANY-1);
+#define DN_SI_SUB SI_SUB_PROTO_IFATTACHDOMAIN
+#define DN_MODEV_ORD (SI_ORDER_ANY - 128) /* after ipfw */
+DECLARE_MODULE(dummynet, dummynet_mod, DN_SI_SUB, DN_MODEV_ORD);
MODULE_DEPEND(dummynet, ipfw, 2, 2, 2);
MODULE_VERSION(dummynet, 1);
+
+/*
+ * Starting up. Done in order after dummynet_modevent() has been called.
+ * VNET_SYSINIT is also called for each existing vnet and each new vnet.
+ */
+//VNET_SYSINIT(vnet_dn_init, DN_SI_SUB, DN_MODEV_ORD+2, ip_dn_init, NULL);
+
+/*
+ * Shutdown handlers up shop. These are done in REVERSE ORDER, but still
+ * after dummynet_modevent() has been called. Not called on reboot.
+ * VNET_SYSUNINIT is also called for each exiting vnet as it exits.
+ * or when the module is unloaded.
+ */
+//VNET_SYSUNINIT(vnet_dn_uninit, DN_SI_SUB, DN_MODEV_ORD+2, ip_dn_destroy, NULL);
+
/* end of file */