Summary: PlanetLab Node Manager
Name: NodeManager
Version: 1.3
-Release: 5%{?pldistro:.%{pldistro}}%{?date:.%{date}}
+Release: 7%{?pldistro:.%{pldistro}}%{?date:.%{date}}
License: PlanetLab
Group: System Environment/Daemons
URL: http://cvs.planet-lab.org/cvs/NodeManager
# vuseradd, vuserdel
Requires: vserver-reference
-Requires: util-vserver
+Requires: util-vserver >= 0.30.208-17
# vserver.py
Requires: util-vserver-python
import tools
+# When True, accounts are started (with staggered delays) after ensure_created
+Startingup = False
+# Cumulative delay for staggered starts while Startingup is True;
+# csd_lock guards it across worker threads
+csd_lock = threading.Lock()
+cumstartdelay = 0
+
# shell path -> account class association
shell_acct_class = {}
# account type -> account class association
def __init__(self, rec):
self.name = rec['name']
self.keys = ''
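+ # set when a changed initscript is installed; triggers a (re)start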
+ self.initscriptchanged = False
self.configure(rec)
@staticmethod
def ensure_created(self, rec):
"""Cause the account specified by <rec> to exist if it doesn't already."""
- self._q.put((self._ensure_created, rec.copy()))
+ self._q.put((self._ensure_created, rec.copy(), Startingup))
- def _ensure_created(self, rec):
+ def _ensure_created(self, rec, startingup):
curr_class = self._get_class()
next_class = type_acct_class[rec['type']]
if next_class != curr_class:
finally: self._create_sem.release()
if not isinstance(self._acct, next_class): self._acct = next_class(rec)
else: self._acct.configure(rec)
- if next_class != curr_class: self._acct.start()
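+ # At boot, stagger account starts two seconds apart instead of starting
+ # every vserver at once.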
+ if startingup:
+ global cumstartdelay
+ csd_lock.acquire()
+ try:
+ delay = cumstartdelay
+ cumstartdelay += 2
+ finally: csd_lock.release()
+ self._acct.start(delay=delay)
+ elif next_class != curr_class or self._acct.initscriptchanged:
+ self._acct.start()
def ensure_destroyed(self): self._q.put((self._ensure_destroyed,))
def _ensure_destroyed(self): self._destroy(self._get_class())
data = ticket.verify(tkt)
if data != None:
deliver_ticket(data)
+ logger.log('Got ticket')
except Exception, err:
raise xmlrpclib.Fault(102, 'Ticket error: ' + str(err))
api_method_list.sort()
raise xmlrpclib.Fault(100, 'Invalid API method %s. Valid choices are %s' % (method_name, ', '.join(api_method_list)))
expected_nargs = nargs_dict[method_name]
- if len(args) != expected_nargs: raise xmlrpclib.Fault(101, 'Invalid argument count: got %d, expecting %d.' % (len(args), expected_nargs))
+ if len(args) != expected_nargs:
+ raise xmlrpclib.Fault(101, 'Invalid argument count: got %d, expecting %d.' % (len(args),
+ expected_nargs))
else:
# Figure out who's calling.
# XXX - these ought to be imported directly from some .h file
ucred = self.request.getsockopt(socket.SOL_SOCKET, SO_PEERCRED, sizeof_struct_ucred)
xid = struct.unpack('3i', ucred)[2]
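+ # struct ucred is three ints (pid, uid, gid); the third is taken as the
+ # caller's XID and mapped to an account name via the passwd db.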
caller_name = pwd.getpwuid(xid)[0]
- if method_name not in ('Help', 'Ticket', 'GetXIDs', 'GetSSHKeys'):
+ if method_name not in ('ReCreate', 'Help', 'Ticket', 'GetXIDs', 'GetSSHKeys'):
target_name = args[0]
target_rec = database.db.get(target_name)
- if not (target_rec and target_rec['type'].startswith('sliver.')): raise xmlrpclib.Fault(102, 'Invalid argument: the first argument must be a sliver name.')
- if not (caller_name in (args[0], 'root') or (caller_name, method_name) in target_rec['delegations'] or (caller_name == 'utah_elab_delegate' and target_name.startswith('utah_'))): raise xmlrpclib.Fault(108, 'Permission denied.')
+ if not (target_rec and target_rec['type'].startswith('sliver.')):
+ raise xmlrpclib.Fault(102, 'Invalid argument: the first argument must be a sliver name.')
+ if (caller_name, method_name) not in target_rec['delegations']:
+ # dropped check: caller_name == 'utah_elab_delegate' and target_name.startswith('utah_')
+ raise xmlrpclib.Fault(108, 'Permission denied.')
+
result = method(target_rec, *args[1:])
else: result = method(*args)
if result == None: result = 1
# Faiyaz Ahmed <faiyaza@cs.princeton.edu>
# Copyright (C) 2004-2006 The Trustees of Princeton University
#
-# $Id: bwmon.py,v 1.1.2.8 2007/04/25 22:20:58 faiyaza Exp $
+# $Id$
#
import os
import sys
import time
import pickle
-
import socket
-import bwlimit
import logger
+import copy
+import threading
+import tools
+
+import bwlimit
+import database
from sets import Set
self.emailed = True
slicemail(self.name, subject, message + (footer % params))
-def GetSlivers(db):
+def gethtbs(root_xid, default_xid):
+ """
+ Return dict {xid: {*rates}} of running htbs as reported by tc that have names.
+ Turn off HTBs without names.
+ """
+ livehtbs = {}
+ for params in bwlimit.get():
+ (xid, share,
+ minrate, maxrate,
+ minexemptrate, maxexemptrate,
+ usedbytes, usedi2bytes) = params
+
+ name = bwlimit.get_slice(xid)
+
+ if (name is None) \
+ and (xid != root_xid) \
+ and (xid != default_xid):
+ # Orphaned (not associated with a slice) class
+ name = "%d?" % xid
+ logger.log("bwmon: Found orphaned HTB %s. Removing." %name)
+ bwlimit.off(xid)
+
+ livehtbs[xid] = {'share': share,
+ 'minrate': minrate,
+ 'maxrate': maxrate,
+ 'maxexemptrate': maxexemptrate,
+ 'minexemptrate': minexemptrate,
+ 'usedbytes': usedbytes,
+ 'name': name,
+ 'usedi2bytes': usedi2bytes}
+
+ return livehtbs
+
+def sync(nmdbcopy):
+ """
+ Syncs tc, the db, and bwmon.dat; starts new slices, kills dead ones, and
+ updates byte accounting for each running slice, emailing and capping any
+ that exceed their limits.
+ """
# Defaults
global datafile, \
period, \
# All slices
names = []
-
# In case the limits have changed.
default_MaxRate = int(bwlimit.get_bwcap() / 1000)
default_Maxi2Rate = int(bwlimit.bwmax / 1000)
(version, slices) = pickle.load(f)
f.close()
# Check version of data file
- if version != "$Id: bwmon.py,v 1.1.2.8 2007/04/25 22:20:58 faiyaza Exp $":
+ if version != "$Id$":
logger.log("bwmon: Not using old version '%s' data file %s" % (version, datafile))
raise Exception
except Exception:
- version = "$Id: bwmon.py,v 1.1.2.8 2007/04/25 22:20:58 faiyaza Exp $"
+ version = "$Id$"
slices = {}
# Get/set special slice IDs
live = {}
# Get running slivers that should be on this node (from plc). {xid: name}
- for sliver in db.keys():
- live[bwlimit.get_xid(sliver)] = sliver
+ # The db is keyed on name, bwmon on xid; db records don't carry an xid,
+ # so map xids to records here.
+ for plcSliver in nmdbcopy.keys():
+ live[bwlimit.get_xid(plcSliver)] = nmdbcopy[plcSliver]
- # Setup new slices.
- # live.xids - runing(slices).xids = new.xids
- newslicesxids = []
- for plcxid in live.keys():
- if plcxid not in slices.keys():
- newslicesxids.append(plcxid)
+ logger.log("bwmon: Found %s instantiated slices" % live.keys().__len__())
+ logger.log("bwmon: Found %s slices in dat file" % slices.values().__len__())
+
+ # Get actual running values from tc.
+ # Update slice totals and bandwidth. {xid: {values}}
+ livehtbs = gethtbs(root_xid, default_xid)
+ logger.log("bwmon: Found %s running HTBs" % livehtbs.keys().__len__())
- #newslicesxids = Set(live.keys()) - Set(slices.keys())
- for newslicexid in newslicesxids:
+ # Get new slices.
+ # live.xids - running(tc).xids = new.xids
+ newslicesxids = Set(live.keys()) - Set(livehtbs.keys())
+ logger.log("bwmon: Found %s new slices" % len(newslicesxids))
+
+ # In case we rebooted and need to bring up the htbs that are in the db but
+ # not known to tc.
+ #nohtbxids = Set(slices.keys()) - Set(livehtbs.keys())
+ #logger.log("bwmon: Found %s slices that should have htbs but dont." % nohtbxids.__len__())
+ #newslicesxids.update(nohtbxids)
+
+ # Setup new slices
+ for newslice in newslicesxids:
# Delegated slices don't have xids (which are uids) since they haven't been
# instantiated yet.
- if newslicexid != None and db[live[newslicexid]].has_key('_rspec') == True:
- logger.log("bwmon: New Slice %s" % live[newslicexid])
+ if newslice != None and live[newslice].has_key('_rspec') == True:
+ logger.log("bwmon: New Slice %s" % live[newslice]['name'])
# _rspec is the computed rspec: NM retrieved data from PLC, computed loans
# and made a dict of computed values.
- rspec = db[live[newslicexid]]['_rspec']
- slices[newslicexid] = Slice(newslicexid, live[newslicexid], rspec)
- slices[newslicexid].reset(0, 0, 0, 0, rspec)
+ slices[newslice] = Slice(newslice, live[newslice]['name'], live[newslice]['_rspec'])
+ slices[newslice].reset(0, 0, 0, 0, live[newslice]['_rspec'])
else:
- logger.log("bwmon Slice %s doesn't have xid. Must be delegated. Skipping." % live[newslicexid])
+ logger.log("bwmon Slice %s doesn't have xid. Must be delegated. Skipping." % live[newslice]['name'])
- # ...mlhuang's abortion....
- # Get actual running values from tc.
- # Update slice totals and bandwidth.
- for params in bwlimit.get():
- (xid, share,
- minrate, maxrate,
- minexemptrate, maxexemptrate,
- usedbytes, usedi2bytes) = params
-
- # Ignore root and default buckets
+ # Delete dead slices.
+ # First delete dead slices that exist in the pickle file, but
+ # aren't instantiated by PLC.
+ dead = Set(slices.keys()) - Set(live.keys())
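+ # root and default xids live in slices but never in live, hence the -2 below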
+ logger.log("bwmon: Found %s dead slices" % (dead.__len__() - 2))
+ for xid in dead:
if xid == root_xid or xid == default_xid:
continue
+ logger.log("bwmon: removing dead slice %s " % xid)
+ if slices.has_key(xid): del slices[xid]
+ if livehtbs.has_key(xid): bwlimit.off(xid)
- name = bwlimit.get_slice(xid)
- if name is None:
- # Orphaned (not associated with a slice) class
- name = "%d?" % xid
- bwlimit.off(xid)
+ # Get actual running values from tc since we've added and removed buckets.
+ # Update slice totals and bandwidth. {xid: {values}}
+ livehtbs = gethtbs(root_xid, default_xid)
+ logger.log("bwmon: now %s running HTBs" % livehtbs.keys().__len__())
+ for (xid, slice) in slices.iteritems():
# Monitor only the specified slices
+ if xid == root_xid or xid == default_xid: continue
- if names and name not in names:
- continue
+ if names and slice.name not in names:
+ continue
- #slices is populated from the pickle file
- #xid is populated from bwlimit (read from /etc/passwd)
- if slices.has_key(xid):
- slice = slices[xid]
- # Old slices werent being instanciated correctly because
- # the HTBs were still pleasent, but the slice in bwmon would
- # have the byte counts set to 0. The next time update was run
- # the real byte count would be sent to update, causing the bw cap.
- if time.time() >= (slice.time + period) or \
- usedbytes < slice.bytes or \
- usedi2bytes < slice.i2bytes or \
- xid in newslicesxids:
- # Reset to defaults every 24 hours or if it appears
- # that the byte counters have overflowed (or, more
- # likely, the node was restarted or the HTB buckets
- # were re-initialized).
- slice.reset(maxrate, \
- maxexemptrate, \
- usedbytes, \
- usedi2bytes, \
- db[slice.name]['_rspec'])
- else:
- # Update byte counts
- slice.update(maxrate, \
- maxexemptrate, \
- usedbytes, \
- usedi2bytes, \
- db[slice.name]['_rspec'])
+ if (time.time() >= (slice.time + period)) or \
+ (livehtbs[xid]['usedbytes'] < slice.bytes) or \
+ (livehtbs[xid]['usedi2bytes'] < slice.i2bytes):
+ # Reset to defaults every 24 hours or if it appears
+ # that the byte counters have overflowed (or, more
+ # likely, the node was restarted or the HTB buckets
+ # were re-initialized).
+ slice.reset(livehtbs[xid]['maxrate'], \
+ livehtbs[xid]['maxexemptrate'], \
+ livehtbs[xid]['usedbytes'], \
+ livehtbs[xid]['usedi2bytes'], \
+ live[xid]['_rspec'])
else:
- # Just in case. Probably (hopefully) this will never happen.
- # New slice, initialize state
- logger.log("bwmon: Deleting orphaned slice xid %s" % xid)
- bwlimit.off(xid)
-
- # Delete dead slices
- dead = Set(slices.keys()) - Set(live.keys())
- for xid in dead:
- if xid == root_xid or xid == default_xid:
- continue
- del slices[xid]
- bwlimit.off(xid)
-
- logger.log("bwmon: Saving %s" % datafile)
+ if debug: logger.log("bwmon: Updating slice %s" % slice.name)
+ # Update byte counts
+ slice.update(livehtbs[xid]['maxrate'], \
+ livehtbs[xid]['maxexemptrate'], \
+ livehtbs[xid]['usedbytes'], \
+ livehtbs[xid]['usedi2bytes'], \
+ live[xid]['_rspec'])
+
+ logger.log("bwmon: Saving %s slices in %s" % (slices.keys().__len__(),datafile))
f = open(datafile, "w")
pickle.dump((version, slices), f)
f.close()
-
-#def GetSlivers(data):
-# for sliver in data['slivers']:
-# if sliver.has_key('attributes'):
-# print sliver
-# for attribute in sliver['attributes']:
-# if attribute['name'] == "KByteThresh": print attribute['value']
-
-#def start(options, config):
-# pass
+lock = threading.Event()
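+# Despite the name, this is an Event: sm sets it to wake the bwmon thread,
+# and run() clears it after each pass.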
+def run():
+ """When run as a thread, wait for event, lock db, deep copy it, release it, run bwmon.GetSlivers(), then go back to waiting."""
+ if debug: logger.log("bwmon: Thread started")
+ while True:
+ lock.wait()
+ if debug: logger.log("bwmon: Event received. Running.")
+ database.db_lock.acquire()
+ nmdbcopy = copy.deepcopy(database.db)
+ database.db_lock.release()
+ try: sync(nmdbcopy)
+ except: logger.log_exc()
+ lock.clear()
+
+def start(*args):
+ tools.as_daemon_thread(run)
+
+def GetSlivers(*args):
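+ # nm.py still invokes each module's GetSlivers(); bwmon's real work now
+ # happens in the event-driven run() thread, so this is a no-op.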
+ pass
if name not in self: accounts.get(name).ensure_destroyed()
for name, rec in self.iteritems():
if rec['instantiation'] == 'plc-instantiated': accounts.get(name).ensure_created(rec)
+ if rec['instantiation'] == 'nm-controller': accounts.get(name).ensure_created(rec)
- try: bwmon.GetSlivers(self)
- except: logger.log_exc()
-
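+ # Wake the bwmon thread (see bwmon.run) instead of calling it synchronously.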
+ bwmon.lock.set()
# request a database dump
global dump_requested
dump_requested = True
# chkconfig: 3 86 26
# description: Starts and stops Node Manager daemon
#
-# $Id: vnet.init,v 1.21 2006/02/27 15:41:27 mlhuang Exp $
+# $Id$
# Source function library.
. /etc/init.d/functions
nm=${NM-"python /usr/share/NodeManager/nm.py"}
prog="Node Manager"
-options=${OPTIONS-"-d -s"}
pidfile=${PIDFILE-/var/run/nm.pid}
lockfile=${LOCKFILE-/var/lock/subsys/nm}
RETVAL=0
-start()
+do_start()
{
+ options="$*"
echo -n $"Starting $prog: "
daemon --check=nm $nm $options
RETVAL=$?
return $RETVAL
}
+start()
+{
+ do_start ${OPTIONS-"-d -s"}
+}
+
stop()
{
echo -n $"Stopping $prog: "
[ $RETVAL -eq 0 ] && rm -f ${lockfile} ${pidfile}
}
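+# Note: restart passes only -d; -s would re-request startup of all slivers.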
+restart()
+{
+ stop
+ do_start ${OPTIONS-"-d"}
+}
+
case "$1" in
start)
start
RETVAL=$?
;;
restart|reload)
- stop
- start
+ restart
;;
condrestart)
if [ -f ${pidfile} ] ; then
- stop
- start
+ restart
fi
;;
*)
callback = getattr(module, 'GetSlivers')
callback(data)
+def UpdateHostKey(plc):
+ logger.log('Trying to update ssh host key at PLC...')
+ ssh_host_key = open('/etc/ssh/ssh_host_rsa_key.pub').read().strip()
+ plc.BootUpdateNode(dict(ssh_host_key=ssh_host_key))
+ logger.log('Host key update succeeded')
+
def run():
try:
if options.daemon: tools.daemon()
print "Warning while writing PID file:", err
# Load and start modules
- for module in ['net', 'proper', 'conf_files', 'sm']:
+ for module in ['net', 'proper', 'conf_files', 'sm', 'bwmon']:
try:
m = __import__(module)
m.start(options, config)
plc = PLCAPI(config.plc_api_uri, config.cacert, session, timeout=options.period/2)
while True:
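+ # Re-sent on every pass so PLC picks up a regenerated host key.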
+ try: UpdateHostKey(plc)
+ except: logger.log_exc()
try: GetSlivers(plc)
except: logger.log_exc()
time.sleep(options.period + random.randrange(0,301))
import logger
import tools
+# special constant that tells vserver to keep its existing settings
+KEEP_LIMIT = vserver.VC_LIM_KEEP
+
+# populate the sliver/vserver specific default allocations table,
+# which is used to look for slice attributes
+DEFAULT_ALLOCATION = {}
+for rlimit in vserver.RLIMITS.keys():
+ rlim = rlimit.lower()
+ DEFAULT_ALLOCATION["%s_min" % rlim] = KEEP_LIMIT
+ DEFAULT_ALLOCATION["%s_soft" % rlim] = KEEP_LIMIT
+ DEFAULT_ALLOCATION["%s_hard" % rlim] = KEEP_LIMIT
class Sliver_VS(accounts.Account, vserver.VServer):
"""This class wraps vserver.VServer to make its interface closer to what we need."""
self.rspec = {}
self.initscript = ''
self.disk_usage_initialized = False
+ self.initscriptchanged = False
self.configure(rec)
@staticmethod
fd = os.open('/etc/rc.vinit', flags, 0755)
os.write(fd, new_initscript)
os.close(fd)
- try: self.chroot_call(install_initscript)
+ try:
+ self.chroot_call(install_initscript)
+ self.initscriptchanged = True
except: logger.log_exc()
accounts.Account.configure(self, rec) # install ssh keys
os._exit(0)
else: os.waitpid(child_pid, 0)
else: logger.log('%s: not starting, is not enabled' % self.name)
+ self.initscriptchanged = False
def stop(self):
logger.log('%s: stopping' % self.name)
finally: Sliver_VS._init_disk_info_sem.release()
logger.log('%s: computing disk usage: ended' % self.name)
self.disk_usage_initialized = True
- vserver.VServer.set_disklimit(self, disk_max)
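+ # never set the limit below the blocks the sliver already uses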
+ vserver.VServer.set_disklimit(self, max(disk_max, self.disk_blocks))
except OSError:
logger.log('%s: failed to set max disk usage' % self.name)
logger.log_exc()
- # N.B. net_*_rate are in kbps because of XML-RPC maxint
- # limitations, convert to bps which is what bwlimit.py expects.
-# net_limits = (self.rspec['net_min_rate'] * 1000,
-# self.rspec['net_max_rate'] * 1000,
-# self.rspec['net_i2_min_rate'] * 1000,
-# self.rspec['net_i2_max_rate'] * 1000,
-# self.rspec['net_share'])
-# logger.log('%s: setting net limits to %s bps' % (self.name, net_limits[:-1]))
-# logger.log('%s: setting net share to %d' % (self.name, net_limits[-1]))
-# self.set_bwlimit(*net_limits)
+ # get/set the min/soft/hard values for all of the vserver
+ # related RLIMITS. Note that vserver currently only
+ # implements support for hard limits.
+ for limit in vserver.RLIMITS.keys():
+ lim = limit.lower()
+ minimum = self.rspec['%s_min' % lim]
+ soft = self.rspec['%s_soft' % lim]
+ hard = self.rspec['%s_hard' % lim]
+ self.set_rlimit_config(limit, hard, soft, minimum)
+
+ self.set_WHITELISTED_config(self.rspec['whitelist'])
+
+ if False: # this code was commented out before
+ # N.B. net_*_rate are in kbps because of XML-RPC maxint
+ # limitations, convert to bps which is what bwlimit.py expects.
+ net_limits = (self.rspec['net_min_rate'] * 1000,
+ self.rspec['net_max_rate'] * 1000,
+ self.rspec['net_i2_min_rate'] * 1000,
+ self.rspec['net_i2_max_rate'] * 1000,
+ self.rspec['net_share'])
+ logger.log('%s: setting net limits to %s bps' % (self.name, net_limits[:-1]))
+ logger.log('%s: setting net share to %d' % (self.name, net_limits[-1]))
+ self.set_bwlimit(*net_limits)
cpu_min = self.rspec['cpu_min']
cpu_share = self.rspec['cpu_share']
- if self.rspec['enabled'] > 0:
+
+ if self.rspec['enabled'] > 0 and self.rspec['whitelist'] == 1:
if cpu_min >= 50: # at least 5%: keep people from shooting themselves in the foot
logger.log('%s: setting cpu share to %d%% guaranteed' % (self.name, cpu_min/10.0))
self.set_sched_config(cpu_min, vserver.SCHED_CPU_GUARANTEED)
else:
logger.log('%s: setting cpu share to %d' % (self.name, cpu_share))
self.set_sched_config(cpu_share, 0)
+
+ if False: # Does not work properly yet.
+ if self.have_limits_changed():
+ logger.log('%s: limits have changed --- restarting' % self.name)
+ stopcount = 10
+ while self.is_running() and stopcount > 0:
+ self.stop()
+ delay = 1
+ time.sleep(delay)
+ stopcount = stopcount - 1
+ self.start()
+
else: # tell vsh to disable remote login by setting CPULIMIT to 0
logger.log('%s: disabling remote login' % self.name)
self.set_sched_config(0, 0)
import delegate
import logger
import sliver_vs
+import string, re
DEFAULT_ALLOCATION = {
'enabled': 1,
+ 'whitelist': 1,
# CPU parameters
'cpu_min': 0, # ms/s
'cpu_share': 32, # proportional share
'net_thresh_kbyte': 4529848, #Kbyte
'net_i2_max_kbyte': 17196646,
'net_i2_thresh_kbyte': 13757316,
- 'disk_max': 5000000 # bytes
+ # disk space limit
+ 'disk_max': 5000000, # bytes
+
+ # NOTE: this table is further populated with resource names and
+ # default amounts via the start() function below. This probably
+ # should be changed so these values are obtained via the MyPLC API.
}
-start_requested = False # set to True in order to request that all slivers be started
+def whitelistfilter():
+ """creates a regex (re) object based on the slice definitions
+ in /etc/planetlab/whitelist"""
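+ # Assumed file format: one slice-name pattern per line, '*' as a wildcard
+ # on either side of the underscore, '#' starting a comment, e.g.:
+ #   princeton_codeen
+ #   utah_*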
+
+ whitelist = []
+ whitelist_re = re.compile("([a-zA-Z0-9\*]+)_([a-zA-Z0-9\*]+)")
+ linecount = 0
+ try:
+ f = open('/etc/planetlab/whitelist')
+ for line in f.readlines():
+ linecount = linecount+1
+ line = line.strip()
+ # skip blank lines and comments
+ if len(line) == 0 or line[0] == '#':
+ continue
+ m = whitelist_re.search(line)
+ if m == None:
+ logger.log("skipping line #%d in /etc/planetlab/whitelist" % linecount)
+ continue
+ else:
+ whitelist.append(m.group())
+ f.close()
+ except IOError, e:
+ logger.log("IOError -> %s" % e)
+ logger.log("No whitelist file found; setting slice white list to *_*")
+ whitelist = ["*_*"]
+
+ white_re_list = None
+ for w in whitelist:
+ w = string.replace(w,'*','([a-zA-Z0-9]+)')
+ if white_re_list == None:
+ white_re_list = w
+ else:
+ white_re_list = "(%s)|(%s)" % (white_re_list,w)
+
+ if white_re_list == None:
+ white_re_list = "([a-zA-Z0-9]+)_([a-zA-Z0-9]+)"
+
+ logger.log("whitelist regex = %s" % white_re_list)
+ whitelist_re = re.compile(white_re_list)
+ return whitelist_re
+
@database.synchronized
def GetSlivers(data, fullupdate=True):
"""This function has two purposes. One, convert GetSlivers() data
DEFAULT_ALLOCATION['net_max_rate'] = network['bwlimit'] / 1000
### Emulab-specific hack begins here
- emulabdelegate = {
- 'instantiation': 'plc-instantiated',
- 'keys': '''ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEA5Rimz6osRvlAUcaxe0YNfGsLL4XYBN6H30V3l/0alZOSXbGOgWNdEEdohwbh9E8oYgnpdEs41215UFHpj7EiRudu8Nm9mBI51ARHA6qF6RN+hQxMCB/Pxy08jDDBOGPefINq3VI2DRzxL1QyiTX0jESovrJzHGLxFTB3Zs+Y6CgmXcnI9i9t/zVq6XUAeUWeeXA9ADrKJdav0SxcWSg+B6F1uUcfUd5AHg7RoaccTldy146iF8xvnZw0CfGRCq2+95AU9rbMYS6Vid8Sm+NS+VLaAyJaslzfW+CAVBcywCOlQNbLuvNmL82exzgtl6fVzutRFYLlFDwEM2D2yvg4BQ== root@boss.emulab.net''',
- 'name': 'utah_elab_delegate',
- 'timestamp': data['timestamp'],
- 'type': 'delegate',
- 'vref': None
- }
- database.db.deliver_record(emulabdelegate)
+# emulabdelegate = {
+#     'instantiation': 'plc-instantiated',
+#     'keys': '''ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEA5Rimz6osRvlAUcaxe0YNfGsLL4XYBN6H30V3l/0alZOSXbGOgWNdEEdohwbh9E8oYgnpdEs41215UFHpj7EiRudu8Nm9mBI51ARHA6qF6RN+hQxMCB/Pxy08jDDBOGPefINq3VI2DRzxL1QyiTX0jESovrJzHGLxFTB3Zs+Y6CgmXcnI9i9t/zVq6XUAeUWeeXA9ADrKJdav0SxcWSg+B6F1uUcfUd5AHg7RoaccTldy146iF8xvnZw0CfGRCq2+95AU9rbMYS6Vid8Sm+NS+VLaAyJaslzfW+CAVBcywCOlQNbLuvNmL82exzgtl6fVzutRFYLlFDwEM2D2yvg4BQ== root@boss.emulab.net''',
+#     'name': 'utah_elab_delegate',
+#     'timestamp': data['timestamp'],
+#     'type': 'delegate',
+#     'vref': None
+# }
+# database.db.deliver_record(emulabdelegate)
### Emulab-specific hack ends here
for is_rec in data['initscripts']:
initscripts_by_id[str(is_rec['initscript_id'])] = is_rec['script']
+ # remove slivers not on the whitelist
+ whitelist_regex = whitelistfilter()
+
for sliver in data['slivers']:
rec = sliver.copy()
rec.setdefault('timestamp', data['timestamp'])
keys = rec.pop('keys')
rec.setdefault('keys', '\n'.join([key_struct['key'] for key_struct in keys]))
+ # Handle nm controller here
rec.setdefault('type', attr_dict.get('type', 'sliver.VServer'))
+ if rec['instantiation'] == 'nm-controller':
+ # GetSlivers() doesn't return a type for these, so we overload
+ # instantiation; it amounts to the same thing. -FA
+ rec['type'] = 'delegate'
+
rec.setdefault('vref', attr_dict.get('vref', 'default'))
is_id = attr_dict.get('plc_initscript_id')
if is_id is not None and is_id in initscripts_by_id:
rec['initscript'] = initscripts_by_id[is_id]
else:
rec['initscript'] = ''
- rec.setdefault('delegations', []) # XXX - delegation not yet supported
+ rec.setdefault('delegations', [])
# extract the implied rspec
rspec = {}
try: amt = int(attr_dict[resname])
except (KeyError, ValueError): amt = default_amt
rspec[resname] = amt
+
+ # Disable slivers that aren't on the whitelist.
+ m = whitelist_regex.search(sliver['name'])
+ if m == None:
+ rspec['whitelist'] = 0
+ rspec['enabled'] = 0
+
database.db.deliver_record(rec)
if fullupdate: database.db.set_min_timestamp(data['timestamp'])
database.db.sync()
-
- # handle requested startup
- global start_requested
- if start_requested:
- start_requested = False
- cumulative_delay = 0
- for name in database.db.iterkeys():
- accounts.get(name).start(delay=cumulative_delay)
- cumulative_delay += 3
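+ # Initial pass is done; later creations start immediately, unstaggered.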
+ accounts.Startingup = False
def deliver_ticket(data): return GetSlivers(data, fullupdate=False)
def start(options, config):
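+ # Fold the vserver rlimit defaults (KEEP_LIMIT entries from sliver_vs) into
+ # this table so the rspec extraction in GetSlivers() knows those resources.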
+ for resname, default_amt in sliver_vs.DEFAULT_ALLOCATION.iteritems():
+ DEFAULT_ALLOCATION[resname] = default_amt
+
accounts.register_class(sliver_vs.Sliver_VS)
accounts.register_class(delegate.Delegate)
- global start_requested
- start_requested = options.startup
+ accounts.Startingup = options.startup
database.start()
api.deliver_ticket = deliver_ticket
api.start()