%define slicefamily %{pldistro}-%{distroname}-%{_arch}
%define name NodeManager
-%define version 2.0
-%define taglevel 36
+%define version 2.1
+%define taglevel 1
%define release %{taglevel}%{?pldistro:.%{pldistro}}%{?date:.%{date}}
+%global python_sitearch %( python -c "from distutils.sysconfig import get_python_lib; print get_python_lib(1)" )
Summary: PlanetLab Node Manager
Name: %{name}
# we do need the slice images in any case
Requires: sliceimage-%{slicefamily}
# our interface to the vserver patch
-Requires: util-vserver >= 0.30.208-17
+#Requires: util-vserver >= 0.30.208-17
# vserver.py
-Requires: util-vserver-python > 0.3-16
+#Requires: util-vserver-python > 0.3-16
# sioc/plnet
Requires: pyplnet >= 4.3
# make manages the C and Python stuff
rm -rf $RPM_BUILD_ROOT
%{__make} %{?_smp_mflags} install DESTDIR="$RPM_BUILD_ROOT"
+PYTHON_SITEARCH=`python -c 'from distutils.sysconfig import get_python_lib; print get_python_lib(1)'`
# install the sliver initscript (that triggers the slice initscript if any)
mkdir -p $RPM_BUILD_ROOT/usr/share/NodeManager/sliver-initscripts/
install -d -m 755 $RPM_BUILD_ROOT/var/lib/nodemanager
install -D -m 644 logrotate/nodemanager $RPM_BUILD_ROOT/%{_sysconfdir}/logrotate.d/nodemanager
+install -D -m 755 sshsh $RPM_BUILD_ROOT/bin/sshsh
+install -D -m 644 bwlimit.py ${RPM_BUILD_ROOT}/${PYTHON_SITEARCH}/bwlimit.py
##########
%post
%{_initrddir}/
%{_sysconfdir}/logrotate.d/nodemanager
/var/lib/
+/bin/sshsh
+%{python_sitearch}/bwlimit.py*
%changelog
- * Fri Jan 13 2012 Marco Yuen <marcoy@cs.princeton.edu> - nodemanager-2.0-34
- - Install bwlimit.py to the python site directory.
+* Fri Apr 13 2012 Thierry Parmentelat <thierry.parmentelat@sophia.inria.fr> - nodemanager-2.1-1
+- first working draft for dealing with libvirt/lxc on f16 nodes
+- not expected to work with mainline nodes (use 2.0 for that for now)
+
+* Sun Jun 03 2012 Thierry Parmentelat <thierry.parmentelat@sophia.inria.fr> - nodemanager-2.0-36
+- /var/log/nodemanager shows duration of mainloop
+
+* Fri Apr 13 2012 Thierry Parmentelat <thierry.parmentelat@sophia.inria.fr> - nodemanager-2.0-35
+- remove Requires to deprecated vserver-* rpms, use sliceimage-* instead
* Fri Dec 09 2011 Thierry Parmentelat <thierry.parmentelat@sophia.inria.fr> - nodemanager-2.0-34
- Added memory scheduling to core scheduler
import struct
import threading
import xmlrpclib
+import sliver_lxc
try:
from PLC.Parameter import Parameter, Mixed
def Mixed(a = None, b = None, c = None): pass
- import accounts
+ import account
import logger
# TODO: These try/excepts are a hack to allow doc/DocBookLocal.py to
# A better approach will involve more extensive code splitting, I think.
try: import database
except: import logger as database
-try: import sliver_vs
-except: import logger as sliver_vs
+#try: import sliver_vs
+#except: import logger as sliver_vs
import ticket as ticket_module
import tools
@export_to_api(0)
def GetXIDs():
"""Return an dictionary mapping Slice names to XIDs"""
- return dict([(pwent[0], pwent[2]) for pwent in pwd.getpwall() if pwent[6] == sliver_vs.Sliver_VS.SHELL])
+ return dict([(pwent[0], pwent[2]) for pwent in pwd.getpwall() if pwent[6] == sliver_lxc.Sliver_LXC.SHELL])
@export_to_docbook(roles=['self'],
accepts=[],
"""Create a non-PLC-instantiated sliver"""
rec = sliver_name
if rec['instantiation'] == 'delegated':
- accounts.get(rec['name']).ensure_created(rec)
+ account.get(rec['name']).ensure_created(rec)
logger.log("api_calls: Create %s"%rec['name'])
else:
raise Exception, "Only PLC can create non delegated slivers."
"""Destroy a non-PLC-instantiated sliver"""
rec = sliver_name
if rec['instantiation'] == 'delegated':
- accounts.get(rec['name']).ensure_destroyed()
+ account.get(rec['name']).ensure_destroyed()
logger.log("api_calls: Destroy %s"%rec['name'])
else:
raise Exception, "Only PLC can destroy non delegated slivers."
def Start(sliver_name):
"""Configure and start sliver."""
rec = sliver_name
- accounts.get(rec['name']).start(rec)
+ account.get(rec['name']).start(rec)
logger.log("api_calls: Start %s"%rec['name'])
def Stop(sliver_name):
"""Kill all processes belonging to the specified sliver"""
rec = sliver_name
- accounts.get(rec['name']).stop()
+ account.get(rec['name']).stop()
logger.log("api_calls: Stop %s"%rec['name'])
def ReCreate(sliver_name):
"""Stop, Destroy, Create, Start sliver in order to reinstall it."""
rec = sliver_name
- accounts.get(rec['name']).stop()
- accounts.get(rec['name']).ensure_created(rec)
- accounts.get(rec['name']).start(rec)
+ account.get(rec['name']).stop()
+ account.get(rec['name']).ensure_created(rec)
+ account.get(rec['name']).start(rec)
logger.log("api_calls: ReCreate %s"%rec['name'])
@export_to_docbook(roles=['nm-controller', 'self'],
- # $Id$
- # $URL$
-
"""Whole core scheduling
"""
import logger
import os
+import cgroups
glo_coresched_simulate = False
+joinpath = os.path.join
class CoreSched:
""" Whole-core scheduler
self.mems_map={}
self.cpu_siblings={}
- def get_cgroup_var(self, name=None, filename=None):
+ def get_cgroup_var(self, name=None, subsys=None, filename=None):
""" decode cpuset.cpus or cpuset.mems into a list of units that can
be reserved.
"""
assert(filename!=None or name!=None)
if filename==None:
- filename="/dev/cgroup/" + name
+ # filename="/dev/cgroup/" + name
+ filename = reduce(lambda a, b: joinpath(a, b) if b else a, [subsys, name],
+ cgroups.get_base_path())
data = open(filename).readline().strip()
if self.cpus!=[]:
return self.cpus
- self.cpus = self.get_cgroup_var(self.cgroup_var_name)
+ self.cpus = self.get_cgroup_var(self.cgroup_var_name, 'cpuset')
self.cpu_siblings = {}
for item in self.cpus:
this might change as vservers are instantiated, so always compute
it dynamically.
"""
- cgroups = []
- filenames = os.listdir("/dev/cgroup")
- for filename in filenames:
- if os.path.isdir(os.path.join("/dev/cgroup", filename)):
- cgroups.append(filename)
- return cgroups
+ return cgroups.get_cgroups()
+ #cgroups = []
+ #filenames = os.listdir("/dev/cgroup")
+ #for filename in filenames:
+ # if os.path.isdir(os.path.join("/dev/cgroup", filename)):
+ # cgroups.append(filename)
+ #return cgroups
def decodeCoreSpec (self, cores):
""" Decode the value of the core attribute. It's a number, followed by
if glo_coresched_simulate:
print "R", "/dev/cgroup/" + cgroup + "/" + var_name, self.listToRange(cpus)
else:
- file("/dev/cgroup/" + cgroup + "/" + var_name, "w").write( self.listToRange(cpus) + "\n" )
+ cgroups.write(cgroup, var_name, self.listToRange(cpus))
+ #file("/dev/cgroup/" + cgroup + "/" + var_name, "w").write( self.listToRange(cpus) + "\n" )
def reserveDefault (self, var_name, cpus):
- if not os.path.exists("/etc/vservers/.defaults/cgroup"):
- os.makedirs("/etc/vservers/.defaults/cgroup")
+ #if not os.path.exists("/etc/vservers/.defaults/cgroup"):
+ # os.makedirs("/etc/vservers/.defaults/cgroup")
- if glo_coresched_simulate:
- print "RDEF", "/etc/vservers/.defaults/cgroup/" + var_name, self.listToRange(cpus)
- else:
- file("/etc/vservers/.defaults/cgroup/" + var_name, "w").write( self.listToRange(cpus) + "\n" )
+ #if glo_coresched_simulate:
+ # print "RDEF", "/etc/vservers/.defaults/cgroup/" + var_name, self.listToRange(cpus)
+ #else:
+ # file("/etc/vservers/.defaults/cgroup/" + var_name, "w").write( self.listToRange(cpus) + "\n" )
+ pass
def listToRange (self, list):
""" take a list of items [1,2,3,5,...] and return it as a range: "1-3,5"
if self.mems!=[]:
return self.mems
- self.mems = self.get_cgroup_var(self.cgroup_mem_name)
+ self.mems = self.get_cgroup_var(self.cgroup_mem_name, 'cpuset')
# build a mapping from memory nodes to the cpus they can be used with
return []
siblings = []
- x = int(open(fn,"rt").readline().strip(),16)
+ x = open(fn, 'rt').readline().strip().split(',')[-1]
+ x = int(x, 16)
+
cpuid = 0
while (x>0):
if (x&1)!=0:
#!/bin/bash
#
- # $Id$
- # $URL$
- #
# nm Starts and stops Node Manager daemon
#
-# chkconfig: 3 86 26
+# chkconfig: 3 97 26
# description: Starts and stops Node Manager daemon
#
[ -f /etc/sysconfig/nodemanager ] && . /etc/sysconfig/nodemanager
+# Wait for libvirt to finish initializing
+sleep 10
+
options=${OPTIONS-"-d"}
# turn on verbosity
verboseoptions=${DEBUGOPTIONS-"-v -d"}
- # $Id$
- # $URL$
-
"""Codemux configurator. Monitors slice attributes and configures CoDemux to mux port 80 based on HOST field in HTTP request. Forwards to localhost port belonging to configured slice."""
import logger
import os
-import vserver
+import libvirt
from config import Config
CODEMUXCONF="/etc/codemux/codemux.conf"
try:
# Check to see if sliver is running. If not, continue
- if vserver.VServer(sliver['name']).is_running():
+ if isLXCDomRunning(sliver['name']):
# Check if new or needs updating
if (sliver['name'] not in slicesinconf.keys()) \
or (params not in slicesinconf.get(sliver['name'], [])):
logger.log("codemux: Stopping codemux service")
logger.log_call(["/etc/init.d/codemux", "stop", ])
logger.log_call(["/sbin/chkconfig", "codemux", "off"])
+
+def isLXCDomRunning(domName):
+    """Return True iff the libvirt LXC domain <domName> exists and is running."""
+    running = False
+    conn = None
+    try:
+        conn = libvirt.open('lxc://')
+        dom = conn.lookupByName(domName)
+        running = dom.info()[0] == libvirt.VIR_DOMAIN_RUNNING
+    except libvirt.libvirtError:
+        # lookupByName raises for an undefined domain: report "not running"
+        # instead of propagating and crashing the codemux sliver loop
+        pass
+    finally:
+        # libvirt.open() may raise before 'conn' is bound -- only close a real handle
+        if conn is not None:
+            conn.close()
+    return running
+
#!/usr/bin/python
#
- # $Id$
- # $URL$
- #
# Setup script for the Node Manager application
#
# Mark Huang <mlhuang@cs.princeton.edu>
# Copyright (C) 2006 The Trustees of Princeton University
#
- # $Id$
- #
from distutils.core import setup, Extension
setup(
py_modules=[
- 'accounts',
+ 'account',
'api',
'api_calls',
'bwmon',
+ 'bwlimit',
+ 'cgroups',
'conf_files',
'config',
'controller',
'nodemanager',
'plcapi',
'safexmlrpc',
+ 'sliver_libvirt',
+ 'sliver_lxc',
'sliver_vs',
'slivermanager',
'ticket',
# the util-vserver-pl module
import vserver
- import accounts
+ import account
import logger
import tools
DEFAULT_ALLOCATION["%s_soft"%rlim]=KEEP_LIMIT
DEFAULT_ALLOCATION["%s_hard"%rlim]=KEEP_LIMIT
- class Sliver_VS(accounts.Account, vserver.VServer):
+ class Sliver_VS(account.Account, vserver.VServer):
"""This class wraps vserver.VServer to make its interface closer to what we need."""
SHELL = '/bin/vsh'
self.create(name, rec)
logger.log("sliver_vs: %s: second chance..."%name)
vserver.VServer.__init__(self, name,logfile='/var/log/nodemanager')
-
self.keys = ''
self.rspec = {}
self.slice_id = rec['slice_id']
#self.initscriptchanged = True
self.refresh_slice_vinit()
- accounts.Account.configure(self, rec) # install ssh keys
+ account.Account.configure(self, rec) # install ssh keys
# unconditionnally install and enable the generic vinit script
# mimicking chkconfig for enabling the generic vinit script
if code:
logger.log("vsliver_vs: %s: Installed new initscript in %s"%(self.name,sliver_initscript))
if self.is_running():
- # Only need to rerun the initscript if the vserver is
+ # Only need to rerun the initscript if the vserver is
# already running. If the vserver isn't running, then the
# initscript will automatically be started by
# /etc/rc.d/vinit when the vserver is started.
else:
logger.log("vsliver_vs: %s: Removed obsolete initscript %s"%(self.name,sliver_initscript))
- # bind mount root side dir to sliver side
- # needs to be done before sliver starts
- def expose_ssh_dir (self):
- try:
- root_ssh="/home/%s/.ssh"%self.name
- sliver_ssh="/vservers/%s/home/%s/.ssh"%(self.name,self.name)
- # any of both might not exist yet
- for path in [root_ssh,sliver_ssh]:
- if not os.path.exists (path):
- os.mkdir(path)
- if not os.path.isdir (path):
- raise Exception
- mounts=file('/proc/mounts').read()
- if mounts.find(sliver_ssh)<0:
- # xxx perform mount
- subprocess.call("mount --bind -o ro %s %s"%(root_ssh,sliver_ssh),shell=True)
- logger.log("expose_ssh_dir: %s mounted into slice %s"%(root_ssh,self.name))
- except:
- logger.log_exc("expose_ssh_dir with slice %s failed"%self.name)
-
def start(self, delay=0):
if self.rspec['enabled'] <= 0:
logger.log('sliver_vs: not starting %s, is not enabled'%self.name)
import logger
import api, api_calls
import database
- import accounts
+ import account
import controller
-import sliver_vs
+import sliver_lxc
try: from bwlimit import bwmin, bwmax
except ImportError: bwmin, bwmax = 8, 1000*1000*1000
if is_system_sliver(sliver):
sliver['reservation_alive']=True
continue
-
+
# regular slivers
if not active_lease:
# with 'idle_or_shared', just let the field out, behave like a shared node
if rec['instantiation'].lower() == 'nm-controller':
rec.setdefault('type', attributes.get('type', 'controller.Controller'))
else:
- rec.setdefault('type', attributes.get('type', 'sliver.VServer'))
+ rec.setdefault('type', attributes.get('type', 'sliver.LXC'))
# set the vserver reference. If none, set to default.
rec.setdefault('vref', attributes.get('vref', 'default'))
return GetSlivers(data, fullupdate=False)
def start():
- for resname, default_amount in sliver_vs.DEFAULT_ALLOCATION.iteritems():
- DEFAULT_ALLOCATION[resname]=default_amount
+ # No default allocation values for LXC yet, think if its necessary given
+ # that they are also default allocation values in this module
+ #for resname, default_amount in sliver_vs.DEFAULT_ALLOCATION.iteritems():
+ # DEFAULT_ALLOCATION[resname]=default_amount
-    accounts.register_class(sliver_vs.Sliver_VS)
-    accounts.register_class(controller.Controller)
+    account.register_class(sliver_lxc.Sliver_LXC)
+    account.register_class(controller.Controller)
database.start()
api_calls.deliver_ticket = deliver_ticket
api.start()
- # $Id$
- # $URL$
-
"""A few things that didn't seem to fit anywhere else."""
import os, os.path
import errno
import threading
import subprocess
+import shutil
import logger
def write_file(filename, do_write, **kw_args):
"""Write file <filename> atomically by opening a temporary file, using <do_write> to write that file, and then renaming the temporary file."""
- os.rename(write_temp_file(do_write, **kw_args), filename)
+ shutil.move(write_temp_file(do_write, **kw_args), filename)
def write_temp_file(do_write, mode=None, uidgid=None):
fd, temporary_filename = tempfile.mkstemp()
os.close(fd)
if os.path.exists(target):
os.unlink(target)
- os.rename(name,target)
+ shutil.move(name,target)
if chmod: os.chmod(target,chmod)
return True