From: Thierry Parmentelat Date: Thu, 6 Jul 2017 13:08:07 +0000 (+0200) Subject: Add 'php/phpxmlrpc/' from commit 'cd5dbb4a511e7a616a61187a5de1a611a9748cbd' X-Git-Tag: plcapi-5.4-0~3 X-Git-Url: http://git.onelab.eu/?p=plcapi.git;a=commitdiff_plain;h=21d187714285d9818fd94509b015ba069facb7ef;hp=cd5dbb4a511e7a616a61187a5de1a611a9748cbd Add 'php/phpxmlrpc/' from commit 'cd5dbb4a511e7a616a61187a5de1a611a9748cbd' git-subtree-dir: php/phpxmlrpc git-subtree-mainline: 9bd41316bc9541bbedfe45377089d4e4927129b1 git-subtree-split: cd5dbb4a511e7a616a61187a5de1a611a9748cbd --- diff --git a/.gitignore b/.gitignore index 1305331..7c86429 100644 --- a/.gitignore +++ b/.gitignore @@ -1,6 +1,3 @@ -/.idea -composer.phar -composer.lock -/vendor/* -/tests/coverage/* -/build/* +*.pyc +TAGS +.gitignore diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000..4a80642 --- /dev/null +++ b/LICENSE @@ -0,0 +1,31 @@ +Copyright 2008 Princeton University + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + +* Redistributions of source code must retain the above + copyright notice, this list of conditions and the + following disclaimer. + +* Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the + following disclaimer in the documentation and/or other + materials provided with the distribution. + +* Neither the name of the copyright holder nor the names of + its contributors may be used to endorse or promote + products derived from this software without specific + prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS +IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED +TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A +PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL PRINCETON +UNIVERSITY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT +NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF +USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON +ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF +THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/Makefile b/Makefile new file mode 100644 index 0000000..ebd0eb8 --- /dev/null +++ b/Makefile @@ -0,0 +1,96 @@ +# +# (Re)builds Python metafiles (__init__.py) and documentation +# +# Mark Huang +# Copyright (C) 2005 The Trustees of Princeton University +# + +# python-pycurl and python-psycopg2 avail. from fedora 5 +# we used to ship our own version of psycopg2 and pycurl, for fedora4 +# starting with 5.0, support for these two modules is taken out + +# Other stuff - doc not implicit, it's redone by myplc-docs + +# autoconf compatible variables +DESTDIR := / +datadir := /usr/share +bindir := /usr/bin + +PWD := $(shell pwd) + +all: + python setup.py build + +install: + python setup.py install \ + --install-purelib=$(DESTDIR)/$(datadir)/plc_api \ + --install-scripts=$(DESTDIR)/$(datadir)/plc_api \ + --install-data=$(DESTDIR)/$(datadir)/plc_api + +clean: + find . -name '*.pyc' | xargs rm -f + python setup.py clean && rm -rf build + +index: + echo "This step is obsolete" + +########## + +force: + +.PHONY: all install force clean index tags + +#################### devel tools +tags: + find . 
'(' -name '*.py' -o -name '*.sql' -o -name '*.php' -o -name Makefile -o -name '[0-9][0-9][0-9]*' ')' | fgrep -v '.git/' | xargs etags + +.PHONY: tags + +########## sync +# 2 forms are supported +# (*) if your plc root context has direct ssh access: +# make sync PLC=private.one-lab.org +# (*) otherwise, for test deployments, use on your testmaster +# $ run export +# and cut'n paste the export lines before you run make sync + +ifdef PLC +SSHURL:=root@$(PLC):/ +SSHCOMMAND:=ssh root@$(PLC) +else +ifdef PLCHOSTLXC +SSHURL:=root@$(PLCHOSTLXC):/vservers/$(GUESTNAME) +SSHCOMMAND:=ssh root@$(PLCHOSTLXC) ssh -o StrictHostKeyChecking=no -o LogLevel=quiet $(GUESTHOSTNAME) +endif +endif + +LOCAL_RSYNC_EXCLUDES := --exclude '*.pyc' --exclude Accessors_site.py +RSYNC_EXCLUDES := --exclude .svn --exclude .git --exclude '*~' --exclude TAGS $(LOCAL_RSYNC_EXCLUDES) +RSYNC_COND_DRY_RUN := $(if $(findstring n,$(MAKEFLAGS)),--dry-run,) +RSYNC := rsync -a -v $(RSYNC_COND_DRY_RUN) $(RSYNC_EXCLUDES) + +sync: +ifeq (,$(SSHURL)) + @echo "sync: I need more info from the command line, e.g." + @echo " make sync PLC=boot.planetlab.eu" + @echo " make sync PLCHOSTLXC=.. GUESTHOSTNAME=.. GUESTNAME=.." + @exit 1 +else + +$(RSYNC) plcsh PLC planetlab5.sql migrations aspects $(SSHURL)/usr/share/plc_api/ + +$(RSYNC) db-config.d/ $(SSHURL)/etc/planetlab/db-config.d/ + +$(RSYNC) plc.d/ $(SSHURL)/etc/plc.d/ + +$(RSYNC) apache/plc.wsgi $(SSHURL)/usr/share/plc_api/apache/ + $(SSHCOMMAND) /etc/plc.d/httpd stop + $(SSHCOMMAND) /etc/plc.d/httpd start +endif + +#################### convenience, for debugging only +# make +foo : prints the value of $(foo) +# make ++foo : idem but verbose, i.e. 
foo=$(foo) +++%: varname=$(subst +,,$@) +++%: + @echo "$(varname)=$($(varname))" ++%: varname=$(subst +,,$@) ++%: + @echo "$($(varname))" + diff --git a/PLC/API.py b/PLC/API.py new file mode 100644 index 0000000..c40f28e --- /dev/null +++ b/PLC/API.py @@ -0,0 +1,272 @@ +# +# PLCAPI XML-RPC and SOAP interfaces +# +# Aaron Klingaman +# Mark Huang +# +# Copyright (C) 2004-2006 The Trustees of Princeton University +# + +import os +import sys +import traceback +import string + +import xmlrpclib + +# See "2.2 Characters" in the XML specification: +# +# #x9 | #xA | #xD | [#x20-#xD7FF] | [#xE000-#xFFFD] +# avoiding +# [#x7F-#x84], [#x86-#x9F], [#xFDD0-#xFDDF] + +invalid_codepoints = range(0x0, 0x8) + [0xB, 0xC] + range(0xE, 0x1F) +# broke with f24, somehow we get a unicode as an incoming string to be translated +str_xml_escape_table = string.maketrans("".join((chr(x) for x in invalid_codepoints)), + "?" * len(invalid_codepoints)) +# loosely inspired from +# http://stackoverflow.com/questions/1324067/how-do-i-get-str-translate-to-work-with-unicode-strings +unicode_xml_escape_table = { invalid : u"?" for invalid in invalid_codepoints} + +def xmlrpclib_escape(s, replace = string.replace): + """ + xmlrpclib does not handle invalid 7-bit control characters. This + function augments xmlrpclib.escape, which by default only replaces + '&', '<', and '>' with entities. + """ + + # This is the standard xmlrpclib.escape function + s = replace(s, "&", "&") + s = replace(s, "<", "<") + s = replace(s, ">", ">",) + + # Replace invalid 7-bit control characters with '?' 
+ if isinstance(s, str): + return s.translate(str_xml_escape_table) + else: + return s.translate(unicode_xml_escape_table) + +def test_xmlrpclib_escape(): + inputs = [ + # full ASCII + "".join( (chr(x) for x in range(128))), + # likewise but as a unicode string up to 256 + u"".join( (unichr(x) for x in range(256))), + ] + for input in inputs: + print "==================== xmlrpclib_escape INPUT" + print type(input), '->', input + print "==================== xmlrpclib_escape OUTPUT" + print xmlrpclib_escape(input) + +def xmlrpclib_dump(self, value, write): + """ + xmlrpclib cannot marshal instances of subclasses of built-in + types. This function overrides xmlrpclib.Marshaller.__dump so that + any value that is an instance of one of its acceptable types is + marshalled as that type. + + xmlrpclib also cannot handle invalid 7-bit control characters. See + above. + """ + + # Use our escape function + args = [self, value, write] + if isinstance(value, (str, unicode)): + args.append(xmlrpclib_escape) + + try: + # Try for an exact match first + f = self.dispatch[type(value)] + except KeyError: + # Try for an isinstance() match + for Type, f in self.dispatch.iteritems(): + if isinstance(value, Type): + f(*args) + return + raise TypeError, "cannot marshal %s objects" % type(value) + else: + f(*args) + +# You can't hide from me! 
+xmlrpclib.Marshaller._Marshaller__dump = xmlrpclib_dump + +# SOAP support is optional +try: + import SOAPpy + from SOAPpy.Parser import parseSOAPRPC + from SOAPpy.Types import faultType + from SOAPpy.NS import NS + from SOAPpy.SOAPBuilder import buildSOAP +except ImportError: + SOAPpy = None + +from PLC.Config import Config +from PLC.Faults import * +import PLC.Methods +import PLC.Accessors + +def import_deep(name): + mod = __import__(name) + components = name.split('.') + for comp in components[1:]: + mod = getattr(mod, comp) + return mod + +class PLCAPI: + + # flat list of method names + native_methods = PLC.Methods.native_methods + + # other_methods_map : dict {methodname: fullpath} + # e.g. 'Accessors' -> 'PLC.Accessors.Accessors' + other_methods_map={} + for subdir in [ 'Accessors' ]: + path="PLC."+subdir + # scan e.g. PLC.Accessors.__all__ + pkg = __import__(path).__dict__[subdir] + for modulename in getattr(pkg,"__all__"): + fullpath=path+"."+modulename + for method in getattr(import_deep(fullpath),"methods"): + other_methods_map[method] = fullpath + + all_methods = native_methods + other_methods_map.keys() + + def __init__(self, config = "/etc/planetlab/plc_config", encoding = "utf-8"): + self.encoding = encoding + + # Better just be documenting the API + if config is None: + return + + # Load configuration + self.config = Config(config) +# print("config has keys {}" +# .format(vars(self.config).keys())) + + # Initialize database connection + if self.config.PLC_DB_TYPE == "postgresql": + from PLC.PostgreSQL import PostgreSQL + self.db = PostgreSQL(self) + else: + raise PLCAPIError, "Unsupported database type " + self.config.PLC_DB_TYPE + + # Aspects modify the API by injecting code before, after or + # around method calls. 
-- http://github.com/baris/pyaspects/blob/master/README + # + if self.config.PLC_RATELIMIT_ENABLED: + from aspects import apply_ratelimit_aspect + apply_ratelimit_aspect() + + if getattr(self.config, "PLC_NETCONFIG_ENABLED", False): + from aspects.netconfigaspects import apply_netconfig_aspect + apply_netconfig_aspect() + + # Enable Caching. Only for GetSlivers for the moment. + # TODO: we may consider to do this in an aspect like the ones above. + try: + if self.config.PLC_GETSLIVERS_CACHE: + getslivers_cache = True + except AttributeError: + getslivers_cache = False + + if getslivers_cache: + os.environ['DJANGO_SETTINGS_MODULE']='plc_django_settings' + from cache_utils.decorators import cached + from PLC.Methods.GetSlivers import GetSlivers + + @cached(7200) + def cacheable_call(cls, auth, node_id_or_hostname): + return cls.raw_call(auth, node_id_or_hostname) + + GetSlivers.call = cacheable_call + + + + def callable(self, method): + """ + Return a new instance of the specified method. + """ + + # Look up method + if method not in self.all_methods: + raise PLCInvalidAPIMethod, method + + # Get new instance of method + try: + classname = method.split(".")[-1] + if method in self.native_methods: + fullpath="PLC.Methods." + method + else: + fullpath=self.other_methods_map[method] + module = __import__(fullpath, globals(), locals(), [classname]) + return getattr(module, classname)(self) + except ImportError, AttributeError: + raise PLCInvalidAPIMethod, "import error %s for %s" % (AttributeError,fullpath) + + def call(self, source, method, *args): + """ + Call the named method from the specified source with the + specified arguments. + """ + + function = self.callable(method) + function.source = source + return function(*args) + + def handle(self, source, data): + """ + Handle an XML-RPC or SOAP request from the specified source. 
+ """ + + # Parse request into method name and arguments + try: + interface = xmlrpclib + (args, method) = xmlrpclib.loads(data) + methodresponse = True + except Exception, e: + if SOAPpy is not None: + interface = SOAPpy + (r, header, body, attrs) = parseSOAPRPC(data, header = 1, body = 1, attrs = 1) + method = r._name + args = r._aslist() + # XXX Support named arguments + else: + raise e + + try: + result = self.call(source, method, *args) + except PLCFault, fault: + # Handle expected faults + if interface == xmlrpclib: + result = fault + methodresponse = None + elif interface == SOAPpy: + result = faultParameter(NS.ENV_T + ":Server", "Method Failed", method) + result._setDetail("Fault %d: %s" % (fault.faultCode, fault.faultString)) + + # Return result + if interface == xmlrpclib: + if not isinstance(result, PLCFault): + result = (result,) + data = xmlrpclib.dumps(result, methodresponse = True, encoding = self.encoding, allow_none = 1) + elif interface == SOAPpy: + data = buildSOAP(kw = {'%sResponse' % method: {'Result': result}}, encoding = self.encoding) + + return data + + def handle_json(self, source, data): + """ + Handle a JSON request + """ + method, args = json.loads(data) + try: + result = self.call(source, method, *args) + except Exception, e: + result = str(e) + + return json.dumps(result) + +# one simple unit test +if __name__ == '__main__': + test_xmlrpclib_escape() diff --git a/PLC/Accessor.py b/PLC/Accessor.py new file mode 100644 index 0000000..7352f47 --- /dev/null +++ b/PLC/Accessor.py @@ -0,0 +1,106 @@ +# +# Thierry Parmentelat - INRIA +# +# +# just a placeholder for storing accessor-related tag checkers +# this is filled by the accessors factory +# +# NOTE. 
If you ever come to manually delete a TagType that was created +# by the Factory, you need to restart your python instance / web server +# as the cached information then becomes wrong + +from PLC.Logger import logger + +from PLC.TagTypes import TagTypes, TagType +from PLC.Roles import Roles, Role + +# implementation +class Accessor (object) : + """This is placeholder for storing accessor-related tag checkers. +Methods in this class are defined by the accessors factory + +This is implemented as a singleton, so we can cache results over time""" + + _instance = None + + tag_locators={} + + def __init__ (self, api): + self.api=api + # 'tagname'=>'tag_id' + self.cache={} + self.hash_name_to_role=dict ( [ (role['name'],role) for role in Roles(api)] ) + + def has_cache (self,tagname): return self.cache.has_key(tagname) + def get_cache (self,tagname): return self.cache[tagname] + def set_cache (self,tagname,tag_type): self.cache[tagname]=tag_type + + def locate_or_create_tag (self, tagname, category, description, roles, enforce=False): + "search tag type from tagname & create if needed" + + # cached ? 
+ if self.has_cache (tagname): + return self.get_cache(tagname) + # search + tag_types = TagTypes (self.api, {'tagname':tagname}) + if tag_types: + tag_type = tag_types[0] + # enforce should only be set by the 'service plc start accessors' sequence + if enforce: + try: + tag_type.update({'category':category,'description':description}) + tag_type.sync() + roles_to_add = set(roles).difference(set(tag_type['roles'])) + for rolename in roles_to_add: + tag_type.add_role(self.hash_name_to_role[rolename]) + roles_to_delete = set(tag_type['roles']).difference(set(roles)) + for rolename in roles_to_delete: + tag_type.remove_role(self.hash_name_to_role[rolename]) + except: + logger.exception("WARNING, Could not enforce tag type, tagname={}\n" + .format(tagname)) + + + else: + # not found: create it + tag_type_fields = {'tagname':tagname, + 'category' : category, + 'description' : description} + tag_type = TagType (self.api, tag_type_fields) + tag_type.sync() + for role in roles: + try: + role_obj=Roles (self.api, role)[0] + tag_type.add_role(role_obj) + except: + # xxx todo find a more appropriate way of notifying this + logger.exception("Accessor.locate_or_create_tag: " + "Could not add role {} to tag_type {}" + .format(role,tagname)) + self.set_cache(tagname,tag_type) + return tag_type + + # a locator is a function that retrieves - or creates - a tag_type instance + @staticmethod + def register_tag_locator (name, tag_locator): + Accessor.tag_locators[name]=tag_locator + + @staticmethod + def retrieve_tag_locator (name): + return Accessor.tag_locators[name] + + # this is designed to be part of the 'service plc start' sequence + # it ensures the creation of all the tagtypes defined + # in the various accessors, and enforces consistency to the DB + # it's not easy to have define_accessors do this because at + # load-time as we do not have an instance of API yet + def run_all_tag_locators (self): + for (name, tag_locator) in Accessor.tag_locators.items(): + 
tag_locator(self,enforce=True) + +#################### +# make it a singleton so we can cache stuff in there over time +def AccessorSingleton (api): + if not Accessor._instance: + Accessor._instance = Accessor(api) + return Accessor._instance diff --git a/PLC/Accessors/Accessors-5.0-rc16.readme b/PLC/Accessors/Accessors-5.0-rc16.readme new file mode 100644 index 0000000..620272b --- /dev/null +++ b/PLC/Accessors/Accessors-5.0-rc16.readme @@ -0,0 +1,28 @@ +Starting with 5.0-rc16, tag types do not have a so-called +'min_role_id' any more`, but rather a set of roles exactly like a +person this impacts the way accessors are defined, as +'define_accessors' does not support min_role_id anymore in addition, +there was a rather confusing redundancy between 'min_role_id' and +'set_roles', as the latter was used for implementing the access rights +to the 'Set' method + +If you have defined accessors local to your site in Accessors_site.py, +that actually use the min_role_id feature, then here is how to tweak +them from this release on. + +If you want to keep the same kind of behaviour, just replace +min_role_id with set_roles as per the table below + +min_role_id | set_roles +------------------------------------ +10 | ['admin'] +20 | ['admin','pi'] +30 | ['admin','pi','user'] +40 | ['admin','pi','user','tech'] + +Now you might wish to take advantage of the new flexibility instead. + +Also please note that 'node' is now an explicit role, so if e.g. a +slicetag needs to be set from the node directly, then it needs to have +the 'node' role as well. 
+ diff --git a/PLC/Accessors/Accessors_example_ple.py b/PLC/Accessors/Accessors_example_ple.py new file mode 100644 index 0000000..6f8b40d --- /dev/null +++ b/PLC/Accessors/Accessors_example_ple.py @@ -0,0 +1,48 @@ +# Thierry Parmentelat - INRIA +# + +methods=[] + +from PLC.Nodes import Node +from PLC.Interfaces import Interface +from PLC.Slices import Slice + +from PLC.Accessors.Factory import define_accessors, all_roles, tech_roles + +import sys +current_module = sys.modules[__name__] + +#### example 1 : attach vlan ids on interfaces +# The third argument expose_in_api is a boolean flag that tells whether this tag may be handled +# through the Add/Get/Update methods as a native field +# +#define_accessors(current_module, Interface, "Vlan", "vlan", +# "interface/general", "tag for setting VLAN id", +# get_roles=all_roles, set_roles=tech_roles) + +##### example 2 : +# the slice page uses the category field in the following way +# it considers all tag types for which 'category' matches 'node*/ui*' +# for these, the category field is split into pieces using / +# the parts may define the following settings: +# header: to use instead of the full tagname (in which case a footnote appears with the 'description') +# type: exported as the type for the javascript table (used for how-to-sort) +# rank: to be used for sorting columns (defaults to tagname) + +#################### MySlice tags +define_accessors(current_module, Node, "Reliability", "reliability", + # category + "node/monitor/ui/header=R/type=int/rank=ad", + # description : used to add a footnote to the table if header is set in category + "average reliability (% uptime) over the last week", + set_roles=tech_roles, expose_in_api=True) + +define_accessors(current_module, Node, "Load", "load", + "node/monitor/ui/header=l/type=sortAlphaNumericBottom", + "average load (% CPU utilization) over the last week", + set_roles=tech_roles, expose_in_api=True) + +define_accessors(current_module, Node, "ASNumber", "asnumber", + 
"node/location/ui/header=AS/type=sortAlphaNumericBottom/rank=z", + "Autonomous System id", + set_roles=tech_roles, expose_in_api=True) diff --git a/PLC/Accessors/Accessors_ipv6.py b/PLC/Accessors/Accessors_ipv6.py new file mode 100644 index 0000000..fb736db --- /dev/null +++ b/PLC/Accessors/Accessors_ipv6.py @@ -0,0 +1,24 @@ +# Author: +# Guilherme Sperb Machado - UZH +# Created: 01/Aug/2014 +# Last modified: 01/Sep/2014 + +from PLC.Nodes import Node +from PLC.Interfaces import Interface +from PLC.Slices import Slice + +from PLC.Accessors.Factory import define_accessors, all_roles, tech_roles + +import sys +current_module = sys.modules[__name__] + +#### IPv6 addr/prefix to distribute to slivers on the node! +define_accessors(current_module, Interface, "SliversIPv6Prefix", "sliversipv6prefix", + "interface/ipv6", "The IPv6 Range/Prefix for the Slivers", + set_roles=tech_roles) + +#### IPv6 address assigned to the sliver of a particular node! +define_accessors(current_module, Slice, "IPv6Address", "ipv6_address", + "slice/usertools","IPv6 address assigned to the sliver in a particular node", + set_roles=all_roles, expose_in_api=True) + diff --git a/PLC/Accessors/Accessors_myslice.py b/PLC/Accessors/Accessors_myslice.py new file mode 100644 index 0000000..7851773 --- /dev/null +++ b/PLC/Accessors/Accessors_myslice.py @@ -0,0 +1,21 @@ +# +# Thierry Parmentelat - INRIA +# +#from PLC.Nodes import Node +#from PLC.Interfaces import Interface +#from PLC.Slices import Slice +#from PLC.Sites import Site +from PLC.Persons import Person + +from PLC.Accessors.Factory import define_accessors, admin_roles, all_roles, tech_roles + +import sys +current_module = sys.modules[__name__] + +define_accessors(current_module, Person, "Columnconf", "columnconf", + "person/myslice", "column configuration", + get_roles=all_roles, set_roles=all_roles, expose_in_api=True) + +define_accessors(current_module, Person, "Showconf", "showconf", + "person/myslice", "show configuration", + 
get_roles=all_roles, set_roles=all_roles, expose_in_api=True) diff --git a/PLC/Accessors/Accessors_site.py b/PLC/Accessors/Accessors_site.py new file mode 100644 index 0000000..b51d42e --- /dev/null +++ b/PLC/Accessors/Accessors_site.py @@ -0,0 +1,48 @@ +# +# Thierry Parmentelat - INRIA +# +# Accessors_site.py is the place where you can define your own local tag accessors +# this will not be overwritten through rpm upgrades +# +# Historical note: now that Sites are taggable too, the name may be confusing, +# think of this as Accessors_local.py +# +# to ensure creation of new tag_types, just run +# service plc start accessors +# also for the running API to take the new accessors into account, you need to +# apachectl restart +# or to stay on the safe side, simply do +# service plc restart +# +# methods denotes the set of methods (names) that get inserted into the API +# it is updated by define_accessors + +methods=[] + +from PLC.Nodes import Node +from PLC.Interfaces import Interface +from PLC.Slices import Slice +from PLC.Sites import Site +from PLC.Persons import Person + +from PLC.Accessors.Factory import define_accessors, all_roles, person_roles, tech_roles + +import sys +current_module = sys.modules[__name__] + +#### example : attach vlan ids on interfaces +# +#define_accessors(current_module, Interface, "Vlan", "vlan", +# "interface/general", "tag for setting VLAN id", +# get_roles=all_roles, set_roles=tech_roles) +# +# The optional expose_in_api is a boolean flag that tells whether this tag may be handled +# through the Add/Get/Update methods as a native field +# e.g. 
+#define_accessors(current_module, Node, "Foo", "foo", +# "node/example", "my own description for foo", +# get_roles=all_roles, set_roles=all_roles) +# will let you do +# GetNodes ( {'foo':'*bar*'},['hostname','foo']) +# +# diff --git a/PLC/Accessors/Accessors_sliverauth.py b/PLC/Accessors/Accessors_sliverauth.py new file mode 100644 index 0000000..5fcfbf3 --- /dev/null +++ b/PLC/Accessors/Accessors_sliverauth.py @@ -0,0 +1,38 @@ +# +# Thierry Parmentelat - INRIA +# +from PLC.Nodes import Node +from PLC.Interfaces import Interface +from PLC.Slices import Slice +from PLC.Sites import Site +from PLC.Persons import Person + +from PLC.Accessors.Factory import define_accessors, admin_roles, all_roles, tech_roles + +import sys +current_module = sys.modules[__name__] + +# this is how to request the features +define_accessors(current_module, Slice, "OmfControl","omf_control", + "slice/usertools","Pre-install and configure OMF Resource Controller in slice if set", + set_roles=all_roles, expose_in_api=True) + + +define_accessors(current_module, Slice, "SliverHMAC","enable_hmac", + "slice/usertools","Create HMAC keys for node in slice (slivers)", + set_roles=all_roles, expose_in_api=True) + +# this is where the crypto stuff gets stored +# this ends up in a sliver tag - the node creates that +# the accessors engine does not know how to create sliver accessors +# like e.g. GetSliverHmac(node,slice) +# but they are mentioned here as they are related to the above + +# Security capability to empower a slice to make an authenticated API call, set by silverauth NM plugin. 
+define_accessors(current_module, Slice, "Hmac","hmac", + "slice/auth", "Sliver authorization key, for authenticated API call", + set_roles=['admin','node']) +# sliver-dependant ssh key, used to authenticate the experimental plane with OMF tools +define_accessors(current_module, Slice, "SshKey", "ssh_key", + 'slice/auth', "Sliver public ssh key", + set_roles= ['admin','node']) diff --git a/PLC/Accessors/Accessors_standard.py b/PLC/Accessors/Accessors_standard.py new file mode 100644 index 0000000..55ff66e --- /dev/null +++ b/PLC/Accessors/Accessors_standard.py @@ -0,0 +1,115 @@ +# +# Thierry Parmentelat - INRIA +# +from PLC.Nodes import Node +from PLC.Interfaces import Interface +from PLC.Slices import Slice +from PLC.Sites import Site +from PLC.Persons import Person + +from PLC.Accessors.Factory import define_accessors, admin_roles, all_roles, tech_roles + +import sys +current_module = sys.modules[__name__] + +# NOTE. +# The 'Get' and 'Set' accessors defined here automagically create the corresponding TagType in the database +# for safety, some crucial tags are forced to be created at plc startup time, through the db-config.d mechanism +# + +# These following accessors are mostly of interest for implementing the +# The GetSliceFamily and GetNodeFlavour methods take into account various tags, +# esp. 
arch, fcdistro, pldistro, vref (for slices) and deployment (for nodes) +# as well as the global PLC_FLAVOUR config category +# in order to return all configuration details for a given node or slice + +### slice vref +define_accessors(current_module, Slice, "Vref", "vref", + "slice/config", "vserver reference image name", + set_roles=["admin","pi","user","node"], expose_in_api=True) +# this contains the actual script text +# if set, it supersedes 'initscript' +define_accessors(current_module, Slice, "InitscriptCode","initscript_code", + "slice/usertools", "Slice initialization script code", + set_roles=["admin","pi","user"], expose_in_api=True) +# this may contain a *name* that refers to the GetInitScripts +# it was initially designed to share scripts among slices +define_accessors(current_module, Slice, "Initscript","initscript", + "slice/usertools", "Slice initialization script name", + set_roles=["admin","pi","user"], expose_in_api=True) + +# BootManager might need to set any of these 3, so 'node' needs to be in set_roles +# needs 'pi' and 'tech' for managing their node +# needs 'user' for managing their slices +# needs 'admin' so the Set method is accessible +define_accessors(current_module, [Slice,Node], "Arch", "arch", + "node/slice/config", "node arch or slivers arch", + set_roles=all_roles, expose_in_api=True) +define_accessors(current_module, [Slice,Node], "Pldistro", "pldistro", + "node/slice/config/sfa", "PlanetLab distribution to use for node or slivers", + set_roles=all_roles, expose_in_api=True) +# fc of course historically was for fedora core +define_accessors(current_module, [Slice,Node], "Fcdistro", "fcdistro", + "node/slice/config", "Linux distribution to use for node or slivers", + set_roles=all_roles, expose_in_api=True) + +# the virtualization model to use - this is only used by the bootmanager for +# picking the right options e.g. 
prior to reinstalling +# see PLC_FLAVOUR_VIRT_MAP to see how the default gets computed +define_accessors(current_module, Node, "Virt", "virt", + "node/operation", 'typically "vs" or "lxc"', + set_roles=all_roles, expose_in_api=True) +# node deployment (alpha, beta, ...) +define_accessors(current_module, Node, "Deployment", "deployment", + "node/operation", 'typically "alpha", "beta", or "production"', + set_roles=["admin"], expose_in_api=True) +# extensions - leave this to admin only until the semantics is made more clear +define_accessors(current_module, Node, "Extensions", "extensions", + "node/config", "space-separated list of extensions to install", + set_roles=["admin"],expose_in_api=True) +# access HRN - this is the ideal definition of roles, even if AddNodeTag cannot handle this +define_accessors(current_module, [Node,Person,Slice,Site] , "Hrn", "hrn", + "node/person/slice/site/sfa", "SFA human readable name", + set_roles=all_roles, expose_in_api=True) + +# test nodes perform their installation from an uncompressed bootstrapfs +define_accessors(current_module, Node, "PlainBootstrapfs", "plain-bootstrapfs", + "node/config", "use uncompressed bootstrapfs when set", + set_roles=tech_roles) + +# the tags considered when creating a boot CD +define_accessors(current_module, Node, "Serial", "serial", + "node/bootcd", "serial to use when creating the boot CD -- see GetBootMedium", + set_roles=tech_roles) +define_accessors(current_module, Node, "Cramfs", "cramfs", + "node/bootcd", "boot CD to use cramfs if set -- see GetBootMedium", + set_roles=tech_roles) +define_accessors(current_module, Node, "Kvariant", "kvariant", + "node/bootcd", "the variant to use for creating the boot CD -- see GetBootMedium", + set_roles=tech_roles) +define_accessors(current_module, Node, "Kargs", "kargs", + "node/bootcd", "extra args to pass the kernel on the Boot CD -- see GetBootMedium", + set_roles=tech_roles) +define_accessors(current_module, Node, "NoHangcheck", "no-hangcheck", + 
"node/bootcd", "disable hangcheck on the boot CD if set -- see GetBootMedium", + set_roles=tech_roles) + +# interface +# xxx - don't expose yet in api interface and slices dont know how to use that yet +define_accessors(current_module, Interface, "Ifname", "ifname", + "interface/config", "linux name", + set_roles=tech_roles, expose_in_api=True) +define_accessors(current_module, Interface, "Driver", "driver", + "interface/config", "driver name", + set_roles=tech_roles) +define_accessors(current_module, Interface, "Alias", "alias", + "interface/config", "interface alias", + set_roles=tech_roles) +define_accessors(current_module, Interface, "Backdoor", "backdoor", + "interface/hidden", "For testing new settings", + set_roles=admin_roles) + +# we need to identify objects created through SFA interfaces +define_accessors(current_module, [Person,Slice,Site] , "SfaCreated", "sfa_created", + "person/slice/site/sfa", "Tag objects created through SFA interfaces", + set_roles=all_roles, expose_in_api=True) diff --git a/PLC/Accessors/Accessors_vicci.py b/PLC/Accessors/Accessors_vicci.py new file mode 100644 index 0000000..fc86b41 --- /dev/null +++ b/PLC/Accessors/Accessors_vicci.py @@ -0,0 +1,13 @@ +# Accessors for Vicci, used to toggle between Vicci simplified UI and full planetlab UI + +from PLC.Persons import Person + +from PLC.Accessors.Factory import define_accessors, admin_roles, all_roles, tech_roles + +import sys +current_module = sys.modules[__name__] + +define_accessors(current_module, Person, "Advanced", "advanced", + "person/vicci", "advanced mode", + get_roles=all_roles, set_roles=all_roles, expose_in_api=True) + diff --git a/PLC/Accessors/Accessors_wireless.py b/PLC/Accessors/Accessors_wireless.py new file mode 100644 index 0000000..3f3948a --- /dev/null +++ b/PLC/Accessors/Accessors_wireless.py @@ -0,0 +1,58 @@ +# +# Thierry Parmentelat - INRIA +# +from PLC.Nodes import Node +from PLC.Interfaces import Interface +from PLC.Slices import Slice + +from 
PLC.Accessors.Factory import define_accessors, all_roles, tech_roles + +import sys +current_module = sys.modules[__name__] + +#### Wireless +define_accessors(current_module, Interface, "Mode", "mode", + "interface/wifi", "Wifi operation mode - see iwconfig", + set_roles=tech_roles) +define_accessors(current_module, Interface, "Essid", "essid", + "interface/wifi", "Wireless essid - see iwconfig", + set_roles=tech_roles) +define_accessors(current_module, Interface, "Nw", "nw", + "interface/wifi", "Wireless nw - see iwconfig", + set_roles=tech_roles) +define_accessors(current_module, Interface, "Freq", "freq", + "interface/wifi", "Wireless freq - see iwconfig", + set_roles=tech_roles) +define_accessors(current_module, Interface, "Channel", "channel", + "interface/wifi", "Wireless channel - see iwconfig", + set_roles=tech_roles) +define_accessors(current_module, Interface, "Sens", "sens", + "interface/wifi", "Wireless sens - see iwconfig", + set_roles=tech_roles) +define_accessors(current_module, Interface, "Rate", "rate", + "interface/wifi", "Wireless rate - see iwconfig", + set_roles=tech_roles) +define_accessors(current_module, Interface, "Key", "key", + "interface/wifi", "Wireless key - see iwconfig key", + set_roles=tech_roles) +define_accessors(current_module, Interface, "Key1", "key1", + "interface/wifi", "Wireless key1 - see iwconfig key[1]", + set_roles=tech_roles) +define_accessors(current_module, Interface, "Key2", "key2", + "interface/wifi", "Wireless key2 - see iwconfig key[2]", + set_roles=tech_roles) +define_accessors(current_module, Interface, "Key3", "key3", + "interface/wifi", "Wireless key3 - see iwconfig key[3]", + set_roles=tech_roles) +define_accessors(current_module, Interface, "Key4", "key4", + "interface/wifi", "Wireless key4 - see iwconfig key[4]", + set_roles=tech_roles) +define_accessors(current_module, Interface, "SecurityMode", "securitymode", + "interface/wifi", "Wireless securitymode - see iwconfig enc", + set_roles=tech_roles) 
+define_accessors(current_module, Interface, "Iwconfig", "iwconfig", + "interface/wifi", "Wireless iwconfig - see ifup-wireless", + set_roles=tech_roles) +define_accessors(current_module, Interface, "Iwpriv", "iwpriv", + "interface/wifi", "Wireless iwpriv - see ifup-wireless", + set_roles=tech_roles) diff --git a/PLC/Accessors/Factory.py b/PLC/Accessors/Factory.py new file mode 100644 index 0000000..fec0d63 --- /dev/null +++ b/PLC/Accessors/Factory.py @@ -0,0 +1,245 @@ +# +# Thierry Parmentelat - INRIA +# +from types import NoneType + +from PLC.Faults import * + +from PLC.Auth import Auth +from PLC.Parameter import Parameter, Mixed +from PLC.Method import Method +from PLC.Accessor import Accessor, AccessorSingleton + +from PLC.Nodes import Nodes, Node +from PLC.NodeTags import NodeTags, NodeTag +from PLC.Interfaces import Interfaces, Interface +from PLC.InterfaceTags import InterfaceTags, InterfaceTag +from PLC.Slices import Slices, Slice +from PLC.SliceTags import SliceTags, SliceTag +from PLC.Sites import Sites, Site +from PLC.SiteTags import SiteTags, SiteTag +from PLC.Persons import Persons, Person +from PLC.PersonTags import PersonTags, PersonTag + +# need to import so the core classes get decorated with caller_may_write_tag +from PLC.AuthorizeHelpers import AuthorizeHelpers + +# known classes : { class -> details } +taggable_classes = { Node : {'table_class' : Nodes, + 'joins_class' : NodeTags, 'join_class' : NodeTag, + 'secondary_key': 'hostname'}, + Interface : {'table_class' : Interfaces, + 'joins_class': InterfaceTags, 'join_class': InterfaceTag, + 'secondary_key' : 'ip'}, + Slice: {'table_class' : Slices, + 'joins_class': SliceTags, 'join_class': SliceTag, + 'secondary_key':'name'}, + Site: {'table_class' : Sites, + 'joins_class': SiteTags, 'join_class': SiteTag, + 'secondary_key':'login_base'}, + Person: {'table_class' : Persons, + 'joins_class': PersonTags, 'join_class': PersonTag, + 'secondary_key':'email'}, + } + +# xxx probably defined someplace 
else +admin_roles = ['admin'] +person_roles = [ 'admin', 'pi', 'tech', 'user' ] +all_roles = [ 'admin', 'pi', 'tech', 'user', 'node' ] +tech_roles = [ 'admin', 'pi', 'tech' ] + +# +# generates 2 method classes: +# Get (auth, id_or_name) -> value or None +# Set (auth, id_or_name, value) -> value +# value is always a string, no cast nor typecheck for now +# +# The expose_in_api flag tells whether this tag may be handled +# through the Add/Get/Update methods as a native field +# +# note: set_roles get attached as 'roles' to the tagtype instance, +# also get_roles and set_roles get attached to the created methods +# +# in addition a convenience method like e.g. LocateNodeArch is defined +# in the Accessor class; its purpose is to retrieve the tag, or to create it if needed +# +# Legacy NOTE: +# prior to plcapi-5.0-19, this used to accept an additional argument +# named min_role_id; this was redundant and confusing, it has been +# removed, we now use set_roles to restrict write access on the corresponding tag + +# the convention here is that methodsuffix should be mixed case, e.g. MyStuff +# while tagname is expected to be lowercase +# you then end up with e.g. GetPersonMyStuff + +# the entry point accepts a single class or a list of classes +def define_accessors (module, objclasses, *args, **kwds): + if not isinstance(objclasses,list): + objclasses=[objclasses] + for objclass in objclasses: + define_accessors_ (module, objclass, *args, **kwds) + +# this is for one class +def define_accessors_ (module, objclass, methodsuffix, tagname, + category, description, + get_roles=all_roles, set_roles=admin_roles, + expose_in_api = False): + + if objclass not in taggable_classes: + try: + raise PLCInvalidArgument,"PLC.Accessors.Factory: unknown class %s"%objclass.__name__ + except: + raise PLCInvalidArgument,"PLC.Accessors.Factory: unknown class ??" 
+ + # side-effect on, say, Node.tags, if required + if expose_in_api: + getattr(objclass,'tags')[tagname]=Parameter(str,"accessor") + + classname=objclass.__name__ + get_name = "Get" + classname + methodsuffix + set_name = "Set" + classname + methodsuffix + locator_name = "Locate" + classname + methodsuffix + + # accessor method objects under PLC.Method.Method + get_class = type (get_name, (Method,), + {"__doc__":"Accessor 'get' method designed for %s objects using tag %s"%\ + (classname,tagname)}) + set_class = type (set_name, (Method,), + {"__doc__":"Accessor 'set' method designed for %s objects using tag %s"%\ + (classname,tagname)}) + + # accepts + get_accepts = [ Auth () ] + primary_key=objclass.primary_key + secondary_key = taggable_classes[objclass]['secondary_key'] + get_accepts += [ Mixed (objclass.fields[primary_key], objclass.fields[secondary_key]) ] + # for set, idem set of arguments + one additional arg, the new value + set_accepts = get_accepts + [ Parameter (str,"New tag value") ] + + # returns + get_returns = Mixed (Parameter (str), Parameter(NoneType)) + set_returns = Parameter(NoneType) + + # store in classes + setattr(get_class,'roles',get_roles) + setattr(get_class,'accepts',get_accepts) + setattr(get_class,'returns', get_returns) +# that was useful for legacy method only, but we now need type_checking +# setattr(get_class,'skip_type_check',True) + + setattr(set_class,'roles',set_roles) + setattr(set_class,'accepts',set_accepts) + setattr(set_class,'returns', set_returns) +# that was useful for legacy method only, but we now need type_checking +# setattr(set_class,'skip_type_check',True) + + table_class = taggable_classes[objclass]['table_class'] + joins_class = taggable_classes[objclass]['joins_class'] + join_class = taggable_classes[objclass]['join_class'] + + # locate the tag and create it if needed + # this method is attached to the Accessor class + def tag_locator (self, enforce=False): + return self.locate_or_create_tag (tagname=tagname, + 
category=category, + description=description, + roles=set_roles, + enforce=enforce) + + # attach it to the Accessor class + Accessor.register_tag_locator(locator_name,tag_locator) + + # body of the get method + def get_call (self, auth, id_or_name): + # locate the tag, see above + tag_locator = Accessor.retrieve_tag_locator(locator_name) + tag_type = tag_locator(AccessorSingleton(self.api)) + tag_type_id=tag_type['tag_type_id'] + + filter = {'tag_type_id':tag_type_id} + if isinstance (id_or_name,int): + filter[primary_key]=id_or_name + else: + filter[secondary_key]=id_or_name + joins = joins_class (self.api,filter,['value']) + if not joins: + # xxx - we return None even if id_or_name is not valid + return None + else: + return joins[0]['value'] + + # attach it + setattr (get_class,"call",get_call) + + # body of the set method + def set_call (self, auth, id_or_name, value): + # locate the object + if isinstance (id_or_name, int): + filter={primary_key:id_or_name} + else: + filter={secondary_key:id_or_name} +# we need the full monty b/c of the permission system +# objs = table_class(self.api, filter,[primary_key,secondary_key]) + objs = table_class(self.api, filter) + if not objs: + raise PLCInvalidArgument, "Cannot set tag on %s %r"%(objclass.__name__,id_or_name) + # the object being tagged + obj=objs[0] + primary_id = obj[primary_key] + + # locate the tag, see above + tag_locator = Accessor.retrieve_tag_locator(locator_name) + tag_type = tag_locator(AccessorSingleton(self.api)) + tag_type_id = tag_type['tag_type_id'] + + # check authorization + if not hasattr(objclass,'caller_may_write_tag'): + raise PLCAuthenticationFailure, "class %s misses method caller_may_write_tag"%objclass.__name__ + obj.caller_may_write_tag (self.api,self.caller,tag_type) + + # locate the join object (e.g. 
NodeTag or similar) + filter = {'tag_type_id':tag_type_id} + if isinstance (id_or_name,int): + filter[primary_key]=id_or_name + else: + filter[secondary_key]=id_or_name + joins = joins_class (self.api,filter) + # setting to something non void + if value is not None: + if not joins: + join = join_class (self.api) + join['tag_type_id']=tag_type_id + join[primary_key]=primary_id + join['value']=value + join.sync() + else: + joins[0]['value']=value + joins[0].sync() + # providing an empty value means clean up + else: + if joins: + join=joins[0] + join.delete() + # log it + self.event_objects= { objclass.__name__ : [primary_id] } + self.message=objclass.__name__ + if secondary_key in objs[0]: + self.message += " %s "%objs[0][secondary_key] + else: + self.message += " %d "%objs[0][primary_key] + self.message += "updated" + return value + + # attach it + setattr (set_class,"call",set_call) + + # define in module + setattr(module,get_name,get_class) + setattr(module,set_name,set_class) + # add in .methods + try: + methods=getattr(module,'methods') + except: + methods=[] + methods += [get_name,set_name] + setattr(module,'methods',methods) diff --git a/PLC/Accessors/__init__.py b/PLC/Accessors/__init__.py new file mode 100644 index 0000000..e2f7db4 --- /dev/null +++ b/PLC/Accessors/__init__.py @@ -0,0 +1,11 @@ +# each module to define in "methods" the set of methods that it defines + +__all__ = """ +Accessors_standard +Accessors_myslice +Accessors_wireless +Accessors_sliverauth +Accessors_site +Accessors_ipv6 +Accessors_vicci +""".split() diff --git a/PLC/AddressTypes.py b/PLC/AddressTypes.py new file mode 100644 index 0000000..e4ad4de --- /dev/null +++ b/PLC/AddressTypes.py @@ -0,0 +1,72 @@ +# +# Functions for interacting with the address_types table in the database +# +# Mark Huang +# Copyright (C) 2006 The Trustees of Princeton University +# + +from types import StringTypes +from PLC.Faults import * +from PLC.Parameter import Parameter +from PLC.Filter import Filter +from 
PLC.Table import Row, Table + +class AddressType(Row): + """ + Representation of a row in the address_types table. To use, + instantiate with a dict of values. + """ + + table_name = 'address_types' + primary_key = 'address_type_id' + join_tables = ['address_address_type'] + fields = { + 'address_type_id': Parameter(int, "Address type identifier"), + 'name': Parameter(str, "Address type", max = 20), + 'description': Parameter(str, "Address type description", max = 254), + } + + def validate_name(self, name): + # Make sure name is not blank + if not len(name): + raise PLCInvalidArgument, "Address type must be specified" + + # Make sure address type does not already exist + conflicts = AddressTypes(self.api, [name]) + for address_type_id in conflicts: + if 'address_type_id' not in self or self['address_type_id'] != address_type_id: + raise PLCInvalidArgument, "Address type name already in use" + + return name + +class AddressTypes(Table): + """ + Representation of the address_types table in the database. 
+ """ + + def __init__(self, api, address_type_filter = None, columns = None): + Table.__init__(self, api, AddressType, columns) + + sql = "SELECT %s FROM address_types WHERE True" % \ + ", ".join(self.columns) + + if address_type_filter is not None: + if isinstance(address_type_filter, (list, tuple, set)): + # Separate the list into integers and strings + ints = filter(lambda x: isinstance(x, (int, long)), address_type_filter) + strs = filter(lambda x: isinstance(x, StringTypes), address_type_filter) + address_type_filter = Filter(AddressType.fields, {'address_type_id': ints, 'name': strs}) + sql += " AND (%s) %s" % address_type_filter.sql(api, "OR") + elif isinstance(address_type_filter, dict): + address_type_filter = Filter(AddressType.fields, address_type_filter) + sql += " AND (%s) %s" % address_type_filter.sql(api, "AND") + elif isinstance(address_type_filter, (int, long)): + address_type_filter = Filter(AddressType.fields, {'address_type_id': address_type_filter}) + sql += " AND (%s) %s" % address_type_filter.sql(api, "AND") + elif isinstance(address_type_filter, StringTypes): + address_type_filter = Filter(AddressType.fields, {'name': address_type_filter}) + sql += " AND (%s) %s" % address_type_filter.sql(api, "AND") + else: + raise PLCInvalidArgument, "Wrong address type filter %r"%address_type_filter + + self.selectall(sql) diff --git a/PLC/Addresses.py b/PLC/Addresses.py new file mode 100644 index 0000000..6d6acee --- /dev/null +++ b/PLC/Addresses.py @@ -0,0 +1,99 @@ +from PLC.Faults import * +from PLC.Parameter import Parameter +from PLC.Table import Row, Table +from PLC.Filter import Filter +from PLC.AddressTypes import AddressType, AddressTypes + +class Address(Row): + """ + Representation of a row in the addresses table. To use, instantiate + with a dict of values. 
+ """ + + table_name = 'addresses' + primary_key = 'address_id' + join_tables = ['address_address_type', 'site_address'] + fields = { + 'address_id': Parameter(int, "Address identifier"), + 'line1': Parameter(str, "Address line 1", max = 254), + 'line2': Parameter(str, "Address line 2", max = 254, nullok = True), + 'line3': Parameter(str, "Address line 3", max = 254, nullok = True), + 'city': Parameter(str, "City", max = 254), + 'state': Parameter(str, "State or province", max = 254), + 'postalcode': Parameter(str, "Postal code", max = 64), + 'country': Parameter(str, "Country", max = 128), + 'address_type_ids': Parameter([int], "Address type identifiers"), + 'address_types': Parameter([str], "Address types"), + } + + def add_address_type(self, address_type, commit = True): + """ + Add address type to existing address. + """ + + assert 'address_id' in self + assert isinstance(address_type, AddressType) + assert 'address_type_id' in address_type + + address_id = self['address_id'] + address_type_id = address_type['address_type_id'] + + if address_type_id not in self['address_type_ids']: + assert address_type['name'] not in self['address_types'] + + self.api.db.do("INSERT INTO address_address_type (address_id, address_type_id)" \ + " VALUES(%(address_id)d, %(address_type_id)d)", + locals()) + + if commit: + self.api.db.commit() + + self['address_type_ids'].append(address_type_id) + self['address_types'].append(address_type['name']) + + def remove_address_type(self, address_type, commit = True): + """ + Add address type to existing address. 
+ """ + + assert 'address_id' in self + assert isinstance(address_type, AddressType) + assert 'address_type_id' in address_type + + address_id = self['address_id'] + address_type_id = address_type['address_type_id'] + + if address_type_id in self['address_type_ids']: + assert address_type['name'] in self['address_types'] + + self.api.db.do("DELETE FROM address_address_type" \ + " WHERE address_id = %(address_id)d" \ + " AND address_type_id = %(address_type_id)d", + locals()) + + if commit: + self.api.db.commit() + + self['address_type_ids'].remove(address_type_id) + self['address_types'].remove(address_type['name']) + +class Addresses(Table): + """ + Representation of row(s) from the addresses table in the + database. + """ + + def __init__(self, api, address_filter = None, columns = None): + Table.__init__(self, api, Address, columns) + + sql = "SELECT %s FROM view_addresses WHERE True" % \ + ", ".join(self.columns) + + if address_filter is not None: + if isinstance(address_filter, (list, tuple, set, int, long)): + address_filter = Filter(Address.fields, {'address_id': address_filter}) + elif isinstance(address_filter, dict): + address_filter = Filter(Address.fields, address_filter) + sql += " AND (%s) %s" % address_filter.sql(api) + + self.selectall(sql) diff --git a/PLC/Auth.py b/PLC/Auth.py new file mode 100644 index 0000000..3be444b --- /dev/null +++ b/PLC/Auth.py @@ -0,0 +1,333 @@ +# +# PLCAPI authentication parameters +# +# Mark Huang +# Copyright (C) 2006 The Trustees of Princeton University +# + +import crypt +try: + from hashlib import sha1 as sha +except ImportError: + import sha +import hmac +import time +import os + +from PLC.Faults import * +from PLC.Parameter import Parameter, Mixed +from PLC.Persons import Persons +from PLC.Nodes import Node, Nodes +from PLC.Interfaces import Interface, Interfaces +from PLC.Sessions import Session, Sessions +from PLC.Peers import Peer, Peers +from PLC.Keys import Keys +from PLC.Boot import notify_owners + +class 
Auth(Parameter): + """ + Base class for all API authentication methods, as well as a class + that can be used to represent all supported API authentication + methods. + """ + + def __init__(self, auth = None): + if auth is None: + auth = {'AuthMethod': Parameter(str, "Authentication method to use", optional = False)} + Parameter.__init__(self, auth, "API authentication structure") + + def check(self, method, auth, *args): + global auth_methods + + # Method.type_check() should have checked that all of the + # mandatory fields were present. + assert 'AuthMethod' in auth + + if auth['AuthMethod'] in auth_methods: + expected = auth_methods[auth['AuthMethod']]() + else: + sm = "'" + "', '".join(auth_methods.keys()) + "'" + raise PLCInvalidArgument("must be " + sm, "AuthMethod") + + # Re-check using the specified authentication method + method.type_check("auth", auth, expected, (auth,) + args) + +class GPGAuth(Auth): + """ + Proposed PlanetLab federation authentication structure. + """ + + def __init__(self): + Auth.__init__(self, { + 'AuthMethod': Parameter(str, "Authentication method to use, always 'gpg'", optional = False), + 'name': Parameter(str, "Peer or user name", optional = False), + 'signature': Parameter(str, "Message signature", optional = False) + }) + + def check(self, method, auth, *args): + try: + peers = Peers(method.api, [auth['name']]) + if peers: + if 'peer' not in method.roles: + raise PLCAuthenticationFailure, "GPGAuth: Not allowed to call method, missing 'peer' role" + + method.caller = peer = peers[0] + gpg_keys = [ peer['key'] ] + else: + persons = Persons(method.api, {'email': auth['name'], 'enabled': True, 'peer_id': None}) + if not persons: + raise PLCAuthenticationFailure, "GPGAuth: No such user '%s'" % auth['name'] + + method.caller = person = persons[0] + if not set(person['roles']).intersection(method.roles): + raise PLCAuthenticationFailure, "GPGAuth: Not allowed to call method, missing role" + + keys = Keys(method.api, {'key_id': 
person['key_ids'], 'key_type': "gpg", 'peer_id': None}) + gpg_keys = [ key['key'] for key in keys ] + + if not gpg_keys: + raise PLCAuthenticationFailure, "GPGAuth: No GPG key on record for peer or user '%s'"%auth['name'] + + for gpg_key in gpg_keys: + try: + from PLC.GPG import gpg_verify + gpg_verify(args, gpg_key, auth['signature'], method.name) + return + except PLCAuthenticationFailure, fault: + pass + + raise fault + + except PLCAuthenticationFailure, fault: + # XXX Send e-mail + raise fault + +class SessionAuth(Auth): + """ + Secondary authentication method. After authenticating with a + primary authentication method, call GetSession() to generate a + session key that may be used for subsequent calls. + """ + + def __init__(self): + Auth.__init__(self, { + 'AuthMethod': Parameter(str, "Authentication method to use, always 'session'", optional = False), + 'session': Parameter(str, "Session key", optional = False) + }) + + def check(self, method, auth, *args): + # Method.type_check() should have checked that all of the + # mandatory fields were present. + assert auth.has_key('session') + + # Get session record + sessions = Sessions(method.api, [auth['session']], expires = None) + if not sessions: + raise PLCAuthenticationFailure, "SessionAuth: No such session" + session = sessions[0] + + try: + if session['node_id'] is not None: + nodes = Nodes(method.api, {'node_id': session['node_id'], 'peer_id': None}) + if not nodes: + raise PLCAuthenticationFailure, "SessionAuth: No such node" + node = nodes[0] + + if 'node' not in method.roles: + # using PermissionDenied rather than AuthenticationFailure here because + # if that fails we don't want to delete the session.. 
+ raise PLCPermissionDenied, "SessionAuth: Not allowed to call method %s, missing 'node' role"%method.name + + method.caller = node + + elif session['person_id'] is not None and session['expires'] > time.time(): + persons = Persons(method.api, {'person_id': session['person_id'], 'enabled': True, 'peer_id': None}) + if not persons: + raise PLCAuthenticationFailure, "SessionAuth: No such enabled account" + person = persons[0] + + if not set(person['roles']).intersection(method.roles): + method_message="method %s has roles [%s]"%(method.name,','.join(method.roles)) + person_message="caller %s has roles [%s]"%(person['email'],','.join(person['roles'])) + # not PLCAuthenticationFailure b/c that would end the session.. + raise PLCPermissionDenied, "SessionAuth: missing role, %s -- %s"%(method_message,person_message) + + method.caller = person + + else: + raise PLCAuthenticationFailure, "SessionAuth: Invalid session" + + except PLCAuthenticationFailure, fault: + session.delete() + raise fault + +class BootAuth(Auth): + """ + PlanetLab version 3.x node authentication structure. Used by the + Boot Manager to make authenticated calls to the API based on a + unique node key or boot nonce value. + + The original parameter serialization code did not define the byte + encoding of strings, or the string encoding of all other types. We + define the byte encoding to be UTF-8, and the string encoding of + all other types to be however Python version 2.3 unicode() encodes + them. + """ + + def __init__(self): + Auth.__init__(self, { + 'AuthMethod': Parameter(str, "Authentication method to use, always 'hmac'", optional = False), + 'node_id': Parameter(int, "Node identifier", optional = False), + 'value': Parameter(str, "HMAC of node key and method call", optional = False) + }) + + def canonicalize(self, args): + values = [] + + for arg in args: + if isinstance(arg, list) or isinstance(arg, tuple): + # The old implementation did not recursively handle + # lists of lists. 
But neither did the old API itself. + values += self.canonicalize(arg) + elif isinstance(arg, dict): + # Yes, the comments in the old implementation are + # misleading. Keys of dicts are not included in the + # hash. + values += self.canonicalize(arg.values()) + else: + # We use unicode() instead of str(). + values.append(unicode(arg)) + + return values + + def check(self, method, auth, *args): + # Method.type_check() should have checked that all of the + # mandatory fields were present. + assert auth.has_key('node_id') + + if 'node' not in method.roles: + raise PLCAuthenticationFailure, "BootAuth: Not allowed to call method, missing 'node' role" + + try: + nodes = Nodes(method.api, {'node_id': auth['node_id'], 'peer_id': None}) + if not nodes: + raise PLCAuthenticationFailure, "BootAuth: No such node" + node = nodes[0] + + # Jan 2011 : removing support for old boot CDs + if node['key']: + key = node['key'] + else: + raise PLCAuthenticationFailure, "BootAuth: No node key" + + # Yes, this is the "canonicalization" method used. + args = self.canonicalize(args) + args.sort() + msg = "[" + "".join(args) + "]" + + # We encode in UTF-8 before calculating the HMAC, which is + # an 8-bit algorithm. + # python 2.6 insists on receiving a 'str' as opposed to a 'unicode' + digest = hmac.new(str(key), msg.encode('utf-8'), sha).hexdigest() + + if digest != auth['value']: + raise PLCAuthenticationFailure, "BootAuth: Call could not be authenticated" + + method.caller = node + + except PLCAuthenticationFailure, fault: + if nodes: + notify_owners(method, node, 'authfail', include_pis = True, include_techs = True, fault = fault) + raise fault + +class AnonymousAuth(Auth): + """ + PlanetLab version 3.x anonymous authentication structure. 
+ """ + + def __init__(self): + Auth.__init__(self, { + 'AuthMethod': Parameter(str, "Authentication method to use, always 'anonymous'", False), + }) + + def check(self, method, auth, *args): + if 'anonymous' not in method.roles: + raise PLCAuthenticationFailure, "AnonymousAuth: method cannot be called anonymously" + + method.caller = None + +class PasswordAuth(Auth): + """ + PlanetLab version 3.x password authentication structure. + """ + + def __init__(self): + Auth.__init__(self, { + 'AuthMethod': Parameter(str, "Authentication method to use, always 'password' or 'capability'", optional = False), + 'Username': Parameter(str, "PlanetLab username, typically an e-mail address", optional = False), + 'AuthString': Parameter(str, "Authentication string, typically a password", optional = False), + }) + + def check(self, method, auth, *args): + # Method.type_check() should have checked that all of the + # mandatory fields were present. + assert auth.has_key('Username') + + # Get record (must be enabled) + persons = Persons(method.api, {'email': auth['Username'].lower(), 'enabled': True, 'peer_id': None}) + if len(persons) != 1: + raise PLCAuthenticationFailure, "PasswordAuth: No such account" + + person = persons[0] + + if auth['Username'] == method.api.config.PLC_API_MAINTENANCE_USER: + # "Capability" authentication, whatever the hell that was + # supposed to mean. It really means, login as the special + # "maintenance user" using password authentication. Can + # only be used on particular machines (those in a list). 
+ sources = method.api.config.PLC_API_MAINTENANCE_SOURCES.split() + if method.source is not None and method.source[0] not in sources: + raise PLCAuthenticationFailure, "PasswordAuth: Not allowed to login to maintenance account" + + # Not sure why this is not stored in the DB + password = method.api.config.PLC_API_MAINTENANCE_PASSWORD + + if auth['AuthString'] != password: + raise PLCAuthenticationFailure, "PasswordAuth: Maintenance account password verification failed" + else: + # Compare encrypted plaintext against encrypted password stored in the DB + plaintext = auth['AuthString'].encode(method.api.encoding) + password = person['password'] + + # Protect against blank passwords in the DB + if password is None or password[:12] == "" or \ + crypt.crypt(plaintext, password[:12]) != password: + raise PLCAuthenticationFailure, "PasswordAuth: Password verification failed" + + if not set(person['roles']).intersection(method.roles): + method_message="method %s has roles [%s]"%(method.name,','.join(method.roles)) + person_message="caller %s has roles [%s]"%(person['email'],','.join(person['roles'])) + raise PLCAuthenticationFailure, "PasswordAuth: missing role, %s -- %s"%(method_message,person_message) + + method.caller = person + +auth_methods = {'session': SessionAuth, + 'password': PasswordAuth, + 'capability': PasswordAuth, + 'gpg': GPGAuth, + 'hmac': BootAuth, + 'hmac_dummybox': BootAuth, + 'anonymous': AnonymousAuth} + +path = os.path.dirname(__file__) + "/Auth.d" +try: + extensions = os.listdir(path) +except OSError, e: + extensions = [] +for extension in extensions: + if extension.startswith("."): + continue + if not extension.endswith(".py"): + continue + execfile("%s/%s" % (path, extension)) +del extensions diff --git a/PLC/AuthorizeHelpers.py b/PLC/AuthorizeHelpers.py new file mode 100644 index 0000000..ec8daaf --- /dev/null +++ b/PLC/AuthorizeHelpers.py @@ -0,0 +1,209 @@ +# +# Thierry Parmentelat - INRIA +# +from PLC.Faults import * +from PLC.Persons import 
Persons,Person +from PLC.Sites import Sites,Site +from PLC.Nodes import Nodes,Node +from PLC.Interfaces import Interfaces,Interface +from PLC.Slices import Slices,Slice + +class AuthorizeHelpers: + + @staticmethod + def person_tag_type_common_roles (api, person, tag_type): + return list (set(person['roles']).intersection(set(tag_type['roles']))) + + @staticmethod + def caller_may_access_tag_type (api, caller, tag_type): + if isinstance(caller,Person): + return len(AuthorizeHelpers.person_tag_type_common_roles(api,caller,tag_type))!=0 + elif isinstance(caller,Node): + return 'node' in tag_type['roles'] + else: + raise PLCInvalidArgument, "caller_may_access_tag_type - unexpected arg" + + @staticmethod + def person_may_access_person (api, caller_person, subject_person): + # keep it simple for now - could be a bit more advanced for PIs maybe + try: return caller_person['person_id'] == subject_person['person_id'] + except: return False + + @staticmethod + def person_in_site (api, person, site): + return site['site_id'] in person['site_ids'] + + @staticmethod + def person_in_slice (api, caller_person, slice): + return caller_person['person_id'] in slice['person_ids'] + + @staticmethod + def slice_in_site (api, slice, site): + return slice['site_id']==site['site_id'] + + @staticmethod + def node_id_in_slice (api, node_id_or_hostname, slice): + if isinstance (node_id_or_hostname,int): + return node_id_or_hostname in slice['node_ids'] + else: + try: return Nodes(api,node_id_or_hostname)[0]['node_id'] in slice['node_ids'] + except:return False + + @staticmethod + def node_in_slice (api, caller_node, slice): + return caller_node['node_id'] in slice['node_ids'] + + @staticmethod + def node_id_in_site (api, node_id_or_hostname, site): + if isinstance (node_id_or_hostname,int): + return node_id_or_hostname in site['node_ids'] + else: + try: return Nodes(api,node_id_or_hostname)[0]['node_id'] in site['node_ids'] + except:return False + + + @staticmethod + def node_match_id (api, 
node, node_id_or_hostname): + if isinstance (node_id_or_hostname,int): + return node['node_id']==node_id_or_hostname + else: + return node['hostname']==node_id_or_hostname + + @staticmethod + def interface_belongs_to_person (api,interface, person): + try: + node=Nodes(api,[interface['node_id']])[0] + return AuthorizeHelpers.node_belongs_to_person (api, node, person) + except: + return False + + @staticmethod + def node_belongs_to_person (api, node, person): + try: + site=Sites(api,[node['site_id']])[0] + return AuthorizeHelpers.person_in_site (api, person, site) + except: + import traceback + return False + + # does the slice belong to the site that the (pi) user is in ? + @staticmethod + def slice_belongs_to_pi (api, slice, pi): + return slice['site_id'] in pi['site_ids'] + + @staticmethod + def caller_is_node (api, caller, node): + return 'node_id' in caller and caller['node_id']==node['node_id'] + + +# authorization methods - check if a given caller can set tag on this object +# called in {Add,Update,Delete}Tags methods, and in the accessors created in factory +# attach these as .caller_may_write_tag so accessors can find it + +def caller_may_write_node_tag (node, api, caller, tag_type): + if 'roles' in caller and 'admin' in caller['roles']: + pass + elif not AuthorizeHelpers.caller_may_access_tag_type (api, caller, tag_type): + raise PLCPermissionDenied, "Role mismatch for writing tag %s"%(tag_type['tagname']) + elif AuthorizeHelpers.node_belongs_to_person (api, node, caller): + pass + elif AuthorizeHelpers.caller_is_node (api, caller, node): + pass + else: + raise PLCPermissionDenied, "Writing node tag: must belong in the same site as %s"%\ + (node['hostname']) + +setattr(Node,'caller_may_write_tag',caller_may_write_node_tag) + + +def caller_may_write_interface_tag (interface, api, caller, tag_type): + if 'roles' in caller and 'admin' in caller['roles']: + pass + elif not AuthorizeHelpers.caller_may_access_tag_type (api, caller, tag_type): + raise 
PLCPermissionDenied, "Role mismatch for writing tag %s"%(tag_type['tagname']) + elif AuthorizeHelpers.interface_belongs_to_person (api, interface, caller): + pass + else: + raise PLCPermissionDenied, "Writing interface tag: must belong in the same site as %s"%\ + (interface['ip']) + +setattr(Interface,'caller_may_write_tag',caller_may_write_interface_tag) + + +def caller_may_write_site_tag (site, api, caller, tag_type): + if 'roles' in caller and 'admin' in caller['roles']: + pass + elif not AuthorizeHelpers.caller_may_access_tag_type (api, caller, tag_type): + raise PLCPermissionDenied, "Role mismatch for writing tag %s"%(tag_type['tagname']) + elif AuthorizeHelpers.person_in_site (api, caller, site): + pass + else: + raise PLCPermissionDenied, "Writing site tag: must be part of site"%site['login_base'] + +setattr(Site,'caller_may_write_tag',caller_may_write_site_tag) + + +def caller_may_write_person_tag (person, api, caller, tag_type): + if 'roles' in caller and 'admin' in caller['roles']: + pass + # user can change tags on self + elif AuthorizeHelpers.person_may_access_person (api, caller, person): + pass + else: + raise PLCPermissionDenied, "Writing person tag: you can only change your own tags" + +setattr(Person,'caller_may_write_tag',caller_may_write_person_tag) + + +def caller_may_write_slice_tag (slice, api, caller, tag_type, node_id_or_hostname=None, nodegroup_id_or_name=None): + granted=False + reason="" + if 'roles' in caller and 'admin' in caller['roles']: + granted=True + # does caller have right role(s) ? 
this knows how to deal with caller being a node + elif not AuthorizeHelpers.caller_may_access_tag_type (api, caller, tag_type): + reason="caller may not access this tag type" + granted=False + # node callers: check the node is in the slice + elif isinstance(caller, Node): + # nodes can only set their own sliver tags + if node_id_or_hostname is None: + reason="wrong node caller" + granted=False + elif not AuthorizeHelpers.node_match_id (api, caller, node_id_or_hostname): + reason="node mismatch" + granted=False + elif not AuthorizeHelpers.node_in_slice (api, caller, slice): + reason="slice not in node" + granted=False + else: + granted=True + # caller is a non-admin person + else: + # only admins can handle slice tags on a nodegroup + if nodegroup_id_or_name: + raise PLCPermissionDenied, "Cannot set slice tag %s on nodegroup - restricted to admins"%\ + (tag_type['tagname']) + # if a node is specified it is expected to be in the slice + if node_id_or_hostname: + if not AuthorizeHelpers.node_id_in_slice (api, node_id_or_hostname, slice): + raise PLCPermissionDenied, "%s, node must be in slice when setting sliver tag" + # try all roles to find a match - tech are ignored b/c not in AddSliceTag.roles anyways + for role in AuthorizeHelpers.person_tag_type_common_roles(api,caller,tag_type): + reason="user not in slice; or slice does not belong to pi's site" + # regular users need to be in the slice + if role=='user': + if AuthorizeHelpers.person_in_slice(api, caller, slice): + granted=True ; break + # for convenience, pi's can tweak all the slices in their site + elif role=='pi': + if AuthorizeHelpers.slice_belongs_to_pi (api, slice, caller): + granted=True ; break + if not granted: +# try: print "DEBUG: caller=%s"%caller +# except: pass + raise PLCPermissionDenied, "Cannot write slice tag %s - %s"%(tag_type['tagname'],reason) + +setattr(Slice,'caller_may_write_tag',caller_may_write_slice_tag) + + diff --git a/PLC/Boot.py b/PLC/Boot.py new file mode 100644 index 
0000000..001211e --- /dev/null +++ b/PLC/Boot.py @@ -0,0 +1,59 @@ +# +# Boot Manager support +# +# Mark Huang +# Copyright (C) 2007 The Trustees of Princeton University +# + +from PLC.Faults import * +from PLC.Logger import logger +from PLC.Messages import Message, Messages +from PLC.Persons import Person, Persons +from PLC.Sites import Site, Sites +from PLC.sendmail import sendmail + +def notify_owners(method, node, message_id, + include_pis = False, include_techs = False, include_support = False, + fault = None): + messages = Messages(method.api, [message_id], enabled = True) + if not messages: + logger.error("No such message template '%s'" % message_id) + return 1 + message = messages[0] + + To = [] + + if method.api.config.PLC_MAIL_BOOT_ADDRESS: + To.append(("Boot Messages", method.api.config.PLC_MAIL_BOOT_ADDRESS)) + + if include_support and method.api.config.PLC_MAIL_SUPPORT_ADDRESS: + To.append(("%s Support" % method.api.config.PLC_NAME, + method.api.config.PLC_MAIL_SUPPORT_ADDRESS)) + + if include_pis or include_techs: + sites = Sites(method.api, [node['site_id']]) + if not sites: + raise PLCAPIError, "No site associated with node" + site = sites[0] + + persons = Persons(method.api, site['person_ids']) + for person in persons: + if (include_pis and 'pi' in person['roles'] and person['enabled']) or \ + (include_techs and 'tech' in person['roles'] and person['enabled']) : + To.append(("%s %s" % (person['first_name'], person['last_name']), person['email'])) + + # Send email + params = {'node_id': node['node_id'], + 'hostname': node['hostname'], + 'PLC_WWW_HOST': method.api.config.PLC_WWW_HOST, + 'PLC_WWW_SSL_PORT': method.api.config.PLC_WWW_SSL_PORT, + 'fault': fault} + + sendmail(method.api, To = To, + Subject = message['subject'] % params, + Body = message['template'] % params) + + # Logging variables + method.object_type = "Node" + method.object_ids = [node['node_id']] + method.message = "Sent message %s" % message_id diff --git a/PLC/BootStates.py 
b/PLC/BootStates.py new file mode 100644 index 0000000..2bbcf0d --- /dev/null +++ b/PLC/BootStates.py @@ -0,0 +1,51 @@ +# +# Functions for interacting with the boot_states table in the database +# +# Mark Huang +# Copyright (C) 2006 The Trustees of Princeton University +# + +from PLC.Faults import * +from PLC.Parameter import Parameter +from PLC.Table import Row, Table + +class BootState(Row): + """ + Representation of a row in the boot_states table. To use, + instantiate with a dict of values. + """ + + table_name = 'boot_states' + primary_key = 'boot_state' + join_tables = ['nodes'] + fields = { + 'boot_state': Parameter(str, "Boot state", max = 20), + } + + def validate_boot_state(self, name): + # Make sure name is not blank + if not len(name): + raise PLCInvalidArgument, "Boot state must be specified" + + # Make sure boot state does not already exist + conflicts = BootStates(self.api, [name]) + if conflicts: + raise PLCInvalidArgument, "Boot state name already in use" + + return name + +class BootStates(Table): + """ + Representation of the boot_states table in the database. + """ + + def __init__(self, api, boot_states = None): + Table.__init__(self, api, BootState) + + sql = "SELECT %s FROM boot_states" % \ + ", ".join(BootState.fields) + + if boot_states: + sql += " WHERE boot_state IN (%s)" % ", ".join( [ api.db.quote (s) for s in boot_states ] ) + + self.selectall(sql) diff --git a/PLC/ConfFiles.py b/PLC/ConfFiles.py new file mode 100644 index 0000000..32a1b54 --- /dev/null +++ b/PLC/ConfFiles.py @@ -0,0 +1,153 @@ +# +# Functions for interacting with the conf_files table in the database +# +# Mark Huang +# Copyright (C) 2006 The Trustees of Princeton University +# + +from PLC.Faults import * +from PLC.Parameter import Parameter +from PLC.Filter import Filter +from PLC.Table import Row, Table +from PLC.Nodes import Node, Nodes +from PLC.NodeGroups import NodeGroup, NodeGroups + +class ConfFile(Row): + """ + Representation of a row in the conf_files table. 
To use, + instantiate with a dict of values. + """ + + table_name = 'conf_files' + primary_key = 'conf_file_id' + join_tables = ['conf_file_node', 'conf_file_nodegroup'] + fields = { + 'conf_file_id': Parameter(int, "Configuration file identifier"), + 'enabled': Parameter(bool, "Configuration file is active"), + 'source': Parameter(str, "Relative path on the boot server where file can be downloaded", max = 255), + 'dest': Parameter(str, "Absolute path where file should be installed", max = 255), + 'file_permissions': Parameter(str, "chmod(1) permissions", max = 20), + 'file_owner': Parameter(str, "chown(1) owner", max = 50), + 'file_group': Parameter(str, "chgrp(1) owner", max = 50), + 'preinstall_cmd': Parameter(str, "Shell command to execute prior to installing", max = 1024, nullok = True), + 'postinstall_cmd': Parameter(str, "Shell command to execute after installing", max = 1024, nullok = True), + 'error_cmd': Parameter(str, "Shell command to execute if any error occurs", max = 1024, nullok = True), + 'ignore_cmd_errors': Parameter(bool, "Install file anyway even if an error occurs"), + 'always_update': Parameter(bool, "Always attempt to install file even if unchanged"), + 'node_ids': Parameter(int, "List of nodes linked to this file"), + 'nodegroup_ids': Parameter(int, "List of node groups linked to this file"), + } + + def add_node(self, node, commit = True): + """ + Add configuration file to node. + """ + + assert 'conf_file_id' in self + assert isinstance(node, Node) + assert 'node_id' in node + + conf_file_id = self['conf_file_id'] + node_id = node['node_id'] + + if node_id not in self['node_ids']: + self.api.db.do("INSERT INTO conf_file_node (conf_file_id, node_id)" \ + " VALUES(%(conf_file_id)d, %(node_id)d)", + locals()) + + if commit: + self.api.db.commit() + + self['node_ids'].append(node_id) + node['conf_file_ids'].append(conf_file_id) + + def remove_node(self, node, commit = True): + """ + Remove configuration file from node. 
+ """ + + assert 'conf_file_id' in self + assert isinstance(node, Node) + assert 'node_id' in node + + conf_file_id = self['conf_file_id'] + node_id = node['node_id'] + + if node_id in self['node_ids']: + self.api.db.do("DELETE FROM conf_file_node" \ + " WHERE conf_file_id = %(conf_file_id)d" \ + " AND node_id = %(node_id)d", + locals()) + + if commit: + self.api.db.commit() + + self['node_ids'].remove(node_id) + node['conf_file_ids'].remove(conf_file_id) + + def add_nodegroup(self, nodegroup, commit = True): + """ + Add configuration file to node group. + """ + + assert 'conf_file_id' in self + assert isinstance(nodegroup, NodeGroup) + assert 'nodegroup_id' in nodegroup + + conf_file_id = self['conf_file_id'] + nodegroup_id = nodegroup['nodegroup_id'] + + if nodegroup_id not in self['nodegroup_ids']: + self.api.db.do("INSERT INTO conf_file_nodegroup (conf_file_id, nodegroup_id)" \ + " VALUES(%(conf_file_id)d, %(nodegroup_id)d)", + locals()) + + if commit: + self.api.db.commit() + + self['nodegroup_ids'].append(nodegroup_id) + nodegroup['conf_file_ids'].append(conf_file_id) + + def remove_nodegroup(self, nodegroup, commit = True): + """ + Remove configuration file from node group. + """ + + assert 'conf_file_id' in self + assert isinstance(nodegroup, NodeGroup) + assert 'nodegroup_id' in nodegroup + + conf_file_id = self['conf_file_id'] + nodegroup_id = nodegroup['nodegroup_id'] + + if nodegroup_id in self['nodegroup_ids']: + self.api.db.do("DELETE FROM conf_file_nodegroup" \ + " WHERE conf_file_id = %(conf_file_id)d" \ + " AND nodegroup_id = %(nodegroup_id)d", + locals()) + + if commit: + self.api.db.commit() + + self['nodegroup_ids'].remove(nodegroup_id) + nodegroup['conf_file_ids'].remove(conf_file_id) + +class ConfFiles(Table): + """ + Representation of the conf_files table in the database. 
+ """ + + def __init__(self, api, conf_file_filter = None, columns = None): + Table.__init__(self, api, ConfFile, columns) + + sql = "SELECT %s FROM view_conf_files WHERE True" % \ + ", ".join(self.columns) + + if conf_file_filter is not None: + if isinstance(conf_file_filter, (list, tuple, set, int, long)): + conf_file_filter = Filter(ConfFile.fields, {'conf_file_id': conf_file_filter}) + elif isinstance(conf_file_filter, dict): + conf_file_filter = Filter(ConfFile.fields, conf_file_filter) + sql += " AND (%s) %s" % conf_file_filter.sql(api) + + self.selectall(sql) diff --git a/PLC/Config.py b/PLC/Config.py new file mode 100644 index 0000000..c1dbe70 --- /dev/null +++ b/PLC/Config.py @@ -0,0 +1,94 @@ +#!/usr/bin/python +# +# PLCAPI configuration store. Supports XML-based configuration file +# format exported by MyPLC. +# +# Mark Huang +# Copyright (C) 2004-2006 The Trustees of Princeton University +# + +import os +import sys + +from PLC.Faults import * +from PLC.Debug import profile + +# If we have been checked out into a directory at the same +# level as myplc, where plc_config.py lives. If we are in a +# MyPLC environment, plc_config.py has already been installed +# in site-packages. +myplc = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) + \ + os.sep + "myplc" + +class Config: + """ + Parse the bash/Python/PHP version of the configuration file. Very + fast but no type conversions. + """ + + def __init__(self, file = "/etc/planetlab/plc_config"): + # Load plc_config + try: + execfile(file, self.__dict__) + except: + # Try myplc directory + try: + execfile(myplc + os.sep + "plc_config", self.__dict__) + except: + raise PLCAPIError("Could not find plc_config in " + \ + file + ", " + \ + myplc + os.sep + "plc_config") + +class XMLConfig: + """ + Parse the XML configuration file directly. Takes longer but is + presumably more accurate. 
+ """ + + def __init__(self, file = "/etc/planetlab/plc_config.xml"): + try: + from plc_config import PLCConfiguration + except: + sys.path.append(myplc) + from plc_config import PLCConfiguration + + # Load plc_config.xml + try: + cfg = PLCConfiguration(file) + except: + # Try myplc directory + try: + cfg = PLCConfiguration(myplc + os.sep + "plc_config.xml") + except: + raise PLCAPIError("Could not find plc_config.xml in " + \ + file + ", " + \ + myplc + os.sep + "plc_config.xml") + + for (category, variablelist) in cfg.variables().values(): + for variable in variablelist.values(): + # Try to cast each variable to an appropriate Python + # type. + if variable['type'] == "int": + value = int(variable['value']) + elif variable['type'] == "double": + value = float(variable['value']) + elif variable['type'] == "boolean": + if variable['value'] == "true": + value = True + else: + value = False + else: + value = variable['value'] + + # Variables are split into categories such as + # "plc_api", "plc_db", etc. Within each category are + # variables such as "host", "port", etc. For backward + # compatibility, refer to variables by their shell + # names. + shell_name = category['id'].upper() + "_" + variable['id'].upper() + setattr(self, shell_name, value) + +if __name__ == '__main__': + import pprint + pprint = pprint.PrettyPrinter() + pprint.pprint(Config().__dict__.items()) diff --git a/PLC/Debug.py b/PLC/Debug.py new file mode 100644 index 0000000..b120871 --- /dev/null +++ b/PLC/Debug.py @@ -0,0 +1,44 @@ +# log system for PLCAPI +import time +import sys +import syslog + +from PLC.Logger import logger + +def profile(callable): + """ + Prints the runtime of the specified callable. Use as a decorator, e.g., + + @profile + def foo(...): + ... + + Or, equivalently, + + def foo(...): + ... + foo = profile(foo) + + Or inline: + + result = profile(foo)(...) 
+ """ + + def wrapper(*args, **kwds): + start = time.time() + result = callable(*args, **kwds) + end = time.time() + args = map(str, args) + args += ["%s = %s" % (name, str(value)) for (name, value) in kwds.items()] + logger.info("%s (%s): %f s" % (callable.__name__, ", ".join(args), end - start)) + return result + + return wrapper + +if __name__ == "__main__": + def sleep(seconds = 1): + time.sleep(seconds) + + sleep = profile(sleep) + + sleep(1) diff --git a/PLC/EventObjects.py b/PLC/EventObjects.py new file mode 100644 index 0000000..5f046bf --- /dev/null +++ b/PLC/EventObjects.py @@ -0,0 +1,56 @@ +# +# Functions for interacting with the events table in the database +# +# Tony Mack +# Copyright (C) 2006 The Trustees of Princeton University +# + +from PLC.Faults import * +from PLC.Parameter import Parameter +from PLC.Filter import Filter +from PLC.Debug import profile +from PLC.Table import Row, Table + +class EventObject(Row): + """ + Representation of a row in the event_object table. + """ + + table_name = 'event_object' + primary_key = 'event_id' + fields = { + 'event_id': Parameter(int, "Event identifier"), + 'person_id': Parameter(int, "Identifier of person responsible for event, if any"), + 'node_id': Parameter(int, "Identifier of node responsible for event, if any"), + 'fault_code': Parameter(int, "Event fault code"), + 'call_name': Parameter(str, "Call responsible for this event"), + 'call': Parameter(str, "Call responsible for this event, including paramters"), + 'message': Parameter(str, "High level description of this event"), + 'runtime': Parameter(float, "Runtime of event"), + 'time': Parameter(int, "Date and time that the event took place, in seconds since UNIX epoch", ro = True), + 'object_id': Parameter(int, "ID of objects affected by this event"), + 'object_type': Parameter(str, "What type of object is this event affecting") + } + +class EventObjects(Table): + """ + Representation of row(s) from the event_object table in the database. 
+ """ + + def __init__(self, api, event_filter = None, columns = None): + Table.__init__(self, api, EventObject, columns) + + sql = "SELECT %s FROM view_event_objects WHERE True" % \ + ", ".join(self.columns) + + if event_filter is not None: + if isinstance(event_filter, (list, tuple, set, int, long)): + event_filter = Filter(EventObject.fields, {'event_id': event_filter}) + sql += " AND (%s) %s" % event_filter.sql(api, "OR") + elif isinstance(event_filter, dict): + event_filter = Filter(EventObject.fields, event_filter) + sql += " AND (%s) %s" % event_filter.sql(api, "AND") + else: + raise PLCInvalidArgument, "Wrong event object filter %r"%event_filter + + self.selectall(sql) diff --git a/PLC/Events.py b/PLC/Events.py new file mode 100644 index 0000000..cb5d0e2 --- /dev/null +++ b/PLC/Events.py @@ -0,0 +1,77 @@ +# +# Functions for interacting with the events table in the database +# +# Tony Mack +# Copyright (C) 2006 The Trustees of Princeton University +# + +from PLC.Faults import * +from PLC.Parameter import Parameter +from PLC.Filter import Filter +from PLC.Debug import profile +from PLC.Table import Row, Table + +class Event(Row): + """ + Representation of a row in the events table. + """ + + table_name = 'events' + primary_key = 'event_id' + fields = { + 'event_id': Parameter(int, "Event identifier"), + 'person_id': Parameter(int, "Identifier of person responsible for event, if any"), + 'node_id': Parameter(int, "Identifier of node responsible for event, if any"), + 'auth_type': Parameter(int, "Type of auth used. i.e. 
AuthMethod"), + 'fault_code': Parameter(int, "Event fault code"), + 'call_name': Parameter(str, "Call responsible for this event"), + 'call': Parameter(str, "Call responsible for this event, including paramters"), + 'message': Parameter(str, "High level description of this event"), + 'runtime': Parameter(float, "Runtime of event"), + 'time': Parameter(int, "Date and time that the event took place, in seconds since UNIX epoch", ro = True), + 'object_ids': Parameter([int], "IDs of objects affected by this event"), + 'object_types': Parameter([str], "What type of object were affected by this event") + } + + def add_object(self, object_type, object_id, commit = True): + """ + Relate object to this event. + """ + + assert 'event_id' in self + + event_id = self['event_id'] + + if 'object_ids' not in self: + self['object_ids'] = [] + + if object_id not in self['object_ids']: + self.api.db.do("INSERT INTO event_object (event_id, object_id, object_type)" \ + " VALUES(%(event_id)d, %(object_id)d, %(object_type)s)", + locals()) + + if commit: + self.api.db.commit() + + self['object_ids'].append(object_id) + +class Events(Table): + """ + Representation of row(s) from the events table in the database. 
+ """ + + def __init__(self, api, event_filter = None, columns = None): + Table.__init__(self, api, Event, columns) + + sql = "SELECT %s FROM view_events WHERE True" % \ + ", ".join(self.columns) + + if event_filter is not None: + if isinstance(event_filter, (list, tuple, set, int, long)): + event_filter = Filter(Event.fields, {'event_id': event_filter}) + elif isinstance(event_filter, dict): + event_filter = Filter(Event.fields, event_filter) + else: + raise PLCInvalidArgument, "Wrong event object filter %r"%event_filter + sql += " AND (%s) %s" % event_filter.sql(api) + self.selectall(sql) diff --git a/PLC/Faults.py b/PLC/Faults.py new file mode 100644 index 0000000..cebe5c5 --- /dev/null +++ b/PLC/Faults.py @@ -0,0 +1,66 @@ +# +# PLCAPI XML-RPC faults +# +# Aaron Klingaman +# Mark Huang +# +# Copyright (C) 2004-2006 The Trustees of Princeton University +# + +import xmlrpclib + +class PLCFault(xmlrpclib.Fault): + def __init__(self, faultCode, faultString, extra = None): + if extra: + faultString += ": " + extra + xmlrpclib.Fault.__init__(self, faultCode, faultString) + +class PLCInvalidAPIMethod(PLCFault): + def __init__(self, method, role = None, extra = None): + faultString = "Invalid method " + method + if role: + faultString += " for role " + role + PLCFault.__init__(self, 100, faultString, extra) + +class PLCInvalidArgumentCount(PLCFault): + def __init__(self, got, min, max = min, extra = None): + if min != max: + expected = "%d-%d" % (min, max) + else: + expected = "%d" % min + faultString = "Expected %s arguments, got %d" % \ + (expected, got) + PLCFault.__init__(self, 101, faultString, extra) + +class PLCInvalidArgument(PLCFault): + def __init__(self, extra = None, name = None): + if name is not None: + faultString = "Invalid %s value" % name + else: + faultString = "Invalid argument" + PLCFault.__init__(self, 102, faultString, extra) + +class PLCAuthenticationFailure(PLCFault): + def __init__(self, extra = None): + faultString = "Failed to authenticate 
call" + PLCFault.__init__(self, 103, faultString, extra) + +class PLCDBError(PLCFault): + def __init__(self, extra = None): + faultString = "Database error" + PLCFault.__init__(self, 106, faultString, extra) + +class PLCPermissionDenied(PLCFault): + def __init__(self, extra = None): + faultString = "Permission denied" + PLCFault.__init__(self, 108, faultString, extra) + +class PLCNotImplemented(PLCFault): + def __init__(self, extra = None): + faultString = "Not fully implemented" + PLCFault.__init__(self, 109, faultString, extra) + +class PLCAPIError(PLCFault): + def __init__(self, extra = None): + faultString = "Internal API error" + PLCFault.__init__(self, 111, faultString, extra) diff --git a/PLC/Filter.py b/PLC/Filter.py new file mode 100644 index 0000000..d3a17b4 --- /dev/null +++ b/PLC/Filter.py @@ -0,0 +1,272 @@ +# +# Thierry Parmentelat - INRIA +# +from types import StringTypes +import time + +from PLC.Faults import * +from PLC.Parameter import Parameter, Mixed, python_type +from PLC.Logger import logger + +class Filter(Parameter, dict): + """ + A type of parameter that represents a filter on one or more + columns of a database table. + Special features provide support for negation, upper and lower bounds, + sorting and clipping and more... + + + fields should be a dictionary of field names and types. + As of PLCAPI-4.3-26, we provide support for filtering on + sequence types as well, with the special '&' and '|' modifiers. + example : fields = {'node_id': Parameter(int, "Node identifier"), + 'hostname': Parameter(int, "Fully qualified hostname", max = 255), + ...} + + + filter should be a dictionary of field names and values + representing the criteria for filtering. + example : filter = { 'hostname' : '*.edu' , site_id : [34,54] } + + + Special features: + + * a field starting with the ~ character means negation. 
+ example : filter = { '~peer_id' : None } + + * a field starting with < [ ] or > means lower than or greater than + < > uses strict comparison + [ ] is for using <= or >= instead + example : filter = { ']event_id' : 2305 } + example : filter = { '>time' : 1178531418 } + in this example the integer value denotes a unix timestamp + + * if a value is a sequence type, then it should represent + a list of possible values for that field + example : filter = { 'node_id' : [12,34,56] } + + * a (string) value containing either a * or a % character is + treated as a (sql) pattern; * are replaced with % that is the + SQL wildcard character. + example : filter = { 'hostname' : '*.jp' } + + * a field starting with '&' or '|' should refer to a sequence type + the semantics is then that the object value (expected to be a list) + should contain all (&) or any (|) value specified in the corresponding + filter value. See other examples below. + example : filter = { '|role_ids' : [ 20, 40 ] } + example : filter = { '|roles' : ['tech', 'pi'] } + example : filter = { '&roles' : ['admin', 'tech'] } + example : filter = { '&roles' : 'tech' } + + * the filter's keys starting with '-' are special and relate to sorting and clipping + * '-SORT' : a field name, or an ordered list of field names that are used for sorting + these fields may start with + (default) or - for denoting increasing or decreasing order + example : filter = { '-SORT' : [ '+node_id', '-hostname' ] } + * '-OFFSET' : the number of first rows to be ommitted + * '-LIMIT' : the amount of rows to be returned + example : filter = { '-OFFSET' : 100, '-LIMIT':25} + + * similarly the two special keys below allow to change the semantics of multi-keys filters + * '-AND' : select rows that match ALL the criteria (default) + * '-OR' : select rows that match ANY criteria + The value attached to these keys is ignored. + Please note however that because a Filter is a dict, you cannot provide two criteria on a given key. 
+ + + Here are a few realistic examples + + GetNodes ( { 'node_type' : 'regular' , 'hostname' : '*.edu' , + '-SORT' : 'hostname' , '-OFFSET' : 30 , '-LIMIT' : 25 } ) + would return regular (usual) nodes matching '*.edu' in alphabetical order from 31th to 55th + + GetNodes ( { '~peer_id' : None } ) + returns the foreign nodes - that have an integer peer_id + + GetPersons ( { '|role_ids' : [ 20 , 40] } ) + would return all persons that have either pi (20) or tech (40) roles + + GetPersons ( { '&role_ids' : 10 } ) + GetPersons ( { '&role_ids' : 10 } ) + GetPersons ( { '|role_ids' : [ 10 ] } ) + GetPersons ( { '|role_ids' : [ 10 ] } ) + all 4 forms are equivalent and would return all admin users in the system + """ + + debug=False +# debug=True + + def __init__(self, fields = {}, filter = {}, doc = "Attribute filter"): + # Store the filter in our dict instance + dict.__init__(self, filter) + + # Declare ourselves as a type of parameter that can take + # either a value or a list of values for each of the specified + # fields. + self.fields = dict ( [ ( field, Mixed (expected, [expected])) + for (field,expected) in fields.iteritems() ] ) + + # Null filter means no filter + Parameter.__init__(self, self.fields, doc = doc, nullok = True) + + def sql(self, api, join_with = "AND"): + """ + Returns a SQL conditional that represents this filter. 
+ """ + + if self.has_key('-AND'): + del self['-AND'] + join_with='AND' + if self.has_key('-OR'): + del self['-OR'] + join_with='OR' + + self.join_with=join_with + + # So that we always return something + if join_with == "AND": + conditionals = ["True"] + elif join_with == "OR": + conditionals = ["False"] + else: + assert join_with in ("AND", "OR") + + # init + sorts = [] + clips = [] + + for field, value in self.iteritems(): + # handle negation, numeric comparisons + # simple, 1-depth only mechanism + + modifiers={'~' : False, + '<' : False, '>' : False, + '[' : False, ']' : False, + '-' : False, + '&' : False, '|' : False, + } + def check_modifiers(field): + if field[0] in modifiers.keys(): + modifiers[field[0]] = True + field = field[1:] + return check_modifiers(field) + return field + field = check_modifiers(field) + + # filter on fields + if not modifiers['-']: + if field not in self.fields: + raise PLCInvalidArgument, "Invalid filter field '%s'" % field + + # handling array fileds always as compound values + if modifiers['&'] or modifiers['|']: + if not isinstance(value, (list, tuple, set)): + value = [value,] + + def get_op_and_val(value): + if value is None: + operator = "IS" + value = "NULL" + elif isinstance(value, StringTypes) and \ + (value.find("*") > -1 or value.find("%") > -1): + operator = "ILIKE" + # insert *** in pattern instead of either * or % + # we dont use % as requests are likely to %-expansion later on + # actual replacement to % done in PostgreSQL.py + value = value.replace ('*','***') + value = value.replace ('%','***') + value = str(api.db.quote(value)) + else: + operator = "=" + if modifiers['<']: + operator='<' + if modifiers['>']: + operator='>' + if modifiers['[']: + operator='<=' + if modifiers[']']: + operator='>=' + value = str(api.db.quote(value)) + return (operator, value) + + if isinstance(value, (list, tuple, set)): + # handling filters like '~slice_id':[] + # this should return true, as it's the opposite of 'slice_id':[] 
which is false + # prior to this fix, 'slice_id':[] would have returned ``slice_id IN (NULL) '' which is unknown + # so it worked by coincidence, but the negation '~slice_ids':[] would return false too + if not value: + if modifiers['&'] or modifiers['|']: + operator = "=" + value = "'{}'" + else: + field="" + operator="" + value = "FALSE" + clause = "%s %s %s" % (field, operator, value) + else: + vals = {} + for val in value: + base_op, val = get_op_and_val(val) + if base_op in vals: + vals[base_op].append(val) + else: + vals[base_op] = [val] + subclauses = [] + for operator in vals.keys(): + if operator == '=': + if modifiers['&']: + subclauses.append("(%s @> ARRAY[%s])" % (field, ",".join(vals[operator]))) + elif modifiers['|']: + subclauses.append("(%s && ARRAY[%s])" % (field, ",".join(vals[operator]))) + else: + subclauses.append("(%s IN (%s))" % (field, ",".join(vals[operator]))) + elif operator == 'IS': + subclauses.append("(%s IS NULL)" % field) + else: + for value in vals[operator]: + subclauses.append("(%s %s %s)" % (field, operator, value)) + clause = "(" + " OR ".join(subclauses) + ")" + else: + operator, value = get_op_and_val(value) + + clause = "%s %s %s" % (field, operator, value) + + if modifiers['~']: + clause = " ( NOT %s ) " % (clause) + + conditionals.append(clause) + # sorting and clipping + else: + if field not in ('SORT','OFFSET','LIMIT'): + raise PLCInvalidArgument, "Invalid filter, unknown sort and clip field %r"%field + # sorting + if field == 'SORT': + if not isinstance(value,(list,tuple,set)): + value=[value] + for field in value: + order = 'ASC' + if field[0] == '+': + field = field[1:] + elif field[0] == '-': + field = field[1:] + order = 'DESC' + if field not in self.fields: + raise PLCInvalidArgument, "Invalid field %r in SORT filter"%field + sorts.append("%s %s"%(field,order)) + # clipping + elif field == 'OFFSET': + clips.append("OFFSET %d"%value) + # clipping continued + elif field == 'LIMIT' : + clips.append("LIMIT %d"%value) + 
+ where_part = (" %s " % join_with).join(conditionals) + clip_part = "" + if sorts: + clip_part += " ORDER BY " + ",".join(sorts) + if clips: + clip_part += " " + " ".join(clips) + if Filter.debug: + logger.debug('Filter.sql: where_part={} - clip_part={}' + .format(where_part, clip_part)) + return where_part, clip_part diff --git a/PLC/GPG.py b/PLC/GPG.py new file mode 100644 index 0000000..1dcc0cf --- /dev/null +++ b/PLC/GPG.py @@ -0,0 +1,178 @@ +# +# Python "binding" for GPG. I'll write GPGME bindings eventually. The +# intent is to use GPG to sign method calls, as a way of identifying +# and authenticating peers. Calls should still go over an encrypted +# transport such as HTTPS, with certificate checking. +# +# Mark Huang +# Copyright (C) 2006 The Trustees of Princeton University +# + +import os +import xmlrpclib +import shutil +from types import StringTypes +from StringIO import StringIO +from subprocess import Popen, PIPE, call +from tempfile import NamedTemporaryFile, mkdtemp +from lxml import etree + +from PLC.Faults import * + +def canonicalize(args, methodname = None, methodresponse = False): + """ + Returns a canonicalized XML-RPC representation of the specified + method call (methodname != None) or response (methodresponse = + True). + """ + + xml = xmlrpclib.dumps(args, methodname, methodresponse, encoding = 'utf-8', allow_none = 1) + dom = etree.fromstring(xml) + canonical=etree.tostring(dom) + # pre-f20 version was using Canonicalize from PyXML + # from xml.dom.ext import Canonicalize + # Canonicalize(), though it claims to, does not encode unicode + # nodes to UTF-8 properly and throws an exception unless you write + # the stream to a file object, so just encode it ourselves. + return canonical.encode('utf-8') + +def gpg_export(keyring, armor = True): + """ + Exports the specified public keyring file. 
+ """ + + homedir = mkdtemp() + args = ["gpg", "--batch", "--no-tty", + "--homedir", homedir, + "--no-default-keyring", + "--keyring", keyring, + "--export"] + if armor: + args.append("--armor") + + p = Popen(args, stdin = PIPE, stdout = PIPE, stderr = PIPE, close_fds = True) + export = p.stdout.read() + err = p.stderr.read() + rc = p.wait() + + # Clean up + shutil.rmtree(homedir) + + if rc: + raise PLCAuthenticationFailure, "GPG export failed with return code %d: %s" % (rc, err) + + return export + +def gpg_sign(args, secret_keyring, keyring, methodname = None, methodresponse = False, detach_sign = True): + """ + Signs the specified method call (methodname != None) or response + (methodresponse == True) using the specified GPG keyring files. If + args is not a tuple representing the arguments to the method call + or the method response value, then it should be a string + representing a generic message to sign (detach_sign == True) or + sign/encrypt (detach_sign == False) specified). Returns the + detached signature (detach_sign == True) or signed/encrypted + message (detach_sign == False). 
+ """ + + # Accept either an opaque string blob or a Python tuple + if isinstance(args, StringTypes): + message = args + elif isinstance(args, tuple): + message = canonicalize(args, methodname, methodresponse) + + # Use temporary trustdb + homedir = mkdtemp() + + cmd = ["gpg", "--batch", "--no-tty", + "--homedir", homedir, + "--no-default-keyring", + "--secret-keyring", secret_keyring, + "--keyring", keyring, + "--armor"] + + if detach_sign: + cmd.append("--detach-sign") + else: + cmd.append("--sign") + + p = Popen(cmd, stdin = PIPE, stdout = PIPE, stderr = PIPE) + p.stdin.write(message) + p.stdin.close() + signature = p.stdout.read() + err = p.stderr.read() + rc = p.wait() + + # Clean up + shutil.rmtree(homedir) + + if rc: + raise PLCAuthenticationFailure, "GPG signing failed with return code %d: %s" % (rc, err) + + return signature + +def gpg_verify(args, key, signature = None, methodname = None, methodresponse = False): + """ + Verifies the signature of the specified method call (methodname != + None) or response (methodresponse = True) using the specified + public key material. If args is not a tuple representing the + arguments to the method call or the method response value, then it + should be a string representing a generic message to verify (if + signature is specified) or verify/decrypt (if signature is not + specified). 
+ """ + + # Accept either an opaque string blob or a Python tuple + if isinstance(args, StringTypes): + message = args + else: + message = canonicalize(args, methodname, methodresponse) + + # Write public key to temporary file + if os.path.exists(key): + keyfile = None + keyfilename = key + else: + keyfile = NamedTemporaryFile(suffix = '.pub') + keyfile.write(key) + keyfile.flush() + keyfilename = keyfile.name + + # Import public key into temporary keyring + homedir = mkdtemp() + call(["gpg", "--batch", "--no-tty", "--homedir", homedir, "--import", keyfilename], + stdin = PIPE, stdout = PIPE, stderr = PIPE) + + cmd = ["gpg", "--batch", "--no-tty", + "--homedir", homedir] + + if signature is not None: + # Write detached signature to temporary file + sigfile = NamedTemporaryFile() + sigfile.write(signature) + sigfile.flush() + cmd += ["--verify", sigfile.name, "-"] + else: + # Implicit signature + sigfile = None + cmd.append("--decrypt") + + p = Popen(cmd, stdin = PIPE, stdout = PIPE, stderr = PIPE) + p.stdin.write(message) + p.stdin.close() + if signature is None: + message = p.stdout.read() + err = p.stderr.read() + rc = p.wait() + + # Clean up + shutil.rmtree(homedir) + if sigfile: + sigfile.close() + if keyfile: + keyfile.close() + + if rc: + raise PLCAuthenticationFailure, "GPG verification failed with return code %d: %s" % (rc, err) + + return message diff --git a/PLC/Ilinks.py b/PLC/Ilinks.py new file mode 100644 index 0000000..d99f13c --- /dev/null +++ b/PLC/Ilinks.py @@ -0,0 +1,49 @@ +# +# Thierry Parmentelat - INRIA +# +from PLC.Faults import * +from PLC.Parameter import Parameter +from PLC.Filter import Filter +from PLC.Table import Row, Table +from PLC.Interfaces import Interface, Interfaces +from PLC.TagTypes import TagType, TagTypes + +class Ilink(Row): + """ + Representation of a row in the ilink table. + To use, instantiate with a dict of values. 
+ """ + + table_name = 'ilink' + primary_key = 'ilink_id' + fields = { + 'ilink_id': Parameter(int, "ilink identifier"), + 'tag_type_id': TagType.fields['tag_type_id'], + 'src_interface_id': Parameter(int, "source interface identifier"), + 'dst_interface_id': Parameter(int, "destination interface identifier"), + 'value': Parameter( str, "optional ilink value"), + } + +class Ilinks(Table): + """ + Representation of row(s) from the ilink table in the + database. + """ + + def __init__(self, api, ilink_filter = None, columns = None): + Table.__init__(self, api, Ilink, columns) + + sql = "SELECT %s FROM view_ilinks WHERE True" % \ + ", ".join(self.columns) + + if ilink_filter is not None: + if isinstance(ilink_filter, (list, tuple, set, int, long)): + ilink_filter = Filter(Ilink.fields, {'ilink_id': ilink_filter}) + elif isinstance(ilink_filter, dict): + ilink_filter = Filter(Ilink.fields, ilink_filter) + else: + raise PLCInvalidArgument, "Wrong ilink filter %r"%ilink_filter + sql += " AND (%s) %s" % ilink_filter.sql(api) + + + self.selectall(sql) diff --git a/PLC/InitScripts.py b/PLC/InitScripts.py new file mode 100644 index 0000000..c248c83 --- /dev/null +++ b/PLC/InitScripts.py @@ -0,0 +1,73 @@ +# +# Functions for interacting with the initscripts table in the database +# +# Tony Mack +# Copyright (C) 2006 The Trustees of Princeton University +# + +from types import StringTypes +from PLC.Faults import * +from PLC.Parameter import Parameter +from PLC.Filter import Filter +from PLC.Table import Row, Table + +class InitScript(Row): + """ + Representation of a row in the initscripts table. To use, + instantiate with a dict of values. 
+ """ + + table_name = 'initscripts' + primary_key = 'initscript_id' + join_tables = [] + fields = { + 'initscript_id': Parameter(int, "Initscript identifier"), + 'name': Parameter(str, "Initscript name", max = 254), + 'enabled': Parameter(bool, "Initscript is active"), + 'script': Parameter(str, "Initscript"), + } + + def validate_name(self, name): + """ + validates the script name + """ + + conflicts = InitScripts(self.api, [name]) + for initscript in conflicts: + if 'initscript_id' not in self or self['initscript_id'] != initscript['initscript_id']: + raise PLCInvalidArgument, "Initscript name already in use" + + return name + + +class InitScripts(Table): + """ + Representation of the initscripts table in the database. + """ + + def __init__(self, api, initscript_filter = None, columns = None): + Table.__init__(self, api, InitScript, columns) + + sql = "SELECT %s FROM initscripts WHERE True" % \ + ", ".join(self.columns) + + if initscript_filter is not None: + if isinstance(initscript_filter, (list, tuple, set)): + # Separate the list into integers and strings + ints = filter(lambda x: isinstance(x, (int, long)), initscript_filter) + strs = filter(lambda x: isinstance(x, StringTypes), initscript_filter) + initscript_filter = Filter(InitScript.fields, {'initscript_id': ints, 'name': strs }) + sql += " AND (%s) %s" % initscript_filter.sql(api, "OR") + elif isinstance(initscript_filter, dict): + initscript_filter = Filter(InitScript.fields, initscript_filter) + sql += " AND (%s) %s" % initscript_filter.sql(api, "AND") + elif isinstance(initscript_filter, (int, long)): + initscript_filter = Filter(InitScript.fields, {'initscript_id': initscript_filter}) + sql += " AND (%s) %s" % initscript_filter.sql(api, "AND") + elif isinstance(initscript_filter, StringTypes): + initscript_filter = Filter(InitScript.fields, {'name': initscript_filter}) + sql += " AND (%s) %s" % initscript_filter.sql(api, "AND") + else: + raise PLCInvalidArgument, "Wrong initscript filter 
%r"%initscript_filter + + self.selectall(sql) diff --git a/PLC/InterfaceTags.py b/PLC/InterfaceTags.py new file mode 100644 index 0000000..af1deb4 --- /dev/null +++ b/PLC/InterfaceTags.py @@ -0,0 +1,54 @@ +# +# Thierry Parmentelat - INRIA +# +from PLC.Faults import * +from PLC.Parameter import Parameter +from PLC.Filter import Filter +from PLC.Table import Row, Table +from PLC.TagTypes import TagType, TagTypes +from PLC.Interfaces import Interface + +class InterfaceTag(Row): + """ + Representation of a row in the interface_tag. + To use, instantiate with a dict of values. + """ + + table_name = 'interface_tag' + primary_key = 'interface_tag_id' + fields = { + 'interface_tag_id': Parameter(int, "Interface setting identifier"), + 'interface_id': Interface.fields['interface_id'], + 'ip': Interface.fields['ip'], + 'tag_type_id': TagType.fields['tag_type_id'], + 'tagname': TagType.fields['tagname'], + 'description': TagType.fields['description'], + 'category': TagType.fields['category'], + 'value': Parameter(str, "Interface setting value"), + ### relations + + } + +class InterfaceTags(Table): + """ + Representation of row(s) from the interface_tag table in the + database. 
+ """ + + def __init__(self, api, interface_tag_filter = None, columns = None): + Table.__init__(self, api, InterfaceTag, columns) + + sql = "SELECT %s FROM view_interface_tags WHERE True" % \ + ", ".join(self.columns) + + if interface_tag_filter is not None: + if isinstance(interface_tag_filter, (list, tuple, set, int, long)): + interface_tag_filter = Filter(InterfaceTag.fields, {'interface_tag_id': interface_tag_filter}) + elif isinstance(interface_tag_filter, dict): + interface_tag_filter = Filter(InterfaceTag.fields, interface_tag_filter) + else: + raise PLCInvalidArgument, "Wrong interface setting filter %r"%interface_tag_filter + sql += " AND (%s) %s" % interface_tag_filter.sql(api) + + + self.selectall(sql) diff --git a/PLC/Interfaces.py b/PLC/Interfaces.py new file mode 100644 index 0000000..0e6c728 --- /dev/null +++ b/PLC/Interfaces.py @@ -0,0 +1,303 @@ +# +# Functions for interacting with the interfaces table in the database +# +# Mark Huang +# Copyright (C) 2006 The Trustees of Princeton University +# + +from types import StringTypes +import socket +import struct + +from PLC.Faults import * +from PLC.Parameter import Parameter +from PLC.Filter import Filter +from PLC.Debug import profile +from PLC.Table import Row, Table +from PLC.NetworkTypes import NetworkType, NetworkTypes +from PLC.NetworkMethods import NetworkMethod, NetworkMethods +import PLC.Nodes + +def valid_ipv4(ip): + try: + ip = socket.inet_ntoa(socket.inet_aton(ip)) + return True + except socket.error: + return False + +def valid_ipv6(ip): + try: + ip = socket.inet_ntop(socket.AF_INET6, socket.inet_pton(socket.AF_INET6, ip)) + return True + except socket.error: + return False + +def valid_ip(ip): + return valid_ipv4(ip) or valid_ipv6(ip) + +def in_same_network_ipv4(address1, address2, netmask): + """ + Returns True if two IPv4 addresses are in the same network. Faults + if an address is invalid. 
+ """ + address1 = struct.unpack('>L', socket.inet_aton(address1))[0] + address2 = struct.unpack('>L', socket.inet_aton(address2))[0] + netmask = struct.unpack('>L', socket.inet_aton(netmask))[0] + + return (address1 & netmask) == (address2 & netmask) + +def in_same_network_ipv6(address1, address2, netmask): + """ + Returns True if two IPv6 addresses are in the same network. Faults + if an address is invalid. + """ + address1 = struct.unpack('>2Q', socket.inet_pton(socket.AF_INET6, address1))[0] + address2 = struct.unpack('>2Q', socket.inet_pton(socket.AF_INET6, address2))[0] + netmask = struct.unpack('>2Q', socket.inet_pton(socket.AF_INET6, netmask))[0] + + return (address1 & netmask) == (address2 & netmask) + +def in_same_network(address1, address2, netmask): + return in_same_network_ipv4(address1, address2, netmask) or \ + in_same_network_ipv6(address1, address2, netmask) + +class Interface(Row): + """ + Representation of a row in the interfaces table. To use, optionally + instantiate with a dict of values. Update as you would a + dict. Commit to the database with sync(). 
+ """ + + table_name = 'interfaces' + primary_key = 'interface_id' + join_tables = ['interface_tag'] + fields = { + 'interface_id': Parameter(int, "Node interface identifier"), + 'method': Parameter(str, "Addressing method (e.g., 'static' or 'dhcp')"), + 'type': Parameter(str, "Address type (e.g., 'ipv4')"), + 'ip': Parameter(str, "IP address", nullok = True), + 'mac': Parameter(str, "MAC address", nullok = True), + 'gateway': Parameter(str, "IP address of primary gateway", nullok = True), + 'network': Parameter(str, "Subnet address", nullok = True), + 'broadcast': Parameter(str, "Network broadcast address", nullok = True), + 'netmask': Parameter(str, "Subnet mask", nullok = True), + 'dns1': Parameter(str, "IP address of primary DNS server", nullok = True), + 'dns2': Parameter(str, "IP address of secondary DNS server", nullok = True), + 'bwlimit': Parameter(int, "Bandwidth limit", min = 0, nullok = True), + 'hostname': Parameter(str, "(Optional) Hostname", nullok = True), + 'node_id': Parameter(int, "Node associated with this interface"), + 'is_primary': Parameter(bool, "Is the primary interface for this node"), + 'interface_tag_ids' : Parameter([int], "List of interface settings"), + 'last_updated': Parameter(int, "Date and time when node entry was created", ro = True), + } + + view_tags_name = "view_interface_tags" + tags = {} + + def validate_method(self, method): + network_methods = [row['method'] for row in NetworkMethods(self.api)] + if method not in network_methods: + raise PLCInvalidArgument, "Invalid addressing method %s"%method + return method + + def validate_type(self, type): + network_types = [row['type'] for row in NetworkTypes(self.api)] + if type not in network_types: + raise PLCInvalidArgument, "Invalid address type %s"%type + return type + + def validate_ip(self, ip): + if ip and not valid_ip(ip): + raise PLCInvalidArgument, "Invalid IP address %s"%ip + return ip + + def validate_mac(self, mac): + if not mac: + return mac + + try: + bytes = 
mac.split(":") + if len(bytes) < 6: + raise Exception + for i, byte in enumerate(bytes): + byte = int(byte, 16) + if byte < 0 or byte > 255: + raise Exception + bytes[i] = "%02x" % byte + mac = ":".join(bytes) + except: + raise PLCInvalidArgument, "Invalid MAC address %s"%mac + + return mac + + validate_gateway = validate_ip + validate_network = validate_ip + validate_broadcast = validate_ip + validate_netmask = validate_ip + validate_dns1 = validate_ip + validate_dns2 = validate_ip + + def validate_bwlimit(self, bwlimit): + if not bwlimit: + return bwlimit + + if bwlimit < 500000: + raise PLCInvalidArgument, 'Minimum bw is 500 kbs' + + return bwlimit + + def validate_hostname(self, hostname): + # Optional + if not hostname: + return hostname + + if not PLC.Nodes.valid_hostname(hostname): + raise PLCInvalidArgument, "Invalid hostname %s"%hostname + + return hostname + + def validate_node_id(self, node_id): + nodes = PLC.Nodes.Nodes(self.api, [node_id]) + if not nodes: + raise PLCInvalidArgument, "No such node %d"%node_id + + return node_id + + def validate_is_primary(self, is_primary): + """ + Set this interface to be the primary one. + """ + + if is_primary: + nodes = PLC.Nodes.Nodes(self.api, [self['node_id']]) + if not nodes: + raise PLCInvalidArgument, "No such node %d"%node_id + node = nodes[0] + + if node['interface_ids']: + conflicts = Interfaces(self.api, node['interface_ids']) + for interface in conflicts: + if ('interface_id' not in self or \ + self['interface_id'] != interface['interface_id']) and \ + interface['is_primary']: + raise PLCInvalidArgument, "Can only set one primary interface per node" + + return is_primary + + def validate(self): + """ + Flush changes back to the database. 
+ """ + + # Basic validation + Row.validate(self) + + assert 'method' in self + method = self['method'] + + if method == "proxy" or method == "tap": + if 'mac' in self and self['mac']: + raise PLCInvalidArgument, "For %s method, mac should not be specified" % method + if 'ip' not in self or not self['ip']: + raise PLCInvalidArgument, "For %s method, ip is required" % method + if method == "tap" and ('gateway' not in self or not self['gateway']): + raise PLCInvalidArgument, "For tap method, gateway is required and should be " \ + "the IP address of the node that proxies for this address" + # Should check that the proxy address is reachable, but + # there's no way to tell if the only primary interface is + # DHCP! + + elif method == "static": + if self['type'] == 'ipv4': + for key in ['gateway', 'dns1']: + if key not in self or not self[key]: + if 'is_primary' in self and self['is_primary'] is True: + raise PLCInvalidArgument, "For static method primary network, %s is required" % key + else: + globals()[key] = self[key] + for key in ['ip', 'network', 'broadcast', 'netmask']: + if key not in self or not self[key]: + raise PLCInvalidArgument, "For static method, %s is required" % key + globals()[key] = self[key] + if not in_same_network(ip, network, netmask): + raise PLCInvalidArgument, "IP address %s is inconsistent with network %s/%s" % \ + (ip, network, netmask) + if not in_same_network(broadcast, network, netmask): + raise PLCInvalidArgument, "Broadcast address %s is inconsistent with network %s/%s" % \ + (broadcast, network, netmask) + if 'gateway' in globals() and not in_same_network(ip, gateway, netmask): + raise PLCInvalidArgument, "Gateway %s is not reachable from %s/%s" % \ + (gateway, ip, netmask) + elif self['type'] == 'ipv6': + for key in ['ip', 'gateway']: + if key not in self or not self[key]: + raise PLCInvalidArgument, "For static ipv6 method, %s is required" % key + globals()[key] = self[key] + elif method == "ipmi": + if 'ip' not in self or not 
self['ip']: + raise PLCInvalidArgument, "For ipmi method, ip is required" + + validate_last_updated = Row.validate_timestamp + + def update_timestamp(self, col_name, commit = True): + """ + Update col_name field with current time + """ + + assert 'interface_id' in self + assert self.table_name + + self.api.db.do("UPDATE %s SET %s = CURRENT_TIMESTAMP " % (self.table_name, col_name) + \ + " where interface_id = %d" % (self['interface_id']) ) + self.sync(commit) + + def update_last_updated(self, commit = True): + self.update_timestamp('last_updated', commit) + + def delete(self,commit=True): + ### need to cleanup ilinks + self.api.db.do("DELETE FROM ilink WHERE src_interface_id=%d OR dst_interface_id=%d" % \ + (self['interface_id'],self['interface_id'])) + + Row.delete(self) + +class Interfaces(Table): + """ + Representation of row(s) from the interfaces table in the + database. + """ + + def __init__(self, api, interface_filter = None, columns = None): + Table.__init__(self, api, Interface, columns) + + # the view that we're selecting upon: start with view_nodes + view = "view_interfaces" + # as many left joins as requested tags + for tagname in self.tag_columns: + view= "%s left join %s using (%s)"%(view,Interface.tagvalue_view_name(tagname), + Interface.primary_key) + + sql = "SELECT %s FROM %s WHERE True" % \ + (", ".join(self.columns.keys()+self.tag_columns.keys()),view) + + if interface_filter is not None: + if isinstance(interface_filter, (list, tuple, set)): + # Separate the list into integers and strings + ints = filter(lambda x: isinstance(x, (int, long)), interface_filter) + strs = filter(lambda x: isinstance(x, StringTypes), interface_filter) + interface_filter = Filter(Interface.fields, {'interface_id': ints, 'ip': strs}) + sql += " AND (%s) %s" % interface_filter.sql(api, "OR") + elif isinstance(interface_filter, dict): + allowed_fields=dict(Interface.fields.items()+Interface.tags.items()) + interface_filter = Filter(allowed_fields, interface_filter) + 
sql += " AND (%s) %s" % interface_filter.sql(api) + elif isinstance(interface_filter, int): + interface_filter = Filter(Interface.fields, {'interface_id': [interface_filter]}) + sql += " AND (%s) %s" % interface_filter.sql(api) + elif isinstance (interface_filter, StringTypes): + interface_filter = Filter(Interface.fields, {'ip':[interface_filter]}) + sql += " AND (%s) %s" % interface_filter.sql(api, "AND") + else: + raise PLCInvalidArgument, "Wrong interface filter %r"%interface_filter + + self.selectall(sql) diff --git a/PLC/KeyTypes.py b/PLC/KeyTypes.py new file mode 100644 index 0000000..df15643 --- /dev/null +++ b/PLC/KeyTypes.py @@ -0,0 +1,51 @@ +# +# Functions for interacting with the key_types table in the database +# +# Mark Huang +# Copyright (C) 2006 The Trustees of Princeton University +# + +from PLC.Faults import * +from PLC.Parameter import Parameter +from PLC.Table import Row, Table + +class KeyType(Row): + """ + Representation of a row in the key_types table. To use, + instantiate with a dict of values. + """ + + table_name = 'key_types' + primary_key = 'key_type' + join_tables = ['keys'] + fields = { + 'key_type': Parameter(str, "Key type", max = 20), + } + + def validate_key_type(self, name): + # Make sure name is not blank + if not len(name): + raise PLCInvalidArgument, "Key type must be specified" + + # Make sure key type does not alredy exist + conflicts = KeyTypes(self.api, [name]) + if conflicts: + raise PLCInvalidArgument, "Key type name already in use" + + return name + +class KeyTypes(Table): + """ + Representation of the key_types table in the database. 
+ """ + + def __init__(self, api, key_types = None): + Table.__init__(self, api, KeyType) + + sql = "SELECT %s FROM key_types" % \ + ", ".join(KeyType.fields) + + if key_types: + sql += " WHERE key_type IN (%s)" % ", ".join( [ api.db.quote (t) for t in key_types ] ) + + self.selectall(sql) diff --git a/PLC/Keys.py b/PLC/Keys.py new file mode 100644 index 0000000..ebabd19 --- /dev/null +++ b/PLC/Keys.py @@ -0,0 +1,119 @@ +import re + +from PLC.Faults import * +from PLC.Parameter import Parameter +from PLC.Filter import Filter +from PLC.Debug import profile +from PLC.Table import Row, Table +from PLC.KeyTypes import KeyType, KeyTypes + +class Key(Row): + """ + Representation of a row in the keys table. To use, instantiate with a + dict of values. Update as you would a dict. Commit to the database + with sync(). + """ + + table_name = 'keys' + primary_key = 'key_id' + join_tables = ['person_key', 'peer_key'] + fields = { + 'key_id': Parameter(int, "Key identifier"), + 'key_type': Parameter(str, "Key type"), + 'key': Parameter(str, "Key value", max = 4096), + 'person_id': Parameter(int, "User to which this key belongs", nullok = True), + 'peer_id': Parameter(int, "Peer to which this key belongs", nullok = True), + 'peer_key_id': Parameter(int, "Foreign key identifier at peer", nullok = True), + } + + def validate_key_type(self, key_type): + key_types = [row['key_type'] for row in KeyTypes(self.api)] + if key_type not in key_types: + raise PLCInvalidArgument, "Invalid key type" + return key_type + + def validate_key(self, key): + # Key must not be blacklisted + rows = self.api.db.selectall("SELECT 1 from keys" \ + " WHERE key = %(key)s" \ + " AND is_blacklisted IS True", + locals()) + if rows: + raise PLCInvalidArgument, "Key is blacklisted and cannot be used" + + return key + + def validate(self): + # Basic validation + Row.validate(self) + + assert 'key' in self + key = self['key'] + + if self['key_type'] == 'ssh': + # Accept only SSH version 2 keys without options. 
From + # sshd(8): + # + # Each protocol version 2 public key consists of: options, + # keytype, base64 encoded key, comment. The options field + # is optional...The comment field is not used for anything + # (but may be convenient for the user to identify the + # key). For protocol version 2 the keytype is ``ssh-dss'' + # or ``ssh-rsa''. + + good_ssh_key = r'^.*(?:ssh-dss|ssh-rsa)[ ]+[A-Za-z0-9+/=]+(?: .*)?$' + if not re.match(good_ssh_key, key, re.IGNORECASE): + raise PLCInvalidArgument, "Invalid SSH version 2 public key" + + def blacklist(self, commit = True): + """ + Permanently blacklist key (and all other identical keys), + preventing it from ever being added again. Because this could + affect multiple keys associated with multiple accounts, it + should be admin only. + """ + + assert 'key_id' in self + assert 'key' in self + + # Get all matching keys + rows = self.api.db.selectall("SELECT key_id FROM keys WHERE key = %(key)s", + self) + key_ids = [row['key_id'] for row in rows] + assert key_ids + assert self['key_id'] in key_ids + + # Keep the keys in the table + self.api.db.do("UPDATE keys SET is_blacklisted = True" \ + " WHERE key_id IN (%s)" % ", ".join(map(str, key_ids))) + + # But disassociate them from all join tables + for table in self.join_tables: + self.api.db.do("DELETE FROM %s WHERE key_id IN (%s)" % \ + (table, ", ".join(map(str, key_ids)))) + + if commit: + self.api.db.commit() + +class Keys(Table): + """ + Representation of row(s) from the keys table in the + database. 
+ """ + + def __init__(self, api, key_filter = None, columns = None): + Table.__init__(self, api, Key, columns) + + sql = "SELECT %s FROM view_keys WHERE is_blacklisted IS False" % \ + ", ".join(self.columns) + + if key_filter is not None: + if isinstance(key_filter, (list, tuple, set, int, long)): + key_filter = Filter(Key.fields, {'key_id': key_filter}) + elif isinstance(key_filter, dict): + key_filter = Filter(Key.fields, key_filter) + else: + raise PLCInvalidArgument, "Wrong key filter %r"%key_filter + sql += " AND (%s) %s" % key_filter.sql(api) + + self.selectall(sql) diff --git a/PLC/LDAP.py b/PLC/LDAP.py new file mode 100644 index 0000000..da54456 --- /dev/null +++ b/PLC/LDAP.py @@ -0,0 +1,117 @@ +# +# LDAP interface. +# Tony Mack +# Copyright (C) 2006 The Trustees of Princeton University +# + +import ldap +import traceback +from PLC.Debug import profile +from PLC.Faults import * + + +class LDAP: + def __init__(self, api): + self.api = api + self.debug = False +# self.debug = True + self.connection = None + self.async = False + + def bind(self, async=False): + self.async = async + if self.connection is None: + try: + if self.api.config.PLC_LDAP_SECURE: + url = 'ldaps://%s' % \ + (self.api.config.PLC_LDAP_HOST, self.api.config.PLC_LDAP_PORT) + else: + url = 'ldap://%s' % \ + (self.api.config.PLC_LDAP_HOST, self.api.config.PLC_LDAP_PORT) + self.connection = ldap.open(url) + dn = self.api.config.PLC_LDAP_ROOT_DN + pw = self.api.config.PLC_LDAP_ROOT_PASSWORD + if async: + self.connection.bind(dn, pw, ldap.AUTH_SIMPLE) + else: + self.connection.bind_s(dn, pw, ldap.AUTH_SIMPLE) + except ldap.LDAPError, e: + raise PLCLDAPError, "Unable to bind to server: %s" % e + return connection + + def close(self): + """ + Close the connection + """ + if self.connection is not None: + self.connection.unbind() + self.connection = None + + def pl_to_ldap(self, filter): + """ + Convert pl fields to ldap fields + """ + ldap_filter = {'objectClass': '*'} + if 'first_name' in filter 
and 'last_name' in filter: + ldap_filter['cn'] = "%s %s" % \ + (filter['first_name'], filter['last_name']) + for key in filter: + if key == 'email': + ldap_filter['mail'] = filter['email'] + if key == 'objectClass': + ldap_filter['objectClass'] = filter['objectClass'] + + return ldap_filter + + def to_ldap_filter(search_filter): + search_filter = pl_to_ldap(search_filter) + values = [] + for (key,value) in search_filter.items(): + values.append("(%s=%s)" % (key,value)) + + return "(&%s)" % "".join(values) + + def to_list_of_dicts(results_list): + """ + Convert ldap search results to a list of dicts + """ + results = [] + for (dn, result_dict) in result_list: + result_dict['dn'] = dn + results.append(result_dict) + return results + + def search(self, search_filter): + """ + Search the ldap directory + """ + self.bind() + dn = self.api.config.PLC_LDAP_SUFFIX + scope = ldap.SCOPE_SUBTREE + filter = to_ldap_filter(search_filter) + # always do synchronous searchers + search = self.connection.search_s + results = to_list_of_dicts(search(dn, scope, filter)) + self.close() + return results + + def add(self, record, type): + """ + Add to the ldap directory + """ + self.bind() + self.close() + + def update(self, record): + """ + Update a record in the ldap directory + """ + self.bind() + self.close() + + def remove(self, record): + """ + Remove a record from the ldap directory + """ + self.bind() + self.close() diff --git a/PLC/LeaseFilter.py b/PLC/LeaseFilter.py new file mode 100644 index 0000000..e192d43 --- /dev/null +++ b/PLC/LeaseFilter.py @@ -0,0 +1,226 @@ +# +# Thierry Parmentelat -- INRIA +# +# Utilities for filtering on leases + +import time +import calendar + +from types import StringTypes +from PLC.Faults import * +from PLC.Filter import Filter +from PLC.Parameter import Parameter, Mixed +from PLC.Timestamp import Timestamp + +# supersede the generic Filter class to support time intersection + + +class LeaseFilter (Filter): + + # general notes on input parameters 
+ # int_timestamp: number of seconds since the epoch + # str_timestamp: see Timestamp.sql_validate + # timeslot: a tuple (from, until), each being either int_timestamp or + # str_timestamp + + local_fields = { + 'alive': Mixed( + Parameter( + int, "int_timestamp: leases alive at that time"), + Parameter( + str, "str_timestamp: leases alive at that time"), + Parameter( + tuple, "timeslot: the leases alive during this timeslot")), + 'clip': Mixed( + Parameter( + int, "int_timestamp: leases alive after that time"), + Parameter( + str, "str_timestamp: leases alive after at that time"), + Parameter( + tuple, "timeslot: the leases alive during this timeslot")), + ########## macros + # {'day' : 0} : all leases from today and on + # {'day' : 1} : all leases today (localtime at the myplc) + # {'day' : 2} : all leases today and tomorrow (localtime at the myplc) + # etc.. + 'day': Parameter(int, "clip on a number of days from today and on;" + " 0 means no limit in the future"), + } + + def __init__(self, fields={}, filter={}, + doc="Lease filter -- adds the 'alive' and 'clip'" + "capabilities for filtering on leases"): + Filter.__init__(self, fields, filter, doc) + self.fields.update(LeaseFilter.local_fields) + + # canonical type + @staticmethod + def quote(timestamp): return Timestamp.cast_long(timestamp) + + # basic SQL utilities + @staticmethod + def sql_time_intersect(f1, u1, f2, u2): + # either f2 is in [f1,u1], or u2 is in [f1,u1], or f2<=f1<=u1<=u2 + return ("(({f1} <= {f2}) AND ({f2} <= {u1})) " + + "OR (({f1} <= {u2}) AND ({u2} <= {u1})) " + + "OR (({f2}<={f1}) AND ({u1}<={u2}))").format(**locals()) + + @staticmethod + def time_in_range(timestamp, f1, u1): + return Timestamp.cast_long(f1) <= Timestamp.cast_long(timestamp) \ + and Timestamp.cast_long(timestamp) <= Timestamp.cast_long(u1) + + @staticmethod + def sql_time_in_range(timestamp, f1, u1): + # is timestamp in [f1, u1] + return "(({f1} <= {timestamp}) AND ({timestamp} <= {u1}))"\ + .format(**locals()) + + 
@staticmethod
def sql_timeslot_after(f1, u1, mark):
    """
    Build the SQL predicate asserting that a timeslot [f1, u1] is
    still (at least partly) alive after `mark`, i.e. u1 >= mark.

    `f1` is accepted for symmetry with the other timeslot helpers
    but does not appear in the generated fragment.
    """
    return "({0} >= {1})".format(u1, mark)
+ for (k, v) in LeaseFilter.local_fields.items(): + if k in self: + self.local[k] = self[k] + del self[k] + self.negation[k] = "" + elif ('~' + k) in self: + self.local[k] = self['~' + k] + del self['~' + k] + self.negation[k] = "NOT " + # run the generic filtering code + (where_part, clip_part) = Filter.sql(self, api, join_with) + for (k, v) in self.local.items(): + try: + # locate hook function associated with key + method = LeaseFilter.__dict__['sql_' + k] + where_part += " {} {}({})"\ + .format(self.join_with, + self.negation[k], + method(self, self.local[k])) + except Exception, e: + raise PLCInvalidArgument( + "LeaseFilter: something wrong with filter" + "key {}, val was {} -- {}".format(k, v, e)) + return (where_part, clip_part) + +# xxx not sure where this belongs yet +# given a set of nodes, and a timeslot, +# returns the available leases that have at least a given duration + + +def free_leases(api, node_ids, t_from, t_until, min_duration): + + # get the leases for these nodes and timeslot + filter = {'node_id': node_ids, + 'clip': (t_from, t_until), + # sort by node, and inside one node, chronologically + '-SORT': ('node_id', 't_from'), + } + leases = Leases(api, filter) + + result = [] + + # sort node_ids + node_ids.sort() + + # scan nodes from the input + input_node_id = 0 + # scan nodes from the leases + lease_node_id = 0 + + return '?? what now ??' 
def node_free_leases(node_id, node_leases, t_from, t_until):
    """
    Compute the free timeslots on `node_id` within [t_from, t_until].

    `node_leases` is the node's existing leases, assumed sorted
    chronologically by 't_from' (the caller in free_leases sorts by
    ('node_id', 't_from')).  Returns a list of
    {'node_id', 't_from', 't_until'} dicts describing the gaps.

    NOTE(review): this function consumes `node_leases` (deletes items
    in place) -- callers must pass a disposable list.
    NOTE(review): the initial `is_on` test only checks whether the
    first lease *starts* inside the window; a lease already running
    at t_from (started earlier) would be classified as free -- TODO
    confirm against callers.
    """

    # no lease yet : return one solid lease
    if not node_leases:
        return [{'node_id': node_id,
                 't_from': t_from,
                 't_until': t_until}]

    result = []
    # scan the window chronologically; `current_time` is the left edge
    # of the next candidate free slot, `is_on` says whether a lease is
    # active at current_time
    current_time = t_from
    is_on = LeaseFilter.time_in_range(
        node_leases[0]['t_from'], t_from, t_until)

    while True:
        # print 'DBG','current_time',current_time,'is_on',is_on,'result',result
        # lease is active
        if is_on:
            # skip to the end of the active lease and consume it
            current_time = node_leases[0]['t_until']
            is_on = False
            del node_leases[0]
            if not node_leases:
                return result
        # free, has no remaining lease
        elif not node_leases:
            # the rest of the window is one free slot
            result.append(
                {'node_id': node_id,
                 't_from': current_time, 't_until': t_until})
            return result
        # free and has remaining leases
        else:
            # free slot runs until the next lease starts
            next_time = node_leases[0]['t_from']
            result.append(
                {'node_id': node_id,
                 't_from': current_time, 't_until': next_time})
            current_time = next_time
            is_on = True
+ """ + + table_name = 'leases' + primary_key = 'lease_id' + join_tables = [ ] + fields = { + # native + 'lease_id': Parameter(int, "Lease identifier"), + 't_from': Timestamp.Parameter("timeslot start"), + 't_until': Timestamp.Parameter("timeslot end"), + 'node_id': Node.fields['node_id'], + 'slice_id': Slice.fields['slice_id'], + + # derived + 'hostname': Node.fields['hostname'], + 'node_type': Node.fields['node_type'], + 'name': Slice.fields['name'], + 'site_id': Slice.fields['site_id'], + 'duration': Parameter(int, "duration in seconds"), + 'expired' : Parameter(bool, "time slot is over"), + } + + related_fields = { } + + def validate_time (self, timestamp, round_up): + # convert to long + timestamp = Timestamp.cast_long(timestamp) + # retrieve configured granularity + granularity = self.api.config.PLC_RESERVATION_GRANULARITY + # the trick for rounding up rather than down + if round_up: + timestamp += (granularity-1) + # round down + timestamp = (timestamp/granularity) * granularity + # return a SQL string + return Timestamp.sql_validate_utc(timestamp) + + # round UP + def validate_t_from(self, timestamp): + return self.validate_time(timestamp, round_up=True) + # round DOWN + def validate_t_until (self, timestamp): + return self.validate_time(timestamp, round_up=False) + +class Leases(Table): + """ + Representation of row(s) from the leases table in the + database. 
+ """ + + def __init__(self, api, lease_filter = None, columns = None): + Table.__init__(self, api, Lease, columns) + + # the view that we're selecting upon: start with view_leases + view = "view_leases" + sql = "SELECT %s FROM %s WHERE true" % (", ".join(self.columns.keys()),view) + + + if lease_filter is not None: + if isinstance(lease_filter, (list, tuple, set, int, long)): + lease_filter = Filter(Lease.fields, {'lease_id': lease_filter}) + elif isinstance(lease_filter, dict): + lease_filter = LeaseFilter(Lease.fields, lease_filter) + else: + raise PLCInvalidArgument, "Wrong lease filter %r"%lease_filter + sql += " AND (%s) %s" % lease_filter.sql(api) + + self.selectall(sql) diff --git a/PLC/Logger.py b/PLC/Logger.py new file mode 100644 index 0000000..ca85ac9 --- /dev/null +++ b/PLC/Logger.py @@ -0,0 +1,45 @@ +import logging +import logging.config + +# we essentially need one all-purpose logger +# that goes into /var/log/plcapi.log + +plcapi_logging_config = { + 'version' : 1, + 'disable_existing_loggers' : True, + 'formatters': { + 'standard': { + 'format': '%(asctime)s %(levelname)s %(filename)s:%(lineno)d %(message)s', + 'datefmt': '%m-%d %H:%M:%S' + }, + 'shorter': { + 'format': '%(asctime)s %(levelname)s %(message)s', + 'datefmt': '%d %H:%M:%S' + }, + }, + 'handlers': { + 'plcapi': { + 'level': 'INFO', + 'class': 'logging.FileHandler', + 'formatter': 'standard', + 'filename' : '/var/log/plcapi.log', + }, + }, + 'loggers': { + 'plcapi': { + 'handlers': ['plcapi'], + 'level': 'INFO', + 'propagate': False, + }, + }, +} + +logging.config.dictConfig(plcapi_logging_config) + +# general case: +# from PLC.Logger import logger +logger = logging.getLogger('plcapi') + +#################### test +if __name__ == '__main__': + logger.info("in plcapi") diff --git a/PLC/Messages.py b/PLC/Messages.py new file mode 100644 index 0000000..3696926 --- /dev/null +++ b/PLC/Messages.py @@ -0,0 +1,50 @@ +# +# Functions for interacting with the messages table in the database +# +# 
Tony Mack +# Copyright (C) 2006 The Trustees of Princeton University +# + +from PLC.Parameter import Parameter +from PLC.Table import Row, Table +from PLC.Filter import Filter + +class Message(Row): + """ + Representation of a row in the messages table. + """ + + table_name = 'messages' + primary_key = 'message_id' + fields = { + 'message_id': Parameter(str, "Message identifier"), + 'subject': Parameter(str, "Message summary", nullok = True), + 'template': Parameter(str, "Message template", nullok = True), + 'enabled': Parameter(bool, "Message is enabled"), + } + +class Messages(Table): + """ + Representation of row(s) from the messages table in the database. + """ + + def __init__(self, api, message_filter = None, columns = None, enabled = None): + Table.__init__(self, api, Message, columns) + + sql = "SELECT %s from messages WHERE True" % \ + ", ".join(self.columns) + + if enabled is not None: + sql += " AND enabled IS %s" % enabled + + if message_filter is not None: + if isinstance(message_filter, (list, tuple, set, int, long)): + message_filter = Filter(Message.fields, {'message_id': message_filter}) + sql += " AND (%s) %s" % message_filter.sql(api, "OR") + elif isinstance(message_filter, dict): + message_filter = Filter(Message.fields, message_filter) + sql += " AND (%s) %s" % message_filter.sql(api, "AND") + else: + raise PLCInvalidArgument, "Wrong message filter %r"%message_filter + + self.selectall(sql) diff --git a/PLC/Method.py b/PLC/Method.py new file mode 100644 index 0000000..571c3eb --- /dev/null +++ b/PLC/Method.py @@ -0,0 +1,393 @@ +# +# Base class for all PLCAPI functions +# +# Mark Huang +# Copyright (C) 2006 The Trustees of Princeton University +# +import xmlrpclib +from types import * +import textwrap +import os +import time +import pprint + +from types import StringTypes + +from PLC.Faults import * +from PLC.Parameter import Parameter, Mixed, python_type, xmlrpc_type +from PLC.Auth import Auth +from PLC.Debug import profile +from PLC.Events 
import Event, Events +from PLC.Nodes import Node, Nodes +from PLC.Persons import Person, Persons + +# we inherit object because we use new-style classes for legacy methods +class Method (object): + """ + Base class for all PLCAPI functions. At a minimum, all PLCAPI + functions must define: + + roles = [list of roles] + accepts = [Parameter(arg1_type, arg1_doc), Parameter(arg2_type, arg2_doc), ...] + returns = Parameter(return_type, return_doc) + call(arg1, arg2, ...): method body + + Argument types may be Python types (e.g., int, bool, etc.), typed + values (e.g., 1, True, etc.), a Parameter, or lists or + dictionaries of possibly mixed types, values, and/or Parameters + (e.g., [int, bool, ...] or {'arg1': int, 'arg2': bool}). + + Once function decorators in Python 2.4 are fully supported, + consider wrapping calls with accepts() and returns() functions + instead of performing type checking manually. + """ + + # Defaults. Could implement authentication and type checking with + # decorators, but they are not supported in Python 2.3 and it + # would be hard to generate documentation without writing a code + # parser. + + roles = [] + accepts = [] + returns = bool + status = "current" + + def call(self, *args): + """ + Method body for all PLCAPI functions. Must override. + """ + + return True + + def __init__(self, api,caller=None): + self.name = self.__class__.__name__ + self.api = api + + if caller: + # let a method call another one by propagating its caller + self.caller=caller + else: + # Auth may set this to a Person instance (if an anonymous + # method, will remain None). + self.caller = None + + + # API may set this to a (addr, port) tuple if known + self.source = None + + def __call__(self, *args, **kwds): + """ + Main entry point for all PLCAPI functions. Type checks + arguments, authenticates, and executes call(). 
+ """ + + try: + start = time.time() + + # legacy code cannot be type-checked, due to the way Method.args() works + # as of 5.0-rc16 we don't use skip_type_check anymore + if not hasattr(self,"skip_type_check"): + (min_args, max_args, defaults) = self.args() + + # Check that the right number of arguments were passed in + if len(args) < len(min_args) or len(args) > len(max_args): + raise PLCInvalidArgumentCount(len(args), len(min_args), len(max_args)) + + for name, value, expected in zip(max_args, args, self.accepts): + self.type_check(name, value, expected, args) + + result = self.call(*args, **kwds) + runtime = time.time() - start + + if self.api.config.PLC_API_DEBUG or hasattr(self, 'message'): + self.log(None, runtime, *args) + + return result + + except PLCFault, fault: + + caller = "" + if isinstance(self.caller, Person): + caller = 'person_id %s' % self.caller['person_id'] + elif isinstance(self.caller, Node): + caller = 'node_id %s' % self.caller['node_id'] + + # Prepend caller and method name to expected faults + fault.faultString = caller + ": " + self.name + ": " + fault.faultString + runtime = time.time() - start + + if self.api.config.PLC_API_DEBUG: + self.log(fault, runtime, *args) + + raise fault + + def log(self, fault, runtime, *args): + """ + Log the transaction + """ + + # Do not log system or Get calls + #if self.name.startswith('system') or self.name.startswith('Get'): + # return False + # Do not log ReportRunlevel + if self.name.startswith('system'): + return False + if self.name.startswith('ReportRunlevel'): + return False + + # Create a new event + event = Event(self.api) + event['fault_code'] = 0 + if fault: + event['fault_code'] = fault.faultCode + event['runtime'] = runtime + + # Redact passwords and sessions + newargs = args + if args: + newargs = [] + for arg in args: + if not isinstance(arg, dict): + newargs.append(arg) + continue + # what type of auth this is + if arg.has_key('AuthMethod'): + auth_methods = ['session', 'password', 
'capability', 'gpg', 'hmac','anonymous'] + auth_method = arg['AuthMethod'] + if auth_method in auth_methods: + event['auth_type'] = auth_method + for password in 'AuthString', 'session', 'password': + if arg.has_key(password): + arg = arg.copy() + arg[password] = "Removed by API" + newargs.append(arg) + + # Log call representation + # XXX Truncate to avoid DoS + event['call'] = self.name + pprint.saferepr(newargs) + event['call_name'] = self.name + + # Both users and nodes can call some methods + if isinstance(self.caller, Person): + event['person_id'] = self.caller['person_id'] + elif isinstance(self.caller, Node): + event['node_id'] = self.caller['node_id'] + + event.sync(commit = False) + + if hasattr(self, 'event_objects') and isinstance(self.event_objects, dict): + for key in self.event_objects.keys(): + for object_id in self.event_objects[key]: + event.add_object(key, object_id, commit = False) + + + # Set the message for this event + if fault: + event['message'] = fault.faultString + elif hasattr(self, 'message'): + event['message'] = self.message + + # Commit + event.sync() + + def help(self, indent = " "): + """ + Text documentation for the method. + """ + + (min_args, max_args, defaults) = self.args() + + text = "%s(%s) -> %s\n\n" % (self.name, ", ".join(max_args), xmlrpc_type(self.returns)) + + text += "Description:\n\n" + lines = [indent + line.strip() for line in self.__doc__.strip().split("\n")] + text += "\n".join(lines) + "\n\n" + + text += "Allowed Roles:\n\n" + if not self.roles: + roles = ["any"] + else: + roles = self.roles + text += indent + ", ".join(roles) + "\n\n" + + def param_text(name, param, indent, step): + """ + Format a method parameter. 
+ """ + + text = indent + + # Print parameter name + if name: + param_offset = 32 + text += name.ljust(param_offset - len(indent)) + else: + param_offset = len(indent) + + # Print parameter type + param_type = python_type(param) + text += xmlrpc_type(param_type) + "\n" + + # Print parameter documentation right below type + if isinstance(param, Parameter): + wrapper = textwrap.TextWrapper(width = 70, + initial_indent = " " * param_offset, + subsequent_indent = " " * param_offset) + text += "\n".join(wrapper.wrap(param.doc)) + "\n" + param = param.type + + text += "\n" + + # Indent struct fields and mixed types + if isinstance(param, dict): + for name, subparam in param.iteritems(): + text += param_text(name, subparam, indent + step, step) + elif isinstance(param, Mixed): + for subparam in param: + text += param_text(name, subparam, indent + step, step) + elif isinstance(param, (list, tuple, set)): + for subparam in param: + text += param_text("", subparam, indent + step, step) + + return text + + text += "Parameters:\n\n" + for name, param in zip(max_args, self.accepts): + text += param_text(name, param, indent, indent) + + text += "Returns:\n\n" + text += param_text("", self.returns, indent, indent) + + return text + + def args(self): + """ + Returns a tuple: + + ((arg1_name, arg2_name, ...), + (arg1_name, arg2_name, ..., optional1_name, optional2_name, ...), + (None, None, ..., optional1_default, optional2_default, ...)) + + That represents the minimum and maximum sets of arguments that + this function accepts and the defaults for the optional arguments. + """ + + # Inspect call. Remove self from the argument list. 
+ max_args = self.call.func_code.co_varnames[1:self.call.func_code.co_argcount] + defaults = self.call.func_defaults + if defaults is None: + defaults = () + + min_args = max_args[0:len(max_args) - len(defaults)] + defaults = tuple([None for arg in min_args]) + defaults + + return (min_args, max_args, defaults) + + def type_check(self, name, value, expected, args): + """ + Checks the type of the named value against the expected type, + which may be a Python type, a typed value, a Parameter, a + Mixed type, or a list or dictionary of possibly mixed types, + values, Parameters, or Mixed types. + + Extraneous members of lists must be of the same type as the + last specified type. For example, if the expected argument + type is [int, bool], then [1, False] and [14, True, False, + True] are valid, but [1], [False, 1] and [14, True, 1] are + not. + + Extraneous members of dictionaries are ignored. + """ + + # If any of a number of types is acceptable + if isinstance(expected, Mixed): + for item in expected: + try: + self.type_check(name, value, item, args) + return + except PLCInvalidArgument, fault: + pass + raise fault + + # If an authentication structure is expected, save it and + # authenticate after basic type checking is done. + if isinstance(expected, Auth): + auth = expected + else: + auth = None + + # Get actual expected type from within the Parameter structure + if isinstance(expected, Parameter): + min = expected.min + max = expected.max + nullok = expected.nullok + expected = expected.type + else: + min = None + max = None + nullok = False + + expected_type = python_type(expected) + + # If value can be NULL + if value is None and nullok: + return + + # Strings are a special case. Accept either unicode or str + # types if a string is expected. + if expected_type in StringTypes and isinstance(value, StringTypes): + pass + + # Integers and long integers are also special types. Accept + # either int or long types if an int or long is expected. 
+ elif expected_type in (IntType, LongType) and isinstance(value, (IntType, LongType)): + pass + + elif not isinstance(value, expected_type): + raise PLCInvalidArgument("expected %s, got %s" % \ + (xmlrpc_type(expected_type), + xmlrpc_type(type(value))), + name) + + # If a minimum or maximum (length, value) has been specified + if expected_type in StringTypes: + if min is not None and \ + len(value.encode(self.api.encoding)) < min: + raise PLCInvalidArgument, "%s must be at least %d bytes long" % (name, min) + if max is not None and \ + len(value.encode(self.api.encoding)) > max: + raise PLCInvalidArgument, "%s must be at most %d bytes long" % (name, max) + elif expected_type in (list, tuple, set): + if min is not None and len(value) < min: + raise PLCInvalidArgument, "%s must contain at least %d items" % (name, min) + if max is not None and len(value) > max: + raise PLCInvalidArgument, "%s must contain at most %d items" % (name, max) + else: + if min is not None and value < min: + raise PLCInvalidArgument, "%s must be > %s" % (name, str(min)) + if max is not None and value > max: + raise PLCInvalidArgument, "%s must be < %s" % (name, str(max)) + + # If a list with particular types of items is expected + if isinstance(expected, (list, tuple, set)): + for i in range(len(value)): + if i >= len(expected): + j = len(expected) - 1 + else: + j = i + self.type_check(name + "[]", value[i], expected[j], args) + + # If a struct with particular (or required) types of items is + # expected. 
+ elif isinstance(expected, dict): + for key in value.keys(): + if key in expected: + self.type_check(name + "['%s']" % key, value[key], expected[key], args) + for key, subparam in expected.iteritems(): + if isinstance(subparam, Parameter) and \ + subparam.optional is not None and \ + not subparam.optional and key not in value.keys(): + raise PLCInvalidArgument("'%s' not specified" % key, name) + + if auth is not None: + auth.check(self, *args) diff --git a/PLC/Methods/AddAddressType.py b/PLC/Methods/AddAddressType.py new file mode 100644 index 0000000..2847714 --- /dev/null +++ b/PLC/Methods/AddAddressType.py @@ -0,0 +1,36 @@ +from PLC.Faults import * +from PLC.Method import Method +from PLC.Parameter import Parameter, Mixed +from PLC.AddressTypes import AddressType, AddressTypes +from PLC.Auth import Auth + +can_update = lambda (field, value): field not in ['address_type_id'] + +class AddAddressType(Method): + """ + Adds a new address type. Fields specified in address_type_fields + are used. + + Returns the new address_type_id (> 0) if successful, faults otherwise. 
+ """ + + roles = ['admin'] + + address_type_fields = dict(filter(can_update, AddressType.fields.items())) + + accepts = [ + Auth(), + address_type_fields + ] + + returns = Parameter(int, 'New address_type_id (> 0) if successful') + + + def call(self, auth, address_type_fields): + address_type_fields = dict(filter(can_update, address_type_fields.items())) + address_type = AddressType(self.api, address_type_fields) + address_type.sync() + + self.event_objects = {'AddressType' : [address_type['address_type_id']]} + + return address_type['address_type_id'] diff --git a/PLC/Methods/AddAddressTypeToAddress.py b/PLC/Methods/AddAddressTypeToAddress.py new file mode 100644 index 0000000..99ddfe4 --- /dev/null +++ b/PLC/Methods/AddAddressTypeToAddress.py @@ -0,0 +1,47 @@ +from PLC.Faults import * +from PLC.Method import Method +from PLC.Parameter import Parameter, Mixed +from PLC.AddressTypes import AddressType, AddressTypes +from PLC.Addresses import Address, Addresses +from PLC.Auth import Auth + +class AddAddressTypeToAddress(Method): + """ + Adds an address type to the specified address. + + PIs may only update addresses of their own sites. + + Returns 1 if successful, faults otherwise. 
+ """ + + roles = ['admin', 'pi'] + + accepts = [ + Auth(), + Mixed(AddressType.fields['address_type_id'], + AddressType.fields['name']), + Address.fields['address_id'] + ] + + returns = Parameter(int, '1 if successful') + + + def call(self, auth, address_type_id_or_name, address_id): + address_types = AddressTypes(self.api, [address_type_id_or_name]) + if not address_types: + raise PLCInvalidArgument, "No such address type" + address_type = address_types[0] + + addresses = Addresses(self.api, [address_id]) + if not addresses: + raise PLCInvalidArgument, "No such address" + address = addresses[0] + + if 'admin' not in self.caller['roles']: + if address['site_id'] not in self.caller['site_ids']: + raise PLCPermissionDenied, "Address must be associated with one of your sites" + + address.add_address_type(address_type) + self.event_objects = {'Address': [address['address_id']]} + + return 1 diff --git a/PLC/Methods/AddBootState.py b/PLC/Methods/AddBootState.py new file mode 100644 index 0000000..522fd25 --- /dev/null +++ b/PLC/Methods/AddBootState.py @@ -0,0 +1,29 @@ +from PLC.Faults import * +from PLC.Method import Method +from PLC.Parameter import Parameter, Mixed +from PLC.BootStates import BootState, BootStates +from PLC.Auth import Auth + +class AddBootState(Method): + """ + Adds a new node boot state. + + Returns 1 if successful, faults otherwise. 
+ """ + + roles = ['admin'] + + accepts = [ + Auth(), + BootState.fields['boot_state'] + ] + + returns = Parameter(int, '1 if successful') + + + def call(self, auth, name): + boot_state = BootState(self.api) + boot_state['boot_state'] = name + boot_state.sync(insert = True) + + return 1 diff --git a/PLC/Methods/AddConfFile.py b/PLC/Methods/AddConfFile.py new file mode 100644 index 0000000..6e4fe8b --- /dev/null +++ b/PLC/Methods/AddConfFile.py @@ -0,0 +1,37 @@ +from PLC.Faults import * +from PLC.Method import Method +from PLC.Parameter import Parameter, Mixed +from PLC.ConfFiles import ConfFile, ConfFiles +from PLC.Auth import Auth + +can_update = lambda (field, value): field not in \ + ['conf_file_id', 'node_ids', 'nodegroup_ids'] + +class AddConfFile(Method): + """ + Adds a new node configuration file. Any fields specified in + conf_file_fields are used, otherwise defaults are used. + + Returns the new conf_file_id (> 0) if successful, faults otherwise. + """ + + roles = ['admin'] + + conf_file_fields = dict(filter(can_update, ConfFile.fields.items())) + + accepts = [ + Auth(), + conf_file_fields + ] + + returns = Parameter(int, 'New conf_file_id (> 0) if successful') + + + def call(self, auth, conf_file_fields): + conf_file_fields = dict(filter(can_update, conf_file_fields.items())) + conf_file = ConfFile(self.api, conf_file_fields) + conf_file.sync() + + self.event_objects = {'ConfFile': [conf_file['conf_file_id']]} + + return conf_file['conf_file_id'] diff --git a/PLC/Methods/AddConfFileToNode.py b/PLC/Methods/AddConfFileToNode.py new file mode 100644 index 0000000..eeb9af5 --- /dev/null +++ b/PLC/Methods/AddConfFileToNode.py @@ -0,0 +1,51 @@ +from PLC.Faults import * +from PLC.Method import Method +from PLC.Parameter import Parameter, Mixed +from PLC.ConfFiles import ConfFile, ConfFiles +from PLC.Nodes import Node, Nodes +from PLC.Auth import Auth + +class AddConfFileToNode(Method): + """ + Adds a configuration file to the specified node. 
If the node is + already linked to the configuration file, no errors are returned. + + Returns 1 if successful, faults otherwise. + """ + + roles = ['admin'] + + accepts = [ + Auth(), + ConfFile.fields['conf_file_id'], + Mixed(Node.fields['node_id'], + Node.fields['hostname']) + ] + + returns = Parameter(int, '1 if successful') + + def call(self, auth, conf_file_id, node_id_or_hostname): + # Get configuration file + conf_files = ConfFiles(self.api, [conf_file_id]) + if not conf_files: + raise PLCInvalidArgument, "No such configuration file" + conf_file = conf_files[0] + + # Get node + nodes = Nodes(self.api, [node_id_or_hostname]) + if not nodes: + raise PLCInvalidArgument, "No such node" + node = nodes[0] + + if node['peer_id'] is not None: + raise PLCInvalidArgument, "Not a local node" + + # Link configuration file to node + if node['node_id'] not in conf_file['node_ids']: + conf_file.add_node(node) + + # Log affected objects + self.event_objects = {'ConfFile': [conf_file_id], + 'Node': [node['node_id']] } + + return 1 diff --git a/PLC/Methods/AddConfFileToNodeGroup.py b/PLC/Methods/AddConfFileToNodeGroup.py new file mode 100644 index 0000000..892e64f --- /dev/null +++ b/PLC/Methods/AddConfFileToNodeGroup.py @@ -0,0 +1,50 @@ +from PLC.Faults import * +from PLC.Method import Method +from PLC.Parameter import Parameter, Mixed +from PLC.ConfFiles import ConfFile, ConfFiles +from PLC.NodeGroups import NodeGroup, NodeGroups +from PLC.Auth import Auth + +class AddConfFileToNodeGroup(Method): + """ + Adds a configuration file to the specified node group. If the node + group is already linked to the configuration file, no errors are + returned. + + Returns 1 if successful, faults otherwise. 
+ """ + + roles = ['admin'] + + accepts = [ + Auth(), + ConfFile.fields['conf_file_id'], + Mixed(NodeGroup.fields['nodegroup_id'], + NodeGroup.fields['groupname']) + ] + + returns = Parameter(int, '1 if successful') + + + def call(self, auth, conf_file_id, nodegroup_id_or_name): + # Get configuration file + conf_files = ConfFiles(self.api, [conf_file_id]) + if not conf_files: + raise PLCInvalidArgument, "No such configuration file" + conf_file = conf_files[0] + + # Get node + nodegroups = NodeGroups(self.api, [nodegroup_id_or_name]) + if not nodegroups: + raise PLCInvalidArgument, "No such node group" + nodegroup = nodegroups[0] + + # Link configuration file to node + if nodegroup['nodegroup_id'] not in conf_file['nodegroup_ids']: + conf_file.add_nodegroup(nodegroup) + + # Log affected objects + self.event_objects = {'ConfFile': [conf_file_id], + 'NodeGroup': [nodegroup['nodegroup_id']] } + + return 1 diff --git a/PLC/Methods/AddIlink.py b/PLC/Methods/AddIlink.py new file mode 100644 index 0000000..0a7066a --- /dev/null +++ b/PLC/Methods/AddIlink.py @@ -0,0 +1,88 @@ +# +# Thierry Parmentelat - INRIA +# +from PLC.Faults import * +from PLC.Method import Method +from PLC.Parameter import Parameter, Mixed +from PLC.Auth import Auth + +from PLC.TagTypes import TagType, TagTypes +from PLC.Ilinks import Ilink, Ilinks +from PLC.Interfaces import Interface, Interfaces +from PLC.Sites import Sites + +from PLC.AuthorizeHelpers import AuthorizeHelpers + +class AddIlink(Method): + """ + Create a link between two interfaces + The link has a tag type, that needs be created beforehand + and an optional value. + + Returns the new ilink_id (> 0) if successful, faults + otherwise. 
+ """ + + roles = ['admin', 'pi', 'tech', 'user'] + + accepts = [ + Auth(), + # refer to either the id or the type name + Ilink.fields['src_interface_id'], + Ilink.fields['dst_interface_id'], + Mixed(TagType.fields['tag_type_id'], + TagType.fields['tagname']), + Ilink.fields['value'], + ] + + returns = Parameter(int, 'New ilink_id (> 0) if successful') + + def call(self, auth, src_if_id, dst_if_id, tag_type_id_or_name, value): + + src_if = Interfaces (self.api, [src_if_id],['interface_id']) + if not src_if: + raise PLCInvalidArgument, "No such source interface %r"%src_if_id + dst_if = Interfaces (self.api, [dst_if_id],['interface_id']) + if not dst_if: + raise PLCInvalidArgument, "No such destination interface %r"%dst_if_id + + tag_types = TagTypes(self.api, [tag_type_id_or_name]) + if not tag_types: + raise PLCInvalidArgument, "AddIlink: No such tag type %r"%tag_type_id_or_name + tag_type = tag_types[0] + + # checks for existence - with the same type + conflicts = Ilinks(self.api, + {'tag_type_id':tag_type['tag_type_id'], + 'src_interface_id':src_if_id, + 'dst_interface_id':dst_if_id,}) + + if len(conflicts) : + ilink=conflicts[0] + raise PLCInvalidArgument, "Ilink (%s,%d,%d) already exists and has value %r"\ + %(tag_type['name'],src_if_id,dst_if_id,ilink['value']) + + # check authorizations + if 'admin' in self.caller['roles']: + pass + elif not AuthorizeHelpers.caller_may_access_tag_type (self.api, self.caller, tag_type): + raise PLCPermissionDenied, "%s, forbidden tag %s"%(self.name,tag_type['tagname']) + elif AuthorizeHelpers.interface_belongs_to_person (self.api, src_if, self.caller): + pass + elif src_if_id != dst_if_id and AuthorizeHelpers.interface_belongs_to_person (self.api, dst_if, self.caller): + pass + else: + raise PLCPermissionDenied, "%s: you must one either the src or dst interface"%self.name + + ilink = Ilink(self.api) + ilink['tag_type_id'] = tag_type['tag_type_id'] + ilink['src_interface_id'] = src_if_id + ilink['dst_interface_id'] = dst_if_id 
# Predicate over (field, value) pairs: everything but the primary key
# may be supplied by the caller.
can_update = lambda field_value: field_value[0] not in ['initscript_id']

class AddInitScript(Method):
    """
    Create a new initscript from the given field values.

    Any fields present in initscript_fields are applied; everything
    else falls back to the column defaults.  The initscript_id column
    itself is silently filtered out.

    Returns the new initscript_id (> 0) if successful, faults otherwise.
    """

    roles = ['admin']

    initscript_fields = dict(filter(can_update, InitScript.fields.items()))

    accepts = [
        Auth(),
        initscript_fields
    ]

    returns = Parameter(int, 'New initscript_id (> 0) if successful')

    def call(self, auth, initscript_fields):
        # Drop any column the caller is not allowed to set.
        writable = dict(filter(can_update, initscript_fields.items()))

        initscript = InitScript(self.api, writable)
        initscript.sync()

        # Record the new object for the event log.
        self.event_objects = {'InitScript': [initscript['initscript_id']]}

        return initscript['initscript_id']
# Columns managed by the API itself, never settable by callers.
cannot_update = ['interface_id', 'node_id']

class AddInterface(Method):
    """

    Adds a new network for a node. Any values specified in
    interface_fields are used, otherwise defaults are
    used.

    If type is static, then ip, gateway, network, broadcast, netmask,
    and dns1 must all be specified in interface_fields. If type is
    dhcp, these parameters, even if specified, are ignored.

    PIs and techs may only add interfaces to their own nodes. Admins may
    add interfaces to any node.

    Returns the new interface_id (> 0) if successful, faults otherwise.
    """

    roles = ['admin', 'pi', 'tech']

    accepted_fields = Row.accepted_fields(cannot_update, Interface.fields, exclude=True)
    accepted_fields.update(Interface.tags)

    accepts = [
        Auth(),
        Mixed(Node.fields['node_id'],
              Node.fields['hostname']),
        accepted_fields
    ]

    returns = Parameter(int, 'New interface_id (> 0) if successful')

    def call(self, auth, node_id_or_hostname, interface_fields):

        # Split caller input into plain columns vs. tag values;
        # anything else is rejected outright.
        native, tags, rejected = \
            Row.split_fields(interface_fields, [Interface.fields, Interface.tags])

        # type checking
        native = Row.check_fields(native, self.accepted_fields)
        if rejected:
            raise PLCInvalidArgument("Cannot add Interface with column(s) %r"%rejected)

        # Resolve the target node
        nodes = Nodes(self.api, [node_id_or_hostname])
        if not nodes:
            raise PLCInvalidArgument("No such node %r"%node_id_or_hostname)
        node = nodes[0]

        # Authenticated function
        assert self.caller is not None

        # Non-admins may only touch nodes that live at one of their sites.
        if 'admin' not in self.caller['roles'] \
           and node['site_id'] not in self.caller['site_ids']:
            raise PLCPermissionDenied("Not allowed to add an interface to the specified node")

        # Create the interface row
        interface = Interface(self.api, native)
        interface['node_id'] = node['node_id']
        # the node's very first interface automatically becomes primary
        if not node['interface_ids']:
            interface['is_primary'] = True
        interface.sync()

        # Logging variables
        self.event_objects = {'Node': [node['node_id']],
                              'Interface': [interface['interface_id']]}
        self.message = "Interface %d added" % interface['interface_id']

        # Apply tag values through the dedicated tag methods so their
        # own permission checks run.
        for tagname, value in tags.items():
            # the tagtype instance is assumed to exist, just check that
            if not TagTypes(self.api, {'tagname': tagname}):
                raise PLCInvalidArgument("No such TagType %s"%tagname)
            existing = InterfaceTags(self.api,
                                     {'tagname': tagname,
                                      'interface_id': interface['interface_id']})
            if existing:
                UpdateInterfaceTag(self.api).__call__(auth, existing[0]['interface_tag_id'], value)
            else:
                AddInterfaceTag(self.api).__call__(auth, interface['interface_id'], tagname, value)

        return interface['interface_id']
+ + Admins have full access. Non-admins need to + (1) have at least one of the roles attached to the tagtype, + and (2) belong in the same site as the tagged subject. + + Returns the new interface_tag_id (> 0) if successful, faults + otherwise. + """ + + roles = ['admin', 'pi', 'tech', 'user'] + + accepts = [ + Auth(), + # no other way to refer to a interface + InterfaceTag.fields['interface_id'], + Mixed(TagType.fields['tag_type_id'], + TagType.fields['tagname']), + InterfaceTag.fields['value'], + ] + + returns = Parameter(int, 'New interface_tag_id (> 0) if successful') + + def call(self, auth, interface_id, tag_type_id_or_name, value): + interfaces = Interfaces(self.api, [interface_id]) + if not interfaces: + raise PLCInvalidArgument, "No such interface %r"%interface_id + interface = interfaces[0] + + tag_types = TagTypes(self.api, [tag_type_id_or_name]) + if not tag_types: + raise PLCInvalidArgument, "No such tag type %r"%tag_type_id_or_name + tag_type = tag_types[0] + + # checks for existence - does not allow several different settings + conflicts = InterfaceTags(self.api, + {'interface_id':interface['interface_id'], + 'tag_type_id':tag_type['tag_type_id']}) + + if len(conflicts) : + raise PLCInvalidArgument, "Interface %d already has setting %d"%(interface['interface_id'], + tag_type['tag_type_id']) + + # check authorizations + interface.caller_may_write_tag(self.api,self.caller,tag_type) + + interface_tag = InterfaceTag(self.api) + interface_tag['interface_id'] = interface['interface_id'] + interface_tag['tag_type_id'] = tag_type['tag_type_id'] + interface_tag['value'] = value + + interface_tag.sync() + self.object_ids = [interface_tag['interface_tag_id']] + + return interface_tag['interface_tag_id'] diff --git a/PLC/Methods/AddKeyType.py b/PLC/Methods/AddKeyType.py new file mode 100644 index 0000000..b3690a8 --- /dev/null +++ b/PLC/Methods/AddKeyType.py @@ -0,0 +1,29 @@ +from PLC.Faults import * +from PLC.Method import Method +from PLC.Parameter import 
can_update = ['name', 'instantiation', 'url', 'description', 'max_nodes']


class AddLeases(Method):
    """
    Adds a new lease.
    Mandatory arguments are node(s), slice, t_from and t_until
    times can be either integers, datetime's, or human readable (see Timestamp)

    PIs may only add leases associated with their own sites (i.e.,
    to a slice that belongs to their site).
    Users may only add leases associated with their own slices.

    Returns the new lease_ids if successful, faults otherwise.
    """

    roles = ['admin', 'pi', 'user']

    accepts = [
        Auth(),
        Mixed(Node.fields['node_id'], [Node.fields['node_id']],
              Node.fields['hostname'], [Node.fields['hostname']],),
        Mixed(Slice.fields['slice_id'],
              Slice.fields['name']),
        Mixed(Lease.fields['t_from']),
        Mixed(Lease.fields['t_until']),
    ]

    returns = Parameter(
        dict,
        " 'new_ids' is the list of newly created ids,"
        "'errors' is a list of error strings")

    def call(self, auth, node_id_or_hostname_s, slice_id_or_name,
             t_from, t_until):

        # Get node information
        nodes = Nodes(self.api, node_id_or_hostname_s)
        if not nodes:
            raise PLCInvalidArgument(
                "No such node(s) {}".format(node_id_or_hostname_s))
        for node in nodes:
            if node['node_type'] != 'reservable':
                raise PLCInvalidArgument(
                    "Node {} is not reservable".format(node['hostname']))

        # Get slice information
        slices = Slices(self.api, [slice_id_or_name])
        if not slices:
            raise PLCInvalidArgument(
                "No such slice {}".format(slice_id_or_name))
        slice = slices[0]

        # check access: slice members always pass; otherwise PIs may act
        # on slices belonging to one of their sites
        if 'admin' not in self.caller['roles']:
            if self.caller['person_id'] in slice['person_ids']:
                pass
            elif 'pi' not in self.caller['roles']:
                raise PLCPermissionDenied(
                    "Not a member of the specified slice")
            elif slice['site_id'] not in self.caller['site_ids']:
                raise PLCPermissionDenied(
                    "Specified slice not associated with any of your sites")

        # normalize timestamps - use granularity to round up limits
        t_from = Timestamp.sql_validate_utc(t_from)
        t_until = Timestamp.sql_validate_utc(t_until)

        # Create one lease per node.
        #
        # BUGFIX: the previous code called nodes.remove(node) inside
        # "for node in nodes", mutating the list being iterated, which
        # silently skipped the node following any failure.  We now keep
        # a separate list of the nodes to report on instead.
        errors = []
        result_ids = []
        # nodes reported in event_objects / message: everything except
        # those whose lease creation failed (peer nodes are skipped but
        # still reported, matching the historical behavior)
        kept_nodes = []
        for node in nodes:
            if node['peer_id'] is not None:
                errors.append("Cannot set lease on remote node {}"
                              .format(node['hostname']))
                kept_nodes.append(node)
                continue
            # let the DB check for time consistency
            try:
                lease = Lease(self.api, {'node_id': node['node_id'],
                                         'slice_id': slice['slice_id'],
                                         't_from': t_from, 't_until': t_until})
                lease.sync()
                result_ids.append(lease['lease_id'])
                kept_nodes.append(node)
            except PLCDBError:
                # overlapping lease rejected by a DB constraint
                errors.append(
                    "Timeslot busy - could not create overlapping lease"
                    " on n={} s={} [{} .. {}]"
                    .format(node['hostname'], slice['name'], t_from, t_until))
            except Exception as e:
                errors.append(
                    "Could not create lease on n={} s={} [{} .. {}] -- {}"
                    .format(node['hostname'], slice['name'], t_from, t_until, e))

        self.event_objects = {'Slice': [slice['slice_id']],
                              'Node': [node['node_id'] for node in kept_nodes]}
        self.message = "New leases {} on n={} s={} [{} -> {}]"\
            .format(result_ids, [node['hostname'] for node in kept_nodes],
                    slice['name'], t_from, t_until)

        return {'new_ids': result_ids, 'errors': errors}
+ """ + + roles = ['admin'] + + accepts = [ + Auth(), + NetworkMethod.fields['method'] + ] + + returns = Parameter(int, '1 if successful') + + + def call(self, auth, name): + network_method = NetworkMethod(self.api) + network_method['method'] = name + network_method.sync(insert = True) + + return 1 diff --git a/PLC/Methods/AddNetworkType.py b/PLC/Methods/AddNetworkType.py new file mode 100644 index 0000000..6533053 --- /dev/null +++ b/PLC/Methods/AddNetworkType.py @@ -0,0 +1,29 @@ +from PLC.Faults import * +from PLC.Method import Method +from PLC.Parameter import Parameter, Mixed +from PLC.NetworkTypes import NetworkType, NetworkTypes +from PLC.Auth import Auth + +class AddNetworkType(Method): + """ + Adds a new network type. + + Returns 1 if successful, faults otherwise. + """ + + roles = ['admin'] + + accepts = [ + Auth(), + NetworkType.fields['type'] + ] + + returns = Parameter(int, '1 if successful') + + + def call(self, auth, name): + network_type = NetworkType(self.api) + network_type['type'] = name + network_type.sync(insert = True) + + return 1 diff --git a/PLC/Methods/AddNode.py b/PLC/Methods/AddNode.py new file mode 100644 index 0000000..0ee13f5 --- /dev/null +++ b/PLC/Methods/AddNode.py @@ -0,0 +1,102 @@ +from PLC.Faults import * +from PLC.Auth import Auth +from PLC.Method import Method +from PLC.Parameter import Parameter, Mixed +from PLC.Table import Row +from PLC.Namespace import hostname_to_hrn +from PLC.Peers import Peers +from PLC.Sites import Site, Sites +from PLC.Nodes import Node, Nodes +from PLC.TagTypes import TagTypes +from PLC.NodeTags import NodeTags, NodeTag +from PLC.Methods.AddNodeTag import AddNodeTag +from PLC.Methods.UpdateNodeTag import UpdateNodeTag + +can_update = ['hostname', 'node_type', 'boot_state', 'model', 'version'] + +class AddNode(Method): + """ + Adds a new node. Any values specified in node_fields are used, + otherwise defaults are used. + + PIs and techs may only add nodes to their own sites. 
Admins may + add nodes to any site. + + Returns the new node_id (> 0) if successful, faults otherwise. + """ + + roles = ['admin', 'pi', 'tech'] + + accepted_fields = Row.accepted_fields(can_update,Node.fields) + accepted_fields.update(Node.tags) + + accepts = [ + Auth(), + Mixed(Site.fields['site_id'], + Site.fields['login_base']), + accepted_fields + ] + + returns = Parameter(int, 'New node_id (> 0) if successful') + + def call(self, auth, site_id_or_login_base, node_fields): + + [native,tags,rejected]=Row.split_fields(node_fields,[Node.fields,Node.tags]) + + # type checking + native = Row.check_fields(native, self.accepted_fields) + if rejected: + raise PLCInvalidArgument, "Cannot add Node with column(s) %r"%rejected + + # Get site information + sites = Sites(self.api, [site_id_or_login_base]) + if not sites: + raise PLCInvalidArgument, "No such site" + + site = sites[0] + + # Authenticated function + assert self.caller is not None + + # If we are not an admin, make sure that the caller is a + # member of the site. 
+ if 'admin' not in self.caller['roles']: + if site['site_id'] not in self.caller['site_ids']: + assert self.caller['person_id'] not in site['person_ids'] + raise PLCPermissionDenied, "Not allowed to add nodes to specified site" + else: + assert self.caller['person_id'] in site['person_ids'] + + node = Node(self.api, native) + node['site_id'] = site['site_id'] + node.sync() + + # since hostname was specified lets add the 'hrn' node tag + root_auth = self.api.config.PLC_HRN_ROOT + login_base = site['login_base'] + tags['hrn'] = hostname_to_hrn(root_auth, login_base, node['hostname']) + + for (tagname,value) in tags.iteritems(): + # the tagtype instance is assumed to exist, just check that + tag_types = TagTypes(self.api,{'tagname':tagname}) + if not tag_types: + raise PLCInvalidArgument,"No such TagType %s"%tagname + tag_type = tag_types[0] + node_tags=NodeTags(self.api,{'tagname':tagname,'node_id':node['node_id']}) + if not node_tags: + node_tag = NodeTag(self.api) + node_tag['node_id'] = node['node_id'] + node_tag['tag_type_id'] = tag_type['tag_type_id'] + node_tag['tagname'] = tagname + node_tag['value'] = value + node_tag.sync() + else: + node_tag = node_tags[0] + node_tag['value'] = value + node_tag.sync() + + self.event_objects = {'Site': [site['site_id']], + 'Node': [node['node_id']]} + self.message = "Node %d=%s created" % (node['node_id'],node['hostname']) + + return node['node_id'] diff --git a/PLC/Methods/AddNodeGroup.py b/PLC/Methods/AddNodeGroup.py new file mode 100644 index 0000000..a24fa8e --- /dev/null +++ b/PLC/Methods/AddNodeGroup.py @@ -0,0 +1,52 @@ +from PLC.Faults import * +from PLC.Method import Method +from PLC.Parameter import Parameter, Mixed +from PLC.Auth import Auth + +from PLC.NodeGroups import NodeGroup, NodeGroups +from PLC.TagTypes import TagType, TagTypes +from PLC.NodeTags import NodeTag, NodeTags + +can_update = lambda (field, value): field in NodeGroup.fields.keys() and field != NodeGroup.primary_field + +class 
class AddNodeTag(Method):
    """
    Sets the specified tag for the specified node to the specified
    value.

    Admins have full access.  Non-admins need to (1) have at least one
    of the roles attached to the tagtype, and (2) belong in the same
    site as the tagged subject.

    Returns the new node_tag_id (> 0) if successful, faults otherwise.
    """

    roles = ['admin', 'pi', 'tech', 'user']

    accepts = [
        Auth(),
        # no other way to refer to a node
        Mixed(Node.fields['node_id'],
              Node.fields['hostname']),
        Mixed(TagType.fields['tag_type_id'],
              TagType.fields['tagname']),
        NodeTag.fields['value'],
    ]

    returns = Parameter(int, 'New node_tag_id (> 0) if successful')

    def call(self, auth, node_id, tag_type_id_or_name, value):
        # Resolve the target node
        nodes = Nodes(self.api, [node_id])
        if not nodes:
            raise PLCInvalidArgument("No such node %r"%node_id)
        node = nodes[0]

        # Resolve the tag type, by id or name
        tag_types = TagTypes(self.api, [tag_type_id_or_name])
        if not tag_types:
            raise PLCInvalidArgument("No such node tag type %r"%tag_type_id_or_name)
        tag_type = tag_types[0]

        # A node may carry at most one tag of a given type
        existing = NodeTags(self.api,
                            {'node_id': node['node_id'],
                             'tag_type_id': tag_type['tag_type_id']})
        if existing:
            raise PLCInvalidArgument("Node %d already has tag %d"%(node['node_id'],
                                                                   tag_type['tag_type_id']))

        # check authorizations
        node.caller_may_write_tag(self.api, self.caller, tag_type)

        node_tag = NodeTag(self.api)
        node_tag['node_id'] = node['node_id']
        node_tag['tag_type_id'] = tag_type['tag_type_id']
        node_tag['value'] = value
        node_tag.sync()

        self.object_ids = [node_tag['node_tag_id']]
        return node_tag['node_tag_id']
""" + Adds a node to a port on a PCU. Faults if the node has already + been added to the PCU or if the port is already in use. + + Non-admins may only update PCUs at their sites. + + Returns 1 if successful, faults otherwise. + """ + + roles = ['admin', 'pi', 'tech'] + + accepts = [ + Auth(), + Mixed(Node.fields['node_id'], + Node.fields['hostname']), + PCU.fields['pcu_id'], + Parameter(int, 'PCU port number') + ] + + returns = Parameter(int, '1 if successful') + + def call(self, auth, node_id_or_hostname, pcu_id, port): + # Get node + nodes = Nodes(self.api, [node_id_or_hostname]) + if not nodes: + raise PLCInvalidArgument, "No such node" + node = nodes[0] + + if node['peer_id'] is not None: + raise PLCInvalidArgument, "Not a local node" + + # Get PCU + pcus = PCUs(self.api, [pcu_id]) + if not pcus: + raise PLCInvalidArgument, "No such PCU" + pcu = pcus[0] + + if 'admin' not in self.caller['roles']: + ok = False + sites = Sites(self.api, self.caller['site_ids']) + for site in sites: + if pcu['pcu_id'] in site['pcu_ids']: + ok = True + break + if not ok: + raise PLCPermissionDenied, "Not allowed to update that PCU" + + # Add node to PCU + if node['node_id'] in pcu['node_ids']: + raise PLCInvalidArgument, "Node already controlled by PCU" + + if node['site_id'] != pcu['site_id']: + raise PLCInvalidArgument, "Node is at a different site than this PCU" + + if port in pcu['ports']: + raise PLCInvalidArgument, "PCU port already in use" + + pcu.add_node(node, port) + + # Logging variables + self.event_objects = {'Node': [node['node_id']], + 'PCU': [pcu['pcu_id']]} + self.message = 'Node %d added to pcu %d on port %d' % \ + (node['node_id'], pcu['pcu_id'], port) + return 1 diff --git a/PLC/Methods/AddNodeType.py b/PLC/Methods/AddNodeType.py new file mode 100644 index 0000000..d06b7cb --- /dev/null +++ b/PLC/Methods/AddNodeType.py @@ -0,0 +1,29 @@ +from PLC.Faults import * +from PLC.Method import Method +from PLC.Parameter import Parameter, Mixed +from PLC.NodeTypes import 
class AddNodeType(Method):
    """
    Adds a new node type.

    Returns 1 if successful, faults otherwise.
    """

    roles = ['admin']

    accepts = [
        Auth(),
        NodeType.fields['node_type']
    ]

    returns = Parameter(int, '1 if successful')

    def call(self, auth, name):
        # Insert the named type; sync(insert=True) forces an INSERT
        new_type = NodeType(self.api)
        new_type['node_type'] = name
        new_type.sync(insert = True)
        return 1
+ """ + + roles = ['admin', 'pi', 'tech'] + + pcu_fields = dict(filter(can_update, PCU.fields.items())) + + accepts = [ + Auth(), + Mixed(Site.fields['site_id'], + Site.fields['login_base']), + pcu_fields + ] + + returns = Parameter(int, 'New pcu_id (> 0) if successful') + + + def call(self, auth, site_id_or_login_base, pcu_fields): + pcu_fields = dict(filter(can_update, pcu_fields.items())) + + # Get associated site details + sites = Sites(self.api, [site_id_or_login_base]) + if not sites: + raise PLCInvalidArgument, "No such site" + site = sites[0] + + if 'admin' not in self.caller['roles']: + if site['site_id'] not in self.caller['site_ids']: + raise PLCPermissionDenied, "Not allowed to add a PCU to that site" + + pcu = PCU(self.api, pcu_fields) + pcu['site_id'] = site['site_id'] + pcu.sync() + + # Logging variables + self.event_objects = {'Site': [site['site_id']], + 'PCU': [pcu['pcu_id']]} + self.message = 'PCU %d added site %s' % \ + (pcu['pcu_id'], site['site_id']) + + return pcu['pcu_id'] diff --git a/PLC/Methods/AddPCUProtocolType.py b/PLC/Methods/AddPCUProtocolType.py new file mode 100644 index 0000000..870144e --- /dev/null +++ b/PLC/Methods/AddPCUProtocolType.py @@ -0,0 +1,55 @@ +from PLC.Faults import * +from PLC.Method import Method +from PLC.Parameter import Parameter, Mixed +from PLC.PCUProtocolTypes import PCUProtocolType, PCUProtocolTypes +from PLC.PCUTypes import PCUType, PCUTypes +from PLC.Auth import Auth + +can_update = lambda (field, value): field in \ + ['pcu_type_id', 'port', 'protocol', 'supported'] + +class AddPCUProtocolType(Method): + """ + Adds a new pcu protocol type. + + Returns the new pcu_protocol_type_id (> 0) if successful, faults otherwise. 
+ """ + + roles = ['admin'] + + protocol_type_fields = dict(filter(can_update, PCUProtocolType.fields.items())) + + accepts = [ + Auth(), + Mixed(PCUType.fields['pcu_type_id'], + PCUType.fields['model']), + protocol_type_fields + ] + + returns = Parameter(int, 'New pcu_protocol_type_id (> 0) if successful') + + def call(self, auth, pcu_type_id_or_model, protocol_type_fields): + + # Check if pcu type exists + pcu_types = PCUTypes(self.api, [pcu_type_id_or_model]) + if not pcu_types: + raise PLCInvalidArgument, "No such pcu type" + pcu_type = pcu_types[0] + + + # Check if this port is already used + if 'port' not in protocol_type_fields: + raise PLCInvalidArgument, "Must specify a port" + else: + protocol_types = PCUProtocolTypes(self.api, {'pcu_type_id': pcu_type['pcu_type_id']}) + for protocol_type in protocol_types: + if protocol_type['port'] == protocol_type_fields['port']: + raise PLCInvalidArgument, "Port alreay in use" + + protocol_type_fields = dict(filter(can_update, protocol_type_fields.items())) + protocol_type = PCUProtocolType(self.api, protocol_type_fields) + protocol_type['pcu_type_id'] = pcu_type['pcu_type_id'] + protocol_type.sync() + self.event_object = {'PCUProtocolType': [protocol_type['pcu_protocol_type_id']]} + + return protocol_type['pcu_protocol_type_id'] diff --git a/PLC/Methods/AddPCUType.py b/PLC/Methods/AddPCUType.py new file mode 100644 index 0000000..2c8fbe5 --- /dev/null +++ b/PLC/Methods/AddPCUType.py @@ -0,0 +1,35 @@ +from PLC.Faults import * +from PLC.Method import Method +from PLC.Parameter import Parameter, Mixed +from PLC.PCUTypes import PCUType, PCUTypes +from PLC.Auth import Auth + +can_update = lambda (field, value): field in \ + ['model', 'name'] + +class AddPCUType(Method): + """ + Adds a new pcu type. + + Returns the new pcu_type_id (> 0) if successful, faults otherwise. 
+ """ + + roles = ['admin'] + + pcu_type_fields = dict(filter(can_update, PCUType.fields.items())) + + accepts = [ + Auth(), + pcu_type_fields + ] + + returns = Parameter(int, 'New pcu_type_id (> 0) if successful') + + + def call(self, auth, pcu_type_fields): + pcu_type_fields = dict(filter(can_update, pcu_type_fields.items())) + pcu_type = PCUType(self.api, pcu_type_fields) + pcu_type.sync() + self.event_object = {'PCUType': [pcu_type['pcu_type_id']]} + + return pcu_type['pcu_type_id'] diff --git a/PLC/Methods/AddPeer.py b/PLC/Methods/AddPeer.py new file mode 100644 index 0000000..868ce11 --- /dev/null +++ b/PLC/Methods/AddPeer.py @@ -0,0 +1,36 @@ +# +# Thierry Parmentelat - INRIA +# + +from PLC.Method import Method +from PLC.Parameter import Parameter, Mixed +from PLC.Auth import Auth +from PLC.Peers import Peer, Peers + +can_update = lambda (field, value): field in \ + ['peername', 'peer_url', 'key', 'cacert', 'shortname', 'hrn_root'] + +class AddPeer(Method): + """ + Adds a new peer. + + Returns the new peer_id (> 0) if successful, faults otherwise. 
+ """ + + roles = ['admin'] + + peer_fields = dict(filter(can_update, Peer.fields.items())) + + accepts = [ + Auth(), + peer_fields + ] + + returns = Parameter(int, "New peer_id (> 0) if successful") + + def call(self, auth, peer_fields): + peer = Peer(self.api, peer_fields); + peer.sync() + self.event_objects = {'Peer': [peer['peer_id']]} + + return peer['peer_id'] diff --git a/PLC/Methods/AddPerson.py b/PLC/Methods/AddPerson.py new file mode 100644 index 0000000..4ced974 --- /dev/null +++ b/PLC/Methods/AddPerson.py @@ -0,0 +1,84 @@ +from PLC.Faults import * +from PLC.Auth import Auth +from PLC.Method import Method +from PLC.Parameter import Parameter, Mixed +from PLC.Table import Row +from PLC.Persons import Person, Persons +from PLC.TagTypes import TagTypes +from PLC.PersonTags import PersonTags, PersonTag + +can_update = ['first_name', 'last_name', 'title', + 'email', 'password', 'phone', 'url', 'bio'] + +required=['email','first_name','last_name'] + +class AddPerson(Method): + """ + Adds a new account. Any fields specified in person_fields are + used, otherwise defaults are used. + + Accounts are disabled by default. To enable an account, use + UpdatePerson(). + + Returns the new person_id (> 0) if successful, faults otherwise. 
+ """ + + roles = ['admin', 'pi'] + + accepted_fields = Row.accepted_fields(can_update,Person.fields) + accepted_fields.update(Person.tags) + + accepts = [ + Auth(), + accepted_fields + ] + + returns = Parameter(int, 'New person_id (> 0) if successful') + + def call(self, auth, person_fields): + + # silently ignore 'enabled' if passed, for backward compat + # this is forced to False below anyways + if 'enabled' in person_fields: del person_fields['enabled'] + + [native,tags,rejected]=Row.split_fields(person_fields,[Person.fields,Person.tags]) + + # type checking + native = Row.check_fields(native, self.accepted_fields) + if rejected: + raise PLCInvalidArgument, "Cannot add Person with column(s) %r"%rejected + + missing=[ r for r in required if r not in native ] + if missing: + raise PLCInvalidArgument, "Missing mandatory arguments %s to AddPerson"%missing + + # handle native fields + native['enabled'] = False + person = Person(self.api, native) + person.sync() + + # handle tags + for (tagname,value) in tags.iteritems(): + # the tagtype instance is assumed to exist, just check that + tag_types = TagTypes(self.api,{'tagname':tagname}) + if not tag_types: + raise PLCInvalidArgument,"No such TagType %s"%tagname + tag_type = tag_types[0] + person_tags=PersonTags(self.api,{'tagname':tagname,'person_id':person['person_id']}) + if not person_tags: + person_tag = PersonTag(self.api) + person_tag['person_id'] = person['person_id'] + person_tag['tag_type_id'] = tag_type['tag_type_id'] + person_tag['tagname'] = tagname + person_tag['value'] = value + person_tag.sync() + else: + person_tag = person_tags[0] + person_tag['value'] = value + person_tag.sync() + + # Logging variables + self.event_objects = {'Person': [person['person_id']]} + self.message = 'Person %d added' % person['person_id'] + + return person['person_id'] diff --git a/PLC/Methods/AddPersonKey.py b/PLC/Methods/AddPersonKey.py new file mode 100644 index 0000000..af70185 --- /dev/null +++ 
b/PLC/Methods/AddPersonKey.py @@ -0,0 +1,59 @@ +from PLC.Faults import * +from PLC.Method import Method +from PLC.Parameter import Parameter, Mixed +from PLC.Keys import Key, Keys +from PLC.Persons import Person, Persons +from PLC.Auth import Auth + +can_update = lambda (field, value): field in ['key_type','key'] + +class AddPersonKey(Method): + """ + Adds a new key to the specified account. + + Non-admins can only modify their own keys. + + Returns the new key_id (> 0) if successful, faults otherwise. + """ + + roles = ['admin', 'pi', 'tech', 'user'] + + key_fields = dict(filter(can_update, Key.fields.items())) + + accepts = [ + Auth(), + Mixed(Person.fields['person_id'], + Person.fields['email']), + key_fields + ] + + returns = Parameter(int, 'New key_id (> 0) if successful') + + def call(self, auth, person_id_or_email, key_fields): + key_fields = dict(filter(can_update, key_fields.items())) + + # Get account details + persons = Persons(self.api, [person_id_or_email]) + if not persons: + raise PLCInvalidArgument, "No such account" + person = persons[0] + + if person['peer_id'] is not None: + raise PLCInvalidArgument, "Not a local account" + + # If we are not admin, make sure caller is adding a key to their account + if 'admin' not in self.caller['roles']: + if person['person_id'] != self.caller['person_id']: + raise PLCPermissionDenied, "You may only modify your own keys" + + key = Key(self.api, key_fields) + key.sync(commit = False) + person.add_key(key, commit = True) + + # Logging variables + self.event_objects = {'Person': [person['person_id']], + 'Key': [key['key_id']]} + self.message = 'Key %d added to person %d' % \ + (key['key_id'], person['person_id']) + + return key['key_id'] diff --git a/PLC/Methods/AddPersonTag.py b/PLC/Methods/AddPersonTag.py new file mode 100644 index 0000000..244b546 --- /dev/null +++ b/PLC/Methods/AddPersonTag.py @@ -0,0 +1,70 @@ +# +# Thierry Parmentelat - INRIA +# +from PLC.Faults import * +from PLC.Method import Method +from 
PLC.Parameter import Parameter, Mixed +from PLC.Auth import Auth + +from PLC.Persons import Person, Persons +from PLC.TagTypes import TagType, TagTypes +from PLC.PersonTags import PersonTag, PersonTags + +# need to import so the core classes get decorated with caller_may_write_tag +from PLC.AuthorizeHelpers import AuthorizeHelpers + +class AddPersonTag(Method): + """ + Sets the specified setting for the specified person + to the specified value. + + Admins have full access. Non-admins can change their own tags. + + Returns the new person_tag_id (> 0) if successful, faults + otherwise. + """ + + roles = ['admin', 'pi', 'tech', 'user'] + + accepts = [ + Auth(), + # no other way to refer to a person + PersonTag.fields['person_id'], + Mixed(TagType.fields['tag_type_id'], + TagType.fields['tagname']), + PersonTag.fields['value'], + ] + + returns = Parameter(int, 'New person_tag_id (> 0) if successful') + + def call(self, auth, person_id, tag_type_id_or_name, value): + persons = Persons(self.api, [person_id]) + if not persons: + raise PLCInvalidArgument, "No such person %r"%person_id + person = persons[0] + + tag_types = TagTypes(self.api, [tag_type_id_or_name]) + if not tag_types: + raise PLCInvalidArgument, "No such tag type %r"%tag_type_id_or_name + tag_type = tag_types[0] + + # checks for existence - does not allow several different settings + conflicts = PersonTags(self.api, {'person_id':person['person_id'], + 'tag_type_id':tag_type['tag_type_id']}) + + if len(conflicts) : + raise PLCInvalidArgument, "Person %d (%s) already has setting %d"% \ + (person['person_id'],person['email'], tag_type['tag_type_id']) + + # check authorizations + person.caller_may_write_tag (self.api,self.caller,tag_type) + + person_tag = PersonTag(self.api) + person_tag['person_id'] = person['person_id'] + person_tag['tag_type_id'] = tag_type['tag_type_id'] + person_tag['value'] = value + + person_tag.sync() + self.object_ids = [person_tag['person_tag_id']] + + return 
from PLC.Faults import *
from PLC.Auth import Auth
from PLC.Method import Method
from PLC.Parameter import Parameter, Mixed
from PLC.Sites import Site, Sites
from PLC.Persons import Person, Persons
from PLC.PersonTags import PersonTags, PersonTag
from PLC.Namespace import email_to_hrn
from PLC.TagTypes import TagTypes

from PLC.Logger import logger

class AddPersonToSite(Method):
    """
    Adds the specified person to the specified site. If the person is
    already a member of the site, no errors are returned. Does not
    change the person's primary site.

    Returns 1 if successful, faults otherwise.
    """

    roles = ['admin']

    accepts = [
        Auth(),
        Mixed(Person.fields['person_id'],
              Person.fields['email']),
        Mixed(Site.fields['site_id'],
              Site.fields['login_base'])
        ]

    returns = Parameter(int, '1 if successful')

    def call(self, auth, person_id_or_email, site_id_or_login_base):
        # Get account information
        persons = Persons(self.api, [person_id_or_email])
        if not persons:
            raise PLCInvalidArgument, "No such account"
        person = persons[0]

        # Both ends of the link must be locally homed.
        if person['peer_id'] is not None:
            raise PLCInvalidArgument, "Not a local account"

        # Get site information
        sites = Sites(self.api, [site_id_or_login_base])
        if not sites:
            raise PLCInvalidArgument, "No such site"
        site = sites[0]

        if site['peer_id'] is not None:
            raise PLCInvalidArgument, "Not a local site"

        # Idempotent: only link when not already a member.
        if site['site_id'] not in person['site_ids']:
            site.add_person(person)

        # Logging variables
        self.event_objects = {'Site': [site['site_id']],
                              'Person': [person['person_id']]}
        self.message = 'Person %d added to site %d' % \
                       (person['person_id'], site['site_id'])

        # maintain person's hrn
        # only if at this point we have a single site
        # which means, there was no site attached to person upon entering this call
        # NOTE(review): person['site_ids'] is the snapshot fetched above,
        # i.e. the membership *before* add_person ran — presumably not
        # refreshed by add_person; confirm against Persons/Site internals.
        try:
            had_no_site= (len (person['site_ids']) == 0)
            if had_no_site:
                login_base = site['login_base']
                root_auth = self.api.config.PLC_HRN_ROOT
                hrn = email_to_hrn("%s.%s"%(root_auth,login_base),person['email'])
                tagname = 'hrn'
                tag_type = TagTypes(self.api,{'tagname':tagname})[0]
                # Create or overwrite the person's 'hrn' tag.
                person_tags = PersonTags(self.api,{'tagname':tagname,'person_id':person['person_id']})
                if not person_tags:
                    person_tag = PersonTag(self.api)
                    person_tag['person_id'] = person['person_id']
                    person_tag['tag_type_id'] = tag_type['tag_type_id']
                    person_tag['tagname'] = tagname
                    person_tag['value'] = hrn
                    person_tag.sync()
                else:
                    person_tag = person_tags[0]
                    person_tag['value'] = hrn
                    person_tag.sync()
        except Exception as e:
            # best-effort: hrn maintenance failure must not fail the call
            logger.exception("ERROR cannot maintain person's hrn, {}"
                             .format(person_id_or_email))

        return 1
+ """ + + roles = ['admin', 'pi'] + + accepts = [ + Auth(), + Mixed(Person.fields['person_id'], + Person.fields['email']), + Mixed(Slice.fields['slice_id'], + Slice.fields['name']) + ] + + returns = Parameter(int, '1 if successful') + + def call(self, auth, person_id_or_email, slice_id_or_name): + # Get account information + persons = Persons(self.api, [person_id_or_email]) + if not persons: + raise PLCInvalidArgument, "No such account %s"%person_id_or_email + person = persons[0] + + # Get slice information + slices = Slices(self.api, [slice_id_or_name]) + if not slices: + raise PLCInvalidArgument, "No such slice %s"%slice_id_or_name + slice = slices[0] + + # N.B. Allow foreign users to be added to local slices and + # local users to be added to foreign slices (and, of course, + # local users to be added to local slices). + if person['peer_id'] is not None and slice['peer_id'] is not None: + raise PLCInvalidArgument, "Cannot add foreign users to foreign slices" + + # If we are not admin, make sure the caller is a PI + # of the site associated with the slice + if 'admin' not in self.caller['roles']: + if slice['site_id'] not in self.caller['site_ids']: + raise PLCPermissionDenied, "Not allowed to add users to slice %s"%slice_id_or_name + + if slice['slice_id'] not in person['slice_ids']: + slice.add_person(person) + + # Logging variables + self.event_objects = {'Person': [person['person_id']], + 'Slice': [slice['slice_id']]} + self.object_ids = [slice['slice_id']] + + return 1 diff --git a/PLC/Methods/AddRole.py b/PLC/Methods/AddRole.py new file mode 100644 index 0000000..d42858f --- /dev/null +++ b/PLC/Methods/AddRole.py @@ -0,0 +1,32 @@ +from PLC.Faults import * +from PLC.Method import Method +from PLC.Parameter import Parameter, Mixed +from PLC.Roles import Role, Roles +from PLC.Auth import Auth + +class AddRole(Method): + """ + Adds a new role. + + Returns 1 if successful, faults otherwise. 
+ """ + + roles = ['admin'] + + accepts = [ + Auth(), + Role.fields['role_id'], + Role.fields['name'] + ] + + returns = Parameter(int, '1 if successful') + + + def call(self, auth, role_id, name): + role = Role(self.api) + role['role_id'] = role_id + role['name'] = name + role.sync(insert = True) + self.event_objects = {'Role': [role['role_id']]} + + return 1 diff --git a/PLC/Methods/AddRoleToPerson.py b/PLC/Methods/AddRoleToPerson.py new file mode 100644 index 0000000..1e47033 --- /dev/null +++ b/PLC/Methods/AddRoleToPerson.py @@ -0,0 +1,66 @@ +from PLC.Faults import * +from PLC.Method import Method +from PLC.Parameter import Parameter, Mixed +from PLC.Persons import Person, Persons +from PLC.Auth import Auth +from PLC.Roles import Role, Roles + +class AddRoleToPerson(Method): + """ + Grants the specified role to the person. + + PIs can only grant the tech and user roles to users and techs at + their sites. Admins can grant any role to any user. + + Returns 1 if successful, faults otherwise. 
+ """ + + roles = ['admin', 'pi'] + + accepts = [ + Auth(), + Mixed(Role.fields['role_id'], + Role.fields['name']), + Mixed(Person.fields['person_id'], + Person.fields['email']), + ] + + returns = Parameter(int, '1 if successful') + + def call(self, auth, role_id_or_name, person_id_or_email): + # Get role + roles = Roles(self.api, [role_id_or_name]) + if not roles: + raise PLCInvalidArgument, "Invalid role '%s'" % unicode(role_id_or_name) + role = roles[0] + + # Get account information + persons = Persons(self.api, [person_id_or_email]) + if not persons: + raise PLCInvalidArgument, "No such account" + person = persons[0] + + if person['peer_id'] is not None: + raise PLCInvalidArgument, "Not a local account" + + # Authenticated function + assert self.caller is not None + + # Check if we can update this account + if not self.caller.can_update(person): + raise PLCPermissionDenied, "Not allowed to update specified account" + + # Can only grant lesser (higher) roles to others + if 'admin' not in self.caller['roles'] and \ + role['role_id'] <= min(self.caller['role_ids']): + raise PLCInvalidArgument, "Not allowed to grant that role" + + if role['role_id'] not in person['role_ids']: + person.add_role(role) + + self.event_objects = {'Person': [person['person_id']], + 'Role': [role['role_id']]} + self.message = "Role %d granted to person %d" % \ + (role['role_id'], person['person_id']) + + return 1 diff --git a/PLC/Methods/AddRoleToTagType.py b/PLC/Methods/AddRoleToTagType.py new file mode 100644 index 0000000..947adcd --- /dev/null +++ b/PLC/Methods/AddRoleToTagType.py @@ -0,0 +1,58 @@ +from PLC.Faults import * +from PLC.Method import Method +from PLC.Auth import Auth +from PLC.Parameter import Parameter, Mixed +from PLC.TagTypes import TagType, TagTypes +from PLC.Roles import Role, Roles + +class AddRoleToTagType(Method): + """ + Add the specified role to the tagtype so that + users with that role can tweak the tag. 
+ + Only admins can call this method + + Returns 1 if successful, faults otherwise. + """ + + roles = ['admin'] + + accepts = [ + Auth(), + Mixed(Role.fields['role_id'], + Role.fields['name']), + Mixed(TagType.fields['tag_type_id'], + TagType.fields['tagname']), + ] + + returns = Parameter(int, '1 if successful') + + def call(self, auth, role_id_or_name, tag_type_id_or_tagname): + # Get role + roles = Roles(self.api, [role_id_or_name]) + if not roles: + raise PLCInvalidArgument, "Invalid role '%s'" % unicode(role_id_or_name) + role = roles[0] + + # Get subject tag type + tag_types = TagTypes(self.api, [tag_type_id_or_tagname]) + if not tag_types: + raise PLCInvalidArgument, "No such tag type" + tag_type = tag_types[0] + + # Authenticated function + assert self.caller is not None + + # Only admins + if 'admin' not in self.caller['roles']: + raise PLCInvalidArgument, "Not allowed to grant that role" + + if role['role_id'] not in tag_type['role_ids']: + tag_type.add_role(role) + + self.event_objects = {'TagType': [tag_type['tag_type_id']], + 'Role': [role['role_id']]} + self.message = "Role %d added to tag_type %d" % \ + (role['role_id'], tag_type['tag_type_id']) + + return 1 diff --git a/PLC/Methods/AddSession.py b/PLC/Methods/AddSession.py new file mode 100644 index 0000000..32da201 --- /dev/null +++ b/PLC/Methods/AddSession.py @@ -0,0 +1,37 @@ +import time + +from PLC.Method import Method +from PLC.Parameter import Parameter, Mixed +from PLC.Auth import Auth +from PLC.Sessions import Session, Sessions +from PLC.Persons import Person, Persons + +class AddSession(Method): + """ + Creates and returns a new session key for the specified user. 
+ (Used for website 'user sudo') + """ + + roles = ['admin'] + accepts = [ + Auth(), + Mixed(Person.fields['person_id'], + Person.fields['email']) + ] + returns = Session.fields['session_id'] + + + def call(self, auth, person_id_or_email): + + persons = Persons(self.api, [person_id_or_email], ['person_id', 'email']) + + if not persons: + raise PLCInvalidArgument, "No such person" + + person = persons[0] + session = Session(self.api) + session['expires'] = int(time.time()) + (24 * 60 * 60) + session.sync(commit = False) + session.add_person(person, commit = True) + + return session['session_id'] diff --git a/PLC/Methods/AddSite.py b/PLC/Methods/AddSite.py new file mode 100644 index 0000000..2150a91 --- /dev/null +++ b/PLC/Methods/AddSite.py @@ -0,0 +1,50 @@ +from PLC.Faults import * +from PLC.Method import Method +from PLC.Parameter import Parameter, Mixed +from PLC.Sites import Site, Sites +from PLC.Auth import Auth + +from PLC.Methods.AddSiteTag import AddSiteTag + +can_update = lambda (field, value): field in \ + ['name', 'abbreviated_name', 'login_base', + 'is_public', 'latitude', 'longitude', 'url', + 'max_slices', 'max_slivers', 'enabled', 'ext_consortium_id'] + +class AddSite(Method): + """ + Adds a new site, and creates a node group for that site. Any + fields specified in site_fields are used, otherwise defaults are + used. + + Returns the new site_id (> 0) if successful, faults otherwise. 
+ """ + + roles = ['admin'] + + site_fields = dict(filter(can_update, Site.fields.items())) + + accepts = [ + Auth(), + site_fields + ] + + returns = Parameter(int, 'New site_id (> 0) if successful') + + def call(self, auth, site_fields): + site_fields = dict(filter(can_update, site_fields.items())) + site = Site(self.api, site_fields) + site.sync() + + # Logging variables + self.event_objects = {'Site': [site['site_id']]} + self.message = 'Site %d created' % site['site_id'] + + # Set Site HRN + root_auth = self.api.config.PLC_HRN_ROOT + tagname = 'hrn' + tagvalue = '.'.join([root_auth, site['login_base']]) + AddSiteTag(self.api).__call__(auth,site['site_id'],tagname,tagvalue) + + + return site['site_id'] diff --git a/PLC/Methods/AddSiteAddress.py b/PLC/Methods/AddSiteAddress.py new file mode 100644 index 0000000..5514805 --- /dev/null +++ b/PLC/Methods/AddSiteAddress.py @@ -0,0 +1,58 @@ +from PLC.Faults import * +from PLC.Method import Method +from PLC.Parameter import Parameter, Mixed +from PLC.Addresses import Address, Addresses +from PLC.Auth import Auth +from PLC.Sites import Site, Sites + +can_update = lambda (field, value): field in \ + ['line1', 'line2', 'line3', + 'city', 'state', 'postalcode', 'country'] + +class AddSiteAddress(Method): + """ + Adds a new address to a site. Fields specified in + address_fields are used; some are not optional. + + PIs may only add addresses to their own sites. + + Returns the new address_id (> 0) if successful, faults otherwise. 
+ """ + + roles = ['admin', 'pi'] + + address_fields = dict(filter(can_update, Address.fields.items())) + + accepts = [ + Auth(), + Mixed(Site.fields['site_id'], + Site.fields['login_base']), + address_fields + ] + + returns = Parameter(int, 'New address_id (> 0) if successful') + + def call(self, auth, site_id_or_login_base, address_fields): + address_fields = dict(filter(can_update, address_fields.items())) + + # Get associated site details + sites = Sites(self.api, [site_id_or_login_base]) + if not sites: + raise PLCInvalidArgument, "No such site" + site = sites[0] + + if 'admin' not in self.caller['roles']: + if site['site_id'] not in self.caller['site_ids']: + raise PLCPermissionDenied, "Address must be associated with one of your sites" + + address = Address(self.api, address_fields) + address.sync(commit = False) + site.add_address(address, commit = True) + + # Logging variables + self.event_objects = {'Site': [site['site_id']], + 'Address': [address['address_id']]} + self.message = 'Address %d assigned to Site %d' % \ + (address['address_id'], site['site_id']) + + return address['address_id'] diff --git a/PLC/Methods/AddSiteTag.py b/PLC/Methods/AddSiteTag.py new file mode 100644 index 0000000..bb5cfae --- /dev/null +++ b/PLC/Methods/AddSiteTag.py @@ -0,0 +1,74 @@ +# +# Thierry Parmentelat - INRIA +# +from PLC.Faults import * +from PLC.Method import Method +from PLC.Parameter import Parameter, Mixed +from PLC.Auth import Auth + +from PLC.Sites import Site, Sites +from PLC.Nodes import Nodes +from PLC.TagTypes import TagType, TagTypes +from PLC.SiteTags import SiteTag, SiteTags + +# need to import so the core classes get decorated with caller_may_write_tag +from PLC.AuthorizeHelpers import AuthorizeHelpers + +class AddSiteTag(Method): + """ + Sets the specified setting for the specified site + to the specified value. + + Admins have full access. 
Non-admins need to + (1) have at least one of the roles attached to the tagtype, + and (2) belong in the same site as the tagged subject. + + Returns the new site_tag_id (> 0) if successful, faults + otherwise. + """ + + roles = ['admin', 'pi', 'tech', 'user'] + + accepts = [ + Auth(), + # no other way to refer to a site + SiteTag.fields['site_id'], + Mixed(TagType.fields['tag_type_id'], + TagType.fields['tagname']), + SiteTag.fields['value'], + ] + + returns = Parameter(int, 'New site_tag_id (> 0) if successful') + + def call(self, auth, site_id, tag_type_id_or_name, value): + sites = Sites(self.api, [site_id]) + if not sites: + raise PLCInvalidArgument, "No such site %r"%site_id + site = sites[0] + + tag_types = TagTypes(self.api, [tag_type_id_or_name]) + if not tag_types: + raise PLCInvalidArgument, "No such tag type %r"%tag_type_id_or_name + tag_type = tag_types[0] + + # checks for existence - does not allow several different settings + conflicts = SiteTags(self.api, + {'site_id':site['site_id'], + 'tag_type_id':tag_type['tag_type_id']}) + + if len(conflicts) : + raise PLCInvalidArgument, "Site %d already has setting %d"%(site['site_id'], + tag_type['tag_type_id']) + + # check authorizations + site.caller_may_write_tag(self.api,self.caller,tag_type) + + site_tag = SiteTag(self.api) + site_tag['site_id'] = site['site_id'] + site_tag['tag_type_id'] = tag_type['tag_type_id'] + site_tag['value'] = value + + site_tag.sync() + self.object_ids = [site_tag['site_tag_id']] + + return site_tag['site_tag_id'] diff --git a/PLC/Methods/AddSlice.py b/PLC/Methods/AddSlice.py new file mode 100644 index 0000000..7542cf0 --- /dev/null +++ b/PLC/Methods/AddSlice.py @@ -0,0 +1,119 @@ +import re + +from PLC.Faults import * +from PLC.Auth import Auth +from PLC.Method import Method +from PLC.Parameter import Parameter, Mixed +from PLC.Table import Row + +from PLC.Slices import Slice, Slices +from PLC.Sites import Site, Sites +from PLC.TagTypes import TagTypes +from PLC.SliceTags 
import re

from PLC.Faults import *
from PLC.Auth import Auth
from PLC.Method import Method
from PLC.Parameter import Parameter, Mixed
from PLC.Table import Row

from PLC.Slices import Slice, Slices
from PLC.Sites import Site, Sites
from PLC.TagTypes import TagTypes
from PLC.SliceTags import SliceTags
from PLC.Methods.AddSliceTag import AddSliceTag
from PLC.Methods.UpdateSliceTag import UpdateSliceTag

from PLC.Logger import logger

# Native slice columns a caller may supply at creation time.
can_update = ['name', 'instantiation', 'url', 'description', 'max_nodes']

class AddSlice(Method):
    """
    Adds a new slice. Any fields specified in slice_fields are used,
    otherwise defaults are used.

    Valid slice names are lowercase and begin with the login_base
    (slice prefix) of a valid site, followed by a single
    underscore. Thereafter, only letters, numbers, or additional
    underscores may be used.

    PIs may only add slices associated with their own sites (i.e.,
    slice prefixes must always be the login_base of one of their
    sites).

    Returns the new slice_id (> 0) if successful, faults otherwise.
    """

    roles = ['admin', 'pi']

    accepted_fields = Row.accepted_fields(can_update, Slice.fields)
    accepted_fields.update(Slice.tags)

    accepts = [
        Auth(),
        accepted_fields
        ]

    returns = Parameter(int, 'New slice_id (> 0) if successful')

    def call(self, auth, slice_fields):

        # Partition input into native columns, tag values, and rejects.
        [native,tags,rejected]=Row.split_fields(slice_fields,[Slice.fields,Slice.tags])

        # type checking
        native = Row.check_fields (native, self.accepted_fields)
        if rejected:
            raise PLCInvalidArgument, "Cannot add Slice with column(s) %r"%rejected

        # Authenticated function
        assert self.caller is not None

        # 1. Lowercase.
        # 2. Begins with login_base (letters or numbers).
        # 3. Then single underscore after login_base.
        # 4. Then letters, numbers, or underscores.
        name = slice_fields['name']
        good_name = r'^[a-z0-9\.]+_[a-zA-Z0-9_\.]+$'
        if not name or \
           not re.match(good_name, name):
            raise PLCInvalidArgument, "Invalid slice name"

        # Get associated site details
        login_base = name.split("_")[0]
        sites = Sites(self.api, [login_base])
        if not sites:
            raise PLCInvalidArgument, "Invalid slice prefix %s in %s"%(login_base,name)
        site = sites[0]

        if 'admin' not in self.caller['roles']:
            if site['site_id'] not in self.caller['site_ids']:
                raise PLCPermissionDenied, "Slice prefix %s must match one of your sites' login_base"%login_base

        # Enforce the site's slice quota.
        if len(site['slice_ids']) >= site['max_slices']:
            raise PLCInvalidArgument, \
                "Site %s has reached (%d) its maximum allowable slice count (%d)"%(site['name'],
                                                                                   len(site['slice_ids']),
                                                                                   site['max_slices'])
        # NOTE(review): message below reads "can cannot" — runtime string
        # deliberately left untouched in this documentation pass.
        if not site['enabled']:
            raise PLCInvalidArgument, "Site %s is disabled and can cannot create slices" % (site['name'])

        slice = Slice(self.api, native)
        slice['creator_person_id'] = self.caller['person_id']
        slice['site_id'] = site['site_id']
        slice.sync()

        # Set Slice HRN
        root_auth = self.api.config.PLC_HRN_ROOT
        tags['hrn'] = '.'.join([root_auth, login_base, name.split("_")[1]])

        for (tagname,value) in tags.iteritems():
            # the tagtype instance is assumed to exist, just check that
            if not TagTypes(self.api,{'tagname':tagname}):
                raise PLCInvalidArgument,"No such TagType %s"%tagname
            # Create or update via the public methods so their checks apply.
            slice_tags=SliceTags(self.api,{'tagname':tagname,'slice_id':slice['slice_id']})
            if not slice_tags:
                AddSliceTag(self.api).__call__(auth,slice['slice_id'],tagname,value)
            else:
                UpdateSliceTag(self.api).__call__(auth,slice_tags[0]['slice_tag_id'],value)

        # take PLC_VSYS_DEFAULTS into account for convenience
        try:
            values= [ y for y in [ x.strip() for x in self.api.config.PLC_VSYS_DEFAULTS.split(',') ] if y ]
            for value in values:
                AddSliceTag(self.api).__call__(auth,slice['slice_id'],'vsys',value)
        except:
            # best-effort: a bad/missing PLC_VSYS_DEFAULTS must not fail the call
            logger.exception("Could not set vsys tags as configured in PLC_VSYS_DEFAULTS")

        self.event_objects = {'Slice': [slice['slice_id']]}
        self.message = "Slice %d created" % slice['slice_id']

        return slice['slice_id']
configured in PLC_VSYS_DEFAULTS") + self.event_objects = {'Slice': [slice['slice_id']]} + self.message = "Slice %d created" % slice['slice_id'] + + return slice['slice_id'] diff --git a/PLC/Methods/AddSliceInstantiation.py b/PLC/Methods/AddSliceInstantiation.py new file mode 100644 index 0000000..0374957 --- /dev/null +++ b/PLC/Methods/AddSliceInstantiation.py @@ -0,0 +1,29 @@ +from PLC.Faults import * +from PLC.Method import Method +from PLC.Parameter import Parameter, Mixed +from PLC.SliceInstantiations import SliceInstantiation, SliceInstantiations +from PLC.Auth import Auth + +class AddSliceInstantiation(Method): + """ + Adds a new slice instantiation state. + + Returns 1 if successful, faults otherwise. + """ + + roles = ['admin'] + + accepts = [ + Auth(), + SliceInstantiation.fields['instantiation'] + ] + + returns = Parameter(int, '1 if successful') + + + def call(self, auth, name): + slice_instantiation = SliceInstantiation(self.api) + slice_instantiation['instantiation'] = name + slice_instantiation.sync(insert = True) + + return 1 diff --git a/PLC/Methods/AddSliceTag.py b/PLC/Methods/AddSliceTag.py new file mode 100644 index 0000000..ef78fa6 --- /dev/null +++ b/PLC/Methods/AddSliceTag.py @@ -0,0 +1,138 @@ +# +# Thierry Parmentelat - INRIA +# +from PLC.Faults import * +from PLC.Method import Method +from PLC.Parameter import Parameter, Mixed +from PLC.Auth import Auth + +from PLC.TagTypes import TagType, TagTypes +from PLC.Slices import Slice, Slices +from PLC.Nodes import Node, Nodes +from PLC.SliceTags import SliceTag, SliceTags +from PLC.NodeGroups import NodeGroup, NodeGroups +from PLC.InitScripts import InitScript, InitScripts + +# need to import so the core classes get decorated with caller_may_write_tag +from PLC.AuthorizeHelpers import AuthorizeHelpers + +class AddSliceTag(Method): + """ + Sets the specified tag of the slice to the specified value. + If nodegroup is specified, this applies to all slivers of that group. 
#
# Thierry Parmentelat - INRIA
#
from PLC.Faults import *
from PLC.Method import Method
from PLC.Parameter import Parameter, Mixed
from PLC.Auth import Auth

from PLC.TagTypes import TagType, TagTypes
from PLC.Slices import Slice, Slices
from PLC.Nodes import Node, Nodes
from PLC.SliceTags import SliceTag, SliceTags
from PLC.NodeGroups import NodeGroup, NodeGroups
from PLC.InitScripts import InitScript, InitScripts

# need to import so the core classes get decorated with caller_may_write_tag
from PLC.AuthorizeHelpers import AuthorizeHelpers

class AddSliceTag(Method):
    """
    Sets the specified tag of the slice to the specified value.
    If nodegroup is specified, this applies to all slivers of that group.
    If node is specified, this only applies to a sliver.

    Admins have full access, including on nodegroups.

    Non-admins need to have at least one of the roles
    attached to the tagtype. In addition:
    (*) Users may only set tags of slices or slivers of which they are members.
    (*) PIs may only set tags of slices in their site
    (*) techs cannot use this method

    Returns the new slice_tag_id (> 0) if successful, faults
    otherwise.
    """

    roles = ['admin', 'pi', 'user', 'node']

    accepts = [
        Auth(),
        Mixed(Slice.fields['slice_id'],
              Slice.fields['name']),
        Mixed(SliceTag.fields['tag_type_id'],
              SliceTag.fields['tagname']),
        Mixed(SliceTag.fields['value'],
              InitScript.fields['name']),
        Mixed(Node.fields['node_id'],
              Node.fields['hostname'],
              None),
        Mixed(NodeGroup.fields['nodegroup_id'],
              NodeGroup.fields['groupname'])
        ]

    returns = Parameter(int, 'New slice_tag_id (> 0) if successful')

    def call(self, auth, slice_id_or_name, tag_type_id_or_name, value, node_id_or_hostname = None, nodegroup_id_or_name = None):
        # Resolve the slice and the tag type; fault when either is unknown.
        slices = Slices(self.api, [slice_id_or_name])
        if not slices:
            raise PLCInvalidArgument, "No such slice %r"%slice_id_or_name
        slice = slices[0]

        tag_types = TagTypes(self.api, [tag_type_id_or_name])
        if not tag_types:
            raise PLCInvalidArgument, "No such tag type %r"%tag_type_id_or_name
        tag_type = tag_types[0]

        # check authorizations
        # (method added onto Slice by importing AuthorizeHelpers above)
        slice.caller_may_write_tag (self.api,self.caller,tag_type,node_id_or_hostname,nodegroup_id_or_name)

        # if initscript is specified, validate value
        if tag_type['tagname'] in ['initscript']:
            initscripts = InitScripts(self.api, {'enabled': True, 'name': value})
            if not initscripts:
                raise PLCInvalidArgument, "No such plc initscript %r"%value

        slice_tag = SliceTag(self.api)
        slice_tag['slice_id'] = slice['slice_id']
        slice_tag['tag_type_id'] = tag_type['tag_type_id']
        slice_tag['value'] = unicode(value)

        # Sliver attribute if node is specified
        # (a node caller tags its own sliver even with no explicit node arg)
        if node_id_or_hostname is not None or isinstance(self.caller, Node):
            node_id = None
            if isinstance(self.caller, Node):
                node = self.caller
                node_id = node['node_id']

            if node_id_or_hostname is not None:
                nodes = Nodes(self.api, [node_id_or_hostname])
                if not nodes:
                    raise PLCInvalidArgument, "No such node"
                node = nodes[0]
                # a node caller may only target itself
                if node_id <> None and node_id <> node['node_id']:
                    raise PLCPermissionDenied, "Not allowed to set another node's sliver attribute"
                else:
                    node_id = node['node_id']

            # system slices (tag system=1) may carry sliver tags on any node;
            # ordinary slices must actually run on the target node
            system_slice_tags = SliceTags(self.api, {'tagname': 'system', 'value': '1'}).dict('slice_id')
            system_slice_ids = system_slice_tags.keys()
            if slice['slice_id'] not in system_slice_ids and node_id not in slice['node_ids']:
                raise PLCInvalidArgument, "AddSliceTag: slice %s not on specified node %s nor is it a system slice (%r)"%\
                    (slice['name'],node['hostname'],system_slice_ids)
            slice_tag['node_id'] = node['node_id']

        # Sliver attribute shared accross nodes if nodegroup is sepcified
        if nodegroup_id_or_name is not None:
            if isinstance(self.caller, Node):
                raise PLCPermissionDenied, "Not allowed to set nodegroup slice attributes"

            nodegroups = NodeGroups(self.api, [nodegroup_id_or_name])
            if not nodegroups:
                raise PLCInvalidArgument, "No such nodegroup %r"%nodegroup_id_or_name
            nodegroup = nodegroups[0]

            slice_tag['nodegroup_id'] = nodegroup['nodegroup_id']

        # Check if slice attribute already exists
        # NOTE: duplicate detection keys off whether 'node_id' was set on
        # the row being built, so slice-wide and per-sliver tags with the
        # same value can coexist.
        slice_tags_check = SliceTags(self.api, {'slice_id': slice['slice_id'],
                                                'tagname': tag_type['tagname'],
                                                'value': value})
        for slice_tag_check in slice_tags_check:
            # do not compare between slice tag and sliver tag
            if 'node_id' not in slice_tag and slice_tag_check['node_id'] is not None:
                continue
            # do not compare between sliver tag and slice tag
            if 'node_id' in slice_tag and slice_tag['node_id'] is not None and slice_tag_check['node_id'] is None:
                continue
            if 'node_id' in slice_tag and slice_tag['node_id'] == slice_tag_check['node_id']:
                raise PLCInvalidArgument, "Sliver attribute already exists"
            if 'nodegroup_id' in slice_tag and slice_tag['nodegroup_id'] == slice_tag_check['nodegroup_id']:
                raise PLCInvalidArgument, "Slice attribute already exists for this nodegroup"
            if node_id_or_hostname is None and nodegroup_id_or_name is None:
                raise PLCInvalidArgument, "Slice attribute already exists"

        slice_tag.sync()
        self.event_objects = {'SliceTag': [slice_tag['slice_tag_id']]}

        return slice_tag['slice_tag_id']
+ """ + + roles = ['admin', 'pi', 'user'] + + accepts = [ + Auth(), + Mixed(Slice.fields['slice_id'], + Slice.fields['name']), + [Mixed(Node.fields['node_id'], + Node.fields['hostname'])] + ] + + returns = Parameter(int, '1 if successful') + + def call(self, auth, slice_id_or_name, node_id_or_hostname_list): + # Get slice information + slices = Slices(self.api, [slice_id_or_name]) + if not slices: + raise PLCInvalidArgument, "No such slice %r"%slice_id_or_name + slice = slices[0] + + if slice['peer_id'] is not None: + raise PLCInvalidArgument, "Not a local slice" + + if 'admin' not in self.caller['roles']: + if self.caller['person_id'] in slice['person_ids']: + pass + elif 'pi' not in self.caller['roles']: + raise PLCPermissionDenied, "Not a member of the specified slice" + elif slice['site_id'] not in self.caller['site_ids']: + raise PLCPermissionDenied, "Specified slice not associated with any of your sites" + + # Get specified nodes, add them to the slice + nodes = Nodes(self.api, node_id_or_hostname_list, + ['node_id', 'hostname', 'slice_ids', 'slice_ids_whitelist', 'site_id']) + + for node in nodes: + # check the slice whitelist on each node first + # allow users at site to add node to slice, ignoring whitelist + if node['slice_ids_whitelist'] and \ + slice['slice_id'] not in node['slice_ids_whitelist'] and \ + not set(self.caller['site_ids']).intersection([node['site_id']]): + raise PLCInvalidArgument, "%s is not allowed on %s (not on the whitelist)" % \ + (slice['name'], node['hostname']) + if slice['slice_id'] not in node['slice_ids']: + slice.add_node(node, commit = False) + + slice.sync() + + nodeids = [node['node_id'] for node in nodes] + self.event_objects = {'Node': nodeids, + 'Slice': [slice['slice_id']]} + self.message = 'Slice %d added to nodes %s' % (slice['slice_id'], nodeids) + + return 1 diff --git a/PLC/Methods/AddSliceToNodesWhitelist.py b/PLC/Methods/AddSliceToNodesWhitelist.py new file mode 100644 index 0000000..4dde439 --- /dev/null +++ 
b/PLC/Methods/AddSliceToNodesWhitelist.py @@ -0,0 +1,54 @@ +from PLC.Faults import * +from PLC.Method import Method +from PLC.Parameter import Parameter, Mixed +from PLC.Nodes import Node, Nodes +from PLC.Slices import Slice, Slices +from PLC.Auth import Auth + +class AddSliceToNodesWhitelist(Method): + """ + Adds the specified slice to the whitelist on the specified nodes. Nodes may be + either local or foreign nodes. + + If the slice is already associated with a node, no errors are + returned. + + Returns 1 if successful, faults otherwise. + """ + + roles = ['admin'] + + accepts = [ + Auth(), + Mixed(Slice.fields['slice_id'], + Slice.fields['name']), + [Mixed(Node.fields['node_id'], + Node.fields['hostname'])] + ] + + returns = Parameter(int, '1 if successful') + + def call(self, auth, slice_id_or_name, node_id_or_hostname_list): + # Get slice information + slices = Slices(self.api, [slice_id_or_name]) + if not slices: + raise PLCInvalidArgument, "No such slice" + slice = slices[0] + + if slice['peer_id'] is not None: + raise PLCInvalidArgument, "Not a local slice" + + # Get specified nodes, add them to the slice + nodes = Nodes(self.api, node_id_or_hostname_list) + for node in nodes: + if node['peer_id'] is not None: + raise PLCInvalidArgument, "%s not a local node" % node['hostname'] + if slice['slice_id'] not in node['slice_ids_whitelist']: + slice.add_to_node_whitelist(node, commit = False) + + slice.sync() + + self.event_objects = {'Node': [node['node_id'] for node in nodes], + 'Slice': [slice['slice_id']]} + + return 1 diff --git a/PLC/Methods/AddTagType.py b/PLC/Methods/AddTagType.py new file mode 100644 index 0000000..6432e05 --- /dev/null +++ b/PLC/Methods/AddTagType.py @@ -0,0 +1,42 @@ +# +# Thierry Parmentelat - INRIA +# + +from PLC.Faults import * +from PLC.Method import Method +from PLC.Parameter import Parameter, Mixed +from PLC.TagTypes import TagType, TagTypes +from PLC.Auth import Auth + +can_update = lambda (field, value): field in \ + 
['tagname', 'description', 'category'] + +class AddTagType(Method): + """ + Adds a new type of node tag. + Any fields specified are used, otherwise defaults are used. + + Returns the new node_tag_id (> 0) if successful, + faults otherwise. + """ + + roles = ['admin'] + + tag_type_fields = dict(filter(can_update, TagType.fields.items())) + + accepts = [ + Auth(), + tag_type_fields + ] + + returns = Parameter(int, 'New node_tag_id (> 0) if successful') + + + def call(self, auth, tag_type_fields): + tag_type_fields = dict(filter(can_update, tag_type_fields.items())) + tag_type = TagType(self.api, tag_type_fields) + tag_type.sync() + + self.object_ids = [tag_type['tag_type_id']] + + return tag_type['tag_type_id'] diff --git a/PLC/Methods/AuthCheck.py b/PLC/Methods/AuthCheck.py new file mode 100644 index 0000000..0a4c260 --- /dev/null +++ b/PLC/Methods/AuthCheck.py @@ -0,0 +1,16 @@ +from PLC.Method import Method +from PLC.Parameter import Parameter, Mixed +from PLC.Auth import Auth, BootAuth + +class AuthCheck(Method): + """ + Returns 1 if the user or node authenticated successfully, faults + otherwise. + """ + + roles = ['admin', 'pi', 'user', 'tech', 'node'] + accepts = [Auth()] + returns = Parameter(int, '1 if successful') + + def call(self, auth): + return 1 diff --git a/PLC/Methods/BindObjectToPeer.py b/PLC/Methods/BindObjectToPeer.py new file mode 100644 index 0000000..ea16536 --- /dev/null +++ b/PLC/Methods/BindObjectToPeer.py @@ -0,0 +1,72 @@ +from PLC.Method import Method +from PLC.Parameter import Parameter, Mixed +from PLC.Filter import Filter +from PLC.Auth import Auth +from PLC.Persons import Persons +from PLC.Sites import Sites +from PLC.Nodes import Nodes +from PLC.Slices import Slices +from PLC.Keys import Keys +from PLC.Peers import Peers +from PLC.Faults import * + +class BindObjectToPeer(Method): + """ + This method is a hopefully temporary hack to let the sfa correctly + attach the objects it creates to a remote peer object. 
This is + needed so that the sfa federation link can work in parallel with + RefreshPeer, as RefreshPeer depends on remote objects being + correctly marked. + + BindRemoteObjectToPeer is allowed to admins only. + """ + + roles = ['admin'] + + known_types = ['site','person','slice','node','key'] + types_doc = ",".join(["'%s'"%type for type in known_types]) + + accepts = [ + Auth(), + Parameter(str,"Object type, among "+types_doc), + Parameter(int,"object_id"), + Parameter(str,"peer shortname"), + Parameter(int,"remote object_id, set to 0 if unknown"), + ] + + returns = Parameter (int, '1 if successful') + + def locate_object (self, object_type, object_id): + # locate e.g. the Nodes symbol + class_obj = globals()[object_type.capitalize()+'s'] + id_name=object_type+'_id' + # invoke e.g. Nodes ({'node_id':node_id}) + objs=class_obj(self.api,{id_name:object_id}) + if len(objs) != 1: + raise PLCInvalidArgument,"Cannot locate object, type=%s id=%d"%\ + (type,object_id) + return objs[0] + + + def call(self, auth, object_type, object_id, shortname,remote_object_id): + + object_type = object_type.lower() + if object_type not in self.known_types: + raise PLCInvalidArgument, 'Unrecognized object type %s'%object_type + + peers=Peers(self.api,{'shortname':shortname.upper()}) + if len(peers) !=1: + raise PLCInvalidArgument, 'No such peer with shortname %s'%shortname + + peer=peers[0] + object = self.locate_object (object_type, object_id) + + # There is no need to continue if the object is already bound to this peer + if object['peer_id'] in [peer['peer_id']]: + return 1 + + adder_name = 'add_'+object_type + add_function = getattr(type(peer),adder_name) + add_function(peer,object,remote_object_id) + + return 1 diff --git a/PLC/Methods/BlacklistKey.py b/PLC/Methods/BlacklistKey.py new file mode 100644 index 0000000..2afd664 --- /dev/null +++ b/PLC/Methods/BlacklistKey.py @@ -0,0 +1,42 @@ +from PLC.Faults import * +from PLC.Method import Method +from PLC.Parameter import Parameter, 
Mixed +from PLC.Keys import Key, Keys +from PLC.Auth import Auth + +class BlacklistKey(Method): + """ + Blacklists a key, disassociating it and all others identical to it + from all accounts and preventing it from ever being added again. + + WARNING: Identical keys associated with other accounts with also + be blacklisted. + + Returns 1 if successful, faults otherwise. + """ + + roles = ['admin'] + + accepts = [ + Auth(), + Key.fields['key_id'], + ] + + returns = Parameter(int, '1 if successful') + + def call(self, auth, key_id): + # Get associated key details + keys = Keys(self.api, [key_id]) + if not keys: + raise PLCInvalidArgument, "No such key" + key = keys[0] + + # N.B.: Can blacklist any key, even foreign ones + + key.blacklist() + + # Logging variables + self.event_objects = {'Key': [key['key_id']]} + self.message = 'Key %d blacklisted' % key['key_id'] + + return 1 diff --git a/PLC/Methods/BootCheckAuthentication.py b/PLC/Methods/BootCheckAuthentication.py new file mode 100644 index 0000000..ea9b098 --- /dev/null +++ b/PLC/Methods/BootCheckAuthentication.py @@ -0,0 +1,8 @@ +from PLC.Methods.AuthCheck import AuthCheck + +class BootCheckAuthentication(AuthCheck): + """ + Deprecated. See AuthCheck. + """ + + status = "deprecated" diff --git a/PLC/Methods/BootGetNodeDetails.py b/PLC/Methods/BootGetNodeDetails.py new file mode 100644 index 0000000..afc7b0c --- /dev/null +++ b/PLC/Methods/BootGetNodeDetails.py @@ -0,0 +1,54 @@ +from PLC.Method import Method +from PLC.Parameter import Parameter, Mixed +from PLC.Auth import BootAuth +from PLC.Nodes import Node, Nodes +from PLC.Interfaces import Interface, Interfaces +from PLC.Sessions import Session, Sessions + +class BootGetNodeDetails(Method): + """ + Returns a set of details about the calling node, including a new + node session value. 
+ """ + + roles = ['node'] + + accepts = [BootAuth()] + + returns = { + 'hostname': Node.fields['hostname'], + 'boot_state': Node.fields['boot_state'], + 'model': Node.fields['model'], + 'networks': [Interface.fields], + 'session': Session.fields['session_id'], + } + + def call(self, auth): + details = { + 'hostname': self.caller['hostname'], + 'boot_state': self.caller['boot_state'], + # XXX Boot Manager cannot unmarshal None + 'model': self.caller['model'] or "", + } + + # Generate a new session value + session = Session(self.api) + session.sync(commit = False) + session.add_node(self.caller, commit = True) + + details['session'] = session['session_id'] + + if self.caller['interface_ids']: + details['networks'] = Interfaces(self.api, self.caller['interface_ids']) + # XXX Boot Manager cannot unmarshal None + for network in details['networks']: + for field in network: + if network[field] is None: + if isinstance(network[field], (int, long)): + network[field] = -1 + else: + network[field] = "" + + self.message = "Node request boot_state (%s) and networks" % \ + (details['boot_state']) + return details diff --git a/PLC/Methods/BootNotifyOwners.py b/PLC/Methods/BootNotifyOwners.py new file mode 100644 index 0000000..7458b9d --- /dev/null +++ b/PLC/Methods/BootNotifyOwners.py @@ -0,0 +1,32 @@ +from PLC.Method import Method +from PLC.Parameter import Parameter, Mixed +from PLC.Auth import Auth, BootAuth, SessionAuth +from PLC.Nodes import Node, Nodes +from PLC.Messages import Message, Messages + +from PLC.Boot import notify_owners + +class BootNotifyOwners(Method): + """ + Notify the owners of the node, and/or support about an event that + happened on the machine. + + Returns 1 if successful. 
+ """ + + roles = ['node'] + + accepts = [ + Auth(), + Message.fields['message_id'], + Parameter(int, "Notify PIs"), + Parameter(int, "Notify technical contacts"), + Parameter(int, "Notify support") + ] + + returns = Parameter(int, '1 if successful') + + def call(self, auth, message_id, include_pis, include_techs, include_support): + assert isinstance(self.caller, Node) + notify_owners(self, self.caller, message_id, include_pis, include_techs, include_support) + return 1 diff --git a/PLC/Methods/BootUpdateNode.py b/PLC/Methods/BootUpdateNode.py new file mode 100644 index 0000000..d7c85b3 --- /dev/null +++ b/PLC/Methods/BootUpdateNode.py @@ -0,0 +1,115 @@ +import time + +from PLC.Faults import * +from PLC.Method import Method +from PLC.Parameter import Parameter, Mixed +from PLC.Auth import Auth, BootAuth, SessionAuth +from PLC.Nodes import Node, Nodes +from PLC.Interfaces import Interface, Interfaces +from PLC.Timestamp import * + +can_update = lambda (field, value): field in \ + ['method', 'mac', 'gateway', 'network', + 'broadcast', 'netmask', 'dns1', 'dns2'] + +class BootUpdateNode(Method): + """ + Allows the calling node to update its own record. Only the primary + network can be updated, and the node IP cannot be changed. + + Returns 1 if updated successfully. 
+ """ + + roles = ['node'] + + interface_fields = dict(filter(can_update, Interface.fields.items())) + + accepts = [ + Mixed(BootAuth(), SessionAuth()), + {'boot_state': Node.fields['boot_state'], + 'primary_network': interface_fields, + ### BEWARE that the expected formerly did not match the native Node field + # support both for now + 'ssh_rsa_key': Node.fields['ssh_rsa_key'], + 'ssh_host_key': Node.fields['ssh_rsa_key'], + }] + + returns = Parameter(int, '1 if successful') + + def call(self, auth, node_fields): + + if not isinstance(self.caller, Node): + raise PLCInvalidArgument,"Caller is expected to be a node" + + node = self.caller + + # log this event only if a change occured + # otherwise the db gets spammed with meaningless entries + changed_fields = [] + # Update node state + if node_fields.has_key('boot_state'): + if node['boot_state'] != node_fields['boot_state']: changed_fields.append('boot_state') + node['boot_state'] = node_fields['boot_state'] + ### for legacy BootManager + if node_fields.has_key('ssh_host_key'): + if node['ssh_rsa_key'] != node_fields['ssh_host_key']: changed_fields.append('ssh_rsa_key') + node['ssh_rsa_key'] = node_fields['ssh_host_key'] + if node_fields.has_key('ssh_rsa_key'): + if node['ssh_rsa_key'] != node_fields['ssh_rsa_key']: changed_fields.append('ssh_rsa_key') + node['ssh_rsa_key'] = node_fields['ssh_rsa_key'] + + # Update primary interface state + if node_fields.has_key('primary_network'): + primary_network = node_fields['primary_network'] + + if 'interface_id' not in primary_network: + raise PLCInvalidArgument, "Interface not specified" + if primary_network['interface_id'] not in node['interface_ids']: + raise PLCInvalidArgument, "Interface not associated with calling node" + + interfaces = Interfaces(self.api, [primary_network['interface_id']]) + if not interfaces: + raise PLCInvalidArgument, "No such interface %r"%interface_id + interface = interfaces[0] + + if not interface['is_primary']: + raise PLCInvalidArgument, 
"Not the primary interface on record" + + interface_fields = dict(filter(can_update, primary_network.items())) + for field in interface_fields: + if interface[field] != primary_network[field] : changed_fields.append('Interface.'+field) + interface.update(interface_fields) + interface.sync(commit = False) + + current_time = int(time.time()) + + # ONLY UPDATE ONCE when the boot_state flag and ssh_rsa_key flag are NOT passed + if not node_fields.has_key('boot_state') and not node_fields.has_key('ssh_rsa_key'): + + # record times spent on and off line by comparing last_contact with previous value of last_boot + if node['last_boot'] and node['last_contact']: + # last_boot is when the machine last called this API function. + # last_contact is the last time NM or RLA pinged the API. + node['last_time_spent_online'] = node['last_contact'] - node['last_boot'] + node['last_time_spent_offline'] = current_time - Timestamp.cast_long(node['last_contact']) + + node.update_readonly_int('last_time_spent_online') + node.update_readonly_int('last_time_spent_offline') + changed_fields.append('last_time_spent_online') + changed_fields.append('last_time_spent_offline') + + # indicate that node has booted & contacted PLC. 
+ node.update_last_contact() + node.update_last_boot() + + # if last_pcu_reboot is within 20 minutes of current_time, accept that the PCU is responsible + if node['last_pcu_reboot'] and Timestamp.cast_long(node['last_pcu_reboot']) >= current_time - 60*20: + node.update_last_pcu_confirmation(commit=False) + + node.sync(commit = True) + + if changed_fields: + self.message = "Boot updated: %s" % ", ".join(changed_fields) + self.event_objects = { 'Node' : [node['node_id']] } + + return 1 diff --git a/PLC/Methods/DeleteAddress.py b/PLC/Methods/DeleteAddress.py new file mode 100644 index 0000000..406965b --- /dev/null +++ b/PLC/Methods/DeleteAddress.py @@ -0,0 +1,43 @@ +from PLC.Faults import * +from PLC.Method import Method +from PLC.Parameter import Parameter, Mixed +from PLC.Addresses import Address, Addresses +from PLC.Auth import Auth + +class DeleteAddress(Method): + """ + Deletes an address. + + PIs may only delete addresses from their own sites. + + Returns 1 if successful, faults otherwise. 
+ """ + + roles = ['admin', 'pi'] + + accepts = [ + Auth(), + Address.fields['address_id'], + ] + + returns = Parameter(int, '1 if successful') + + + def call(self, auth, address_id): + # Get associated address details + addresses = Addresses(self.api, address_id) + if not addresses: + raise PLCInvalidArgument, "No such address" + address = addresses[0] + + if 'admin' not in self.caller['roles']: + if address['site_id'] not in self.caller['site_ids']: + raise PLCPermissionDenied, "Address must be associated with one of your sites" + + address.delete() + + # Logging variables + self.event_objects = {'Address': [address['address_id']]} + self.message = 'Address %d deleted' % address['address_id'] + + return 1 diff --git a/PLC/Methods/DeleteAddressType.py b/PLC/Methods/DeleteAddressType.py new file mode 100644 index 0000000..ca6cadb --- /dev/null +++ b/PLC/Methods/DeleteAddressType.py @@ -0,0 +1,33 @@ +from PLC.Faults import * +from PLC.Method import Method +from PLC.Parameter import Parameter, Mixed +from PLC.AddressTypes import AddressType, AddressTypes +from PLC.Auth import Auth + +class DeleteAddressType(Method): + """ + Deletes an address type. + + Returns 1 if successful, faults otherwise. 
+ """ + + roles = ['admin'] + + accepts = [ + Auth(), + Mixed(AddressType.fields['address_type_id'], + AddressType.fields['name']) + ] + + returns = Parameter(int, '1 if successful') + + + def call(self, auth, address_type_id_or_name): + address_types = AddressTypes(self.api, [address_type_id_or_name]) + if not address_types: + raise PLCInvalidArgument, "No such address type" + address_type = address_types[0] + address_type.delete() + self.event_objects = {'AddressType': [address_type['address_type_id']]} + + return 1 diff --git a/PLC/Methods/DeleteAddressTypeFromAddress.py b/PLC/Methods/DeleteAddressTypeFromAddress.py new file mode 100644 index 0000000..e12a5df --- /dev/null +++ b/PLC/Methods/DeleteAddressTypeFromAddress.py @@ -0,0 +1,48 @@ +from PLC.Faults import * +from PLC.Method import Method +from PLC.Parameter import Parameter, Mixed +from PLC.AddressTypes import AddressType, AddressTypes +from PLC.Addresses import Address, Addresses +from PLC.Auth import Auth + +class DeleteAddressTypeFromAddress(Method): + """ + Deletes an address type from the specified address. + + PIs may only update addresses of their own sites. + + Returns 1 if successful, faults otherwise. 
+ """ + + roles = ['admin', 'pi'] + + accepts = [ + Auth(), + Mixed(AddressType.fields['address_type_id'], + AddressType.fields['name']), + Address.fields['address_id'] + ] + + returns = Parameter(int, '1 if successful') + + + def call(self, auth, address_type_id_or_name, address_id): + address_types = AddressTypes(self.api, [address_type_id_or_name]) + if not address_types: + raise PLCInvalidArgument, "No such address type" + address_type = address_types[0] + + addresses = Addresses(self.api, [address_id]) + if not addresses: + raise PLCInvalidArgument, "No such address" + address = addresses[0] + + if 'admin' not in self.caller['roles']: + if address['site_id'] not in self.caller['site_ids']: + raise PLCPermissionDenied, "Address must be associated with one of your sites" + + address.remove_address_type(address_type) + self.event_objects = {'Address' : [address['address_id']], + 'AddressType': [address_type['address_type_id']]} + + return 1 diff --git a/PLC/Methods/DeleteAllPeerEntries.py b/PLC/Methods/DeleteAllPeerEntries.py new file mode 100644 index 0000000..5499ba8 --- /dev/null +++ b/PLC/Methods/DeleteAllPeerEntries.py @@ -0,0 +1,105 @@ +# +# Thierry Parmentelat - INRIA +# +# utility to clear all entries from a peer +# initially duplicated from RefreshPeer +# + +import sys + +from PLC.Logger import logger +from PLC.Faults import * +from PLC.Method import Method +from PLC.Parameter import Parameter, Mixed +from PLC.Auth import Auth + +from PLC.Peers import Peer, Peers +from PLC.Sites import Site, Sites +from PLC.Persons import Person, Persons +from PLC.KeyTypes import KeyType, KeyTypes +from PLC.Keys import Key, Keys +from PLC.BootStates import BootState, BootStates +from PLC.Nodes import Node, Nodes +from PLC.SliceInstantiations import SliceInstantiations +from PLC.Slices import Slice, Slices +from PLC.Roles import Role, Roles + +commit_mode = True + +dry_run = False +# debug +#dry_run = True + +########## helpers + +def message(to_print=None, 
verbose_only=False): + if verbose_only and not verbose: + return + logger.info(to_print) + + +def message_verbose(to_print=None, header='VERBOSE'): + message("{}> {}".format(header, to_print), verbose_only=True) + + +class DeleteAllPeerEntries(Method): + """ + This method is designed for situations where a federation link + is misbehaving and one wants to restart from a clean slate. + It is *not* designed for regular operations, but as a repairing + tool only. + + As the name suggests, clear all local entries that are marked as + belonging to peer peer_id - or peername + if verbose is True said entries are only printed + + Note that remote/foreign entities cannot be deleted + normally with the API + + Returns 1 if successful, faults otherwise. + """ + + roles = ['admin'] + + accepts = [ + Auth(), + Mixed(Peer.fields['peer_id'], + Peer.fields['peername']), + ] + + returns = Parameter(int, "1 if successful") + + def call(self, auth, peer_id_or_peername): + + peer = Peers(self.api, [peer_id_or_peername])[0] + peer_id = peer['peer_id'] + peername = peer['peername'] + + logger.info("DeleteAllPeerEntries on peer {} = {}" + .format(peername, peer_id)) + for singular, plural in ( + (Slice, Slices), + (Key, Keys), + (Person, Persons), + (Node, Nodes), + (Site, Sites)): + classname = singular.__name__ + objs = plural(self.api, {'peer_id': peer_id}) + print("Found {len} {classname}s from peer {peername}" + .format(len=len(objs), + classname=classname, + peername=peername)) + if dry_run: + print("dry-run mode: skipping actual deletion") + else: + print("Deleting {classname}s".format(classname=classname)) + for obj in objs: + print '.', + sys.stdout.flush() + obj.delete(commit=commit_mode) + print + + # Update peer itself and commit + peer.sync(commit=True) + + return 1 diff --git a/PLC/Methods/DeleteBootState.py b/PLC/Methods/DeleteBootState.py new file mode 100644 index 0000000..1bea3db --- /dev/null +++ b/PLC/Methods/DeleteBootState.py @@ -0,0 +1,35 @@ +from PLC.Faults 
import * +from PLC.Method import Method +from PLC.Parameter import Parameter, Mixed +from PLC.BootStates import BootState, BootStates +from PLC.Auth import Auth + +class DeleteBootState(Method): + """ + Deletes a node boot state. + + WARNING: This will cause the deletion of all nodes in this boot + state. + + Returns 1 if successful, faults otherwise. + """ + + roles = ['admin'] + + accepts = [ + Auth(), + BootState.fields['boot_state'] + ] + + returns = Parameter(int, '1 if successful') + + + def call(self, auth, name): + boot_states = BootStates(self.api, [name]) + if not boot_states: + raise PLCInvalidArgument, "No such boot state" + boot_state = boot_states[0] + + boot_state.delete() + + return 1 diff --git a/PLC/Methods/DeleteConfFile.py b/PLC/Methods/DeleteConfFile.py new file mode 100644 index 0000000..360ccfb --- /dev/null +++ b/PLC/Methods/DeleteConfFile.py @@ -0,0 +1,33 @@ +from PLC.Faults import * +from PLC.Method import Method +from PLC.Parameter import Parameter, Mixed +from PLC.ConfFiles import ConfFile, ConfFiles +from PLC.Auth import Auth + +class DeleteConfFile(Method): + """ + Returns an array of structs containing details about node + configuration files. If conf_file_ids is specified, only the + specified configuration files will be queried. 
+ """ + + roles = ['admin'] + + accepts = [ + Auth(), + ConfFile.fields['conf_file_id'] + ] + + returns = Parameter(int, '1 if successful') + + + def call(self, auth, conf_file_id): + conf_files = ConfFiles(self.api, [conf_file_id]) + if not conf_files: + raise PLCInvalidArgument, "No such configuration file" + + conf_file = conf_files[0] + conf_file.delete() + self.event_objects = {'ConfFile': [conf_file['conf_file_id']]} + + return 1 diff --git a/PLC/Methods/DeleteConfFileFromNode.py b/PLC/Methods/DeleteConfFileFromNode.py new file mode 100644 index 0000000..53c808c --- /dev/null +++ b/PLC/Methods/DeleteConfFileFromNode.py @@ -0,0 +1,48 @@ +from PLC.Faults import * +from PLC.Method import Method +from PLC.Parameter import Parameter, Mixed +from PLC.ConfFiles import ConfFile, ConfFiles +from PLC.Nodes import Node, Nodes +from PLC.Auth import Auth + +class DeleteConfFileFromNode(Method): + """ + Deletes a configuration file from the specified node. If the node + is not linked to the configuration file, no errors are returned. + + Returns 1 if successful, faults otherwise. 
+ """ + + roles = ['admin'] + + accepts = [ + Auth(), + ConfFile.fields['conf_file_id'], + Mixed(Node.fields['node_id'], + Node.fields['hostname']) + ] + + returns = Parameter(int, '1 if successful') + + def call(self, auth, conf_file_id, node_id_or_hostname): + # Get configuration file + conf_files = ConfFiles(self.api, [conf_file_id]) + if not conf_files: + raise PLCInvalidArgument, "No such configuration file" + conf_file = conf_files[0] + + # Get node + nodes = Nodes(self.api, [node_id_or_hostname]) + if not nodes: + raise PLCInvalidArgument, "No such node" + node = nodes[0] + + # Link configuration file to node + if node['node_id'] in conf_file['node_ids']: + conf_file.remove_node(node) + + # Log affected objects + self.event_objects = {'ConfFile': [conf_file_id], + 'Node': [node['node_id']]} + + return 1 diff --git a/PLC/Methods/DeleteConfFileFromNodeGroup.py b/PLC/Methods/DeleteConfFileFromNodeGroup.py new file mode 100644 index 0000000..243a1a1 --- /dev/null +++ b/PLC/Methods/DeleteConfFileFromNodeGroup.py @@ -0,0 +1,49 @@ +from PLC.Faults import * +from PLC.Method import Method +from PLC.Parameter import Parameter, Mixed +from PLC.ConfFiles import ConfFile, ConfFiles +from PLC.NodeGroups import NodeGroup, NodeGroups +from PLC.Auth import Auth + +class DeleteConfFileFromNodeGroup(Method): + """ + Deletes a configuration file from the specified nodegroup. If the nodegroup + is not linked to the configuration file, no errors are returned. + + Returns 1 if successful, faults otherwise. 
+ """ + + roles = ['admin'] + + accepts = [ + Auth(), + ConfFile.fields['conf_file_id'], + Mixed(NodeGroup.fields['nodegroup_id'], + NodeGroup.fields['groupname']) + ] + + returns = Parameter(int, '1 if successful') + + + def call(self, auth, conf_file_id, nodegroup_id_or_name): + # Get configuration file + conf_files = ConfFiles(self.api, [conf_file_id]) + if not conf_files: + raise PLCInvalidArgument, "No such configuration file" + conf_file = conf_files[0] + + # Get nodegroup + nodegroups = NodeGroups(self.api, [nodegroup_id_or_name]) + if not nodegroups: + raise PLCInvalidArgument, "No such nodegroup" + nodegroup = nodegroups[0] + + # Link configuration file to nodegroup + if nodegroup['nodegroup_id'] in conf_file['nodegroup_ids']: + conf_file.remove_nodegroup(nodegroup) + + # Log affected objects + self.event_objects = {'ConfFile': [conf_file_id], + 'NodeGroup': [nodegroup['nodegroup_id']]} + + return 1 diff --git a/PLC/Methods/DeleteIlink.py b/PLC/Methods/DeleteIlink.py new file mode 100644 index 0000000..0d07dfe --- /dev/null +++ b/PLC/Methods/DeleteIlink.py @@ -0,0 +1,68 @@ +# +# Thierry Parmentelat - INRIA +# + +from PLC.Faults import * +from PLC.Method import Method +from PLC.Parameter import Parameter, Mixed +from PLC.Auth import Auth + +from PLC.Ilinks import Ilink, Ilinks +from PLC.Interfaces import Interface, Interfaces +from PLC.Nodes import Node, Nodes +from PLC.Sites import Site, Sites +from PLC.TagTypes import TagType, TagTypes + +from PLC.AuthorizeHelpers import AuthorizeHelpers + +class DeleteIlink(Method): + """ + Deletes the specified ilink + + Attributes may require the caller to have a particular + role in order to be deleted, depending on the related tag type. + Admins may delete attributes of any slice or sliver. + + Returns 1 if successful, faults otherwise. 
+ """ + + roles = ['admin', 'pi', 'user'] + + accepts = [ + Auth(), + Ilink.fields['ilink_id'] + ] + + returns = Parameter(int, '1 if successful') + + object_type = 'Interface' + + + def call(self, auth, ilink_id): + ilinks = Ilinks(self.api, [ilink_id]) + if not ilinks: + raise PLCInvalidArgument, "No such ilink %r"%ilink_id + ilink = ilinks[0] + + src_if=Interfaces(self.api,ilink['src_interface_id'])[0] + dst_if=Interfaces(self.api,ilink['dst_interface_id'])[0] + + tag_type_id = ilink['tag_type_id'] + tag_type = TagTypes (self.api,[tag_type_id])[0] + + # check authorizations + if 'admin' in self.caller['roles']: + pass + elif not AuthorizeHelpers.caller_may_access_tag_type (self.api, self.caller, tag_type): + raise PLCPermissionDenied, "%s, forbidden tag %s"%(self.name,tag_type['tagname']) + elif AuthorizeHelpers.interface_belongs_to_person (self.api, src_if, self.caller): + pass + elif src_if_id != dst_if_id and AuthorizeHelpers.interface_belongs_to_person (self.api, dst_if, self.caller): + pass + else: + raise PLCPermissionDenied, "%s: you must own either the src or dst interface"%self.name + + ilink.delete() + self.object_ids = [ilink['src_interface_id'],ilink['dst_interface_id']] + + return 1 diff --git a/PLC/Methods/DeleteInitScript.py b/PLC/Methods/DeleteInitScript.py new file mode 100644 index 0000000..28f6558 --- /dev/null +++ b/PLC/Methods/DeleteInitScript.py @@ -0,0 +1,34 @@ +from PLC.Faults import * +from PLC.Method import Method +from PLC.Parameter import Parameter, Mixed +from PLC.InitScripts import InitScript, InitScripts +from PLC.Auth import Auth + +class DeleteInitScript(Method): + """ + Deletes an existing initscript. + + Returns 1 if successful, faults otherwise. 
+ """ + + roles = ['admin'] + + accepts = [ + Auth(), + Mixed(InitScript.fields['initscript_id'], + InitScript.fields['name']), + ] + + returns = Parameter(int, '1 if successful') + + + def call(self, auth, initscript_id_or_name): + initscripts = InitScripts(self.api, [initscript_id_or_name]) + if not initscripts: + raise PLCInvalidArgument, "No such initscript" + + initscript = initscripts[0] + initscript.delete() + self.event_objects = {'InitScript': [initscript['initscript_id']]} + + return 1 diff --git a/PLC/Methods/DeleteInterface.py b/PLC/Methods/DeleteInterface.py new file mode 100644 index 0000000..229a0d7 --- /dev/null +++ b/PLC/Methods/DeleteInterface.py @@ -0,0 +1,57 @@ +from PLC.Faults import * +from PLC.Method import Method +from PLC.Parameter import Parameter, Mixed +from PLC.Auth import Auth +from PLC.Nodes import Node, Nodes +from PLC.Interfaces import Interface, Interfaces + +class DeleteInterface(Method): + """ + Deletes an existing interface. + + Admins may delete any interface. PIs and techs may only delete + interface interfaces associated with nodes at their sites. + + Returns 1 if successful, faults otherwise. + """ + + roles = ['admin', 'pi', 'tech'] + + accepts = [ + Auth(), + Interface.fields['interface_id'] + ] + + returns = Parameter(int, '1 if successful') + + + def call(self, auth, interface_id): + + # Get interface information + interfaces = Interfaces(self.api, [interface_id]) + if not interfaces: + raise PLCInvalidArgument, "No such interface %r"%interface_id + interface = interfaces[0] + + # Get node information + nodes = Nodes(self.api, [interface['node_id']]) + if not nodes: + raise PLCInvalidArgument, "No such node %r"%node_id + node = nodes[0] + + # Authenticated functino + assert self.caller is not None + + # If we are not an admin, make sure that the caller is a + # member of the site at which the node is located. 
+ if 'admin' not in self.caller['roles']: + if node['site_id'] not in self.caller['site_ids']: + raise PLCPermissionDenied, "Not allowed to delete this interface" + + interface.delete() + + # Logging variables + self.event_objects = {'Interface': [interface['interface_id']]} + self.message = "Interface %d deleted" % interface['interface_id'] + + return 1 diff --git a/PLC/Methods/DeleteInterfaceTag.py b/PLC/Methods/DeleteInterfaceTag.py new file mode 100644 index 0000000..5ef9635 --- /dev/null +++ b/PLC/Methods/DeleteInterfaceTag.py @@ -0,0 +1,58 @@ +# +# Thierry Parmentelat - INRIA +# +from PLC.Faults import * +from PLC.Method import Method +from PLC.Parameter import Parameter, Mixed +from PLC.Auth import Auth + +from PLC.Sites import Sites +from PLC.Nodes import Nodes +from PLC.Interfaces import Interface, Interfaces +from PLC.TagTypes import TagType, TagTypes +from PLC.InterfaceTags import InterfaceTag, InterfaceTags + +# need to import so the core classes get decorated with caller_may_write_tag +from PLC.AuthorizeHelpers import AuthorizeHelpers + +class DeleteInterfaceTag(Method): + """ + Deletes the specified interface setting + + Admins have full access. Non-admins need to + (1) have at least one of the roles attached to the tagtype, + and (2) belong in the same site as the tagged subject. + + Returns 1 if successful, faults otherwise. 
+ """ + + roles = ['admin', 'pi', 'user', 'tech'] + + accepts = [ + Auth(), + InterfaceTag.fields['interface_tag_id'] + ] + + returns = Parameter(int, '1 if successful') + + def call(self, auth, interface_tag_id): + interface_tags = InterfaceTags(self.api, [interface_tag_id]) + if not interface_tags: + raise PLCInvalidArgument, "No such interface tag %r"%interface_tag_id + interface_tag = interface_tags[0] + + tag_type_id = interface_tag['tag_type_id'] + tag_type = TagTypes (self.api,[tag_type_id])[0] + + interfaces = Interfaces (self.api, interface_tag['interface_id']) + if not interfaces: + raise PLCInvalidArgument, "No such interface %d"%interface_tag['interface_id'] + interface=interfaces[0] + + # check authorizations + interface.caller_may_write_tag(self.api,self.caller,tag_type) + + interface_tag.delete() + self.object_ids = [interface_tag['interface_tag_id']] + + return 1 diff --git a/PLC/Methods/DeleteKey.py b/PLC/Methods/DeleteKey.py new file mode 100644 index 0000000..51c40d4 --- /dev/null +++ b/PLC/Methods/DeleteKey.py @@ -0,0 +1,46 @@ +from PLC.Faults import * +from PLC.Method import Method +from PLC.Parameter import Parameter, Mixed +from PLC.Keys import Key, Keys +from PLC.Auth import Auth + +class DeleteKey(Method): + """ + Deletes a key. + + Non-admins may only delete their own keys. + + Returns 1 if successful, faults otherwise. 
+ """ + + roles = ['admin', 'pi', 'tech', 'user'] + + accepts = [ + Auth(), + Key.fields['key_id'], + ] + + returns = Parameter(int, '1 if successful') + + + def call(self, auth, key_id): + # Get associated key details + keys = Keys(self.api, [key_id]) + if not keys: + raise PLCInvalidArgument, "No such key" + key = keys[0] + + if key['peer_id'] is not None: + raise PLCInvalidArgument, "Not a local key" + + if 'admin' not in self.caller['roles']: + if key['key_id'] not in self.caller['key_ids']: + raise PLCPermissionDenied, "Key must be associated with your account" + + key.delete() + + # Logging variables + self.event_objects = {'Key': [key['key_id']]} + self.message = 'Key %d deleted' % key['key_id'] + + return 1 diff --git a/PLC/Methods/DeleteKeyType.py b/PLC/Methods/DeleteKeyType.py new file mode 100644 index 0000000..34d4339 --- /dev/null +++ b/PLC/Methods/DeleteKeyType.py @@ -0,0 +1,34 @@ +from PLC.Faults import * +from PLC.Method import Method +from PLC.Parameter import Parameter, Mixed +from PLC.KeyTypes import KeyType, KeyTypes +from PLC.Auth import Auth + +class DeleteKeyType(Method): + """ + Deletes a key type. + + WARNING: This will cause the deletion of all keys of this type. + + Returns 1 if successful, faults otherwise. 
+ """ + + roles = ['admin'] + + accepts = [ + Auth(), + KeyType.fields['key_type'] + ] + + returns = Parameter(int, '1 if successful') + + + def call(self, auth, name): + key_types = KeyTypes(self.api, [name]) + if not key_types: + raise PLCInvalidArgument, "No such key type" + key_type = key_types[0] + + key_type.delete() + + return 1 diff --git a/PLC/Methods/DeleteLeases.py b/PLC/Methods/DeleteLeases.py new file mode 100644 index 0000000..fef501b --- /dev/null +++ b/PLC/Methods/DeleteLeases.py @@ -0,0 +1,60 @@ +from PLC.Faults import * +from PLC.Method import Method +from PLC.Parameter import Parameter, Mixed +from PLC.Auth import Auth +from PLC.Leases import Lease, Leases +from PLC.Slices import Slice, Slices + +class DeleteLeases(Method): + """ + Deletes a lease. + + Users may only delete leases attached to their slices. + PIs may delete any of the leases for slices at their sites, or any + slices of which they are members. Admins may delete any lease. + + Returns 1 if successful, faults otherwise. 
+ """ + + roles = ['admin', 'pi', 'tech', 'user'] + + accepts = [ + Auth(), + Mixed(Lease.fields['lease_id'],[ Lease.fields['lease_id']]), + ] + + returns = Parameter(int, '1 if successful') + + + def call(self, auth, lease_ids): + # Get associated lease details + leases = Leases(self.api, lease_ids) + if len(leases) != len(lease_ids): + raise PLCInvalidArgument, "Could not find all leases %r"%lease_ids + + # fetch related slices + slices = Slices(self.api, [ lease['slice_id'] for lease in leases],['slice_id','person_ids']) + # create hash on slice_id + slice_map = dict ( [ (slice['slice_id'],slice) for slice in slices ] ) + + lease_ids=[lease['lease_id'] for lease in leases] + for lease in leases: + if 'admin' not in self.caller['roles']: + slice=slice_map[lease['slice_id']] + # check slices only once + if not slice.has_key('verified'): + if self.caller['person_id'] in slice['person_ids']: + pass + elif 'pi' not in self.caller['roles']: + raise PLCPermissionDenied, "Not a member of slice %r"%slice['name'] + elif slice['site_id'] not in self.caller['site_ids']: + raise PLCPermissionDenied, "Slice %r not associated with any of your sites"%slice['name'] + slice['verified']=True + + lease.delete() + + # Logging variables + self.event_objects = {'Lease': lease_ids } + self.message = 'Leases %r deleted' % lease_ids + + return 1 diff --git a/PLC/Methods/DeleteMessage.py b/PLC/Methods/DeleteMessage.py new file mode 100644 index 0000000..daf7963 --- /dev/null +++ b/PLC/Methods/DeleteMessage.py @@ -0,0 +1,34 @@ +from PLC.Faults import * +from PLC.Method import Method +from PLC.Parameter import Parameter, Mixed +from PLC.Messages import Message, Messages +from PLC.Auth import Auth + +class DeleteMessage(Method): + """ + Deletes a message template. + + Returns 1 if successful, faults otherwise. 
+ """ + + roles = ['admin'] + + accepts = [ + Auth(), + Message.fields['message_id'], + ] + + returns = Parameter(int, '1 if successful') + + + def call(self, auth, message_id): + # Get message information + messages = Messages(self.api, [message_id]) + if not messages: + raise PLCInvalidArgument, "No such message" + message = messages[0] + + message.delete() + self.event_objects = {'Message': [message['message_id']]} + + return 1 diff --git a/PLC/Methods/DeleteNetworkMethod.py b/PLC/Methods/DeleteNetworkMethod.py new file mode 100644 index 0000000..b40442c --- /dev/null +++ b/PLC/Methods/DeleteNetworkMethod.py @@ -0,0 +1,35 @@ +from PLC.Faults import * +from PLC.Method import Method +from PLC.Parameter import Parameter, Mixed +from PLC.NetworkMethods import NetworkMethod, NetworkMethods +from PLC.Auth import Auth + +class DeleteNetworkMethod(Method): + """ + Deletes a network method. + + WARNING: This will cause the deletion of all network interfaces + that use this method. + + Returns 1 if successful, faults otherwise. + """ + + roles = ['admin'] + + accepts = [ + Auth(), + NetworkMethod.fields['method'] + ] + + returns = Parameter(int, '1 if successful') + + + def call(self, auth, name): + network_methods = NetworkMethods(self.api, [name]) + if not network_methods: + raise PLCInvalidArgument, "No such network method" + network_method = network_methods[0] + + network_method.delete() + + return 1 diff --git a/PLC/Methods/DeleteNetworkType.py b/PLC/Methods/DeleteNetworkType.py new file mode 100644 index 0000000..1a0539e --- /dev/null +++ b/PLC/Methods/DeleteNetworkType.py @@ -0,0 +1,35 @@ +from PLC.Faults import * +from PLC.Method import Method +from PLC.Parameter import Parameter, Mixed +from PLC.NetworkTypes import NetworkType, NetworkTypes +from PLC.Auth import Auth + +class DeleteNetworkType(Method): + """ + Deletes a network type. + + WARNING: This will cause the deletion of all network interfaces + that use this type. 
+ + Returns 1 if successful, faults otherwise. + """ + + roles = ['admin'] + + accepts = [ + Auth(), + NetworkType.fields['type'] + ] + + returns = Parameter(int, '1 if successful') + + + def call(self, auth, name): + network_types = NetworkTypes(self.api, [name]) + if not network_types: + raise PLCInvalidArgument, "No such network type" + network_type = network_types[0] + + network_type.delete() + + return 1 diff --git a/PLC/Methods/DeleteNode.py b/PLC/Methods/DeleteNode.py new file mode 100644 index 0000000..a819b24 --- /dev/null +++ b/PLC/Methods/DeleteNode.py @@ -0,0 +1,55 @@ +from PLC.Faults import * +from PLC.Method import Method +from PLC.Parameter import Parameter, Mixed +from PLC.Auth import Auth +from PLC.Nodes import Node, Nodes + +class DeleteNode(Method): + """ + Mark an existing node as deleted. + + PIs and techs may only delete nodes at their own sites. ins may + delete nodes at any site. + + Returns 1 if successful, faults otherwise. + """ + + roles = ['admin', 'pi', 'tech'] + + accepts = [ + Auth(), + Mixed(Node.fields['node_id'], + Node.fields['hostname']) + ] + + returns = Parameter(int, '1 if successful') + + def call(self, auth, node_id_or_hostname): + # Get account information + nodes = Nodes(self.api, [node_id_or_hostname]) + if not nodes: + raise PLCInvalidArgument, "No such node" + node = nodes[0] + + if node['peer_id'] is not None: + raise PLCInvalidArgument, "Not a local node" + + # If we are not an admin, make sure that the caller is a + # member of the site at which the node is located. + if 'admin' not in self.caller['roles']: + # Authenticated function + assert self.caller is not None + + if node['site_id'] not in self.caller['site_ids']: + raise PLCPermissionDenied, "Not allowed to delete nodes from specified site" + + node_id=node['node_id'] + site_id=node['site_id'] + node.delete() + + # Logging variables + # it's not much use to attach to the node as it's going to vanish... 
+ self.event_objects = {'Node': [node_id], 'Site': [site_id] } + self.message = "Node %d deleted" % node['node_id'] + + return 1 diff --git a/PLC/Methods/DeleteNodeFromPCU.py b/PLC/Methods/DeleteNodeFromPCU.py new file mode 100644 index 0000000..892d77f --- /dev/null +++ b/PLC/Methods/DeleteNodeFromPCU.py @@ -0,0 +1,65 @@ +from PLC.Faults import * +from PLC.Method import Method +from PLC.Parameter import Parameter, Mixed +from PLC.Nodes import Node, Nodes +from PLC.PCUs import PCU, PCUs +from PLC.Sites import Site, Sites +from PLC.Auth import Auth + +class DeleteNodeFromPCU(Method): + """ + Deletes a node from a PCU. + + Non-admins may only update PCUs at their sites. + + Returns 1 if successful, faults otherwise. + """ + + roles = ['admin', 'pi', 'tech'] + + accepts = [ + Auth(), + Mixed(Node.fields['node_id'], + Node.fields['hostname']), + PCU.fields['pcu_id'] + ] + + returns = Parameter(int, '1 if successful') + + def call(self, auth, node_id_or_hostname, pcu_id): + # Get node + nodes = Nodes(self.api, [node_id_or_hostname]) + if not nodes: + raise PLCInvalidArgument, "No such node" + + node = nodes[0] + + # Get PCU + pcus = PCUs(self.api, [pcu_id]) + if not pcus: + raise PLCInvalidArgument, "No such PCU" + + pcu = pcus[0] + + if 'admin' not in self.caller['roles']: + ok = False + sites = Sites(self.api, self.caller['site_ids']) + for site in sites: + if pcu['pcu_id'] in site['pcu_ids']: + ok = True + break + if not ok: + raise PLCPermissionDenied, "Not allowed to update that PCU" + + # Removed node from PCU + + if node['node_id'] in pcu['node_ids']: + pcu.remove_node(node) + + # Logging variables + self.event_objects = {'PCU': [pcu['pcu_id']], + 'Node': [node['node_id']]} + self.message = 'Node %d removed from PCU %d' % \ + (node['node_id'], pcu['pcu_id']) + + return 1 diff --git a/PLC/Methods/DeleteNodeGroup.py b/PLC/Methods/DeleteNodeGroup.py new file mode 100644 index 0000000..812af11 --- /dev/null +++ b/PLC/Methods/DeleteNodeGroup.py @@ -0,0 +1,41 @@ +from 
PLC.Faults import * +from PLC.Method import Method +from PLC.Parameter import Parameter, Mixed +from PLC.Auth import Auth +from PLC.NodeGroups import NodeGroup, NodeGroups + +class DeleteNodeGroup(Method): + """ + Delete an existing Node Group. + + ins may delete any node group + + Returns 1 if successful, faults otherwise. + """ + + roles = ['admin'] + + accepts = [ + Auth(), + Mixed(NodeGroup.fields['nodegroup_id'], + NodeGroup.fields['groupname']) + ] + + returns = Parameter(int, '1 if successful') + + + def call(self, auth, node_group_id_or_name): + # Get account information + nodegroups = NodeGroups(self.api, [node_group_id_or_name]) + if not nodegroups: + raise PLCInvalidArgument, "No such node group" + + nodegroup = nodegroups[0] + + nodegroup.delete() + + # Logging variables + self.event_objects = {'NodeGroup': [nodegroup['nodegroup_id']]} + self.message = 'Node group %d deleted' % nodegroup['nodegroup_id'] + + return 1 diff --git a/PLC/Methods/DeleteNodeTag.py b/PLC/Methods/DeleteNodeTag.py new file mode 100644 index 0000000..2afa446 --- /dev/null +++ b/PLC/Methods/DeleteNodeTag.py @@ -0,0 +1,55 @@ +# +# Thierry Parmentelat - INRIA +# + +from PLC.Faults import * +from PLC.Method import Method +from PLC.Parameter import Parameter, Mixed +from PLC.Auth import Auth + +from PLC.Sites import Site, Sites +from PLC.Nodes import Node, Nodes +from PLC.TagTypes import TagType, TagTypes +from PLC.NodeTags import NodeTag, NodeTags + +class DeleteNodeTag(Method): + """ + Deletes the specified node tag + + Admins have full access. Non-admins need to + (1) have at least one of the roles attached to the tagtype, + and (2) belong in the same site as the tagged subject. + + Returns 1 if successful, faults otherwise. 
+ """ + + roles = ['admin', 'pi', 'user', 'tech'] + + accepts = [ + Auth(), + NodeTag.fields['node_tag_id'] + ] + + returns = Parameter(int, '1 if successful') + + def call(self, auth, node_tag_id): + node_tags = NodeTags(self.api, [node_tag_id]) + if not node_tags: + raise PLCInvalidArgument, "No such node tag %r"%node_tag_id + node_tag = node_tags[0] + + tag_type_id = node_tag['tag_type_id'] + tag_type = TagTypes (self.api,[tag_type_id])[0] + + nodes = Nodes (self.api, node_tag['node_id']) + if not nodes: + raise PLCInvalidArgument, "No such node %d"%node_tag['node_id'] + node=nodes[0] + + # check authorizations + node.caller_may_write_tag(self.api,self.caller,tag_type) + + node_tag.delete() + self.object_ids = [node_tag['node_tag_id']] + + return 1 diff --git a/PLC/Methods/DeleteNodeType.py b/PLC/Methods/DeleteNodeType.py new file mode 100644 index 0000000..e6e9579 --- /dev/null +++ b/PLC/Methods/DeleteNodeType.py @@ -0,0 +1,35 @@ +from PLC.Faults import * +from PLC.Method import Method +from PLC.Parameter import Parameter, Mixed +from PLC.NodeTypes import NodeType, NodeTypes +from PLC.Auth import Auth + +class DeleteNodeType(Method): + """ + Deletes a node node type. + + WARNING: This will cause the deletion of all nodes in this boot + state. + + Returns 1 if successful, faults otherwise. 
+ """ + + roles = ['admin'] + + accepts = [ + Auth(), + NodeType.fields['node_type'] + ] + + returns = Parameter(int, '1 if successful') + + + def call(self, auth, name): + node_types = NodeTypes(self.api, [name]) + if not node_types: + raise PLCInvalidArgument, "No such node type" + node_type = node_types[0] + + node_type.delete() + + return 1 diff --git a/PLC/Methods/DeletePCU.py b/PLC/Methods/DeletePCU.py new file mode 100644 index 0000000..fe59abc --- /dev/null +++ b/PLC/Methods/DeletePCU.py @@ -0,0 +1,43 @@ +from PLC.Faults import * +from PLC.Method import Method +from PLC.Parameter import Parameter, Mixed +from PLC.PCUs import PCU, PCUs +from PLC.Auth import Auth + +class DeletePCU(Method): + """ + Deletes a PCU. + + Non-admins may only delete PCUs at their sites. + + Returns 1 if successful, faults otherwise. + """ + + roles = ['admin', 'pi', 'tech'] + + accepts = [ + Auth(), + PCU.fields['pcu_id'], + ] + + returns = Parameter(int, '1 if successful') + + + def call(self, auth, pcu_id): + # Get associated PCU details + pcus = PCUs(self.api, [pcu_id]) + if not pcus: + raise PLCInvalidArgument, "No such PCU" + pcu = pcus[0] + + if 'admin' not in self.caller['roles']: + if pcu['site_id'] not in self.caller['site_ids']: + raise PLCPermissionDenied, "Not allowed to update that PCU" + + pcu.delete() + + # Logging variables + self.event_objects = {'PCU': [pcu['pcu_id']]} + self.message = 'PCU %d deleted' % pcu['pcu_id'] + + return 1 diff --git a/PLC/Methods/DeletePCUProtocolType.py b/PLC/Methods/DeletePCUProtocolType.py new file mode 100644 index 0000000..1ff162b --- /dev/null +++ b/PLC/Methods/DeletePCUProtocolType.py @@ -0,0 +1,33 @@ +from PLC.Faults import * +from PLC.Method import Method +from PLC.Parameter import Parameter, Mixed +from PLC.PCUProtocolTypes import PCUProtocolType, PCUProtocolTypes +from PLC.Auth import Auth + +class DeletePCUProtocolType(Method): + """ + Deletes a PCU protocol type. + + Returns 1 if successful, faults otherwise. 
+ """ + + roles = ['admin'] + + accepts = [ + Auth(), + PCUProtocolType.fields['pcu_protocol_type_id'] + ] + + returns = Parameter(int, '1 if successful') + + + def call(self, auth, protocol_type_id): + protocol_types = PCUProtocolTypes(self.api, [protocol_type_id]) + if not protocol_types: + raise PLCInvalidArgument, "No such pcu protocol type" + + protocol_type = protocol_types[0] + protocol_type.delete() + self.event_objects = {'PCUProtocolType': [protocol_type['pcu_protocol_type_id']]} + + return 1 diff --git a/PLC/Methods/DeletePCUType.py b/PLC/Methods/DeletePCUType.py new file mode 100644 index 0000000..6d545cb --- /dev/null +++ b/PLC/Methods/DeletePCUType.py @@ -0,0 +1,33 @@ +from PLC.Faults import * +from PLC.Method import Method +from PLC.Parameter import Parameter, Mixed +from PLC.PCUTypes import PCUType, PCUTypes +from PLC.Auth import Auth + +class DeletePCUType(Method): + """ + Deletes a PCU type. + + Returns 1 if successful, faults otherwise. + """ + + roles = ['admin'] + + accepts = [ + Auth(), + PCUType.fields['pcu_type_id'] + ] + + returns = Parameter(int, '1 if successful') + + + def call(self, auth, pcu_type_id): + pcu_types = PCUTypes(self.api, [pcu_type_id]) + if not pcu_types: + raise PLCInvalidArgument, "No such pcu type" + + pcu_type = pcu_types[0] + pcu_type.delete() + self.event_objects = {'PCUType': [pcu_type['pcu_type_id']]} + + return 1 diff --git a/PLC/Methods/DeletePeer.py b/PLC/Methods/DeletePeer.py new file mode 100644 index 0000000..ed0cd79 --- /dev/null +++ b/PLC/Methods/DeletePeer.py @@ -0,0 +1,38 @@ +from PLC.Faults import * +from PLC.Method import Method +from PLC.Parameter import Parameter, Mixed +from PLC.Auth import Auth +from PLC.Peers import Peer, Peers + +class DeletePeer(Method): + """ + Mark an existing peer as deleted. All entities (e.g., slices, + keys, nodes, etc.) for which this peer is authoritative will also + be deleted or marked as deleted. + + Returns 1 if successful, faults otherwise. 
+ """ + + roles = ['admin'] + + accepts = [ + Auth(), + Mixed(Peer.fields['peer_id'], + Peer.fields['peername']) + ] + + returns = Parameter(int, "1 if successful") + + def call(self, auth, peer_id_or_name): + # Get account information + peers = Peers(self.api, [peer_id_or_name]) + if not peers: + raise PLCInvalidArgument, "No such peer" + + peer = peers[0] + peer.delete() + + # Log affected objects + self.event_objects = {'Peer': [peer['peer_id']]} + + return 1 diff --git a/PLC/Methods/DeletePerson.py b/PLC/Methods/DeletePerson.py new file mode 100644 index 0000000..e85d2e9 --- /dev/null +++ b/PLC/Methods/DeletePerson.py @@ -0,0 +1,51 @@ +from PLC.Faults import * +from PLC.Method import Method +from PLC.Parameter import Parameter, Mixed +from PLC.Persons import Person, Persons +from PLC.Auth import Auth + +class DeletePerson(Method): + """ + Mark an existing account as deleted. + + Users and techs can only delete themselves. PIs can only delete + themselves and other non-PIs at their sites. ins can delete + anyone. + + Returns 1 if successful, faults otherwise. 
+ """ + + roles = ['admin', 'pi', 'user', 'tech'] + + accepts = [ + Auth(), + Mixed(Person.fields['person_id'], + Person.fields['email']) + ] + + returns = Parameter(int, '1 if successful') + + def call(self, auth, person_id_or_email): + # Get account information + persons = Persons(self.api, [person_id_or_email]) + if not persons: + raise PLCInvalidArgument, "No such account" + person = persons[0] + + if person['peer_id'] is not None: + raise PLCInvalidArgument, "Not a local account" + + # Authenticated function + assert self.caller is not None + + # Check if we can update this account + if not self.caller.can_update(person): + raise PLCPermissionDenied, "Not allowed to delete specified account" + + person.delete() + + # Logging variables + self.event_objects = {'Person': [person['person_id']]} + self.message = 'Person %d deleted' % person['person_id'] + + return 1 diff --git a/PLC/Methods/DeletePersonFromSite.py b/PLC/Methods/DeletePersonFromSite.py new file mode 100644 index 0000000..cf4379d --- /dev/null +++ b/PLC/Methods/DeletePersonFromSite.py @@ -0,0 +1,57 @@ +from PLC.Faults import * +from PLC.Method import Method +from PLC.Parameter import Parameter, Mixed +from PLC.Persons import Person, Persons +from PLC.Sites import Site, Sites +from PLC.Auth import Auth + +class DeletePersonFromSite(Method): + """ + Removes the specified person from the specified site. If the + person is not a member of the specified site, no error is + returned. + + Returns 1 if successful, faults otherwise. 
+ """ + + roles = ['admin'] + + accepts = [ + Auth(), + Mixed(Person.fields['person_id'], + Person.fields['email']), + Mixed(Site.fields['site_id'], + Site.fields['login_base']) + ] + + returns = Parameter(int, '1 if successful') + + def call(self, auth, person_id_or_email, site_id_or_login_base): + # Get account information + persons = Persons(self.api, [person_id_or_email]) + if not persons: + raise PLCInvalidArgument, "No such account" + person = persons[0] + + if person['peer_id'] is not None: + raise PLCInvalidArgument, "Not a local account" + + # Get site information + sites = Sites(self.api, [site_id_or_login_base]) + if not sites: + raise PLCInvalidArgument, "No such site" + site = sites[0] + + if site['peer_id'] is not None: + raise PLCInvalidArgument, "Not a local site" + + if site['site_id'] in person['site_ids']: + site.remove_person(person) + + # Logging variables + self.event_objects = {'Site': [site['site_id']], + 'Person': [person['person_id']]} + self.message = 'Person %d deleted from site %d ' % \ + (person['person_id'], site['site_id']) + + return 1 diff --git a/PLC/Methods/DeletePersonFromSlice.py b/PLC/Methods/DeletePersonFromSlice.py new file mode 100644 index 0000000..dbec684 --- /dev/null +++ b/PLC/Methods/DeletePersonFromSlice.py @@ -0,0 +1,59 @@ +from PLC.Faults import * +from PLC.Method import Method +from PLC.Parameter import Parameter, Mixed +from PLC.Persons import Person, Persons +from PLC.Slices import Slice, Slices +from PLC.Auth import Auth + +class DeletePersonFromSlice(Method): + """ + Deletes the specified person from the specified slice. If the person is + not a member of the slice, no errors are returned. + + Returns 1 if successful, faults otherwise. 
+ """ + + roles = ['admin', 'pi'] + + accepts = [ + Auth(), + Mixed(Person.fields['person_id'], + Person.fields['email']), + Mixed(Slice.fields['slice_id'], + Slice.fields['name']) + ] + + returns = Parameter(int, '1 if successful') + + def call(self, auth, person_id_or_email, slice_id_or_name): + # Get account information + persons = Persons(self.api, [person_id_or_email]) + if not persons: + raise PLCInvalidArgument, "No such account %s"%person_id_or_email + person = persons[0] + + # Get slice information + slices = Slices(self.api, [slice_id_or_name]) + if not slices: + raise PLCInvalidArgument, "No such slice %s"%slice_id_or_name + slice = slices[0] + + # N.B. Allow foreign users to be added to local slices and + # local users to be added to foreign slices (and, of course, + # local users to be added to local slices). + if person['peer_id'] is not None and slice['peer_id'] is not None: + raise PLCInvalidArgument, "Cannot delete foreign users from foreign slices" + + # If we are not admin, make sure the caller is a pi + # of the site associated with the slice + if 'admin' not in self.caller['roles']: + if slice['site_id'] not in self.caller['site_ids']: + raise PLCPermissionDenied, "Not allowed to delete users from slice %s"%slice_id_or_name + + if slice['slice_id'] in person['slice_ids']: + slice.remove_person(person) + + self.event_objects = {'Slice': [slice['slice_id']], + 'Person': [person['person_id']]} + + return 1 diff --git a/PLC/Methods/DeletePersonTag.py b/PLC/Methods/DeletePersonTag.py new file mode 100644 index 0000000..6b748d3 --- /dev/null +++ b/PLC/Methods/DeletePersonTag.py @@ -0,0 +1,54 @@ +# +# Thierry Parmentelat - INRIA +# +from PLC.Faults import * +from PLC.Method import Method +from PLC.Parameter import Parameter, Mixed +from PLC.Auth import Auth + +from PLC.Persons import Person, Persons +from PLC.TagTypes import TagType, TagTypes +from PLC.PersonTags import PersonTag, PersonTags + +# need to import so the core classes get decorated with 
caller_may_write_tag +from PLC.AuthorizeHelpers import AuthorizeHelpers + +class DeletePersonTag(Method): + """ + Deletes the specified person setting + + Admins have full access. Non-admins can change their own tags. + + Returns 1 if successful, faults otherwise. + """ + + roles = ['admin', 'pi', 'user'] + + accepts = [ + Auth(), + PersonTag.fields['person_tag_id'] + ] + + returns = Parameter(int, '1 if successful') + + def call(self, auth, person_tag_id): + person_tags = PersonTags(self.api, [person_tag_id]) + if not person_tags: + raise PLCInvalidArgument, "No such person tag %r"%person_tag_id + person_tag = person_tags[0] + + tag_type_id = person_tag['tag_type_id'] + tag_type = TagTypes (self.api,[tag_type_id])[0] + + persons = Persons (self.api, person_tag['person_id']) + if not persons: + raise PLCInvalidArgument, "No such person %d"%person_tag['person_id'] + person=persons[0] + + # check authorizations + person.caller_may_write_tag(self.api,self.caller,tag_type) + + person_tag.delete() + self.object_ids = [person_tag['person_tag_id']] + + return 1 diff --git a/PLC/Methods/DeleteRole.py b/PLC/Methods/DeleteRole.py new file mode 100644 index 0000000..aac0913 --- /dev/null +++ b/PLC/Methods/DeleteRole.py @@ -0,0 +1,38 @@ +from PLC.Faults import * +from PLC.Method import Method +from PLC.Parameter import Parameter, Mixed +from PLC.Roles import Role, Roles +from PLC.Auth import Auth + +class DeleteRole(Method): + """ + Deletes a role. + + WARNING: This will remove the specified role from all accounts + that possess it, and from all node and slice attributes that refer + to it. + + Returns 1 if successful, faults otherwise. 
+ """ + + roles = ['admin'] + + accepts = [ + Auth(), + Mixed(Role.fields['role_id'], + Role.fields['name']) + ] + + returns = Parameter(int, '1 if successful') + + + def call(self, auth, role_id_or_name): + roles = Roles(self.api, [role_id_or_name]) + if not roles: + raise PLCInvalidArgument, "No such role" + role = roles[0] + + role.delete() + self.event_objects = {'Role': [role['role_id']]} + + return 1 diff --git a/PLC/Methods/DeleteRoleFromPerson.py b/PLC/Methods/DeleteRoleFromPerson.py new file mode 100644 index 0000000..4deacf6 --- /dev/null +++ b/PLC/Methods/DeleteRoleFromPerson.py @@ -0,0 +1,67 @@ +from PLC.Faults import * +from PLC.Method import Method +from PLC.Parameter import Parameter, Mixed +from PLC.Persons import Person, Persons +from PLC.Auth import Auth +from PLC.Roles import Role, Roles + +class DeleteRoleFromPerson(Method): + """ + Deletes the specified role from the person. + + PIs can only revoke the tech and user roles from users and techs + at their sites. ins can revoke any role from any user. + + Returns 1 if successful, faults otherwise. 
+ """ + + roles = ['admin', 'pi'] + + accepts = [ + Auth(), + Mixed(Role.fields['role_id'], + Role.fields['name']), + Mixed(Person.fields['person_id'], + Person.fields['email']), + ] + + returns = Parameter(int, '1 if successful') + + def call(self, auth, role_id_or_name, person_id_or_email): + # Get role + roles = Roles(self.api, [role_id_or_name]) + if not roles: + raise PLCInvalidArgument, "Invalid role '%s'" % unicode(role_id_or_name) + role = roles[0] + + # Get account information + persons = Persons(self.api, [person_id_or_email]) + if not persons: + raise PLCInvalidArgument, "No such account" + person = persons[0] + + if person['peer_id'] is not None: + raise PLCInvalidArgument, "Not a local account" + + # Authenticated function + assert self.caller is not None + + # Check if we can update this account + if not self.caller.can_update(person): + raise PLCPermissionDenied, "Not allowed to update specified account" + + # Can only revoke lesser (higher) roles from others + if 'admin' not in self.caller['roles'] and \ + role['role_id'] <= min(self.caller['role_ids']): + raise PLCPermissionDenied, "Not allowed to revoke that role" + + if role['role_id'] in person['role_ids']: + person.remove_role(role) + + # Logging variables + self.event_objects = {'Person': [person['person_id']], + 'Role': [role['role_id']]} + self.message = "Role %d revoked from person %d" % \ + (role['role_id'], person['person_id']) + + return 1 diff --git a/PLC/Methods/DeleteRoleFromTagType.py b/PLC/Methods/DeleteRoleFromTagType.py new file mode 100644 index 0000000..cdce6fa --- /dev/null +++ b/PLC/Methods/DeleteRoleFromTagType.py @@ -0,0 +1,59 @@ +from PLC.Faults import * +from PLC.Method import Method +from PLC.Parameter import Parameter, Mixed +from PLC.TagTypes import TagType, TagTypes +from PLC.Auth import Auth +from PLC.Roles import Role, Roles + +class DeleteRoleFromTagType(Method): + """ + Delete the specified role from the tagtype so that + users with that role can no longer tweak 
the tag. + + Only admins can call this method + + Returns 1 if successful, faults otherwise. + """ + + roles = ['admin'] + + accepts = [ + Auth(), + Mixed(Role.fields['role_id'], + Role.fields['name']), + Mixed(TagType.fields['tag_type_id'], + TagType.fields['tagname']), + ] + + returns = Parameter(int, '1 if successful') + + def call(self, auth, role_id_or_name, tag_type_id_or_tagname): + # Get role + roles = Roles(self.api, [role_id_or_name]) + if not roles: + raise PLCInvalidArgument, "Invalid role '%s'" % unicode(role_id_or_name) + role = roles[0] + + # Get subject tag type + tag_types = TagTypes(self.api, [tag_type_id_or_tagname]) + if not tag_types: + raise PLCInvalidArgument, "No such tag type" + tag_type = tag_types[0] + + # Authenticated function + assert self.caller is not None + + # Only admins + if 'admin' not in self.caller['roles']: + raise PLCInvalidArgument, "Not allowed to revoke that role" + + if role['role_id'] in tag_type['role_ids']: + tag_type.remove_role(role) + + # Logging variables + self.event_objects = {'TagType': [tag_type['tag_type_id']], + 'Role': [role['role_id']]} + self.message = "Role %d revoked from tag_type %d" % \ + (role['role_id'], tag_type['tag_type_id']) + + return 1 diff --git a/PLC/Methods/DeleteSession.py b/PLC/Methods/DeleteSession.py new file mode 100644 index 0000000..3898f51 --- /dev/null +++ b/PLC/Methods/DeleteSession.py @@ -0,0 +1,30 @@ +import time + +from PLC.Method import Method +from PLC.Parameter import Parameter, Mixed +from PLC.Auth import SessionAuth +from PLC.Sessions import Session, Sessions + +class DeleteSession(Method): + """ + Invalidates the current session. + + Returns 1 if successful. 
+ """ + + roles = ['admin', 'pi', 'user', 'tech', 'node'] + accepts = [SessionAuth()] + returns = Parameter(int, '1 if successful') + + + def call(self, auth): + assert auth.has_key('session') + + sessions = Sessions(self.api, [auth['session']]) + if not sessions: + raise PLCAPIError, "No such session" + session = sessions[0] + + session.delete() + + return 1 diff --git a/PLC/Methods/DeleteSite.py b/PLC/Methods/DeleteSite.py new file mode 100644 index 0000000..db2b294 --- /dev/null +++ b/PLC/Methods/DeleteSite.py @@ -0,0 +1,47 @@ +from PLC.Faults import * +from PLC.Method import Method +from PLC.Parameter import Parameter, Mixed +from PLC.Sites import Site, Sites +from PLC.Persons import Person, Persons +from PLC.Nodes import Node, Nodes +from PLC.PCUs import PCU, PCUs +from PLC.Auth import Auth + +class DeleteSite(Method): + """ + Mark an existing site as deleted. The accounts of people who are + not members of at least one other non-deleted site will also be + marked as deleted. Nodes, PCUs, and slices associated with the + site will be deleted. + + Returns 1 if successful, faults otherwise. 
+ """ + + roles = ['admin'] + + accepts = [ + Auth(), + Mixed(Site.fields['site_id'], + Site.fields['login_base']) + ] + + returns = Parameter(int, '1 if successful') + + def call(self, auth, site_id_or_login_base): + # Get account information + sites = Sites(self.api, [site_id_or_login_base]) + if not sites: + raise PLCInvalidArgument, "No such site" + site = sites[0] + + if site['peer_id'] is not None: + raise PLCInvalidArgument, "Not a local site" + + site.delete() + + # Logging variables + self.event_objects = {'Site': [site['site_id']]} + self.message = 'Site %d deleted' % site['site_id'] + + + return 1 diff --git a/PLC/Methods/DeleteSiteTag.py b/PLC/Methods/DeleteSiteTag.py new file mode 100644 index 0000000..036ffda --- /dev/null +++ b/PLC/Methods/DeleteSiteTag.py @@ -0,0 +1,56 @@ +# +# Thierry Parmentelat - INRIA +# +from PLC.Faults import * +from PLC.Method import Method +from PLC.Parameter import Parameter, Mixed +from PLC.Auth import Auth + +from PLC.Sites import Site, Sites +from PLC.TagTypes import TagType, TagTypes +from PLC.SiteTags import SiteTag, SiteTags + +# need to import so the core classes get decorated with caller_may_write_tag +from PLC.AuthorizeHelpers import AuthorizeHelpers + +class DeleteSiteTag(Method): + """ + Deletes the specified site setting + + Admins have full access. Non-admins need to + (1) have at least one of the roles attached to the tagtype, + and (2) belong in the same site as the tagged subject. + + Returns 1 if successful, faults otherwise. 
+ """ + + roles = ['admin', 'pi', 'user'] + + accepts = [ + Auth(), + SiteTag.fields['site_tag_id'] + ] + + returns = Parameter(int, '1 if successful') + + def call(self, auth, site_tag_id): + site_tags = SiteTags(self.api, [site_tag_id]) + if not site_tags: + raise PLCInvalidArgument, "No such site tag %r"%site_tag_id + site_tag = site_tags[0] + + tag_type_id = site_tag['tag_type_id'] + tag_type = TagTypes (self.api,[tag_type_id])[0] + + sites = Sites (self.api, site_tag['site_id']) + if not sites: + raise PLCInvalidArgument, "No such site %d"%site_tag['site_id'] + site=sites[0] + + # check authorizations + site.caller_may_write_tag(self.api,self.caller,tag_type) + + site_tag.delete() + self.object_ids = [site_tag['site_tag_id']] + + return 1 diff --git a/PLC/Methods/DeleteSlice.py b/PLC/Methods/DeleteSlice.py new file mode 100644 index 0000000..297f8a9 --- /dev/null +++ b/PLC/Methods/DeleteSlice.py @@ -0,0 +1,48 @@ +from PLC.Faults import * +from PLC.Method import Method +from PLC.Parameter import Parameter, Mixed +from PLC.Slices import Slice, Slices +from PLC.Auth import Auth + +class DeleteSlice(Method): + """ + Deletes the specified slice. + + Users may only delete slices of which they are members. PIs may + delete any of the slices at their sites, or any slices of which + they are members. Admins may delete any slice. + + Returns 1 if successful, faults otherwise. 
+ """ + + roles = ['admin', 'pi', 'user'] + + accepts = [ + Auth(), + Mixed(Slice.fields['slice_id'], + Slice.fields['name']), + ] + + returns = Parameter(int, '1 if successful') + + def call(self, auth, slice_id_or_name): + slices = Slices(self.api, [slice_id_or_name]) + if not slices: + raise PLCInvalidArgument, "No such slice" + slice = slices[0] + + if slice['peer_id'] is not None: + raise PLCInvalidArgument, "Not a local slice" + + if 'admin' not in self.caller['roles']: + if self.caller['person_id'] in slice['person_ids']: + pass + elif 'pi' not in self.caller['roles']: + raise PLCPermissionDenied, "Not a member of the specified slice" + elif slice['site_id'] not in self.caller['site_ids']: + raise PLCPermissionDenied, "Specified slice not associated with any of your sites" + + slice.delete() + self.event_objects = {'Slice': [slice['slice_id']]} + + return 1 diff --git a/PLC/Methods/DeleteSliceFromNodes.py b/PLC/Methods/DeleteSliceFromNodes.py new file mode 100644 index 0000000..1a82ad1 --- /dev/null +++ b/PLC/Methods/DeleteSliceFromNodes.py @@ -0,0 +1,61 @@ +from PLC.Faults import * +from PLC.Method import Method +from PLC.Parameter import Parameter, Mixed +from PLC.Nodes import Node, Nodes +from PLC.Slices import Slice, Slices +from PLC.Auth import Auth + +class DeleteSliceFromNodes(Method): + """ + Deletes the specified slice from the specified nodes. If the slice is + not associated with a node, no errors are returned. + + Returns 1 if successful, faults otherwise. 
+ """ + + roles = ['admin', 'pi', 'user'] + + accepts = [ + Auth(), + Mixed(Slice.fields['slice_id'], + Slice.fields['name']), + [Mixed(Node.fields['node_id'], + Node.fields['hostname'])] + ] + + returns = Parameter(int, '1 if successful') + + def call(self, auth, slice_id_or_name, node_id_or_hostname_list): + # Get slice information + slices = Slices(self.api, [slice_id_or_name]) + if not slices: + raise PLCInvalidArgument, "No such slice" + slice = slices[0] + + if slice['peer_id'] is not None: + raise PLCInvalidArgument, "Not a local slice" + + if 'admin' not in self.caller['roles']: + if self.caller['person_id'] in slice['person_ids']: + pass + elif 'pi' not in self.caller['roles']: + raise PLCPermissionDenied, "Not a member of the specified slice" + elif slice['site_id'] not in self.caller['site_ids']: + raise PLCPermissionDenied, "Specified slice not associated with any of your sites" + + # Remove slice from all nodes found + + # Get specified nodes + nodes = Nodes(self.api, node_id_or_hostname_list) + for node in nodes: + if slice['peer_id'] is not None and node['peer_id'] is not None: + raise PLCPermissionDenied, "Not allowed to remove peer slice from peer node" + if slice['slice_id'] in node['slice_ids']: + slice.remove_node(node, commit = False) + + slice.sync() + + self.event_objects = {'Node': [node['node_id'] for node in nodes], + 'Slice': [slice['slice_id']]} + + return 1 diff --git a/PLC/Methods/DeleteSliceFromNodesWhitelist.py b/PLC/Methods/DeleteSliceFromNodesWhitelist.py new file mode 100644 index 0000000..c369966 --- /dev/null +++ b/PLC/Methods/DeleteSliceFromNodesWhitelist.py @@ -0,0 +1,54 @@ +from PLC.Faults import * +from PLC.Method import Method +from PLC.Parameter import Parameter, Mixed +from PLC.Nodes import Node, Nodes +from PLC.Slices import Slice, Slices +from PLC.Auth import Auth + +class DeleteSliceFromNodesWhitelist(Method): + """ + Deletes the specified slice from the whitelist on the specified nodes. 
Nodes may be + either local or foreign nodes. + + If the slice is already associated with a node, no errors are + returned. + + Returns 1 if successful, faults otherwise. + """ + + roles = ['admin'] + + accepts = [ + Auth(), + Mixed(Slice.fields['slice_id'], + Slice.fields['name']), + [Mixed(Node.fields['node_id'], + Node.fields['hostname'])] + ] + + returns = Parameter(int, '1 if successful') + + def call(self, auth, slice_id_or_name, node_id_or_hostname_list): + # Get slice information + slices = Slices(self.api, [slice_id_or_name]) + if not slices: + raise PLCInvalidArgument, "No such slice" + slice = slices[0] + + if slice['peer_id'] is not None: + raise PLCInvalidArgument, "Not a local slice" + + # Get specified nodes, add them to the slice + nodes = Nodes(self.api, node_id_or_hostname_list) + for node in nodes: + if node['peer_id'] is not None: + raise PLCInvalidArgument, "%s not a local node" % node['hostname'] + if slice['slice_id'] in node['slice_ids_whitelist']: + slice.delete_from_node_whitelist(node, commit = False) + + slice.sync() + + self.event_objects = {'Node': [node['node_id'] for node in nodes], + 'Slice': [slice['slice_id']]} + + return 1 diff --git a/PLC/Methods/DeleteSliceInstantiation.py b/PLC/Methods/DeleteSliceInstantiation.py new file mode 100644 index 0000000..5098a9d --- /dev/null +++ b/PLC/Methods/DeleteSliceInstantiation.py @@ -0,0 +1,34 @@ +from PLC.Faults import * +from PLC.Method import Method +from PLC.Parameter import Parameter, Mixed +from PLC.SliceInstantiations import SliceInstantiation, SliceInstantiations +from PLC.Auth import Auth + +class DeleteSliceInstantiation(Method): + """ + Deletes a slice instantiation state. + + WARNING: This will cause the deletion of all slices of this instantiation. + + Returns 1 if successful, faults otherwise. 
+ """ + + roles = ['admin'] + + accepts = [ + Auth(), + SliceInstantiation.fields['instantiation'] + ] + + returns = Parameter(int, '1 if successful') + + + def call(self, auth, instantiation): + slice_instantiations = SliceInstantiations(self.api, [instantiation]) + if not slice_instantiations: + raise PLCInvalidArgument, "No such slice instantiation state" + slice_instantiation = slice_instantiations[0] + + slice_instantiation.delete() + + return 1 diff --git a/PLC/Methods/DeleteSliceTag.py b/PLC/Methods/DeleteSliceTag.py new file mode 100644 index 0000000..ec11b41 --- /dev/null +++ b/PLC/Methods/DeleteSliceTag.py @@ -0,0 +1,63 @@ +# +# Thierry Parmentelat - INRIA +# +from PLC.Faults import * +from PLC.Method import Method +from PLC.Parameter import Parameter, Mixed +from PLC.Auth import Auth + +from PLC.TagTypes import TagTypes, TagType +from PLC.Nodes import Node, Nodes +from PLC.Slices import Slice, Slices +from PLC.SliceTags import SliceTag, SliceTags + +# need to import so the core classes get decorated with caller_may_write_tag +from PLC.AuthorizeHelpers import AuthorizeHelpers + +class DeleteSliceTag(Method): + """ + Deletes the specified slice or sliver attribute. + + Attributes may require the caller to have a particular role in + order to be deleted. Users may only delete attributes of + slices or slivers of which they are members. PIs may only delete + attributes of slices or slivers at their sites, or of which they + are members. Admins may delete attributes of any slice or sliver. + + Returns 1 if successful, faults otherwise. 
+ """ + + roles = ['admin', 'pi', 'user', 'tech', 'node'] + + accepts = [ + Auth(), + SliceTag.fields['slice_tag_id'] + ] + + returns = Parameter(int, '1 if successful') + + def call(self, auth, slice_tag_id): + slice_tags = SliceTags(self.api, [slice_tag_id]) + if not slice_tags: + raise PLCInvalidArgument, "No such slice attribute" + slice_tag = slice_tags[0] + + tag_type_id = slice_tag['tag_type_id'] + tag_type = TagTypes (self.api,[tag_type_id])[0] + + slices = Slices(self.api, [slice_tag['slice_id']]) + if not slices: + raise PLCInvalidArgument, "No such slice %d"%slice_tag['slice_id'] + slice = slices[0] + + assert slice_tag['slice_tag_id'] in slice['slice_tag_ids'] + + # check authorizations + node_id_or_hostname=slice_tag['node_id'] + nodegroup_id_or_name=slice_tag['nodegroup_id'] + slice.caller_may_write_tag(self.api,self.caller,tag_type,node_id_or_hostname,nodegroup_id_or_name) + + slice_tag.delete() + self.event_objects = {'SliceTag': [slice_tag['slice_tag_id']]} + + return 1 diff --git a/PLC/Methods/DeleteTagType.py b/PLC/Methods/DeleteTagType.py new file mode 100644 index 0000000..d5d57c9 --- /dev/null +++ b/PLC/Methods/DeleteTagType.py @@ -0,0 +1,37 @@ +# +# Thierry Parmentelat - INRIA +# +from PLC.Faults import * +from PLC.Method import Method +from PLC.Parameter import Parameter, Mixed +from PLC.TagTypes import TagType, TagTypes +from PLC.Auth import Auth + +class DeleteTagType(Method): + """ + Deletes the specified node tag type. + + Returns 1 if successful, faults otherwise. 
+ """ + + roles = ['admin'] + + accepts = [ + Auth(), + Mixed(TagType.fields['tag_type_id'], + TagType.fields['tagname']), + ] + + returns = Parameter(int, '1 if successful') + + + def call(self, auth, tag_type_id_or_name): + tag_types = TagTypes(self.api, [tag_type_id_or_name]) + if not tag_types: + raise PLCInvalidArgument, "No such node tag type" + tag_type = tag_types[0] + + tag_type.delete() + self.object_ids = [tag_type['tag_type_id']] + + return 1 diff --git a/PLC/Methods/GenerateNodeConfFile.py b/PLC/Methods/GenerateNodeConfFile.py new file mode 100644 index 0000000..68876d2 --- /dev/null +++ b/PLC/Methods/GenerateNodeConfFile.py @@ -0,0 +1,107 @@ +import random +import base64 + +from PLC.Faults import * +from PLC.Method import Method +from PLC.Parameter import Parameter, Mixed +from PLC.Nodes import Node, Nodes +from PLC.Interfaces import Interface, Interfaces +from PLC.Auth import Auth + +class GenerateNodeConfFile(Method): + """ + Creates a new node configuration file if all network settings are + present. This function will generate a new node key for the + specified node, effectively invalidating any old configuration + files. + + Non-admins can only generate files for nodes at their sites. + + Returns the contents of the file if successful, faults otherwise. + """ + + roles = ['admin', 'pi', 'tech'] + + accepts = [ + Auth(), + Mixed(Node.fields['node_id'], + Node.fields['hostname']), + Parameter(bool, "True if you want to regenerate node key") + ] + + returns = Parameter(str, "Node configuration file") + + def call(self, auth, node_id_or_hostname, regenerate_node_key = True): + # Get node information + nodes = Nodes(self.api, [node_id_or_hostname]) + if not nodes: + raise PLCInvalidArgument, "No such node" + node = nodes[0] + + if node['peer_id'] is not None: + raise PLCInvalidArgument, "Not a local node" + + # If we are not an admin, make sure that the caller is a + # member of the site at which the node is located. 
+ if 'admin' not in self.caller['roles']: + if node['site_id'] not in self.caller['site_ids']: + raise PLCPermissionDenied, "Not allowed to generate a configuration file for that node" + + # Get interfaces for this node + primary = None + interfaces = Interfaces(self.api, node['interface_ids']) + for interface in interfaces: + if interface['is_primary']: + primary = interface + break + if primary is None: + raise PLCInvalidArgument, "No primary network configured" + + # Split hostname into host and domain parts + parts = node['hostname'].split(".", 1) + if len(parts) < 2: + raise PLCInvalidArgument, "Node hostname is invalid" + host = parts[0] + domain = parts[1] + + if regenerate_node_key: + # Generate 32 random bytes + bytes = random.sample(xrange(0, 256), 32) + # Base64 encode their string representation + node['key'] = base64.b64encode("".join(map(chr, bytes))) + # XXX Boot Manager cannot handle = in the key + node['key'] = node['key'].replace("=", "") + # Save it + node.sync() + + # Generate node configuration file suitable for BootCD + file = "" + + file += 'NODE_ID="%d"\n' % node['node_id'] + file += 'NODE_KEY="%s"\n' % node['key'] + + if primary['mac']: + file += 'NET_DEVICE="%s"\n' % primary['mac'].lower() + + file += 'IP_METHOD="%s"\n' % primary['method'] + + if primary['method'] == 'static': + file += 'IP_ADDRESS="%s"\n' % primary['ip'] + file += 'IP_GATEWAY="%s"\n' % primary['gateway'] + file += 'IP_NETMASK="%s"\n' % primary['netmask'] + file += 'IP_NETADDR="%s"\n' % primary['network'] + file += 'IP_BROADCASTADDR="%s"\n' % primary['broadcast'] + file += 'IP_DNS1="%s"\n' % primary['dns1'] + file += 'IP_DNS2="%s"\n' % (primary['dns2'] or "") + + file += 'HOST_NAME="%s"\n' % host + file += 'DOMAIN_NAME="%s"\n' % domain + + for interface in interfaces: + if interface['method'] == 'ipmi': + file += 'IPMI_ADDRESS="%s"\n' % interface['ip'] + if interface['mac']: + file += 'IPMI_MAC="%s"\n' % interface['mac'].lower() + break + + return file diff --git 
a/PLC/Methods/GetAddressTypes.py b/PLC/Methods/GetAddressTypes.py new file mode 100644 index 0000000..d10be73 --- /dev/null +++ b/PLC/Methods/GetAddressTypes.py @@ -0,0 +1,32 @@ +from PLC.Faults import * +from PLC.Method import Method +from PLC.Parameter import Parameter, Mixed +from PLC.Filter import Filter +from PLC.AddressTypes import AddressType, AddressTypes +from PLC.Auth import Auth + +class GetAddressTypes(Method): + """ + Returns an array of structs containing details about address + types. If address_type_filter is specified and is an array of + address type identifiers, or a struct of address type attributes, + only address types matching the filter will be returned. If + return_fields is specified, only the specified details will be + returned. + """ + + roles = ['admin', 'pi', 'user', 'tech', 'node'] + + accepts = [ + Auth(), + Mixed([Mixed(AddressType.fields['address_type_id'], + AddressType.fields['name'])], + Filter(AddressType.fields)), + Parameter([str], "List of fields to return", nullok = True) + ] + + returns = [AddressType.fields] + + + def call(self, auth, address_type_filter = None, return_fields = None): + return AddressTypes(self.api, address_type_filter, return_fields) diff --git a/PLC/Methods/GetAddresses.py b/PLC/Methods/GetAddresses.py new file mode 100644 index 0000000..ae250f1 --- /dev/null +++ b/PLC/Methods/GetAddresses.py @@ -0,0 +1,30 @@ +from PLC.Faults import * +from PLC.Method import Method +from PLC.Parameter import Parameter, Mixed +from PLC.Filter import Filter +from PLC.Addresses import Address, Addresses +from PLC.Auth import Auth + +class GetAddresses(Method): + """ + Returns an array of structs containing details about addresses. If + address_filter is specified and is an array of address + identifiers, or a struct of address attributes, only addresses + matching the filter will be returned. If return_fields is + specified, only the specified details will be returned. 
+ """ + + roles = ['admin', 'pi', 'user', 'tech', 'node'] + + accepts = [ + Auth(), + Mixed([Address.fields['address_id']], + Filter(Address.fields)), + Parameter([str], "List of fields to return", nullok = True) + ] + + returns = [Address.fields] + + + def call(self, auth, address_filter = None, return_fields = None): + return Addresses(self.api, address_filter, return_fields) diff --git a/PLC/Methods/GetBootMedium.py b/PLC/Methods/GetBootMedium.py new file mode 100644 index 0000000..26b8372 --- /dev/null +++ b/PLC/Methods/GetBootMedium.py @@ -0,0 +1,613 @@ +import random +import base64 +import os +import os.path +import time + +from PLC.Faults import * +from PLC.Method import Method +from PLC.Parameter import Parameter, Mixed +from PLC.Auth import Auth + +from PLC.Nodes import Node, Nodes +from PLC.Interfaces import Interface, Interfaces +from PLC.InterfaceTags import InterfaceTag, InterfaceTags +from PLC.NodeTags import NodeTag, NodeTags + +from PLC.Logger import logger + +from PLC.Accessors.Accessors_standard import * # import node accessors + +# could not define this in the class.. 
+# create a dict with the allowed actions for each type of node +# reservable nodes being more recent, we do not support the floppy stuff anymore +allowed_actions = { + 'regular' : + [ 'node-preview', + 'node-floppy', + 'node-iso', + 'node-usb', + 'generic-iso', + 'generic-usb', + ], + 'reservable': + [ 'node-preview', + 'node-iso', + 'node-usb', + ], + } + +# compute a new key +def compute_key(): + # Generate 32 random bytes + bytes = random.sample(xrange(0, 256), 32) + # Base64 encode their string representation + key = base64.b64encode("".join(map(chr, bytes))) + # Boot Manager cannot handle = in the key + # XXX this sounds wrong, as it might prevent proper decoding + key = key.replace("=", "") + return key + +class GetBootMedium(Method): + """ + This method is a redesign based on former, supposedly dedicated, + AdmGenerateNodeConfFile + + As compared with its ancestor, this method provides a much more + detailed interface, that allows to + (*) either just preview the node config file -- in which case + the node key is NOT recomputed, and NOT provided in the output + (*) or regenerate the node config file for storage on a floppy + that is, exactly what the ancestor method used todo, + including renewing the node's key + (*) or regenerate the config file and bundle it inside an ISO or USB image + (*) or just provide the generic ISO or USB boot images + in which case of course the node_id_or_hostname parameter is not used + + action is expected among the following string constants according the + node type value: + + for a 'regular' node: + (*) node-preview + (*) node-floppy + (*) node-iso + (*) node-usb + (*) generic-iso + (*) generic-usb + + Apart for the preview mode, this method generates a new node key for the + specified node, effectively invalidating any old boot medium. + Note that 'reservable' nodes do not support 'node-floppy', + 'generic-iso' nor 'generic-usb'. + + In addition, two return mechanisms are supported. 
+ (*) The default behaviour is that the file's content is returned as a + base64-encoded string. This is how the ancestor method used to work. + To use this method, pass an empty string as the file parameter. + + (*) Or, for efficiency -- this makes sense only when the API is used + by the web pages that run on the same host -- the caller may provide + a filename, in which case the resulting file is stored in that location instead. + The filename argument can use the following markers, that are expanded + within the method + - %d : default root dir (some builtin dedicated area under /var/tmp/) + Using this is recommended, and enforced for non-admin users + - %n : the node's name when this makes sense, or a mktemp-like name when + generic media is requested + - %s : a file suffix appropriate in the context (.txt, .iso or the like) + - %v : the bootcd version string (e.g. 4.0) + - %p : the PLC name + - %f : the nodefamily + - %a : arch + With the file-based return mechanism, the method returns the full pathname + of the result file; + ** WARNING ** + It is the caller's responsability to remove this file after use. + + Options: an optional array of keywords. + options are not supported for generic images + Currently supported are + - 'partition' - for USB actions only + - 'cramfs' + - 'serial' or 'serial:' + console_spec (or 'default') is passed as-is to bootcd/build.sh + it is expected to be a colon separated string denoting + tty - baudrate - parity - bits + e.g. ttyS0:115200:n:8 + - 'variant:' + passed to build.sh as -V + variants are used to run a different kernel on the bootCD + see kvariant.sh for how to create a variant + - 'no-hangcheck' - disable hangcheck + - 'systemd-debug' - turn on systemd debug in bootcd + + Tags: the following tags are taken into account when attached to the node: + 'serial', 'cramfs', 'kvariant', 'kargs', 'no-hangcheck', 'systemd-debug' + + Security: + - Non-admins can only generate files for nodes at their sites. 
+ - Non-admins, when they provide a filename, *must* specify it in the %d area + + Housekeeping: + Whenever needed, the method stores intermediate files in a + private area, typically not located under the web server's + accessible area, and are cleaned up by the method. + + """ + + roles = ['admin', 'pi', 'tech'] + + accepts = [ + Auth(), + Mixed(Node.fields['node_id'], + Node.fields['hostname']), + Parameter (str, "Action mode, expected value depends of the type of node"), + Parameter (str, "Empty string for verbatim result, resulting file full path otherwise"), + Parameter ([str], "Options"), + ] + + returns = Parameter(str, "Node boot medium, either inlined, or filename, depending on the filename parameter") + + # define globals for regular nodes, override later for other types + BOOTCDDIR = "/usr/share/bootcd-@NODEFAMILY@/" + BOOTCDBUILD = "/usr/share/bootcd-@NODEFAMILY@/build.sh" + GENERICDIR = "/var/www/html/download-@NODEFAMILY@/" + WORKDIR = "/var/tmp/bootmedium" + LOGDIR = "/var/tmp/bootmedium/logs/" + DEBUG = False + # uncomment this to preserve temporary area and bootcustom logs + #DEBUG = True + + ### returns (host, domain) : + # 'host' : host part of the hostname + # 'domain' : domain part of the hostname + def split_hostname (self, node): + # Split hostname into host and domain parts + parts = node['hostname'].split(".", 1) + if len(parts) < 2: + raise PLCInvalidArgument("Node hostname {} is invalid".format(node['hostname'])) + return parts + + # Generate the node (plnode.txt) configuration content. + # + # This function will create the configuration file a node + # composed by: + # - a common part, regardless of the 'node_type' tag + # - XXX a special part, depending on the 'node_type' tag value. 
+ def floppy_contents (self, node, renew_key): + + # Do basic checks + if node['peer_id'] is not None: + raise PLCInvalidArgument("Not a local node {}".format(node['hostname'])) + + # If we are not an admin, make sure that the caller is a + # member of the site at which the node is located. + if 'admin' not in self.caller['roles']: + if node['site_id'] not in self.caller['site_ids']: + raise PLCPermissionDenied( + "Not allowed to generate a configuration file for {}"\ + .format(node['hostname'])) + + # Get interface for this node + primary = None + interfaces = Interfaces(self.api, node['interface_ids']) + for interface in interfaces: + if interface['is_primary']: + primary = interface + break + if primary is None: + raise PLCInvalidArgument( + "No primary network configured on {}".format(node['hostname'])) + + host, domain = self.split_hostname (node) + + # renew the key and save it on the database + if renew_key: + node['key'] = compute_key() + node.update_last_download(commit=False) + node.sync() + + # Generate node configuration file suitable for BootCD + file = "" + + if renew_key: + file += 'NODE_ID="{}"\n'.format(node['node_id']) + file += 'NODE_KEY="{}"\n'.format(node['key']) + # not used anywhere, just a note for operations people + file += 'KEY_RENEWAL_DATE="{}"\n'\ + .format(time.strftime('%Y/%m/%d at %H:%M +0000',time.gmtime())) + + if primary['mac']: + file += 'NET_DEVICE="{}"\n'.format(primary['mac'].lower()) + + file += 'IP_METHOD="{}"\n'.format(primary['method']) + + if primary['method'] == 'static': + file += 'IP_ADDRESS="{}"\n'.format(primary['ip']) + file += 'IP_GATEWAY="{}"\n'.format(primary['gateway']) + file += 'IP_NETMASK="{}"\n'.format(primary['netmask']) + file += 'IP_NETADDR="{}"\n'.format(primary['network']) + file += 'IP_BROADCASTADDR="{}"\n'.format(primary['broadcast']) + file += 'IP_DNS1="{}"\n'.format(primary['dns1']) + file += 'IP_DNS2="{}"\n'.format(primary['dns2'] or "") + + file += 'HOST_NAME="{}"\n'.format(host) + file += 
'DOMAIN_NAME="{}"\n'.format(domain) + + # define various interface settings attached to the primary interface + settings = InterfaceTags (self.api, {'interface_id':interface['interface_id']}) + + categories = set() + for setting in settings: + if setting['category'] is not None: + categories.add(setting['category']) + + for category in categories: + category_settings = InterfaceTags(self.api,{'interface_id' : interface['interface_id'], + 'category' : category}) + if category_settings: + file += '### Category : {}\n'.format(category) + for setting in category_settings: + file += '{}_{}="{}"\n'\ + .format(category.upper(), setting['tagname'].upper(), setting['value']) + + for interface in interfaces: + if interface['method'] == 'ipmi': + file += 'IPMI_ADDRESS="{}"\n'.format(interface['ip']) + if interface['mac']: + file += 'IPMI_MAC="{}"\n'.format(interface['mac'].lower()) + break + + return file + + # see also GetNodeFlavour that does similar things + def get_nodefamily (self, node, auth): + pldistro = self.api.config.PLC_FLAVOUR_NODE_PLDISTRO + fcdistro = self.api.config.PLC_FLAVOUR_NODE_FCDISTRO + arch = self.api.config.PLC_FLAVOUR_NODE_ARCH + if not node: + return (pldistro,fcdistro,arch) + + node_id = node['node_id'] + + # no support for deployment-based BootCD's, use kvariants instead + node_pldistro = GetNodePldistro (self.api,self.caller).call(auth, node_id) + if node_pldistro: + pldistro = node_pldistro + + node_fcdistro = GetNodeFcdistro (self.api,self.caller).call(auth, node_id) + if node_fcdistro: + fcdistro = node_fcdistro + + node_arch = GetNodeArch (self.api,self.caller).call(auth,node_id) + if node_arch: + arch = node_arch + + return (pldistro,fcdistro,arch) + + def bootcd_version (self): + try: + return file(self.BOOTCDDIR + "/build/version.txt").readline().strip() + except: + raise Exception("Unknown boot cd version - probably wrong bootcd dir : {}"\ + .format(self.BOOTCDDIR)) + + def cleantrash (self): + for file in self.trash: + if self.DEBUG: + 
logger.debug('DEBUG -- preserving trash file {}'.format(file)) + else: + os.unlink(file) + + ### handle filename + # build the filename string + # check for permissions and concurrency + # returns the filename + def handle_filename (self, filename, nodename, suffix, arch): + # allow to set filename to None or any other empty value + if not filename: filename='' + filename = filename.replace ("%d",self.WORKDIR) + filename = filename.replace ("%n",nodename) + filename = filename.replace ("%s",suffix) + filename = filename.replace ("%p",self.api.config.PLC_NAME) + # let's be cautious + try: filename = filename.replace ("%f", self.nodefamily) + except: pass + try: filename = filename.replace ("%a", arch) + except: pass + try: filename = filename.replace ("%v",self.bootcd_version()) + except: pass + + ### Check filename location + if filename != '': + if 'admin' not in self.caller['roles']: + if ( filename.index(self.WORKDIR) != 0): + raise PLCInvalidArgument("File {} not under {}".format(filename, self.WORKDIR)) + + ### output should not exist (concurrent runs ..) + # numerous reports of issues with this policy + # looks like people sometime suspend/cancel their download + # and this leads to the old file sitting in there forever + # so, if the file is older than 5 minutes, we just trash + grace=5 + if os.path.exists(filename) and (time.time()-os.path.getmtime(filename)) >= (grace*60): + os.unlink(filename) + if os.path.exists(filename): + raise PLCInvalidArgument( + "Resulting file {} already exists - please try again in {} minutes"\ + .format(filename, grace)) + + ### we can now safely create the file, + ### either we are admin or under a controlled location + filedir=os.path.dirname(filename) + # dirname does not return "." 
for a local filename like its shell counterpart + if filedir: + if not os.path.exists(filedir): + try: + os.makedirs (filedir,0777) + except: + raise PLCPermissionDenied("Could not create dir {}".format(filedir)) + + return filename + + def build_command(self, nodename, node_type, build_sh_spec, node_image, type, floppy_file): + """ + returns a tuple + (*) build command to be run + (*) location of the log_file + """ + + command = "" + + # regular node, make build's arguments + # and build the full command line to be called + if node_type not in [ 'regular', 'reservable' ]: + logger.error("GetBootMedium.build_command: unexpected node_type {}".format(node_type)) + return command, None + + build_sh_options="" + if "cramfs" in build_sh_spec: + type += "_cramfs" + if "serial" in build_sh_spec: + build_sh_options += " -s {}".format(build_sh_spec['serial']) + if "variant" in build_sh_spec: + build_sh_options += " -V {}".format(build_sh_spec['variant']) + + for karg in build_sh_spec['kargs']: + build_sh_options += ' -k "{}"'.format(karg) + + import time + date = time.strftime('%Y-%m-%d-%H-%M', time.gmtime()) + if not os.path.isdir(self.LOGDIR): + os.makedirs(self.LOGDIR) + log_file = "{}/{}-{}.log".format(self.LOGDIR, date, nodename) + + command = '{} -f "{}" -o "{}" -t "{}" {} > {} 2>&1'\ + .format(self.BOOTCDBUILD, + floppy_file, + node_image, + type, + build_sh_options, + log_file) + + logger.info("The build command line is {}".format(command)) + + return command, log_file + + def call(self, auth, node_id_or_hostname, action, filename, options = []): + + self.trash=[] + + ### compute file suffix and type + if action.find("-iso") >= 0 : + suffix = ".iso" + type = "iso" + elif action.find("-usb") >= 0: + suffix = ".usb" + type = "usb" + else: + suffix = ".txt" + type = "txt" + + # check for node existence and get node_type + nodes = Nodes(self.api, [node_id_or_hostname]) + if not nodes: + raise PLCInvalidArgument("No such node {}".format(node_id_or_hostname)) + node = 
nodes[0] + + logger.info("GetBootMedium: {} requested on node {}. Node type is: {}"\ + .format(action, node['node_id'], node['node_type'])) + + # check the required action against the node type + node_type = node['node_type'] + if action not in allowed_actions[node_type]: + raise PLCInvalidArgument("Action {} not valid for {} nodes, valid actions are {}"\ + .format(action, node_type, "|".join(allowed_actions[node_type]))) + + # handle / canonicalize options + if type == "txt": + if options: + raise PLCInvalidArgument("Options are not supported for node configs") + else: + # create a dict for build.sh + build_sh_spec={'kargs':[]} + # use node tags as defaults + # check for node tag equivalents + tags = NodeTags(self.api, + {'node_id': node['node_id'], + 'tagname': ['serial', 'cramfs', 'kvariant', 'kargs', + 'no-hangcheck', 'systemd-debug' ]}, + ['tagname', 'value']) + if tags: + for tag in tags: + if tag['tagname'] == 'serial': + build_sh_spec['serial'] = tag['value'] + elif tag['tagname'] == 'cramfs': + build_sh_spec['cramfs'] = True + elif tag['tagname'] == 'kvariant': + build_sh_spec['variant'] = tag['value'] + elif tag['tagname'] == 'kargs': + build_sh_spec['kargs'] += tag['value'].split() + elif tag['tagname'] == 'no-hangcheck': + build_sh_spec['kargs'].append('hcheck_reboot0') + elif tag['tagname'] == 'systemd-debug': + build_sh_spec['kargs'].append('systemd.log_level=debug') + build_sh_spec['kargs'].append('systemd.log_target=console') + # then options can override tags + for option in options: + if option == "cramfs": + build_sh_spec['cramfs']=True + elif option == 'partition': + if type != "usb": + raise PLCInvalidArgument("option 'partition' is for USB images only") + else: + type="usb_partition" + elif option == "serial": + build_sh_spec['serial']='default' + elif option.find("serial:") == 0: + build_sh_spec['serial']=option.replace("serial:","") + elif option.find("variant:") == 0: + build_sh_spec['variant']=option.replace("variant:","") + elif option == 
"no-hangcheck": + build_sh_spec['kargs'].append('hcheck_reboot0') + elif option == "systemd-debug": + build_sh_spec['kargs'].append('systemd.log_level=debug') + else: + raise PLCInvalidArgument("unknown option {}".format(option)) + + # compute nodename according the action + if action.find("node-") == 0: + nodename = node['hostname'] + else: + node = None + # compute a 8 bytes random number + tempbytes = random.sample (xrange(0,256), 8); + def hexa2 (c): return chr((c>>4)+65) + chr ((c&16)+65) + nodename = "".join(map(hexa2,tempbytes)) + + # get nodefamily + (pldistro,fcdistro,arch) = self.get_nodefamily(node, auth) + self.nodefamily="{}-{}-{}".format(pldistro, fcdistro, arch) + + # apply on globals + for attr in [ "BOOTCDDIR", "BOOTCDBUILD", "GENERICDIR" ]: + setattr(self,attr,getattr(self,attr).replace("@NODEFAMILY@",self.nodefamily)) + + filename = self.handle_filename(filename, nodename, suffix, arch) + + # log call + if node: + self.message='GetBootMedium on node {} - action={}'.format(nodename, action) + self.event_objects={'Node': [ node ['node_id'] ]} + else: + self.message='GetBootMedium - generic - action={}'.format(action) + + ### generic media + if action == 'generic-iso' or action == 'generic-usb': + if options: + raise PLCInvalidArgument("Options are not supported for generic images") + # this raises an exception if bootcd is missing + version = self.bootcd_version() + generic_name = "{}-BootCD-{}{}".format(self.api.config.PLC_NAME, version, suffix) + generic_path = "{}/{}".format(self.GENERICDIR, generic_name) + + if filename: + ret=os.system ('cp "{}" "{}"'.format(generic_path, filename)) + if ret==0: + return filename + else: + raise PLCPermissionDenied("Could not copy {} into {}"\ + .format(generic_path, filename)) + else: + ### return the generic medium content as-is, just base64 encoded + return base64.b64encode(file(generic_path).read()) + + ### config file preview or regenerated + if action == 'node-preview' or action == 'node-floppy': + 
renew_key = (action == 'node-floppy') + floppy = self.floppy_contents (node,renew_key) + if filename: + try: + file(filename,'w').write(floppy) + except: + raise PLCPermissionDenied("Could not write into {}".format(filename)) + return filename + else: + return floppy + + ### we're left with node-iso and node-usb + # the steps involved in the image creation are: + # - create and test the working environment + # - generate the configuration file + # - build and invoke the build command + # - delivery the resulting image file + + if action == 'node-iso' or action == 'node-usb': + + ### check we've got required material + version = self.bootcd_version() + + if not os.path.isfile(self.BOOTCDBUILD): + raise PLCAPIError("Cannot locate bootcd/build.sh script {}".format(self.BOOTCDBUILD)) + + # create the workdir if needed + if not os.path.isdir(self.WORKDIR): + try: + os.makedirs(self.WORKDIR,0777) + os.chmod(self.WORKDIR,0777) + except: + raise PLCPermissionDenied("Could not create dir {}".format(self.WORKDIR)) + + try: + # generate floppy config + floppy_text = self.floppy_contents(node, True) + # store it + floppy_file = "{}/{}.txt".format(self.WORKDIR, nodename) + try: + file(floppy_file,"w").write(floppy_text) + except: + raise PLCPermissionDenied("Could not write into {}".format(floppy_file)) + + self.trash.append(floppy_file) + + node_image = "{}/{}{}".format(self.WORKDIR, nodename, suffix) + + command, log_file = self.build_command(nodename, node_type, build_sh_spec, + node_image, type, floppy_file) + + # invoke the image build script + if command != "": + ret = os.system(command) + + if ret != 0: + raise PLCAPIError("{} failed Command line was: {} See logs in {}"\ + .format(self.BOOTCDBUILD, command, log_file)) + + if not os.path.isfile (node_image): + raise PLCAPIError("Unexpected location of build.sh output - {}".format(node_image)) + + # handle result + if filename: + ret = os.system('mv "{}" "{}"'.format(node_image, filename)) + if ret != 0: + 
self.trash.append(node_image) + self.cleantrash() + raise PLCAPIError("Could not move node image {} into {}"\ + .format(node_image, filename)) + self.cleantrash() + return filename + else: + result = file(node_image).read() + self.trash.append(node_image) + self.cleantrash() + logger.info("GetBootMedium - done with build.sh") + encoded_result = base64.b64encode(result) + logger.info("GetBootMedium - done with base64 encoding - lengths: raw={} - b64={}" + .format(len(result), len(encoded_result))) + return encoded_result + except: + self.cleantrash() + raise + + # we're done here, or we missed something + raise PLCAPIError('Unhandled action {}'.format(action)) diff --git a/PLC/Methods/GetBootStates.py b/PLC/Methods/GetBootStates.py new file mode 100644 index 0000000..35537be --- /dev/null +++ b/PLC/Methods/GetBootStates.py @@ -0,0 +1,22 @@ +from PLC.Faults import * +from PLC.Method import Method +from PLC.Parameter import Parameter, Mixed +from PLC.BootStates import BootState, BootStates +from PLC.Auth import Auth + +class GetBootStates(Method): + """ + Returns an array of all valid node boot states. + """ + + roles = ['admin', 'pi', 'user', 'tech', 'node'] + + accepts = [ + Auth() + ] + + returns = [BootState.fields['boot_state']] + + + def call(self, auth): + return [boot_state['boot_state'] for boot_state in BootStates(self.api)] diff --git a/PLC/Methods/GetConfFiles.py b/PLC/Methods/GetConfFiles.py new file mode 100644 index 0000000..89d5250 --- /dev/null +++ b/PLC/Methods/GetConfFiles.py @@ -0,0 +1,31 @@ +from PLC.Faults import * +from PLC.Method import Method +from PLC.Parameter import Parameter, Mixed +from PLC.Filter import Filter +from PLC.ConfFiles import ConfFile, ConfFiles +from PLC.Auth import Auth + +class GetConfFiles(Method): + """ + Returns an array of structs containing details about configuration + files. 
# ---- PLC/Methods/GetEventObjects.py ----
from PLC.Faults import *
from PLC.Method import Method
from PLC.Parameter import Parameter, Mixed
from PLC.Filter import Filter
from PLC.EventObjects import EventObject, EventObjects
from PLC.Auth import Auth

class GetEventObjects(Method):
    """
    Returns an array of structs containing details about the objects
    that events acted upon.  If event_object_filter is specified and
    is a struct of event object attributes, only event objects
    matching the filter will be returned.  If return_fields is
    specified, only the specified details will be returned.
    """

    roles = ['admin']

    accepts = [
        Auth(),
        # unlike GetEvents, only a filter struct is accepted here -
        # there is no plain list-of-identifiers form
        Mixed(Filter(EventObject.fields)),
        Parameter([str], "List of fields to return", nullok = True)
        ]

    returns = [EventObject.fields]

    def call(self, auth, event_filter = None, return_fields = None):
        # filtering and field selection are delegated to the table class
        return EventObjects(self.api, event_filter, return_fields)


# ---- PLC/Methods/GetEvents.py ----
from PLC.Events import Event, Events

class GetEvents(Method):
    """
    Returns an array of structs containing details about events and
    faults.  If event_filter is specified and is an array of event
    identifiers, or a struct of event attributes, only events matching
    the filter will be returned.  If return_fields is specified, only
    the specified details will be returned.
    """

    roles = ['admin']

    accepts = [
        Auth(),
        # either a list of event ids, or a filter struct
        Mixed([Event.fields['event_id']],
              Filter(Event.fields)),
        Parameter([str], "List of fields to return", nullok = True)
        ]

    returns = [Event.fields]

    def call(self, auth, event_filter = None, return_fields = None):
        return Events(self.api, event_filter, return_fields)
+ + If ilink_filter is specified and is an array of + ilink identifiers, only ilinks matching + the filter will be returned. If return_fields is specified, only + the specified details will be returned. + """ + + roles = ['admin', 'pi', 'user', 'node'] + + accepts = [ + Auth(), + Mixed([Ilink.fields['ilink_id']], + Parameter(int,"ilink id"), + Filter(Ilink.fields)), + Parameter([str], "List of fields to return", nullok = True) + ] + + returns = [Ilink.fields] + + + def call(self, auth, ilink_filter = None, return_fields = None): + + ilinks = Ilinks(self.api, ilink_filter, return_fields) + + return ilinks diff --git a/PLC/Methods/GetInitScripts.py b/PLC/Methods/GetInitScripts.py new file mode 100644 index 0000000..5a3bf1c --- /dev/null +++ b/PLC/Methods/GetInitScripts.py @@ -0,0 +1,31 @@ +from PLC.Faults import * +from PLC.Method import Method +from PLC.Parameter import Parameter, Mixed +from PLC.Filter import Filter +from PLC.InitScripts import InitScript, InitScripts +from PLC.Auth import Auth + +class GetInitScripts(Method): + """ + Returns an array of structs containing details about initscripts. + If initscript_filter is specified and is an array of initscript + identifiers, or a struct of initscript attributes, only initscripts + matching the filter will be returned. If return_fields is specified, + only the specified details will be returned. 
# ---- PLC/Methods/GetInitScripts.py ----
from PLC.Faults import *
from PLC.Method import Method
from PLC.Parameter import Parameter, Mixed
from PLC.Filter import Filter
from PLC.InitScripts import InitScript, InitScripts
from PLC.Auth import Auth

class GetInitScripts(Method):
    """
    Returns an array of structs containing details about initscripts.
    If initscript_filter is specified and is an array of initscript
    identifiers, or a struct of initscript attributes, only initscripts
    matching the filter will be returned. If return_fields is specified,
    only the specified details will be returned.
    """

    roles = ['admin', 'pi', 'user', 'tech', 'node']

    accepts = [
        Auth(),
        # either a list of ids/names, or a filter struct
        Mixed([Mixed(InitScript.fields['initscript_id'],
                     InitScript.fields['name'])],
              Filter(InitScript.fields)),
        Parameter([str], "List of fields to return", nullok = True)
        ]

    returns = [InitScript.fields]

    def call(self, auth, initscript_filter = None, return_fields = None):
        # no access restriction: hand straight off to the table class
        return InitScripts(self.api, initscript_filter, return_fields)


# ---- PLC/Methods/GetInterfaceTags.py ----
#
# Thierry Parmentelat - INRIA
#
from PLC.InterfaceTags import InterfaceTag, InterfaceTags
from PLC.Sites import Site, Sites
from PLC.Interfaces import Interface, Interfaces

class GetInterfaceTags(Method):
    """
    Returns an array of structs containing details about
    interfaces and related settings.

    If interface_tag_filter is specified and is an array of
    interface setting identifiers, only interface settings matching
    the filter will be returned. If return_fields is specified, only
    the specified details will be returned.
    """

    roles = ['admin', 'pi', 'user', 'tech', 'node']

    accepts = [
        Auth(),
        # a list of tag ids, a single id, or a filter struct
        Mixed([InterfaceTag.fields['interface_tag_id']],
              Parameter(int, "Interface setting id"),
              Filter(InterfaceTag.fields)),
        Parameter([str], "List of fields to return", nullok = True)
        ]

    returns = [InterfaceTag.fields]

    def call(self, auth, interface_tag_filter = None, return_fields = None):
        # no post-processing needed - return the table query directly
        return InterfaceTags(self.api, interface_tag_filter, return_fields)
+ """ + + roles = ['admin', 'pi', 'user', 'tech', 'node', 'anonymous'] + + accepts = [ + Auth(), + Mixed([Mixed(Interface.fields['interface_id'], + Interface.fields['ip'])], + Parameter (int, "interface id"), + Parameter (str, "ip address"), + Filter(Interface.fields)), + Parameter([str], "List of fields to return", nullok = True) + ] + + returns = [Interface.fields] + + def call(self, auth, interface_filter = None, return_fields = None): + return Interfaces(self.api, interface_filter, return_fields) diff --git a/PLC/Methods/GetKeyTypes.py b/PLC/Methods/GetKeyTypes.py new file mode 100644 index 0000000..32bb658 --- /dev/null +++ b/PLC/Methods/GetKeyTypes.py @@ -0,0 +1,22 @@ +from PLC.Faults import * +from PLC.Method import Method +from PLC.Parameter import Parameter, Mixed +from PLC.KeyTypes import KeyType, KeyTypes +from PLC.Auth import Auth + +class GetKeyTypes(Method): + """ + Returns an array of all valid key types. + """ + + roles = ['admin', 'pi', 'user', 'tech', 'node'] + + accepts = [ + Auth() + ] + + returns = [KeyType.fields['key_type']] + + + def call(self, auth): + return [key_type['key_type'] for key_type in KeyTypes(self.api)] diff --git a/PLC/Methods/GetKeys.py b/PLC/Methods/GetKeys.py new file mode 100644 index 0000000..70b3a4d --- /dev/null +++ b/PLC/Methods/GetKeys.py @@ -0,0 +1,41 @@ +from PLC.Faults import * +from PLC.Method import Method +from PLC.Parameter import Parameter, Mixed +from PLC.Filter import Filter +from PLC.Persons import Person, Persons +from PLC.Keys import Key, Keys +from PLC.Auth import Auth + +class GetKeys(Method): + """ + Returns an array of structs containing details about keys. If + key_filter is specified and is an array of key identifiers, or a + struct of key attributes, only keys matching the filter will be + returned. If return_fields is specified, only the specified + details will be returned. + + Admin may query all keys. Non-admins may only query their own + keys. 
# ---- PLC/Methods/GetKeys.py ----
from PLC.Faults import *
from PLC.Method import Method
from PLC.Parameter import Parameter, Mixed
from PLC.Filter import Filter
from PLC.Persons import Person, Persons
from PLC.Keys import Key, Keys
from PLC.Auth import Auth

class GetKeys(Method):
    """
    Returns an array of structs containing details about keys. If
    key_filter is specified and is an array of key identifiers, or a
    struct of key attributes, only keys matching the filter will be
    returned. If return_fields is specified, only the specified
    details will be returned.

    Admin may query all keys. Non-admins may only query their own
    keys.
    """

    roles = ['admin', 'pi', 'user', 'tech', 'node']

    accepts = [
        Auth(),
        Mixed([Mixed(Key.fields['key_id'])],
              Filter(Key.fields)),
        Parameter([str], "List of fields to return", nullok = True)
        ]

    returns = [Key.fields]

    def call(self, auth, key_filter = None, return_fields = None):
        keys = Keys(self.api, key_filter, return_fields)

        # non-admin persons may only see their own keys
        if isinstance(self.caller, Person) and \
           'admin' not in self.caller['roles']:
            own_key_ids = set(self.caller['key_ids'])
            keys = [key for key in keys if key['key_id'] in own_key_ids]

        return keys


# ---- PLC/Methods/GetLeaseGranularity.py ----

class GetLeaseGranularity(Method):
    """
    Returns the granularity in seconds for the reservation system
    """

    roles = ['admin', 'pi', 'user', 'tech', 'node', 'anonymous']

    accepts = [
        Auth(),
        ]

    returns = Parameter (int, "the granularity in seconds for the reservation system")

    def call(self, auth):
        # straight out of the myplc configuration
        return self.api.config.PLC_RESERVATION_GRANULARITY


# ---- PLC/Methods/GetLeases.py ----
# Thierry Parmentelat -- INRIA
from PLC.Leases import Lease, Leases, LeaseFilter

class GetLeases(Method):
    """
    Returns an array of structs containing details about leases. If
    lease_filter is specified and is an array of lease identifiers or
    lease names, or a struct of lease attributes, only leases matching
    the filter will be returned. If return_fields is specified, only the
    specified details will be returned.

    All leases are exposed to all users.

    In addition to the usual filter capabilities, the following are supported:
    * GetLeases ({ 'alive' : '2010-02-20 20:00' , })
      returns the leases that are active at that point in time
    * GetLeases ({ 'alive' : ('2010-02-20 20:00' , '2010-02-20 21:00' ) , ... })
      ditto for a time range

    This is implemented in the LeaseFilter class; negation actually is supported
    through the usual '~alive' form, although maybe not really useful.
    """

    roles = ['admin', 'pi', 'user', 'node', 'anonymous']

    accepts = [
        Auth(),
        Mixed(Lease.fields['lease_id'],
              [Lease.fields['lease_id']],
              LeaseFilter(Lease.fields)),
        Parameter([str], "List of fields to return", nullok = True)
        ]

    returns = [Lease.fields]

    def call(self, auth, lease_filter = None, return_fields = None):
        # lease_id must always be queried; remember whether the caller
        # asked for it so it can be stripped off again before returning
        strip_lease_id = (return_fields is not None
                          and 'lease_id' not in return_fields)
        if strip_lease_id:
            return_fields.append('lease_id')

        leases = Leases(self.api, lease_filter, return_fields)

        if strip_lease_id:
            for lease in leases:
                if 'lease_id' in lease:
                    del lease['lease_id']

        return leases
If message template_filter is specified and is an array + of message template identifiers, or a struct of message template + attributes, only message templates matching the filter will be + returned. If return_fields is specified, only the specified + details will be returned. + """ + + roles = ['admin', 'node'] + + accepts = [ + Auth(), + Mixed([Message.fields['message_id']], + Filter(Message.fields)), + Parameter([str], "List of fields to return", nullok = True), + ] + + returns = [Message.fields] + + + def call(self, auth, message_filter = None, return_fields = None): + return Messages(self.api, message_filter, return_fields) diff --git a/PLC/Methods/GetNetworkMethods.py b/PLC/Methods/GetNetworkMethods.py new file mode 100644 index 0000000..cee914a --- /dev/null +++ b/PLC/Methods/GetNetworkMethods.py @@ -0,0 +1,22 @@ +from PLC.Faults import * +from PLC.Method import Method +from PLC.Parameter import Parameter, Mixed +from PLC.NetworkMethods import NetworkMethod, NetworkMethods +from PLC.Auth import Auth + +class GetNetworkMethods(Method): + """ + Returns a list of all valid network methods. + """ + + roles = ['admin', 'pi', 'user', 'tech', 'node'] + + accepts = [ + Auth() + ] + + returns = [NetworkMethod.fields['method']] + + + def call(self, auth): + return [network_method['method'] for network_method in NetworkMethods(self.api)] diff --git a/PLC/Methods/GetNetworkTypes.py b/PLC/Methods/GetNetworkTypes.py new file mode 100644 index 0000000..dbddd9f --- /dev/null +++ b/PLC/Methods/GetNetworkTypes.py @@ -0,0 +1,22 @@ +from PLC.Faults import * +from PLC.Method import Method +from PLC.Parameter import Parameter, Mixed +from PLC.NetworkTypes import NetworkType, NetworkTypes +from PLC.Auth import Auth + +class GetNetworkTypes(Method): + """ + Returns a list of all valid network types. 
# ---- PLC/Methods/GetNetworkTypes.py ----
from PLC.Faults import *
from PLC.Method import Method
from PLC.Parameter import Parameter, Mixed
from PLC.NetworkTypes import NetworkType, NetworkTypes
from PLC.Auth import Auth

class GetNetworkTypes(Method):
    """
    Returns a list of all valid network types.
    """

    roles = ['admin', 'pi', 'user', 'tech', 'node']

    accepts = [
        Auth()
        ]

    returns = [NetworkType.fields['type']]

    def call(self, auth):
        return [network_type['type'] for network_type in NetworkTypes(self.api)]


# ---- PLC/Methods/GetNodeFlavour.py ----
import traceback

from PLC.Logger import logger
from PLC.Nodes import Node, Nodes

from PLC.Accessors.Accessors_standard import *          # import node accessors

class GetNodeFlavour(Method):
    """
    Returns detailed information on a given node's flavour, i.e. its
    base installation.

    This depends on the global PLC settings in the PLC_FLAVOUR area,
    optionally overridden by any of the following tags if set on that node:

    'arch', 'pldistro', 'fcdistro',
    'deployment', 'extensions', 'virt',
    """

    roles = ['admin', 'user', 'node']

    accepts = [
        Auth(),
        Mixed(Node.fields['node_id'],
              Node.fields['hostname']),
        ]

    # NOTE: call() additionally returns 'arch', 'pldistro' and 'virt' keys
    returns = {
        'nodefamily' : Parameter (str, "the nodefamily this node should be based upon"),
        'fcdistro': Parameter (str, "the fcdistro this node should be based upon"),
        'extensions' : [ Parameter (str, "extensions to add to the base install") ],
        'plain' : Parameter (bool, "use plain bootstrapfs image if set (for tests)" ) ,
        }

    ########## nodefamily
    def nodefamily (self, auth, node_id, fcdistro, pldistro, arch):
        # the deployment tag, if set, wins
        # xxx Thierry: this probably is wrong; we need fcdistro to be set anyway
        # for generating the proper yum config....
        deployment = GetNodeDeployment (self.api,self.caller).call(auth,node_id)
        if deployment:
            return deployment

        # xxx would make sense to check the corresponding bootstrapfs is available
        return "%s-%s-%s"%(pldistro,fcdistro,arch)

    ##########
    # parse PLC_FLAVOUR_VIRT_MAP, a ';'-separated list of
    # '<fcdistro>[,<fcdistro>...]:<virt>' assignments
    known_virts=['vs','lxc']
    default_virt='vs'

    def virt_from_virt_map (self, node_fcdistro):
        # renamed from 'map' so as not to shadow the builtin
        virt_map={}
        try:
            assigns=[x.strip() for x in self.api.config.PLC_FLAVOUR_VIRT_MAP.split(';')]
            for assign in assigns:
                (left,right)=[x.strip() for x in assign.split(':')]
                if right not in GetNodeFlavour.known_virts:
                    logger.error("GetNodeFlavour, unknown 'virt' %s - ignored" % right)
                    continue
                for fcdistro in [ x.strip() for x in left.split(',')]:
                    virt_map[fcdistro]=right
        # was a bare 'except:' - keep the best-effort semantics but let
        # SystemExit/KeyboardInterrupt through
        except Exception:
            logger.exception("GetNodeFlavour, issue with parsing PLC_FLAVOUR_VIRT_MAP=%s - returning '%s'"%\
                             (self.api.config.PLC_FLAVOUR_VIRT_MAP, GetNodeFlavour.default_virt))
            return GetNodeFlavour.default_virt
        if node_fcdistro in virt_map:
            return virt_map[node_fcdistro]
        if 'default' in virt_map:
            return virt_map['default']
        return GetNodeFlavour.default_virt

    def extensions (self, auth, node_id, fcdistro, arch):
        # a node without the 'extensions' tag simply has none
        try:
            return [ "%s-%s-%s"%(e,fcdistro,arch) for e in GetNodeExtensions(self.api,self.caller).call(auth,node_id).split() ]
        except Exception:
            return []

    def plain (self, auth, node_id):
        # 'not not x' replaced with the clearer bool(x)
        return bool(GetNodePlainBootstrapfs(self.api,self.caller).call(auth,node_id))

    def call(self, auth, node_id_or_name):
        # Get node information
        nodes = Nodes(self.api, [node_id_or_name])
        if not nodes:
            # call form works on both python 2 and 3, unlike 'raise X, msg'
            raise PLCInvalidArgument("No such node %r"%node_id_or_name)
        node = nodes[0]
        node_id = node['node_id']

        arch = GetNodeArch (self.api,self.caller).call(auth,node_id)
        # if not set, use the global default and tag the node, in case the global default changes later on
        if not arch:
            arch = self.api.config.PLC_FLAVOUR_NODE_ARCH
            SetNodeArch (self.api,self.caller).call(auth,node_id,arch)

        fcdistro = GetNodeFcdistro (self.api,self.caller).call(auth, node_id)
        if not fcdistro:
            fcdistro = self.api.config.PLC_FLAVOUR_NODE_FCDISTRO
            SetNodeFcdistro (self.api,self.caller).call (auth, node_id, fcdistro)

        pldistro = GetNodePldistro (self.api,self.caller).call(auth, node_id)
        if not pldistro:
            pldistro = self.api.config.PLC_FLAVOUR_NODE_PLDISTRO
            SetNodePldistro(self.api,self.caller).call(auth,node_id,pldistro)

        virt = GetNodeVirt (self.api,self.caller).call(auth, node_id)
        if not virt:
            virt = self.virt_from_virt_map (fcdistro)
            # do not save in node - if a node was e.g. f14 and it gets set to f16
            # we do not want to have to re-set virt
            # SetNodeVirt (self.api, self.caller).call (auth, node_id, virt)

        # xxx could use some sanity checking, and could provide fallbacks
        return {
            'arch' : arch,
            'fcdistro' : fcdistro,
            'pldistro' : pldistro,
            'virt' : virt,
            'nodefamily': self.nodefamily(auth,node_id, fcdistro, pldistro, arch),
            'extensions': self.extensions(auth,node_id, fcdistro, arch),
            'plain' : self.plain(auth,node_id),
            }
+ """ + + roles = ['admin', 'pi', 'user', 'tech', 'node', 'anonymous'] + + accepts = [ + Auth(), + Mixed([Mixed(NodeGroup.fields['nodegroup_id'], + NodeGroup.fields['groupname'])], + Filter(NodeGroup.fields)), + Parameter([str], "List of fields to return", nullok = True) + ] + + returns = [NodeGroup.fields] + + def call(self, auth, nodegroup_filter = None, return_fields = None): + return NodeGroups(self.api, nodegroup_filter, return_fields) diff --git a/PLC/Methods/GetNodeTags.py b/PLC/Methods/GetNodeTags.py new file mode 100644 index 0000000..3ae9fd6 --- /dev/null +++ b/PLC/Methods/GetNodeTags.py @@ -0,0 +1,42 @@ +# +# Thierry Parmentelat - INRIA +# +from PLC.Faults import * +from PLC.Method import Method +from PLC.Parameter import Parameter, Mixed +from PLC.Filter import Filter +from PLC.Auth import Auth + +from PLC.NodeTags import NodeTag, NodeTags +from PLC.Sites import Site, Sites +from PLC.Nodes import Node, Nodes + +class GetNodeTags(Method): + """ + Returns an array of structs containing details about + nodes and related tags. + + If node_tag_filter is specified and is an array of + node tag identifiers, only node tags matching + the filter will be returned. If return_fields is specified, only + the specified details will be returned. 
+ """ + + roles = ['admin', 'pi', 'user', 'tech', 'node'] + + accepts = [ + Auth(), + Mixed([NodeTag.fields['node_tag_id']], + Parameter(int,"Node tag id"), + Filter(NodeTag.fields)), + Parameter([str], "List of fields to return", nullok = True) + ] + + returns = [NodeTag.fields] + + + def call(self, auth, node_tag_filter = None, return_fields = None): + + node_tags = NodeTags(self.api, node_tag_filter, return_fields) + + return node_tags diff --git a/PLC/Methods/GetNodeTypes.py b/PLC/Methods/GetNodeTypes.py new file mode 100644 index 0000000..1810343 --- /dev/null +++ b/PLC/Methods/GetNodeTypes.py @@ -0,0 +1,22 @@ +from PLC.Faults import * +from PLC.Method import Method +from PLC.Parameter import Parameter, Mixed +from PLC.NodeTypes import NodeType, NodeTypes +from PLC.Auth import Auth + +class GetNodeTypes(Method): + """ + Returns an array of all valid node node types. + """ + + roles = ['admin', 'pi', 'user', 'tech', 'node'] + + accepts = [ + Auth() + ] + + returns = [NodeType.fields['node_type']] + + + def call(self, auth): + return [node_type['node_type'] for node_type in NodeTypes(self.api)] diff --git a/PLC/Methods/GetNodes.py b/PLC/Methods/GetNodes.py new file mode 100644 index 0000000..99669e4 --- /dev/null +++ b/PLC/Methods/GetNodes.py @@ -0,0 +1,89 @@ +from PLC.Faults import * +from PLC.Method import Method +from PLC.Parameter import Parameter, Mixed +from PLC.Filter import Filter +from PLC.Nodes import Node, Nodes +from PLC.Persons import Person, Persons +from PLC.Auth import Auth + +admin_only = ['key', 'session', 'boot_nonce' ] + +class GetNodes(Method): + """ + Returns an array of structs containing details about nodes. If + node_filter is specified and is an array of node identifiers or + hostnames, or a struct of node attributes, only nodes matching the + filter will be returned. + + If return_fields is specified, only the specified details will be + returned. 
# ---- PLC/Methods/GetNodes.py ----
from PLC.Faults import *
from PLC.Method import Method
from PLC.Parameter import Parameter, Mixed
from PLC.Filter import Filter
from PLC.Nodes import Node, Nodes
from PLC.Persons import Person, Persons
from PLC.Auth import Auth

# fields stripped from the result for non-admin callers
admin_only = ['key', 'session', 'boot_nonce' ]

class GetNodes(Method):
    """
    Returns an array of structs containing details about nodes. If
    node_filter is specified and is an array of node identifiers or
    hostnames, or a struct of node attributes, only nodes matching the
    filter will be returned.

    If return_fields is specified, only the specified details will be
    returned. NOTE that if return_fields is unspecified, the complete
    set of native fields are returned, which DOES NOT include tags at
    this time.

    Some fields may only be viewed by admins.
    """

    roles = ['admin', 'pi', 'user', 'tech', 'node', 'anonymous']

    accepts = [
        Auth(),
        Mixed([Mixed(Node.fields['node_id'],
                     Node.fields['hostname'])],
              Parameter(str,"hostname"),
              Parameter(int,"node_id"),
              Filter(Node.fields)),
        Parameter([str], "List of fields to return", nullok = True),
        ]

    returns = [Node.fields]

    def call(self, auth, node_filter = None, return_fields = None):
        # slice_ids_whitelist and site_id are needed for the visibility
        # checks below; query them even when not asked for, and remember
        # what was added so it can be stripped off before returning
        if return_fields is not None:
            added_fields = set(['slice_ids_whitelist', 'site_id']).difference(return_fields)
            return_fields += added_fields
        else:
            added_fields = []

        # Get node information
        nodes = Nodes(self.api, node_filter, return_fields)

        is_admin = isinstance(self.caller, Person) and \
                   'admin' in self.caller['roles']

        if not is_admin:
            slice_ids = set()
            site_ids = set()

            if self.caller:
                slice_ids.update(self.caller['slice_ids'])
                if isinstance(self.caller, Node):
                    site_ids.add(self.caller['site_id'])
                else:
                    site_ids.update(self.caller['site_ids'])

            # a node carrying a whitelist is only visible to callers at
            # the same site, or with a slice on that whitelist
            for node in nodes[:]:
                if 'site_id' in node and node['site_id'] in site_ids:
                    continue
                if 'slice_ids_whitelist' in node and \
                   node['slice_ids_whitelist'] and \
                   slice_ids.isdisjoint(node['slice_ids_whitelist']):
                    nodes.remove(node)

            # strip fields that only admins may see
            for node in nodes:
                for field in admin_only:
                    if field in node:
                        del node[field]

        # strip the fields that were force-queried above
        if added_fields:
            for node in nodes:
                for field in added_fields:
                    del node[field]

        return nodes


# ---- PLC/Methods/GetPCUProtocolTypes.py ----
from PLC.PCUProtocolTypes import PCUProtocolType, PCUProtocolTypes

class GetPCUProtocolTypes(Method):
    """
    Returns an array of PCU protocol types.
    """

    roles = ['admin', 'pi', 'user', 'tech', 'node']

    accepts = [
        Auth(),
        Mixed([PCUProtocolType.fields['pcu_type_id']],
              Filter(PCUProtocolType.fields)),
        Parameter([str], "List of fields to return", nullok = True)
        ]

    returns = [PCUProtocolType.fields]

    def call(self, auth, protocol_type_filter = None, return_fields = None):
        # pcu_protocol_type_id must always be queried; remember whether
        # it was added so it can be stripped from the result
        strip_id = (return_fields is not None
                    and 'pcu_protocol_type_id' not in return_fields)
        if strip_id:
            return_fields.append('pcu_protocol_type_id')

        protocol_types = PCUProtocolTypes(self.api, protocol_type_filter, return_fields)

        if strip_id:
            for protocol_type in protocol_types:
                del protocol_type['pcu_protocol_type_id']

        return protocol_types
+ """ + + roles = ['admin', 'pi', 'user', 'tech', 'node'] + + accepts = [ + Auth(), + Mixed([Mixed(PCUType.fields['pcu_type_id'], + PCUType.fields['model'])], + Parameter(str, 'model'), + Parameter(int, 'node_id'), + Filter(PCUType.fields)), + Parameter([str], "List of fields to return", nullok = True) + ] + + returns = [PCUType.fields] + + + def call(self, auth, pcu_type_filter = None, return_fields = None): + + #Must query at least pcu_type_id + if return_fields is not None: + added_fields = [] + if 'pcu_type_id' not in return_fields: + return_fields.append('pcu_type_id') + added_fields.append('pcu_type_id') + if 'pcu_protocol_types' in return_fields and \ + 'pcu_protocol_type_ids' not in return_fields: + return_fields.append('pcu_protocol_type_ids') + added_fields.append('pcu_protocol_type_ids') + else: + added_fields = [] + + pcu_types = PCUTypes(self.api, pcu_type_filter, return_fields) + + # remove added fields and protocol_types + for added_field in added_fields: + for pcu_type in pcu_types: + del pcu_type[added_field] + + return pcu_types diff --git a/PLC/Methods/GetPCUs.py b/PLC/Methods/GetPCUs.py new file mode 100644 index 0000000..8b3b91f --- /dev/null +++ b/PLC/Methods/GetPCUs.py @@ -0,0 +1,73 @@ +from PLC.Faults import * +from PLC.Method import Method +from PLC.Parameter import Parameter, Mixed +from PLC.Filter import Filter +from PLC.Sites import Site, Sites +from PLC.Persons import Person, Persons +from PLC.Nodes import Node, Nodes +from PLC.PCUs import PCU, PCUs +from PLC.Auth import Auth + +class GetPCUs(Method): + """ + Returns an array of structs containing details about power control + units (PCUs). If pcu_filter is specified and is an array of PCU + identifiers, or a struct of PCU attributes, only PCUs matching the + filter will be returned. If return_fields is specified, only the + specified details will be returned. + + Admin may query all PCUs. Non-admins may only query the PCUs at + their sites. 
+ """ + + roles = ['admin', 'pi', 'tech', 'node'] + + accepts = [ + Auth(), + Mixed([PCU.fields['pcu_id']], + Filter(PCU.fields)), + Parameter([str], "List of fields to return", nullok = True) + ] + + returns = [PCU.fields] + + def call(self, auth, pcu_filter = None, return_fields = None): + # If we are not admin + if not (isinstance(self.caller, Person) and 'admin' in self.caller['roles']): + # Return only the PCUs at our site + valid_pcu_ids = [] + + if isinstance(self.caller, Person): + site_ids = self.caller['site_ids'] + elif isinstance(self.caller, Node): + site_ids = [self.caller['site_id']] + + for site in Sites(self.api, site_ids): + valid_pcu_ids += site['pcu_ids'] + + if not valid_pcu_ids: + return [] + + if pcu_filter is None: + pcu_filter = valid_pcu_ids + + # Must query at least slice_id (see below) + if return_fields is not None and 'pcu_id' not in return_fields: + return_fields.append('pcu_id') + added_fields = True + else: + added_fields = False + + pcus = PCUs(self.api, pcu_filter, return_fields) + + # Filter out PCUs that are not viewable + if not (isinstance(self.caller, Person) and 'admin' in self.caller['roles']): + pcus = filter(lambda pcu: pcu['pcu_id'] in valid_pcu_ids, pcus) + + # Remove pcu_id if not specified + if added_fields: + for pcu in pcus: + if 'pcu_id' in pcu: + del pcu['pcu_id'] + + return pcus diff --git a/PLC/Methods/GetPeerData.py b/PLC/Methods/GetPeerData.py new file mode 100644 index 0000000..86193d0 --- /dev/null +++ b/PLC/Methods/GetPeerData.py @@ -0,0 +1,120 @@ +# +# Thierry Parmentelat - INRIA +# + +import time + +from PLC.Faults import * +from PLC.Method import Method +from PLC.Parameter import Parameter, Mixed +from PLC.Auth import Auth + +from PLC.Peers import Peer, Peers + +from PLC.Sites import Site, Sites +from PLC.Keys import Key, Keys +from PLC.Nodes import Node, Nodes +from PLC.Persons import Person, Persons +from PLC.Slices import Slice, Slices +from PLC.SliceTags import SliceTags + +class GetPeerData(Method): 
+ """ + Returns lists of local objects that a peer should cache in its + database as foreign objects. Also returns the list of foreign + nodes in this database, for which the calling peer is + authoritative, to assist in synchronization of slivers. + + See the implementation of RefreshPeer for how this data is used. + """ + + roles = ['admin', 'peer'] + + accepts = [Auth()] + + returns = { + 'Sites': Parameter([dict], "List of local sites"), + 'Keys': Parameter([dict], "List of local keys"), + 'Nodes': Parameter([dict], "List of local nodes"), + 'Persons': Parameter([dict], "List of local users"), + 'Slices': Parameter([dict], "List of local slices"), + 'db_time': Parameter(float, "(Debug) Database fetch time"), + } + + def call (self, auth): + start = time.time() + + # Filter out various secrets + node_fields = [ field for field in Node.fields if field \ + not in ['boot_nonce', 'key', 'session', 'root_person_ids']] + try: + node_fields += ['hrn'] + nodes = Nodes(self.api, {'peer_id': None}, node_fields) + except: + nodes = Nodes(self.api, {'peer_id': None}, node_fields) + # filter out whitelisted nodes + nodes = [ n for n in nodes if not n['slice_ids_whitelist']] + + + person_fields = [ field for field in Person.fields if field \ + not in ['password', 'verification_key', 'verification_expires']] + + site_fields = [field for field in Site.fields] + slice_fields = [field for field in Slice.fields] + + try: + person_fields += ['sfa_created','hrn'] + site_fields += ['sfa_created','hrn'] + slice_fields += ['sfa_created','hrn'] + + # XXX Optimize to return only those Persons, Keys, and Slices + # necessary for slice creation on the calling peer's nodes. 
+ + # filter out special person + + persons = Persons(self.api, {'~email':[self.api.config.PLC_API_MAINTENANCE_USER, self.api.config.PLC_ROOT_USER], 'peer_id': None}, person_fields) + + # filter out system slices + system_slice_ids = SliceTags(self.api, {'name': 'system', 'value': '1'}).dict('slice_id') + slices = Slices(self.api, {'peer_id': None,'~slice_id':system_slice_ids.keys()}, slice_fields) + + sites = Sites(self.api, {'peer_id': None}, site_fields) + + # filter out objects with sfa_created=True + filtered_sites = [site for site in sites if site.get('sfa_created', None) != 'True'] + filtered_slices = [slice for slice in slices if slice.get('sfa_created', None) != 'True'] + filtered_persons = [person for person in persons if person.get('sfa_created', None) != 'True'] + + except: + # handle peers with old version of MyPLC that does not support 'sfa_created' and 'hrn' fields for Site/Slice/Person + + # XXX Optimize to return only those Persons, Keys, and Slices + # necessary for slice creation on the calling peer's nodes. 
+ + # filter out special person + + filtered_persons = Persons(self.api, {'~email':[self.api.config.PLC_API_MAINTENANCE_USER, self.api.config.PLC_ROOT_USER], 'peer_id': None}, person_fields) + + # filter out system slices + system_slice_ids = SliceTags(self.api, {'name': 'system', 'value': '1'}).dict('slice_id') + filtered_slices = Slices(self.api, {'peer_id': None, + '~slice_id':system_slice_ids.keys()}, slice_fields) + + filtered_sites = Sites(self.api, {'peer_id': None}, site_fields) + + + result = { + 'Sites': filtered_sites, + 'Keys': Keys(self.api, {'peer_id': None}), + 'Nodes': nodes, + 'Persons': filtered_persons, + 'Slices': filtered_slices, + } + + + if isinstance(self.caller, Peer): + result['PeerNodes'] = Nodes(self.api, {'peer_id': self.caller['peer_id']}) + + result['db_time'] = time.time() - start + + return result diff --git a/PLC/Methods/GetPeerName.py b/PLC/Methods/GetPeerName.py new file mode 100644 index 0000000..30fbd94 --- /dev/null +++ b/PLC/Methods/GetPeerName.py @@ -0,0 +1,19 @@ +from PLC.Method import Method +from PLC.Parameter import Parameter +from PLC.Auth import Auth + +from PLC.Peers import Peer, Peers + +class GetPeerName (Method): + """ + Returns this peer's name, as defined in the config as PLC_NAME + """ + + roles = ['admin', 'peer', 'node'] + + accepts = [Auth()] + + returns = Peer.fields['peername'] + + def call (self, auth): + return self.api.config.PLC_NAME diff --git a/PLC/Methods/GetPeers.py b/PLC/Methods/GetPeers.py new file mode 100644 index 0000000..dca352a --- /dev/null +++ b/PLC/Methods/GetPeers.py @@ -0,0 +1,47 @@ +# +# Thierry Parmentelat - INRIA +# + +from PLC.Faults import * +from PLC.Method import Method +from PLC.Parameter import Parameter, Mixed +from PLC.Filter import Filter +from PLC.Auth import Auth + +from PLC.Persons import Person +from PLC.Peers import Peer, Peers + +class GetPeers (Method): + """ + Returns an array of structs containing details about peers. 
If + person_filter is specified and is an array of peer identifiers or + peer names, or a struct of peer attributes, only peers matching + the filter will be returned. If return_fields is specified, only the + specified details will be returned. + """ + + roles = ['admin', 'node','pi','user'] + + accepts = [ + Auth(), + Mixed([Mixed(Peer.fields['peer_id'], + Peer.fields['peername'])], + Filter(Peer.fields)), + Parameter([str], "List of fields to return", nullok = True) + ] + + returns = [Peer.fields] + + def call (self, auth, peer_filter = None, return_fields = None): + + peers = Peers(self.api, peer_filter, return_fields) + + # Remove admin only fields + if not isinstance(self.caller, Person) or \ + 'admin' not in self.caller['roles']: + for peer in peers: + for field in ['key', 'cacert']: + if field in peer: + del peer[field] + + return peers diff --git a/PLC/Methods/GetPersonTags.py b/PLC/Methods/GetPersonTags.py new file mode 100644 index 0000000..6c0c9b3 --- /dev/null +++ b/PLC/Methods/GetPersonTags.py @@ -0,0 +1,75 @@ +# +# Thierry Parmentelat - INRIA +# +from PLC.Faults import * +from PLC.Method import Method +from PLC.Parameter import Parameter, Mixed +from PLC.Filter import Filter +from PLC.Auth import Auth + +from PLC.Persons import Person, Persons +from PLC.PersonTags import PersonTag, PersonTags +from PLC.Sites import Sites, Site + +class GetPersonTags(Method): + """ + Returns an array of structs containing details about + persons and related settings. + + If person_tag_filter is specified and is an array of + person setting identifiers, only person settings matching + the filter will be returned. If return_fields is specified, only + the specified details will be returned. 
+ """ + + roles = ['admin', 'pi', 'user', 'tech'] + + accepts = [ + Auth(), + Mixed([PersonTag.fields['person_tag_id']], + Parameter(int,"Person setting id"), + Filter(PersonTag.fields)), + Parameter([str], "List of fields to return", nullok = True) + ] + + returns = [PersonTag.fields] + + + def call(self, auth, person_tag_filter = None, return_fields = None): + + # only persons can call this (as per roles, but..) + if not isinstance(self.caller,Person): + return [] + + # If we are not admin, make sure to only return viewable accounts + valid_person_ids=None + added_fields=[] + if 'admin' not in self.caller['roles']: + # Get accounts that we are able to view + valid_person_ids = [self.caller['person_id']] + if 'pi' in self.caller['roles'] and self.caller['site_ids']: + sites = Sites(self.api, self.caller['site_ids']) + for site in sites: + valid_person_ids += site['person_ids'] + + if not valid_person_ids: + return [] + + # if we have to filter out on person_id, make sure this is returned from db + if return_fields: + added_fields = set(['person_id']).difference(return_fields) + return_fields += added_fields + + person_tags = PersonTags(self.api, person_tag_filter, return_fields) + + if valid_person_ids is not None: + person_tags = [ person_tag for person_tag in person_tags + if person_tag['person_id'] in valid_person_ids] + + # Remove added fields if not initially specified + if added_fields: + for person_tag in person_tags: + for field in added_fields: + if field in person_tag: + del person_tag[field] + return person_tags diff --git a/PLC/Methods/GetPersons.py b/PLC/Methods/GetPersons.py new file mode 100644 index 0000000..263c663 --- /dev/null +++ b/PLC/Methods/GetPersons.py @@ -0,0 +1,97 @@ +from PLC.Faults import * +from PLC.Method import Method +from PLC.Parameter import Parameter, Mixed +from PLC.Filter import Filter +from PLC.Persons import Person, Persons +from PLC.Sites import Site, Sites +from PLC.Auth import Auth +from PLC.Logger import logger + 
+hidden_fields = ['password', 'verification_key', 'verification_expires'] + +class GetPersons(Method): + """ + Returns an array of structs containing details about users. If + person_filter is specified and is an array of user identifiers or + usernames, or a struct of user attributes, only users matching the + filter will be returned. If return_fields is specified, only the + specified details will be returned. + + Users and techs may only retrieve details about themselves. PIs + may retrieve details about themselves and others at their + sites. Admins and nodes may retrieve details about all accounts. + """ + + roles = ['admin', 'pi', 'user', 'tech', 'node'] + + accepts = [ + Auth(), + Mixed([Mixed(Person.fields['person_id'], + Person.fields['email'])], + Parameter(str,"email"), + Parameter(int,"person_id"), + Filter(Person.fields)), + Parameter([str], "List of fields to return", nullok = True) + ] + + # Filter out password field + return_fields = dict(filter(lambda (field, value): field not in hidden_fields, + Person.fields.items())) + returns = [return_fields] + + def call(self, auth, person_filter = None, return_fields = None): + + logger.info("incoming GetPersons, filter={}, return fields={}" + .format(person_filter, return_fields)) + + # If we are not admin, make sure to only return viewable accounts + if isinstance(self.caller, Person) and \ + 'admin' not in self.caller['roles']: + # Get accounts that we are able to view + valid_person_ids = [self.caller['person_id']] + if ('pi' in self.caller['roles'] or 'tech' in self.caller['roles']) \ + and self.caller['site_ids']: + sites = Sites(self.api, self.caller['site_ids']) + for site in sites: + valid_person_ids += site['person_ids'] + if not valid_person_ids: + return [] + + # this may look suspicious; what if person_filter is not None ? 
+ # turns out the results are getting filtered again below, so we're safe + # although this part of the code does not always trigger, it's probably + # a sensible performance enhancement for all the times + # when GetPersons() gets called without an argument + if person_filter is None: + person_filter = valid_person_ids + + # Filter out password field + if return_fields: + return_fields = filter(lambda field: field not in hidden_fields, + return_fields) + else: + return_fields = self.return_fields.keys() + + # Must query at least person_id, site_ids, and role_ids (see + # Person.can_view() and below). + if return_fields is not None: + added_fields = set(['person_id', 'site_ids', 'role_ids','roles']).difference(return_fields) + return_fields += added_fields + else: + added_fields = [] + + persons = Persons(self.api, person_filter, return_fields) + + # Filter out accounts that are not viewable + if isinstance(self.caller, Person) and \ + 'admin' not in self.caller['roles']: + persons = filter(self.caller.can_view, persons) + + # Remove added fields if not specified + if added_fields: + for person in persons: + for field in added_fields: + if field in person: + del person[field] + + return persons diff --git a/PLC/Methods/GetPlcRelease.py b/PLC/Methods/GetPlcRelease.py new file mode 100644 index 0000000..796ae7e --- /dev/null +++ b/PLC/Methods/GetPlcRelease.py @@ -0,0 +1,59 @@ +from PLC.Method import Method +from PLC.Auth import Auth +from PLC.Faults import * + +import re + +comment_regexp = '\A\s*#.|\A\s*\Z|\Axxxxx' + +regexps = { 'build' : '\A[bB]uild\s+(?P[^:]+)\s*:\s*(?P.*)\Z', + 'tags' : '\A(?P[^:]+)\s*:=\s*(?P.*)\Z', +# spaces not part of key : ungreedy + 'rpms' : '\A(?P[^:]+?)\s*::\s*(?P.*)\Z', +} + +class GetPlcRelease(Method): + """ + Returns various information about the current myplc installation. 
+ """ + + roles = ['admin', 'pi', 'user', 'tech', 'node', 'anonymous'] + + accepts = [ + Auth(), + ] + + # for now only return /etc/myplc-release verbatim + returns = { 'build' : 'information about the build', + 'tags' : 'describes the codebase location and tags used for building', + 'rpms' : 'details the rpm installed in the myplc chroot jail' } + + def call(self, auth): + + comment_matcher = re.compile(comment_regexp) + + matchers = {} + result = {} + for field in regexps.keys(): + matchers[field] = re.compile(regexps[field]) + result[field]={} + + try: + release = open('/etc/myplc-release') + for line in release.readlines(): + line=line.strip() + if comment_matcher.match(line): + continue + for field in regexps.keys(): + m=matchers[field].match(line) + if m: + (key,value)=m.groups(['key','value']) + result[field][key]=value + break + else: + if not result.has_key('unexpected'): + result['unexpected']="" + result['unexpected'] += (line+"\n") + except: + raise PLCNotImplemented, 'Cannot open /etc/myplc-release' + return result diff --git a/PLC/Methods/GetRoles.py b/PLC/Methods/GetRoles.py new file mode 100644 index 0000000..fb905e5 --- /dev/null +++ b/PLC/Methods/GetRoles.py @@ -0,0 +1,21 @@ +from PLC.Faults import * +from PLC.Method import Method +from PLC.Parameter import Parameter, Mixed +from PLC.Roles import Role, Roles +from PLC.Auth import Auth + +class GetRoles(Method): + """ + Get an array of structs containing details about all roles. 
+ """ + + roles = ['admin', 'pi', 'user', 'tech', 'node'] + + accepts = [ + Auth() + ] + + returns = [Role.fields] + + def call(self, auth): + return Roles(self.api) diff --git a/PLC/Methods/GetSession.py b/PLC/Methods/GetSession.py new file mode 100644 index 0000000..82dccbf --- /dev/null +++ b/PLC/Methods/GetSession.py @@ -0,0 +1,46 @@ +import time + +from PLC.Method import Method +from PLC.Parameter import Parameter, Mixed +from PLC.Auth import Auth +from PLC.Sessions import Session, Sessions +from PLC.Nodes import Node, Nodes +from PLC.Persons import Person, Persons + +class GetSession(Method): + """ + Returns a new session key if a user or node authenticated + successfully, faults otherwise. + + Default value for 'expires' is 24 hours. Otherwise, the returned + session 'expires' in the given number of seconds. + """ + + roles = ['admin', 'pi', 'user', 'tech', 'node'] + accepts = [Auth(), + Parameter(int,"expires", nullok=True)] + returns = Session.fields['session_id'] + + + def call(self, auth, expires=None): + # Authenticated with a session key, just return it + if auth.has_key('session'): + return auth['session'] + + session = Session(self.api) + + if isinstance(self.caller, Person): + # XXX Make this configurable + if expires is None: + session['expires'] = int(time.time()) + (24 * 60 * 60) + else: + session['expires'] = int(time.time()) + int(expires) + + session.sync(commit = False) + + if isinstance(self.caller, Node): + session.add_node(self.caller, commit = True) + elif isinstance(self.caller, Person): + session.add_person(self.caller, commit = True) + + return session['session_id'] diff --git a/PLC/Methods/GetSessions.py b/PLC/Methods/GetSessions.py new file mode 100644 index 0000000..b50e1c9 --- /dev/null +++ b/PLC/Methods/GetSessions.py @@ -0,0 +1,35 @@ +from PLC.Faults import * +from PLC.Method import Method +from PLC.Parameter import Parameter, Mixed +from PLC.Filter import Filter +from PLC.Sessions import Session, Sessions +from PLC.Persons 
import Person, Persons +from PLC.Auth import Auth + +class GetSessions(Method): + """ + Returns an array of structs containing details about users sessions. If + session_filter is specified and is an array of user identifiers or + session_keys, or a struct of session attributes, only sessions matching the + filter will be returned. If return_fields is specified, only the + specified details will be returned. + + + """ + + roles = ['admin'] + + accepts = [ + Auth(), + Mixed([Mixed(Session.fields['person_id'], + Session.fields['session_id'])], + Filter(Session.fields)) + ] + + returns = [Session.fields] + + def call(self, auth, session_filter = None): + + sessions = Sessions(self.api, session_filter) + + return sessions diff --git a/PLC/Methods/GetSiteTags.py b/PLC/Methods/GetSiteTags.py new file mode 100644 index 0000000..03a857b --- /dev/null +++ b/PLC/Methods/GetSiteTags.py @@ -0,0 +1,41 @@ +# +# Thierry Parmentelat - INRIA +# +from PLC.Faults import * +from PLC.Method import Method +from PLC.Parameter import Parameter, Mixed +from PLC.Filter import Filter +from PLC.Auth import Auth + +from PLC.SiteTags import SiteTag, SiteTags +from PLC.Sites import Site, Sites + +class GetSiteTags(Method): + """ + Returns an array of structs containing details about + sites and related settings. + + If site_tag_filter is specified and is an array of + site setting identifiers, only site settings matching + the filter will be returned. If return_fields is specified, only + the specified details will be returned. 
+ """ + + roles = ['admin', 'pi', 'user', 'node'] + + accepts = [ + Auth(), + Mixed([SiteTag.fields['site_tag_id']], + Parameter(int,"Site setting id"), + Filter(SiteTag.fields)), + Parameter([str], "List of fields to return", nullok = True) + ] + + returns = [SiteTag.fields] + + + def call(self, auth, site_tag_filter = None, return_fields = None): + + site_tags = SiteTags(self.api, site_tag_filter, return_fields) + + return site_tags diff --git a/PLC/Methods/GetSites.py b/PLC/Methods/GetSites.py new file mode 100644 index 0000000..d277c28 --- /dev/null +++ b/PLC/Methods/GetSites.py @@ -0,0 +1,31 @@ +from PLC.Method import Method +from PLC.Parameter import Parameter, Mixed +from PLC.Filter import Filter +from PLC.Auth import Auth +from PLC.Sites import Site, Sites + +class GetSites(Method): + """ + Returns an array of structs containing details about sites. If + site_filter is specified and is an array of site identifiers or + hostnames, or a struct of site attributes, only sites matching the + filter will be returned. If return_fields is specified, only the + specified details will be returned. 
+ """ + + roles = ['admin', 'pi', 'user', 'tech', 'node', 'anonymous'] + + accepts = [ + Auth(), + Mixed([Mixed(Site.fields['site_id'], + Site.fields['login_base'])], + Parameter(str,"login_base"), + Parameter(int,"site_id"), + Filter(Site.fields)), + Parameter([str], "List of fields to return", nullok = True) + ] + + returns = [Site.fields] + + def call(self, auth, site_filter = None, return_fields = None): + return Sites(self.api, site_filter, return_fields) diff --git a/PLC/Methods/GetSliceFamily.py b/PLC/Methods/GetSliceFamily.py new file mode 100644 index 0000000..910fe2f --- /dev/null +++ b/PLC/Methods/GetSliceFamily.py @@ -0,0 +1,61 @@ +from PLC.Method import Method +from PLC.Auth import Auth +from PLC.Faults import * +from PLC.Parameter import * +from PLC.Slices import Slice, Slices + +from PLC.Accessors.Accessors_standard import * # import slice accessors +from PLC.Accessors.Accessors_sliverauth import * # import slice accessors + +class GetSliceFamily(Method): + """ + Returns the slice vserver reference image that a given slice + should be based on. This depends on the global PLC settings in the + PLC_FLAVOUR area, optionnally overridden by any of the 'vref', + 'arch', 'pldistro', 'fcdistro' tag if set on the slice. 
+ """ + + roles = ['admin', 'user', 'node'] + + # don't support sliver-specific settings yet + accepts = [ + Auth(), + Mixed(Slice.fields['slice_id'], + Slice.fields['name']), + ] + + returns = Parameter (str, "the slicefamily this slice should be based upon") + + # + ### system slices - at least planetflow - still rely on 'vref' + # + def call(self, auth, slice_id_or_name): + # Get slice information + slices = Slices(self.api, [slice_id_or_name]) + if not slices: + raise PLCInvalidArgument, "No such slice %r"%slice_id_or_name + slice = slices[0] + slice_id = slice['slice_id'] + + # omf-control'ed slivers need the omf vserver reference image + # we used to issue SetSliceVref (self.api) (auth,slice_id,'omf') + # to avoid asking users to set both tags 'omf_control' and 'vref' + # however we can't use SetSliceVref here because a node is only allowed + # to set a sliver tag, not a slice tag + # and this somehow gets called from GetSlivers + # anyways it was a bad idea, let's have the UI do that instead + + # the vref tag, if set, wins over pldistro + pldistro = GetSliceVref(self.api,self.caller).call(auth,slice_id) \ + or GetSlicePldistro (self.api,self.caller).call(auth, slice_id) \ + or self.api.config.PLC_FLAVOUR_SLICE_PLDISTRO + + fcdistro = GetSliceFcdistro (self.api,self.caller).call(auth, slice_id) \ + or self.api.config.PLC_FLAVOUR_SLICE_FCDISTRO + + arch = GetSliceArch (self.api,self.caller).call(auth,slice_id) \ + or self.api.config.PLC_FLAVOUR_SLICE_ARCH + + # xxx would make sense to check the corresponding vserver rpms are available + # in all node-families yum repos (and yumgroups, btw) + return "%s-%s-%s"%(pldistro,fcdistro,arch) diff --git a/PLC/Methods/GetSliceInstantiations.py b/PLC/Methods/GetSliceInstantiations.py new file mode 100644 index 0000000..174c209 --- /dev/null +++ b/PLC/Methods/GetSliceInstantiations.py @@ -0,0 +1,21 @@ +from PLC.Faults import * +from PLC.Method import Method +from PLC.Parameter import Parameter, Mixed +from 
PLC.SliceInstantiations import SliceInstantiation, SliceInstantiations +from PLC.Auth import Auth + +class GetSliceInstantiations(Method): + """ + Returns an array of all valid slice instantiation states. + """ + + roles = ['admin', 'pi', 'user', 'tech', 'node'] + + accepts = [ + Auth() + ] + + returns = [SliceInstantiation.fields['instantiation']] + + def call(self, auth): + return [slice_instantiation['instantiation'] for slice_instantiation in SliceInstantiations(self.api)] diff --git a/PLC/Methods/GetSliceKeys.py b/PLC/Methods/GetSliceKeys.py new file mode 100644 index 0000000..2e4e758 --- /dev/null +++ b/PLC/Methods/GetSliceKeys.py @@ -0,0 +1,133 @@ +from PLC.Method import Method +from PLC.Parameter import Parameter, Mixed +from PLC.Filter import Filter +from PLC.Auth import Auth +from PLC.Persons import Person, Persons +from PLC.Sites import Site, Sites +from PLC.Slices import Slice, Slices +from PLC.Keys import Key, Keys + +class GetSliceKeys(Method): + """ + Returns an array of structs containing public key info for users in + the specified slices. If slice_filter is specified and is an array + of slice identifiers or slice names, or a struct of slice + attributes, only slices matching the filter will be returned. If + return_fields is specified, only the specified details will be + returned. + + Users may only query slices of which they are members. PIs may + query any of the slices at their sites. Admins and nodes may query + any slice. If a slice that cannot be queried is specified in + slice_filter, details about that slice will not be returned. 
+ """ + + roles = ['admin', 'pi', 'user', 'node'] + + accepts = [ + Auth(), + Mixed([Mixed(Slice.fields['slice_id'], + Slice.fields['name'])], + Filter(Slice.fields)), + Parameter([str], "List of fields to return", nullok = True) + ] + + returns = [ + { + 'slice_id': Slice.fields['slice_id'], + 'name': Slice.fields['name'], + 'person_id': Person.fields['person_id'], + 'email': Person.fields['email'], + 'key': Key.fields['key'] + }] + + def call(self, auth, slice_filter = None, return_fields = None): + slice_fields = ['slice_id', 'name'] + person_fields = ['person_id', 'email'] + key_fields = ['key'] + + # If we are not admin, make sure to return only viewable + # slices. + if isinstance(self.caller, Person) and \ + 'admin' not in self.caller['roles']: + # Get slices that we are able to view + valid_slice_ids = self.caller['slice_ids'] + if 'pi' in self.caller['roles'] and self.caller['site_ids']: + sites = Sites(self.api, self.caller['site_ids']) + for site in sites: + valid_slice_ids += site['slice_ids'] + + if not valid_slice_ids: + return [] + + if slice_filter is None: + slice_filter = valid_slice_ids + + if return_fields: + slice_return_fields = filter(lambda field: field in slice_fields, return_fields) + person_return_fields = filter(lambda field: field in person_fields, return_fields) + key_return_fields = filter(lambda field: field in key_fields, return_fields) + else: + slice_return_fields = slice_fields + person_return_fields = person_fields + key_return_fields = key_fields + + # Must query at least Slice.slice_id, Slice.person_ids, + # and Person.person_id and Person.key_ids so we can join data correctly + slice_added_fields = set(['slice_id', 'person_ids']).difference(slice_return_fields) + slice_return_fields += slice_added_fields + person_added_fields = set(['person_id', 'key_ids']).difference(person_return_fields) + person_return_fields += person_added_fields + key_added_fields = set(['key_id']).difference(key_return_fields) + key_return_fields += 
key_added_fields + + # Get the slices + all_slices = Slices(self.api, slice_filter, slice_return_fields).dict('slice_id') + slice_ids = all_slices.keys() + slices = all_slices.values() + + # Filter out slices that are not viewable + if isinstance(self.caller, Person) and \ + 'admin' not in self.caller['roles']: + slices = filter(lambda slice: slice['slice_id'] in valid_slice_ids, slices) + + # Get the persons + person_ids = set() + for slice in slices: + person_ids.update(slice['person_ids']) + + all_persons = Persons(self.api, list(person_ids), person_return_fields).dict('person_id') + person_ids = all_persons.keys() + persons = all_persons.values() + + # Get the keys + key_ids = set() + for person in persons: + key_ids.update(person['key_ids']) + + all_keys = Keys(self.api, list(key_ids), key_return_fields).dict('key_id') + key_ids = all_keys.keys() + keys = all_keys.values() + + # Create slice_keys list + slice_keys = [] + slice_fields = list(set(slice_return_fields).difference(slice_added_fields)) + person_fields = list(set(person_return_fields).difference(person_added_fields)) + key_fields = list(set(key_return_fields).difference(key_added_fields)) + + for slice in slices: + slice_key = dict.fromkeys(slice_fields + person_fields + key_fields) + if not slice['person_ids']: + continue + for person_id in slice['person_ids']: + person = all_persons[person_id] + if not person['key_ids']: + continue + for key_id in person['key_ids']: + key = all_keys[key_id] + slice_key.update(dict(filter(lambda (k, v): k in slice_fields, slice.items()))) + slice_key.update(dict(filter(lambda (k, v): k in person_fields, person.items()))) + slice_key.update(dict(filter(lambda (k, v): k in key_fields, key.items()))) + slice_keys.append(slice_key.copy()) + + return slice_keys diff --git a/PLC/Methods/GetSliceTags.py b/PLC/Methods/GetSliceTags.py new file mode 100644 index 0000000..c6db287 --- /dev/null +++ b/PLC/Methods/GetSliceTags.py @@ -0,0 +1,94 @@ +from PLC.Faults import * +from 
PLC.Method import Method +from PLC.Parameter import Parameter, Mixed +from PLC.Filter import Filter +from PLC.SliceTags import SliceTag, SliceTags +from PLC.Persons import Person, Persons +from PLC.Sites import Site, Sites +from PLC.Nodes import Nodes +from PLC.Slices import Slice, Slices +from PLC.Auth import Auth + +class GetSliceTags(Method): + """ + Returns an array of structs containing details about slice and + sliver attributes. An attribute is a sliver attribute if the + node_id field is set. If slice_tag_filter is specified and + is an array of slice attribute identifiers, or a struct of slice + attribute attributes, only slice attributes matching the filter + will be returned. If return_fields is specified, only the + specified details will be returned. + + Users may only query attributes of slices or slivers of which they + are members. PIs may only query attributes of slices or slivers at + their sites, or of which they are members. Admins may query + attributes of any slice or sliver. + """ + + roles = ['admin', 'pi', 'user', 'node'] + + accepts = [ + Auth(), + Mixed([SliceTag.fields['slice_tag_id']], + Filter(SliceTag.fields)), + Parameter([str], "List of fields to return", nullok = True) + ] + + returns = [SliceTag.fields] + + + def call(self, auth, slice_tag_filter = None, return_fields = None): + # If we are not admin, make sure to only return our own slice + # and sliver attributes. 
+ if isinstance(self.caller, Person) and \ + 'admin' not in self.caller['roles']: + # Get slices that we are able to view + valid_slice_ids = self.caller['slice_ids'] + if 'pi' in self.caller['roles'] and self.caller['site_ids']: + sites = Sites(self.api, self.caller['site_ids']) + for site in sites: + valid_slice_ids += site['slice_ids'] + # techs can view all slices on the nodes at their site + if 'tech' in self.caller['roles'] and self.caller['site_ids']: + nodes = Nodes(self.api, {'site_id': self.caller['site_ids']}, ['site_id', 'slice_ids']) + for node in nodes: + valid_slice_ids.extend(node['slice_ids']) + + if not valid_slice_ids: + return [] + + # Get slice attributes that we are able to view + valid_slice_tag_ids = [] + slices = Slices(self.api, valid_slice_ids) + for slice in slices: + valid_slice_tag_ids += slice['slice_tag_ids'] + + if not valid_slice_tag_ids: + return [] + + if slice_tag_filter is None: + slice_tag_filter = valid_slice_tag_ids + + # Must query at least slice_tag_id (see below) + if return_fields is not None and 'slice_tag_id' not in return_fields: + return_fields.append('slice_tag_id') + added_fields = True + else: + added_fields = False + + slice_tags = SliceTags(self.api, slice_tag_filter, return_fields) + + # Filter out slice attributes that are not viewable + if isinstance(self.caller, Person) and \ + 'admin' not in self.caller['roles']: + slice_tags = filter(lambda slice_tag: \ + slice_tag['slice_tag_id'] in valid_slice_tag_ids, + slice_tags) + + # Remove slice_tag_id if not specified + if added_fields: + for slice_tag in slice_tags: + if 'slice_tag_id' in slice_tag: + del slice_tag['slice_tag_id'] + + return slice_tags diff --git a/PLC/Methods/GetSliceTicket.py b/PLC/Methods/GetSliceTicket.py new file mode 100644 index 0000000..13c7840 --- /dev/null +++ b/PLC/Methods/GetSliceTicket.py @@ -0,0 +1,77 @@ +import time + +from PLC.Faults import * +from PLC.Method import Method +from PLC.Parameter import Parameter, Mixed +from 
PLC.Slices import Slice, Slices +from PLC.Auth import Auth +from PLC.GPG import gpg_sign, gpg_verify +from PLC.InitScripts import InitScript, InitScripts + +from PLC.Methods.GetSlivers import get_slivers + +class GetSliceTicket(Method): + """ + Returns a ticket for, or signed representation of, the specified + slice. Slice tickets may be used to manually instantiate or update + a slice on a node. Present this ticket to the local Node Manager + interface to redeem it. + + If the slice has not been added to a node with AddSliceToNodes, + and the ticket is redeemed on that node, it will be deleted the + next time the Node Manager contacts the API. + + Users may only obtain tickets for slices of which they are + members. PIs may obtain tickets for any of the slices at their + sites, or any slices of which they are members. Admins may obtain + tickets for any slice. + + Returns 1 if successful, faults otherwise. + """ + + roles = ['admin', 'pi', 'user', 'peer'] + + accepts = [ + Auth(), + Mixed(Slice.fields['slice_id'], + Slice.fields['name']), + ] + + returns = Parameter(str, 'Signed slice ticket') + + def call(self, auth, slice_id_or_name): + slices = Slices(self.api, [slice_id_or_name]) + if not slices: + raise PLCInvalidArgument, "No such slice" + slice = slices[0] + + # Allow peers to obtain tickets for their own slices + if slice['peer_id'] is not None: + if not isinstance(self.caller, Peer): + raise PLCInvalidArgument, "Not a local slice" + elif slice['peer_id'] != self.caller['peer_id']: + raise PLCInvalidArgument, "Only the authoritative peer may obtain tickets for that slice" + + # Tickets are the canonicalized XML-RPC methodResponse + # representation of a partial GetSlivers() response, i.e., + + initscripts = InitScripts(self.api, {'enabled': True}) + + data = { + 'timestamp': int(time.time()), + 'initscripts': initscripts, + 'slivers': get_slivers(self.api, self.caller, auth, [slice['slice_id']]), + } + + # Sign ticket + signed_ticket = gpg_sign((data,), + 
self.api.config.PLC_ROOT_GPG_KEY, + self.api.config.PLC_ROOT_GPG_KEY_PUB, + methodresponse = True, + detach_sign = False) + + # Verify ticket + gpg_verify(signed_ticket, + self.api.config.PLC_ROOT_GPG_KEY_PUB) + + return signed_ticket diff --git a/PLC/Methods/GetSlices.py b/PLC/Methods/GetSlices.py new file mode 100644 index 0000000..c06fbe2 --- /dev/null +++ b/PLC/Methods/GetSlices.py @@ -0,0 +1,82 @@ +from PLC.Method import Method +from PLC.Parameter import Parameter, Mixed +from PLC.Filter import Filter +from PLC.Auth import Auth +from PLC.Persons import Person, Persons +from PLC.Nodes import Nodes +from PLC.Sites import Site, Sites +from PLC.Slices import Slice, Slices + +class GetSlices(Method): + """ + Returns an array of structs containing details about slices. If + slice_filter is specified and is an array of slice identifiers or + slice names, or a struct of slice attributes, only slices matching + the filter will be returned. If return_fields is specified, only the + specified details will be returned. + + Users may only query slices of which they are members. PIs may + query any of the slices at their sites. Admins and nodes may query + any slice. If a slice that cannot be queried is specified in + slice_filter, details about that slice will not be returned. + """ + + roles = ['admin', 'pi', 'user', 'node'] + + accepts = [ + Auth(), + Mixed([Mixed(Slice.fields['slice_id'], + Slice.fields['name'])], + Parameter(str,"name"), + Parameter(int,"slice_id"), + Filter(Slice.fields)), + Parameter([str], "List of fields to return", nullok = True) + ] + + returns = [Slice.fields] + + def call(self, auth, slice_filter = None, return_fields = None): + # If we are not admin, make sure to return only viewable + # slices. 
+ if isinstance(self.caller, Person) and \ + 'admin' not in self.caller['roles']: + # Get slices that we are able to view + valid_slice_ids = self.caller['slice_ids'] + # pis can view all slices at their site + if 'pi' in self.caller['roles'] and self.caller['site_ids']: + sites = Sites(self.api, self.caller['site_ids']) + for site in sites: + valid_slice_ids += site['slice_ids'] + # techs can view all slices on the nodes at their site + if 'tech' in self.caller['roles'] and self.caller['site_ids']: + nodes = Nodes(self.api, {'site_id': self.caller['site_ids']}, ['site_id', 'slice_ids']) + for node in nodes: + valid_slice_ids.extend(node['slice_ids']) + + if not valid_slice_ids: + return [] + + if slice_filter is None: + slice_filter = valid_slice_ids + + # Must query at least slice_id (see below) + if return_fields is not None and 'slice_id' not in return_fields: + return_fields.append('slice_id') + added_fields = True + else: + added_fields = False + + slices = Slices(self.api, slice_filter, return_fields) + + # Filter out slices that are not viewable + if isinstance(self.caller, Person) and \ + 'admin' not in self.caller['roles']: + slices = filter(lambda slice: slice['slice_id'] in valid_slice_ids, slices) + + # Remove slice_id if not specified + if added_fields: + for slice in slices: + if 'slice_id' in slice: + del slice['slice_id'] + + return slices diff --git a/PLC/Methods/GetSlivers.py b/PLC/Methods/GetSlivers.py new file mode 100644 index 0000000..9e3b73d --- /dev/null +++ b/PLC/Methods/GetSlivers.py @@ -0,0 +1,386 @@ +import time + +from PLC.Faults import * +from PLC.Method import Method +from PLC.Parameter import Parameter, Mixed +from PLC.Filter import Filter +from PLC.Auth import Auth +from PLC.Nodes import Node, Nodes +from PLC.Interfaces import Interface, Interfaces +from PLC.NodeGroups import NodeGroup, NodeGroups +from PLC.ConfFiles import ConfFile, ConfFiles +from PLC.Slices import Slice, Slices +from PLC.Persons import Person, Persons +from 
PLC.Sites import Sites +from PLC.Roles import Roles +from PLC.Keys import Key, Keys +from PLC.SliceTags import SliceTag, SliceTags +from PLC.InitScripts import InitScript, InitScripts +from PLC.Leases import Lease, Leases +from PLC.Timestamp import Duration +from PLC.Methods.GetSliceFamily import GetSliceFamily +from PLC.PersonTags import PersonTag,PersonTags + +from PLC.Accessors.Accessors_standard import * + +# XXX used to check if slice expiration time is sane +MAXINT = 2L**31-1 + +# slice_filter essentially contains the slice_ids for the relevant slices (on the node + system & delegated slices) +def get_slivers(api, caller, auth, slice_filter, node = None): + # Get slice information + slices = Slices(api, slice_filter, ['slice_id', 'name', 'instantiation', 'expires', 'person_ids', 'slice_tag_ids']) + + # Build up list of users and slice attributes + person_ids = set() + slice_tag_ids = set() + for slice in slices: + person_ids.update(slice['person_ids']) + slice_tag_ids.update(slice['slice_tag_ids']) + + # Get user information + all_persons = Persons(api, {'person_id':person_ids,'enabled':True}, ['person_id', 'enabled', 'key_ids']).dict() + + # Build up list of keys + key_ids = set() + for person in all_persons.values(): + key_ids.update(person['key_ids']) + + # Get user account keys + all_keys = Keys(api, key_ids, ['key_id', 'key', 'key_type']).dict() + + # Get slice attributes + all_slice_tags = SliceTags(api, slice_tag_ids).dict() + + slivers = [] + for slice in slices: + keys = [] + for person_id in slice['person_ids']: + if person_id in all_persons: + person = all_persons[person_id] + if not person['enabled']: + continue + for key_id in person['key_ids']: + if key_id in all_keys: + key = all_keys[key_id] + keys += [{'key_type': key['key_type'], + 'key': key['key']}] + + attributes = [] + + # All (per-node and global) attributes for this slice + slice_tags = [] + for slice_tag_id in slice['slice_tag_ids']: + if slice_tag_id in all_slice_tags: + 
slice_tags.append(all_slice_tags[slice_tag_id]) + + # Per-node sliver attributes take precedence over global + # slice attributes, so set them first. + # Then comes nodegroup slice attributes + # Followed by global slice attributes + sliver_attributes = [] + + if node is not None: + for sliver_attribute in [ a for a in slice_tags if a['node_id'] == node['node_id'] ]: + sliver_attributes.append(sliver_attribute['tagname']) + attributes.append({'tagname': sliver_attribute['tagname'], + 'value': sliver_attribute['value']}) + + # set nodegroup slice attributes + for slice_tag in [ a for a in slice_tags if a['nodegroup_id'] in node['nodegroup_ids'] ]: + # Do not set any nodegroup slice attributes for + # which there is at least one sliver attribute + # already set. + if slice_tag['tagname'] not in sliver_attributes: + sliver_attributes.append(slice_tag['tagname']) + attributes.append({'tagname': slice_tag['tagname'], + 'value': slice_tag['value']}) + + for slice_tag in [ a for a in slice_tags if a['node_id'] is None and a['nodegroup_id'] is None ]: + # Do not set any global slice attributes for + # which there is at least one sliver attribute + # already set. + if slice_tag['tagname'] not in sliver_attributes: + attributes.append({'tagname': slice_tag['tagname'], + 'value': slice_tag['value']}) + + # XXX Sanity check; though technically this should be a system invariant + # checked with an assertion + if slice['expires'] > MAXINT: slice['expires']= MAXINT + + # expose the slice vref as computed by GetSliceFamily + family = GetSliceFamily (api,caller).call(auth, slice['slice_id']) + + slivers.append({ + 'name': slice['name'], + 'slice_id': slice['slice_id'], + 'instantiation': slice['instantiation'], + 'expires': slice['expires'], + 'keys': keys, + 'attributes': attributes, + 'GetSliceFamily': family, + }) + + return slivers + +### The pickle module, used in conjunction with caching has a restriction that it does not +### work on "connection objects." 
It doesn't matter if the connection object has +### an 'str' or 'repr' method, there is a taint check that throws an exception if +### the pickled class is found to derive from a connection. +### (To be moved to Method.py) + +def sanitize_for_pickle (obj): + if (isinstance(obj, dict)): + parent = dict(obj) + for k in parent.keys(): parent[k] = sanitize_for_pickle (parent[k]) + return parent + elif (isinstance(obj, list)): + parent = list(obj) + parent = map(sanitize_for_pickle, parent) + return parent + else: + return obj + +class GetSlivers(Method): + """ + Returns a struct containing information about the specified node + (or calling node, if called by a node and node_id_or_hostname is + not specified), including the current set of slivers bound to the + node. + + All of the information returned by this call can be gathered from + other calls, e.g. GetNodes, GetInterfaces, GetSlices, etc. This + function exists almost solely for the benefit of Node Manager. + """ + + roles = ['admin', 'node'] + + accepts = [ + Auth(), + Mixed(Node.fields['node_id'], + Node.fields['hostname']), + ] + + returns = { + 'timestamp': Parameter(int, "Timestamp of this call, in seconds since UNIX epoch"), + 'node_id': Node.fields['node_id'], + 'hostname': Node.fields['hostname'], + 'interfaces': [Interface.fields], + 'groups': [NodeGroup.fields['groupname']], + 'conf_files': [ConfFile.fields], + 'initscripts': [InitScript.fields], + 'accounts': [{ + 'name': Parameter(str, "unix style account name", max = 254), + 'keys': [{ + 'key_type': Key.fields['key_type'], + 'key': Key.fields['key'] + }], + }], + 'slivers': [{ + 'name': Slice.fields['name'], + 'slice_id': Slice.fields['slice_id'], + 'instantiation': Slice.fields['instantiation'], + 'expires': Slice.fields['expires'], + 'keys': [{ + 'key_type': Key.fields['key_type'], + 'key': Key.fields['key'] + }], + 'attributes': [{ + 'tagname': SliceTag.fields['tagname'], + 'value': SliceTag.fields['value'] + }] + }], + # how to reach the xmpp 
server + 'xmpp': {'server':Parameter(str,"hostname for the XMPP server"), + 'user':Parameter(str,"username for the XMPP server"), + 'password':Parameter(str,"username for the XMPP server"), + }, + # we consider three policies (reservation-policy) + # none : the traditional way to use a node + # lease_or_idle : 0 or 1 slice runs at a given time + # lease_or_shared : 1 slice is running during a lease, otherwise all the slices come back + 'reservation_policy': Parameter(str,"one among none, lease_or_idle, lease_or_shared"), + 'leases': [ { 'slice_id' : Lease.fields['slice_id'], + 't_from' : Lease.fields['t_from'], + 't_until' : Lease.fields['t_until'], + }], + } + + def call(self, auth, node_id_or_hostname = None): + return self.raw_call(auth, node_id_or_hostname) + + + def raw_call(self, auth, node_id_or_hostname): + timestamp = int(time.time()) + + # Get node + if node_id_or_hostname is None: + if isinstance(self.caller, Node): + node = self.caller + else: + raise PLCInvalidArgument, "'node_id_or_hostname' not specified" + else: + nodes = Nodes(self.api, [node_id_or_hostname]) + if not nodes: + raise PLCInvalidArgument, "No such node" + node = nodes[0] + + if node['peer_id'] is not None: + raise PLCInvalidArgument, "Not a local node" + + # Get interface information + interfaces = Interfaces(self.api, node['interface_ids']) + + # Get node group information + nodegroups = NodeGroups(self.api, node['nodegroup_ids']).dict('groupname') + groups = nodegroups.keys() + + # Get all (enabled) configuration files + all_conf_files = ConfFiles(self.api, {'enabled': True}).dict() + conf_files = {} + + # Global configuration files are the default. If multiple + # entries for the same global configuration file exist, it is + # undefined which one takes precedence. 
+ for conf_file in all_conf_files.values(): + if not conf_file['node_ids'] and not conf_file['nodegroup_ids']: + conf_files[conf_file['dest']] = conf_file + + # Node group configuration files take precedence over global + # ones. If a node belongs to multiple node groups for which + # the same configuration file is defined, it is undefined + # which one takes precedence. + for nodegroup in nodegroups.values(): + for conf_file_id in nodegroup['conf_file_ids']: + if conf_file_id in all_conf_files: + conf_file = all_conf_files[conf_file_id] + conf_files[conf_file['dest']] = conf_file + + # Node configuration files take precedence over node group + # configuration files. + for conf_file_id in node['conf_file_ids']: + if conf_file_id in all_conf_files: + conf_file = all_conf_files[conf_file_id] + conf_files[conf_file['dest']] = conf_file + + # Get all (enabled) initscripts + initscripts = InitScripts(self.api, {'enabled': True}) + + # Get system slices + system_slice_tags = SliceTags(self.api, {'tagname': 'system', 'value': '1'}).dict('slice_id') + system_slice_ids = system_slice_tags.keys() + + # Get nm-controller slices + # xxx Thierry: should these really be exposed regardless of their mapping to nodes ? 
+ controller_and_delegated_slices = Slices(self.api, {'instantiation': ['nm-controller', 'delegated']}, ['slice_id']).dict('slice_id') + controller_and_delegated_slice_ids = controller_and_delegated_slices.keys() + slice_ids = system_slice_ids + controller_and_delegated_slice_ids + node['slice_ids'] + + slivers = get_slivers(self.api, self.caller, auth, slice_ids, node) + + # get the special accounts and keys needed for the node + # root + # site_admin + accounts = [] + if False and 'site_id' not in node: + nodes = Nodes(self.api, node['node_id']) + node = nodes[0] + + # used in conjunction with reduce to flatten lists, like in + # reduce ( reduce_flatten_list, [ [1] , [2,3] ], []) => [ 1,2,3 ] + def reduce_flatten_list (x,y): return x+y + + # root users are users marked with the tag 'isrootonsite'. Hack for Mlab and other sites in which admins participate in diagnosing problems. + def get_site_root_user_keys(api,site_id_or_name): + site = Sites (api,site_id_or_name,['person_ids'])[0] + all_site_persons = site['person_ids'] + all_site_person_tags = PersonTags(self.api,{'person_id':all_site_persons,'tagname':'isrootonsite'},['value','person_id']) + site_root_person_tags = filter(lambda r:r['value']=='true',all_site_person_tags) + site_root_person_ids = map(lambda r:r['person_id'],site_root_person_tags) + key_ids = reduce (reduce_flatten_list, + [ p['key_ids'] for p in \ + Persons(api,{ 'person_id':site_root_person_ids, + 'enabled':True, '|role_ids' : [20, 40] }, + ['key_ids']) ], + []) + return [ key['key'] for key in Keys (api, key_ids) if key['key_type']=='ssh'] + + # power users are pis and techs + def get_site_power_user_keys(api,site_id_or_name): + site = Sites (api,site_id_or_name,['person_ids'])[0] + key_ids = reduce (reduce_flatten_list, + [ p['key_ids'] for p in \ + Persons(api,{ 'person_id':site['person_ids'], + 'enabled':True, '|role_ids' : [20, 40] }, + ['key_ids']) ], + []) + return [ key['key'] for key in Keys (api, key_ids) if key['key_type']=='ssh'] 
+ + # all admins regardless of their site + def get_all_admin_keys(api): + key_ids = reduce (reduce_flatten_list, + [ p['key_ids'] for p in \ + Persons(api, {'peer_id':None, 'enabled':True, '|role_ids':[10] }, + ['key_ids']) ], + []) + return [ key['key'] for key in Keys (api, key_ids) if key['key_type']=='ssh'] + + # 'site_admin' account setup + personsitekeys=get_site_power_user_keys(self.api,node['site_id']) + accounts.append({'name':'site_admin','keys':personsitekeys}) + + # 'root' account setup on nodes from all 'admin' users and ones marked with 'isrootonsite' for this site + siterootkeys=get_site_root_user_keys(self.api,node['site_id']) + personsitekeys=get_all_admin_keys(self.api) + personsitekeys.extend(siterootkeys) + + accounts.append({'name':'root','keys':personsitekeys}) + + hrn = GetNodeHrn(self.api,self.caller).call(auth,node['node_id']) + + # XMPP config for omf federation + try: + if not self.api.config.PLC_OMF_ENABLED: + raise Exception,"OMF not enabled" + xmpp={'server':self.api.config.PLC_OMF_XMPP_SERVER} + except: + xmpp={'server':None} + + node.update_last_contact() + + # expose leases & reservation policy + # in a first implementation we only support none and lease_or_idle + lease_exposed_fields = [ 'slice_id', 't_from', 't_until', 'name', ] + leases=None + if node['node_type'] != 'reservable': + reservation_policy='none' + else: + reservation_policy='lease_or_idle' + # expose the leases for the next 24 hours + leases = [ dict ( [ (k,l[k]) for k in lease_exposed_fields ] ) + for l in Leases (self.api, {'node_id':node['node_id'], + 'clip': (timestamp, timestamp+24*Duration.HOUR), + '-SORT': 't_from', + }) ] + granularity=self.api.config.PLC_RESERVATION_GRANULARITY + + raw_data = { + 'timestamp': timestamp, + 'node_id': node['node_id'], + 'hostname': node['hostname'], + 'interfaces': interfaces, + 'groups': groups, + 'conf_files': conf_files.values(), + 'initscripts': initscripts, + 'slivers': slivers, + 'accounts': accounts, + 'xmpp':xmpp, + 
'hrn':hrn, + 'reservation_policy': reservation_policy, + 'leases':leases, + 'lease_granularity': granularity, + } + + sanitized_data = sanitize_for_pickle (raw_data) + return sanitized_data + diff --git a/PLC/Methods/GetTagTypes.py b/PLC/Methods/GetTagTypes.py new file mode 100644 index 0000000..a117395 --- /dev/null +++ b/PLC/Methods/GetTagTypes.py @@ -0,0 +1,33 @@ +# +# Thierry Parmentelat - INRIA +# +from PLC.Method import Method +from PLC.Parameter import Parameter, Mixed +from PLC.Filter import Filter +from PLC.Auth import Auth +from PLC.TagTypes import TagType, TagTypes + +class GetTagTypes(Method): + """ + Returns an array of structs containing details about + node tag types. + + The usual filtering scheme applies on this method. + """ + + roles = ['admin', 'pi', 'user', 'tech', 'node'] + + accepts = [ + Auth(), + Mixed([Mixed(TagType.fields['tag_type_id'], + TagType.fields['tagname'])], + Mixed(TagType.fields['tag_type_id'], + TagType.fields['tagname']), + Filter(TagType.fields)), + Parameter([str], "List of fields to return", nullok = True) + ] + + returns = [TagType.fields] + + def call(self, auth, tag_type_filter = None, return_fields = None): + return TagTypes(self.api, tag_type_filter, return_fields) diff --git a/PLC/Methods/GetWhitelist.py b/PLC/Methods/GetWhitelist.py new file mode 100644 index 0000000..83531f1 --- /dev/null +++ b/PLC/Methods/GetWhitelist.py @@ -0,0 +1,73 @@ +from PLC.Faults import * +from PLC.Method import Method +from PLC.Parameter import Parameter, Mixed +from PLC.Filter import Filter +from PLC.Nodes import Node, Nodes +from PLC.Persons import Person, Persons +from PLC.Auth import Auth + +class GetWhitelist(Method): + """ + Returns an array of structs containing details about the specified nodes + whitelists. If node_filter is specified and is an array of node identifiers or + hostnames, or a struct of node attributes, only nodes matching the + filter will be returned. 
If return_fields is specified, only the + specified details will be returned. + + Some fields may only be viewed by admins. + """ + + roles = ['admin', 'pi', 'user', 'tech', 'node', 'anonymous'] + + accepts = [ + Auth(), + Mixed([Mixed(Node.fields['node_id'], + Node.fields['hostname'])], + Filter(Node.fields)), + Parameter([str], "List of fields to return", nullok = True), + ] + + returns = [Node.fields] + + + def call(self, auth, node_filter = None, return_fields = None): + + # Must query at least slice_ids_whitelist + if return_fields is not None: + added_fields = set(['slice_ids_whitelist']).difference(return_fields) + return_fields += added_fields + else: + added_fields =[] + + # Get node information + nodes = Nodes(self.api, node_filter, return_fields) + + # Remove all nodes without a whitelist + for node in nodes[:]: + if not node['slice_ids_whitelist']: + nodes.remove(node) + + # Remove admin only fields + if not isinstance(self.caller, Person) or \ + 'admin' not in self.caller['roles']: + slice_ids = set() + if self.caller: + slice_ids.update(self.caller['slice_ids']) + #if node has whitelist, make sure the user has a slice on the whitelist + for node in nodes[:]: + if 'slice_ids_whitelist' in node and \ + node['slice_ids_whitelist'] and \ + not slice_ids.intersection(node['slice_ids_whitelist']): + nodes.remove(node) + for node in nodes: + for field in ['boot_nonce', 'key', 'session', 'root_person_ids']: + if field in node: + del node[field] + + # remove added fields if not specified + if added_fields: + for node in nodes: + for field in added_fields: + del node[field] + + return nodes diff --git a/PLC/Methods/NotifyPersons.py b/PLC/Methods/NotifyPersons.py new file mode 100644 index 0000000..70c273d --- /dev/null +++ b/PLC/Methods/NotifyPersons.py @@ -0,0 +1,48 @@ +from PLC.Method import Method +from PLC.Parameter import Parameter, Mixed +from PLC.Filter import Filter +from PLC.Auth import Auth +from PLC.Persons import Person, Persons +from PLC.sendmail 
import sendmail + +class NotifyPersons(Method): + """ + Sends an e-mail message to the specified users. If person_filter + is specified and is an array of user identifiers or usernames, or + a struct of user attributes, only users matching the filter will + receive the message. + + Returns 1 if successful. + """ + + roles = ['admin', 'node'] + + accepts = [ + Auth(), + Mixed([Mixed(Person.fields['person_id'], + Person.fields['email'])], + Filter(Person.fields)), + Parameter(str, "E-mail subject"), + Parameter(str, "E-mail body") + ] + + returns = Parameter(int, '1 if successful') + + def call(self, auth, person_filter, subject, body): + persons = Persons(self.api, person_filter, + ['person_id', 'first_name', 'last_name', 'email']) + if not persons: + raise PLCInvalidArgument, "No such user(s)" + + # Send email + sendmail(self.api, + To = [("%s %s" % (person['first_name'], person['last_name']), + person['email']) for person in persons], + Subject = subject, + Body = body) + + # Logging variables + self.event_objects = {'Person': [person['person_id'] for person in persons]} + self.message = subject + + return 1 diff --git a/PLC/Methods/NotifySupport.py b/PLC/Methods/NotifySupport.py new file mode 100644 index 0000000..fbd3358 --- /dev/null +++ b/PLC/Methods/NotifySupport.py @@ -0,0 +1,36 @@ +from PLC.Method import Method +from PLC.Parameter import Parameter, Mixed +from PLC.Auth import Auth +from PLC.sendmail import sendmail + +class NotifySupport(Method): + """ + Sends an e-mail message to the configured support address. + + Returns 1 if successful. 
+ """ + + roles = ['admin'] + + accepts = [ + Auth(), + Parameter(str, "E-mail subject"), + Parameter(str, "E-mail body") + ] + + returns = Parameter(int, '1 if successful') + + def call(self, auth, subject, body): + to_name="%s Support"%self.api.config.PLC_NAME + to_address=self.api.config.PLC_MAIL_SUPPORT_ADDRESS + + # Send email + sendmail(self.api, To=(to_name,to_address), + Subject = subject, + Body = body) + + # Logging variables + #self.event_objects = {'Person': [person['person_id'] for person in persons]} + self.message = subject + + return 1 diff --git a/PLC/Methods/RebootNode.py b/PLC/Methods/RebootNode.py new file mode 100644 index 0000000..ed15ce3 --- /dev/null +++ b/PLC/Methods/RebootNode.py @@ -0,0 +1,73 @@ +import socket + +from PLC.Faults import * +from PLC.Method import Method +from PLC.Parameter import Parameter, Mixed +from PLC.Nodes import Node, Nodes +from PLC.Interfaces import Interface, Interfaces +from PLC.Auth import Auth +from PLC.POD import udp_pod + +class RebootNode(Method): + """ + Sends the specified node a specially formatted UDP packet which + should cause it to reboot immediately. + + Admins can reboot any node. Techs and PIs can only reboot nodes at + their site. + + Returns 1 if the packet was successfully sent (which only whether + the packet was sent, not whether the reboot was successful). + """ + + roles = ['admin', 'pi', 'tech'] + + accepts = [ + Auth(), + Mixed(Node.fields['node_id'], + Node.fields['hostname']) + ] + + returns = Parameter(int, '1 if successful') + + def call(self, auth, node_id_or_hostname): + # Get account information + nodes = Nodes(self.api, [node_id_or_hostname]) + if not nodes: + raise PLCInvalidArgument, "No such node" + + node = nodes[0] + + # Authenticated function + assert self.caller is not None + + # If we are not an admin, make sure that the caller is a + # member of the site at which the node is located. 
+ if 'admin' not in self.caller['roles']: + if node['site_id'] not in self.caller['site_ids']: + raise PLCPermissionDenied, "Not allowed to delete nodes from specified site" + + session = node['session'] + if not session: + raise PLCInvalidArgument, "No session key on record for that node (i.e., has never successfully booted)" + session = session.strip() + + # Only use the hostname as a backup, try to use the primary ID + # address instead. + host = node['hostname'] + interfaces = Interfaces(self.api, node['interface_ids']) + for interface in interfaces: + if interface['is_primary'] == 1: + host = interface['ip'] + break + + try: + udp_pod(host, session) + except socket.error, e: + # Ignore socket errors + pass + + self.event_objects = {'Node': [node['node_id']]} + self.message = "RebootNode called" + + return 1 diff --git a/PLC/Methods/RebootNodeWithPCU.py b/PLC/Methods/RebootNodeWithPCU.py new file mode 100644 index 0000000..2126a2e --- /dev/null +++ b/PLC/Methods/RebootNodeWithPCU.py @@ -0,0 +1,81 @@ +import socket + +from PLC.Faults import * +from PLC.Method import Method +from PLC.Parameter import Parameter, Mixed +from PLC.Auth import Auth + +from PLC.Nodes import Node, Nodes +from PLC.PCUs import PCU, PCUs + +try: + from pcucontrol import reboot + external_dependency = True +except: + external_dependency = False + +class RebootNodeWithPCU(Method): + """ + Uses the associated PCU to attempt to reboot the given Node. + + Admins can reboot any node. Techs and PIs can only reboot nodes at + their site. + + Returns 1 if the reboot proceeded without error (Note: this does not guarantee + that the reboot is successful). + Returns -1 if external dependencies for this call are not available. + Returns "error string" if the reboot failed with a specific message. 
+ """ + + roles = ['admin', 'pi', 'tech'] + + accepts = [ + Auth(), + Mixed(Node.fields['node_id'], + Node.fields['hostname']), + Parameter(bool, "Run as a test, or as a real reboot", nullok = True) + ] + + returns = Parameter(int, '1 if successful') + + def call(self, auth, node_id_or_hostname, testrun=None): + # Get account information + nodes = Nodes(self.api, [node_id_or_hostname]) + if not nodes: + raise PLCInvalidArgument, "No such node" + + if testrun is None: + testrun = False + + node = nodes[0] + + # Authenticated function + assert self.caller is not None + + # If we are not an admin, make sure that the caller is a + # member of the site at which the node is located. + if 'admin' not in self.caller['roles']: + if node['site_id'] not in self.caller['site_ids']: + raise PLCPermissionDenied, "Not allowed to reboot nodes from specified site" + + # Verify that the node has pcus associated with it. + pcus = PCUs(self.api, {'pcu_id' : node['pcu_ids']} ) + if not pcus: + raise PLCInvalidArgument, "No PCUs associated with Node" + + pcu = pcus[0] + + if not external_dependency: + raise PLCNotImplemented, "Could not load external module to attempt reboot" + + # model, hostname, port, + # i = pcu['node_ids'].index(node['node_id']) + # p = pcu['ports'][i] + ret = reboot.reboot_api(node, pcu, testrun) + + node.update_last_pcu_reboot(commit=True) # commits new timestamp to node + + self.event_objects = {'Node': [node['node_id']]} + self.message = "RebootNodeWithPCU %s with %s returned %s" % (node['node_id'], pcu['pcu_id'], ret) + + return ret diff --git a/PLC/Methods/RefreshPeer.py b/PLC/Methods/RefreshPeer.py new file mode 100644 index 0000000..7af8569 --- /dev/null +++ b/PLC/Methods/RefreshPeer.py @@ -0,0 +1,879 @@ +# +# Thierry Parmentelat - INRIA +# +import os +import sys +import fcntl +import time + +from PLC.Logger import logger +from PLC.Faults import * +from PLC.Method import Method +from PLC.Parameter import Parameter, Mixed +from PLC.Auth import Auth + +from 
PLC.Peers import Peer, Peers +from PLC.Sites import Site, Sites +from PLC.Persons import Person, Persons +from PLC.KeyTypes import KeyType, KeyTypes +from PLC.Keys import Key, Keys +from PLC.BootStates import BootState, BootStates +from PLC.Nodes import Node, Nodes +from PLC.SliceInstantiations import SliceInstantiations +from PLC.Slices import Slice, Slices +from PLC.Roles import Role, Roles + +# settings +# initial version was doing only one final commit +# * set commit_mode to False to get that behaviour +# * set comit_mode to True to get everything synced at once +# the issue with the 'one-commit-at-the-end' approach is +# that the db gets basically totally locked during too long +# causing various issues/crashes in the rest of the system +commit_mode = True + +# turn this to False only if both ends have the same db schema +# compatibility mode is a bit slower but probably safer on the long run +compatibility = True + +# debugging +# for verbose output +verbose = False +use_cache = None +# for debugging specific entries - display detailed info on selected objs +focus_type = None # set to e.g. 'Person' +# set to a list of ids (e.g. person_ids) - remote or local ids should work +focus_ids = [] +# DEBUGGING SETTINGS example +# set to a filename for using cached data when debugging +# WARNING: does *not* actually connect to the peer in this case +# or more precisely, connect only if the file is not found +# i.e. 
# to avoid several instances running at the same time
class FileLock:
    """
    Advisory file lock used to serialize RefreshPeer runs.

    lock() returns True when the exclusive flock was acquired, False
    otherwise; it never blocks (LOCK_NB).  A lock file older than
    `expire` seconds is treated as stale and removed before retrying.
    """

    def __init__(self, file_path, expire=60 * 60 * 2):
        # seconds after which an existing lock file is considered stale
        self.expire = expire
        self.fpath = file_path
        # open file object while the lock is held, None otherwise
        self.fd = None

    def lock(self):
        """Try to acquire the lock without blocking; return True/False."""
        # clear a stale lock file left behind by a dead instance
        if os.path.exists(self.fpath):
            if (time.time() - os.stat(self.fpath).st_ctime) > self.expire:
                try:
                    os.unlink(self.fpath)
                except Exception as e:
                    message('FileLock.lock({}) : {}'.format(self.fpath, e))
                    return False
        try:
            self.fd = open(self.fpath, 'w')
            fcntl.flock(self.fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
        except IOError as e:
            message('FileLock.lock({}) : {}'.format(self.fpath, e))
            # bug fix: do not leak the file descriptor when flock fails;
            # the original left the file open and dangling in self.fd
            if self.fd is not None:
                self.fd.close()
                self.fd = None
            return False
        return True

    def unlock(self):
        """Release the flock and close the underlying file."""
        try:
            fcntl.flock(self.fd, fcntl.LOCK_UN | fcntl.LOCK_NB)
            self.fd.close()
        except IOError as e:
            message('FileLock.unlock({}) : {}'.format(self.fpath, e))
Person.site_ids since this is managed separately + # and does not make sense any way + # (*) we also ignore things like date_created and last_updated + # that refer to local db creation/update + ignore_site_fields = [ + 'site_id', 'peer_id', 'peer_site_id', + 'address_ids', 'node_ids', 'person_ids', 'pcu_ids', 'slice_ids', 'site_tag_ids', + 'date_created', 'last_updated', + ] + ignore_key_fields = [ + 'key_id', 'peer_id', 'peer_key_id', + 'person_id', + ] + ignore_person_fields = [ + 'person_id', 'peer_id', 'peer_person_id', + 'key_ids', 'slice_ids', 'person_tag_ids', 'role_ids', 'roles', 'site_ids', + 'date_created', 'last_updated', + ] + ignore_node_fields = [ + 'node_id', 'peer_id', 'peer_node_id', + 'node_tag_ids', 'interface_ids', 'slice_ids', 'nodegroup_ids', 'pcu_ids', 'ports', + 'date_created', 'last_updated', + # somehow those won't print in the ple db + 'last_download', 'last_contact', 'last_pcu_reboot', 'last_boot', + 'last_time_spent_offline', 'last_time_spent_online', 'last_pcu_confirmation', + ] + ignore_slice_fields = [ + 'slice_id', 'peer_id', 'peer_slice_id', + 'person_ids', 'slice_tag_ids', 'node_ids', + 'created', + ] + + def call(self, auth, peer_id_or_peername): + ret_val = None + peername = Peers(self.api, [peer_id_or_peername], [ + 'peername'])[0]['peername'] + file_lock = FileLock("/tmp/refresh-peer-{peername}.lock" + .format(peername=peername)) + if not file_lock.lock(): + raise Exception, "Another instance of RefreshPeer is running." 
+ try: + ret_val = self.real_call(auth, peer_id_or_peername) + except Exception, e: + file_lock.unlock() + logger.exception("RefreshPeer caught exception - BEG") + message("RefreshPeer caught exception - END") + raise Exception, e + file_lock.unlock() + return ret_val + + def real_call(self, auth, peer_id_or_peername): + # Get peer + peers = Peers(self.api, [peer_id_or_peername]) + if not peers: + raise PLCInvalidArgument, "No such peer '{}'".format(unicode(peer_id_or_peername)) + peer = peers[0] + peer_id = peer['peer_id'] + peername = peer['peername'] + + # Connect to peer API + peer.connect() + + timers = {} + + # Get peer data + start = time.time() + message('RefreshPeer starting up (commit_mode={})'.format(commit_mode)) + if not use_cache: + message('Issuing GetPeerData') + peer_tables = peer.GetPeerData() + else: + import json + if os.path.isfile(use_cache): + message("use_cache: WARNING: using cached getpeerdata") + with open(use_cache) as storage: + peer_tables = json.load(storage) + else: + message("use_cache: issuing GetPeerData") + peer_tables = peer.GetPeerData() + message("use_cache: saving in cache {}".format(use_cache)) + with open(use_cache, 'w') as storage: + json.dump(peer_tables, storage) + + # additions in June 2017 + + # remove entries not marked as enabled + # actually the 'enabled' flag is present on 'Sites' and 'Persons' + # however we accept disabled slices as + # (*) they don't come and go too often + # (*) they may contain vlid nodes, that we would then lose + # if we were to discard those sites + # so bottom line, we filter out only disabled persons + for cls in ('Persons',) : + peer_tables[cls] = [ + obj for obj in peer_tables[cls] if obj['enabled'] + ] + + # somehow we can see GetPeerData from PLC that contains references + # to nodes that are not exposed themselves + # which suggests some inconsistency on their end + # anyway, it's safer to sanitize the dataset to avoid corruption + exposed_peer_node_ids = { n['node_id'] for n in 
peer_tables['Nodes']} + for slice in peer_tables['Slices']: + before = len(slice['node_ids']) + slice['node_ids'] = [x for x in slice['node_ids'] if x in exposed_peer_node_ids] + after = len(slice['node_ids']) + if after != before: + message("{peername} slice {slicename} got sanitized - {diff} node entries removed out of {before}" + .format(peername=peername, slicename=slice['name'], + diff=before-after, before=before)) + + # end of additions + + # for smooth federation with 4.2 - ignore fields that are useless + # anyway, and rewrite boot_state + boot_state_rewrite = {'dbg': 'safeboot', 'diag': 'safeboot', 'disable': 'disabled', + 'inst': 'reinstall', 'rins': 'reinstall', 'new': 'reinstall', 'rcnf': 'reinstall'} + for node in peer_tables['Nodes']: + for key in ['nodenetwork_ids', 'dummybox_id']: + if key in node: + del node[key] + if node['boot_state'] in boot_state_rewrite: + node['boot_state'] = boot_state_rewrite[node['boot_state']] + for slice in peer_tables['Slices']: + for key in ['slice_attribute_ids']: + if key in slice: + del slice[key] + timers['transport'] = time.time() - start - peer_tables['db_time'] + timers['peer_db'] = peer_tables['db_time'] + message_verbose('GetPeerData returned -> db={} transport={}' + .format(timers['peer_db'], timers['transport'])) + + def sync(objects, peer_objects, classobj, columns): + """ + Synchronizes two dictionaries of objects. objects should + be a dictionary of local objects keyed on their foreign + identifiers. peer_objects should be a dictionary of + foreign objects keyed on their local (i.e., foreign to us) + identifiers. Returns a final dictionary of local objects + keyed on their foreign identifiers. 
+ """ + + classname = classobj(self.api).__class__.__name__ + primary_key = getattr(classobj, 'primary_key') + # display all peer objects of these types while looping + secondary_keys = {'Node': 'hostname', 'Slice': 'name', + 'Person': 'email', 'Site': 'login_base'} + secondary_key = None + if classname in secondary_keys: + secondary_key = secondary_keys[classname] + + message_verbose('Entering sync on {} ({})' + .format(classname, primary_key)) + + synced = {} + + # Delete stale objects + for peer_object_id, object in objects.iteritems(): + if peer_object_id not in peer_objects: + object.delete(commit=commit_mode) + message("{} {} {} deleted" + .format(peername, classname, object[primary_key])) + + total = len(peer_objects) + count = 1 + + # peer_object_id, peer_object and object are open variables in the loop below... + # (local) object might be None if creating a new one + def in_focus(): + if classname != focus_type: + return False + return (peer_object_id in focus_ids) \ + or (object and (primary_key in object) + and (object[primary_key] in focus_ids)) + + def message_focus(message): + if in_focus(): + # always show remote + message_verbose("peer_obj : {} [[{}]]".format(peer_object_id, peer_object), + header='FOCUS ' + message) + # show local object if a match was found + if object: + message_verbose("local_obj : <<{}>>".format(object), + header='FOCUS ' + message) + + # the function to compare a local object with its candidate peer obj + # xxx probably faster when compatibility is False... + def equal_fields(object, peer_object, columns): + # fast version: must use __eq__() instead of == since + # peer_object may be a raw dict instead of a Peer object. 
+ if not compatibility: + result = object.__eq__(peer_object) + if not result: + message_verbose("fast mode: difference found between {} and {}" + .format(object, peer_object)) + return result + else: + for column in columns: + if object[column] != peer_object[column]: + message_verbose("difference found in column {}".format(column)) + message_verbose("our object {}".format(object[column])) + message_verbose("remote object {}".format(peer_object[column])) + return False + return True + + # Add/update new/existing objects + for peer_object_id, peer_object in peer_objects.iteritems(): + peer_object_name = "" + if secondary_key: + peer_object_name = "({})".format(peer_object[secondary_key]) + message_verbose('{} peer_object_id={} {} ({}/{})' + .format(classname, peer_object_id, peer_object_name, count, total)) + count += 1 + if peer_object_id in synced: + message("Warning: {peername} Skipping already added {classname}: {obj}" + .format(peername=peername, + classname=classname, obj=peer_object)) + continue + + if peer_object_id in objects: + # Update existing object + object = objects[peer_object_id] + + # Replace foreign identifier with existing local + # identifier temporarily for the purposes of + # comparison. 
+ peer_object[primary_key] = object[primary_key] + + if not equal_fields(object, peer_object, columns): + # Only update intrinsic fields + object.update(object.db_fields(peer_object)) + message_focus("DIFFERENCES : updated / syncing") + sync = True + action = "changed" + else: + message_focus("UNCHANGED - left intact / not syncing") + sync = False + action = None + + # Restore foreign identifier + peer_object[primary_key] = peer_object_id + else: + object = None + # Add new object + object = classobj(self.api, peer_object) + # Replace foreign identifier with new local identifier + del object[primary_key] + message_focus("NEW -- created with clean id - syncing") + sync = True + action = "added" + + if sync: + message_verbose("syncing {classname} {id} - commit_mode={mode}" + .format(classname=classname, + id=peer_object_id, mode=commit_mode)) + try: + object.sync(commit=commit_mode) + except PLCInvalidArgument, err: + # XXX Log an event instead of printing to logfile + # skip if validation fails + message("Warning: {peername} Skipping invalid {classname} ({err})\n{object}" + .format(peername=peername, classname=classname, + object=peer_object, err=err)) + continue + + synced[peer_object_id] = object + + if action: + message("{peername}: ({count}/{total}) {classname} {primary} {name} {action}" + .format(peername=peername, + count=count, total=total, + classname=classname, primary=object[primary_key], + name=peer_object_name, action=action)) + + message_verbose("Exiting sync on {}".format(classname)) + + return synced + + # over time, we've had issues with a given column being + # added on one side and not on the other + # this helper function computes the intersection of two list of + # fields/columns + def intersect(l1, l2): + if compatibility: + return list(set(l1).intersection(set(l2))) + else: + return l1 + + # some fields definitely need to be ignored + def ignore(l1, l2): + return list(set(l1).difference(set(l2))) + + # + # Synchronize foreign sites + # + + 
start = time.time() + + message('(1) Dealing with Sites') + + # Compare only the columns returned by the GetPeerData() call + if peer_tables['Sites']: + columns = peer_tables['Sites'][0].keys() + columns = intersect(columns, Site.fields) + else: + columns = None + + # Keyed on foreign site_id + old_peer_sites = Sites( + self.api, {'peer_id': peer_id}, columns).dict('peer_site_id') + sites_at_peer = dict([(site['site_id'], site) + for site in peer_tables['Sites']]) + + # Synchronize new set (still keyed on foreign site_id) + peer_sites = sync(old_peer_sites, sites_at_peer, Site, + ignore(columns, RefreshPeer.ignore_site_fields)) + + for peer_site_id, site in peer_sites.iteritems(): + # Bind any newly cached sites to peer + if peer_site_id not in old_peer_sites: + peer.add_site(site, peer_site_id, commit=commit_mode) + site['peer_id'] = peer_id + site['peer_site_id'] = peer_site_id + + timers['site'] = time.time() - start + + # + # XXX Synchronize foreign key types + # + + message('(2) Dealing with Keys') + + key_types = KeyTypes(self.api).dict() + + # + # Synchronize foreign keys + # + + start = time.time() + + # Compare only the columns returned by the GetPeerData() call + if peer_tables['Keys']: + columns = peer_tables['Keys'][0].keys() + columns = intersect(columns, Key.fields) + else: + columns = None + + # Keyed on foreign key_id + old_peer_keys = Keys( + self.api, {'peer_id': peer_id}, columns).dict('peer_key_id') + keys_at_peer = dict([(key['key_id'], key) + for key in peer_tables['Keys']]) + + # Fix up key_type references + for peer_key_id, key in keys_at_peer.items(): + if key['key_type'] not in key_types: + # XXX Log an event instead of printing to logfile + message("Warning: Skipping invalid {peername} key {key}" + .format(peername=peername, key=key)) + del keys_at_peer[peer_key_id] + continue + + # Synchronize new set (still keyed on foreign key_id) + peer_keys = sync(old_peer_keys, keys_at_peer, Key, + ignore(columns, RefreshPeer.ignore_key_fields)) + 
for peer_key_id, key in peer_keys.iteritems(): + # Bind any newly cached keys to peer + if peer_key_id not in old_peer_keys: + peer.add_key(key, peer_key_id, commit=commit_mode) + key['peer_id'] = peer_id + key['peer_key_id'] = peer_key_id + + timers['keys'] = time.time() - start + + # + # Synchronize foreign users + # + + start = time.time() + + message('(3) Dealing with Persons') + + # Compare only the columns returned by the GetPeerData() call + if peer_tables['Persons']: + columns = peer_tables['Persons'][0].keys() + columns = intersect(columns, Person.fields) + else: + columns = None + + # Keyed on foreign person_id + old_peer_persons = Persons( + self.api, {'peer_id': peer_id}, columns).dict('peer_person_id') + + # artificially attach the persons returned by GetPeerData to the new peer + # this is because validate_email needs peer_id to be correct when + # checking for duplicates + for person in peer_tables['Persons']: + person['peer_id'] = peer_id + persons_at_peer = dict([(peer_person['person_id'], peer_person) + for peer_person in peer_tables['Persons']]) + + # XXX Do we care about membership in foreign site(s)? 
+ + # Synchronize new set (still keyed on foreign person_id) + peer_persons = sync(old_peer_persons, persons_at_peer, Person, + ignore(columns, RefreshPeer.ignore_person_fields)) + + # transcoder : retrieve a local key_id from a peer_key_id + key_transcoder = dict([(key['key_id'], peer_key_id) + for peer_key_id, key in peer_keys.iteritems()]) + + for peer_person_id, person in peer_persons.iteritems(): + # Bind any newly cached users to peer + if peer_person_id not in old_peer_persons: + peer.add_person(person, peer_person_id, commit=commit_mode) + person['peer_id'] = peer_id + person['peer_person_id'] = peer_person_id + person['key_ids'] = [] + + # User as viewed by peer + peer_person = persons_at_peer[peer_person_id] + + # Foreign keys currently belonging to the user + old_person_key_ids = [key_transcoder[key_id] for key_id in person['key_ids'] + if key_transcoder[key_id] in peer_keys] + + # Foreign keys that should belong to the user + # this is basically peer_person['key_ids'], we just check it makes sense + # (e.g. 
we might have failed importing it) + person_key_ids = [key_id for key_id in peer_person[ + 'key_ids'] if key_id in peer_keys] + + # Remove stale keys from user + for key_id in (set(old_person_key_ids) - set(person_key_ids)): + person.remove_key(peer_keys[key_id], commit=commit_mode) + message("{peername} Key {key_id} removed from person {email}" + .format(peername=peername, + key_id=key_id, email=person['email'])) + + # Add new keys to user + for key_id in (set(person_key_ids) - set(old_person_key_ids)): + #message("before add_key, passing person={}".format(person)) + #message("before add_key, passing key={}".format(peer_keys[key_id])) + person.add_key(peer_keys[key_id], commit=commit_mode) + message("{} Key {} added into person {}" + .format(peername, key_id, person['email'])) + + timers['persons'] = time.time() - start + + # + # XXX Synchronize foreign boot states + # + + boot_states = BootStates(self.api).dict() + + # + # Synchronize foreign nodes + # + + start = time.time() + + # NOTE: we do import disabled sites + message('(4) Dealing with Nodes (1)') + + # Compare only the columns returned by the GetPeerData() call + if peer_tables['Nodes']: + columns = peer_tables['Nodes'][0].keys() + columns = intersect(columns, Node.fields) + else: + columns = Node.fields + + # Keyed on foreign node_id + old_peer_nodes = Nodes( + self.api, {'peer_id': peer_id}, columns).dict('peer_node_id') + nodes_at_peer = dict([(node['node_id'], node) + for node in peer_tables['Nodes']]) + + # Fix up site_id and boot_states references + for peer_node_id, node in nodes_at_peer.items(): + errors = [] + if node['site_id'] not in peer_sites: + errors.append("invalid (or disabled) site {}".format(node['site_id'])) + if node['boot_state'] not in boot_states: + errors.append("invalid boot state {}".format(node['boot_state'])) + if errors: + # XXX Log an event instead of printing to logfile + message("Warning: Skipping invalid {peername} node {hostname} - {errors}" + .format(peername=peername, 
+ hostname=node['hostname'], errors=", ".join(errors))) + del nodes_at_peer[peer_node_id] + continue + else: + node['site_id'] = peer_sites[node['site_id']]['site_id'] + + # Synchronize new set + peer_nodes = sync(old_peer_nodes, nodes_at_peer, Node, + ignore(columns, RefreshPeer.ignore_node_fields)) + + for peer_node_id, node in peer_nodes.iteritems(): + # Bind any newly cached foreign nodes to peer + if peer_node_id not in old_peer_nodes: + peer.add_node(node, peer_node_id, commit=commit_mode) + node['peer_id'] = peer_id + node['peer_node_id'] = peer_node_id + + timers['nodes'] = time.time() - start + + # + # Synchronize local nodes + # + + start = time.time() + message('(5) Dealing with Nodes (2)') + + # Keyed on local node_id + local_nodes = Nodes(self.api).dict() + + for node in peer_tables['PeerNodes']: + # Foreign identifier for our node as maintained by peer + peer_node_id = node['node_id'] + # Local identifier for our node as cached by peer + node_id = node['peer_node_id'] + if node_id in local_nodes: + # Still a valid local node, add it to the synchronized + # set of local node objects keyed on foreign node_id. 
+ peer_nodes[peer_node_id] = local_nodes[node_id] + + timers['local_nodes'] = time.time() - start + + # + # XXX Synchronize foreign slice instantiation states + # + + slice_instantiations = SliceInstantiations(self.api).dict() + + # + # Synchronize foreign slices + # + + start = time.time() + + message('(6) Dealing with Slices') + + # Compare only the columns returned by the GetPeerData() call + if peer_tables['Slices']: + columns = peer_tables['Slices'][0].keys() + columns = intersect(columns, Slice.fields) + else: + columns = None + + # Keyed on foreign slice_id + old_peer_slices = Slices( + self.api, {'peer_id': peer_id}, columns).dict('peer_slice_id') + slices_at_peer = dict([(slice['slice_id'], slice) + for slice in peer_tables['Slices']]) + + # Fix up site_id, instantiation, and creator_person_id references + for peer_slice_id, slice in slices_at_peer.items(): + errors = [] + if slice['site_id'] not in peer_sites: + errors.append("invalid site {}".format(slice['site_id'])) + if slice['instantiation'] not in slice_instantiations: + errors.append("invalid instantiation {}" + .format(slice['instantiation'])) + if slice['creator_person_id'] not in peer_persons: + # Just NULL it out + slice['creator_person_id'] = None + else: + slice['creator_person_id'] = peer_persons[ + slice['creator_person_id']]['person_id'] + if errors: + message("Warning: Skipping invalid {peername} slice {slice} : {errors}" + .format(peername=peername, + slice=slice, errors=", ".join(errors))) + del slices_at_peer[peer_slice_id] + continue + else: + slice['site_id'] = peer_sites[slice['site_id']]['site_id'] + + # Synchronize new set + peer_slices = sync(old_peer_slices, slices_at_peer, Slice, + ignore(columns, RefreshPeer.ignore_slice_fields)) + + message('(7) Dealing with Nodes in Slices') + # transcoder : retrieve a local node_id from a peer_node_id + node_transcoder = dict([(node['node_id'], peer_node_id) + for peer_node_id, node in peer_nodes.iteritems()]) + person_transcoder = 
dict([(person['person_id'], peer_person_id) + for peer_person_id, person in peer_persons.iteritems()]) + + for peer_slice_id, slice in peer_slices.iteritems(): + # Bind any newly cached foreign slices to peer + if peer_slice_id not in old_peer_slices: + peer.add_slice(slice, peer_slice_id, commit=commit_mode) + slice['peer_id'] = peer_id + slice['peer_slice_id'] = peer_slice_id + slice['node_ids'] = [] + slice['person_ids'] = [] + + # Slice as viewed by peer + peer_slice = slices_at_peer[peer_slice_id] + + # Nodes that are currently part of the slice + old_slice_node_ids = [node_transcoder[node_id] for node_id in slice['node_ids'] + if node_id in node_transcoder and node_transcoder[node_id] in peer_nodes] + + # Nodes that should be part of the slice + slice_node_ids = [node_id for node_id in peer_slice['node_ids'] + if node_id in peer_nodes] + + # Remove stale nodes from slice + for node_id in (set(old_slice_node_ids) - set(slice_node_ids)): + slice.remove_node(peer_nodes[node_id], commit=commit_mode) + message("{peername} node {hostname} (id {node_id}) removed from slice {slicename} (id {slice_id})" + .format(peername=peername, + hostname=peer_nodes[node_id]['hostname'], node_id=peer_nodes[node_id]['node_id'], + slicename=slice['name'], slice_id=slice['slice_id'])) + + # Add new nodes to slice + for node_id in (set(slice_node_ids) - set(old_slice_node_ids)): + slice.add_node(peer_nodes[node_id], commit=commit_mode) + message("{peername} node {hostname} (id {node_id}) added into slice {slicename} (id {slice_id})" + .format(peername=peername, + hostname=peer_nodes[node_id]['hostname'], node_id=peer_nodes[node_id]['node_id'], + slicename=slice['name'], slice_id=slice['slice_id'])) + + if slice['slice_id'] == 225: + return + + # N.B.: Local nodes that may have been added to the slice + # by hand, are removed. In other words, don't do this. 
+ + # Foreign users that are currently part of the slice + # old_slice_person_ids = [ person_transcoder[person_id] for person_id in slice['person_ids'] \ + # if person_transcoder[person_id] in peer_persons] + # An issue occurred with a user who registered on both sites (same email) + # So the remote person could not get cached locally + # The one-line map/filter style is nicer but ineffective here + old_slice_person_ids = [] + for person_id in slice['person_ids']: + if not person_transcoder.has_key(person_id): + message('WARNING : person_id {person_id} in {slicename} not transcodable (1) - skipped' + .format(person_id=person_id, slicename=slice['name'])) + elif person_transcoder[person_id] not in peer_persons: + message('WARNING : person_id {person_id} in {slicename} not transcodable (2) - skipped' + .format(person_id=person_id, slicename=slice['name'])) + else: + old_slice_person_ids += [person_transcoder[person_id]] + + # Foreign users that should be part of the slice + slice_person_ids = [person_id for person_id in peer_slice[ + 'person_ids'] if person_id in peer_persons] + + # Remove stale users from slice + for person_id in (set(old_slice_person_ids) - set(slice_person_ids)): + slice.remove_person( + peer_persons[person_id], commit=commit_mode) + message("{peername} user {email} removed from slice {slicename}" + .format(peername=peername, + email=peer_persons[person_id]['email'], + slicename=slice['name'])) + + # Add new users to slice + for person_id in (set(slice_person_ids) - set(old_slice_person_ids)): + slice.add_person(peer_persons[person_id], commit=commit_mode) + message("{peername} user {email} added into slice {slicename}" + .format(peername=peername, + email=peer_persons[person_id]['email'], + slicename=slice['name'])) + + # N.B.: Local users that may have been added to the slice + # by hand, are not touched. 
+ + timers['slices'] = time.time() - start + + # + # Persons x Sites + # + start = time.time() + + message('(8) Dealing with Persons in Sites') + + for peer_site_id, site in peer_sites.iteritems(): + # Site as viewed by peer + peer_site = sites_at_peer[peer_site_id] + + # Persons that are currently part of the site + old_site_person_ids = [person_transcoder[person_id] for person_id in site['person_ids'] + if person_id in person_transcoder and person_transcoder[person_id] in peer_persons] + + # Perons that should be part of the site + site_person_ids = [person_id for person_id in peer_site[ + 'person_ids'] if person_id in peer_persons] + + # Remove stale persons from site + for person_id in (set(old_site_person_ids) - set(site_person_ids)): + site.remove_person(peer_persons[person_id], commit=commit_mode) + message("{peername} person {email} removed from site {login_base}" + .format(peername=peername, + email=peer_persons[person_id]['email'], + login_base=site['login_base'])) + + # Add new persons to site + for person_id in (set(site_person_ids) - set(old_site_person_ids)): + site.add_person(peer_persons[person_id], commit=commit_mode) + message("{peername} person {email} added into site {login_base}" + .format(peername=peername, + email=peer_persons[person_id]['email'], + login_base=site['login_base'])) + + timers['sites-persons'] = time.time() - start + + # + # Persons x Roles + # + start = time.time() + + message('(9) Dealing with Roles for Persons') + + roles = Roles(self.api) + roles_dict = dict([(role['role_id'], role) for role in roles]) + for peer_person_id, person in peer_persons.iteritems(): + # Person as viewed by peer + peer_person = persons_at_peer[peer_person_id] + + # Roles that are currently attributed for the person + old_person_role_ids = [role_id for role_id in person['role_ids']] + + # Roles that should be attributed to the person + person_role_ids = [role_id for role_id in peer_person['role_ids']] + + # Remove stale roles + for role_id in 
(set(old_person_role_ids) - set(person_role_ids)): + person.remove_role(roles_dict[role_id], commit=commit_mode) + message("{peername} role {rolename} removed from person {email}" + .format(peername=peername, + rolename=roles_dict[role_id]['name'], + email=person['email'])) + + # Add new roles to person + for role_id in (set(person_role_ids) - set(old_person_role_ids)): + person.add_role(roles_dict[role_id], commit=commit_mode) + message("{peername} role {rolename} added from person {email}" + .format(peername=peername, + rolename=roles_dict[role_id]['name'], + email=person['email'])) + + timers['persons-roles'] = time.time() - start + + # Update peer itself and commit + peer.sync(commit=True) + + return timers diff --git a/PLC/Methods/ReportRunlevel.py b/PLC/Methods/ReportRunlevel.py new file mode 100644 index 0000000..c2fb9ce --- /dev/null +++ b/PLC/Methods/ReportRunlevel.py @@ -0,0 +1,61 @@ +from PLC.Faults import * +from PLC.Method import Method +from PLC.Parameter import Parameter, Mixed +from PLC.Auth import Auth, BootAuth, SessionAuth +from PLC.Nodes import Node, Nodes + +can_update = ['run_level'] + +class ReportRunlevel(Method): + """ + report runlevel + """ + roles = ['node', 'admin'] + + accepts = [ + Mixed(BootAuth(), SessionAuth(), Auth()), + {'run_level': Node.fields['run_level'], + }, + Mixed(Node.fields['node_id'], + Node.fields['hostname']) + ] + + returns = Parameter(int, '1 if successful') + + def call(self, auth, report_fields, node_id_or_hostname=None): + + if not isinstance(self.caller, Node): + # check admin + if 'admin' not in self.caller['roles']: + raise PLCPermissionDenied, "Not allowed to update node run_level" + + nodes = Nodes(self.api, [node_id_or_hostname]) + if not nodes: + raise PLCInvalidArgument, "No such node" + else: + nodes = [self.caller] + + node = nodes[0] + # avoid logging this even too often + # avoid logging occurrences where run_level does not change + former_level=None + if 'run_level' in node: 
former_level=node['run_level'] + + node.update_last_contact() + for field in can_update: + if field in report_fields: + node.update({field : report_fields[field]}) + + node.sync(commit=True) + + # skip logging in this case + if former_level and 'run_level' in node and node['run_level'] == former_level: + pass + else: + # handle the 'run_level' key + message="run level " + node['hostname'] + ":" + if 'run_level' in report_fields: + message += str(former_level) + "->" + report_fields['run_level'] + message += ", ".join( [ k + "->" + v for (k,v) in report_fields.items() if k not in ['run_level'] ] ) + + return 1 diff --git a/PLC/Methods/ResetPassword.py b/PLC/Methods/ResetPassword.py new file mode 100644 index 0000000..8e9da53 --- /dev/null +++ b/PLC/Methods/ResetPassword.py @@ -0,0 +1,128 @@ +import random +import base64 +import time +import urllib + +from types import StringTypes + +from PLC.Logger import logger +from PLC.Faults import * +from PLC.Method import Method +from PLC.Parameter import Parameter, Mixed +from PLC.Persons import Person, Persons +from PLC.Messages import Message, Messages +from PLC.Auth import Auth +from PLC.sendmail import sendmail + +class ResetPassword(Method): + """ + If verification_key is not specified, then a new verification_key + will be generated and stored with the user's account. The key will + be e-mailed to the user in the form of a link to a web page. + + The web page should verify the key by calling this function again + and specifying verification_key. If the key matches what has been + stored in the user's account, a new random password will be + e-mailed to the user. + + Returns 1 if verification_key was not specified, or was specified + and is valid, faults otherwise. 
+ """ + + roles = ['admin'] + + accepts = [ + Auth(), + Mixed(Person.fields['person_id'], + Person.fields['email']), + Person.fields['verification_key'], + Person.fields['verification_expires'] + ] + + returns = Parameter(int, '1 if verification_key is valid') + + def call(self, auth, person_id_or_email, verification_key = None, verification_expires = None): + # Get account information + # we need to search in local objects only + if isinstance (person_id_or_email,StringTypes): + filter={'email':person_id_or_email} + else: + filter={'person_id':person_id_or_email} + filter['peer_id']=None + persons = Persons(self.api, filter) + if not persons: + raise PLCInvalidArgument, "No such account" + person = persons[0] + + if person['peer_id'] is not None: + raise PLCInvalidArgument, "Not a local account" + + if not person['enabled']: + raise PLCInvalidArgument, "Account must be enabled" + + # Be paranoid and deny password resets for admins + if 'admin' in person['roles']: + raise PLCInvalidArgument, "Cannot reset admin passwords" + + # Generate 32 random bytes + bytes = random.sample(xrange(0, 256), 32) + # Base64 encode their string representation + random_key = base64.b64encode("".join(map(chr, bytes))) + + if verification_key is not None: + if person['verification_key'] is None or \ + person['verification_expires'] is None or \ + person['verification_expires'] < time.time(): + raise PLCPermissionDenied, "Verification key has expired" + elif person['verification_key'] != verification_key: + raise PLCPermissionDenied, "Verification key incorrect" + else: + # Reset password to random string + person['password'] = random_key + person['verification_key'] = None + person['verification_expires'] = None + person.sync() + + message_id = 'Password reset' + else: + # Only allow one reset at a time + if person['verification_expires'] is not None and \ + person['verification_expires'] > time.time(): + raise PLCPermissionDenied, "Password reset request already pending" + + if 
verification_expires is None: + verification_expires = int(time.time() + (24 * 60 * 60)) + + person['verification_key'] = random_key + person['verification_expires'] = verification_expires + person.sync() + + message_id = 'Password reset requested' + + messages = Messages(self.api, [message_id]) + if messages: + # Send password to user + message = messages[0] + + params = {'PLC_NAME': self.api.config.PLC_NAME, + 'PLC_MAIL_SUPPORT_ADDRESS': self.api.config.PLC_MAIL_SUPPORT_ADDRESS, + 'PLC_WWW_HOST': self.api.config.PLC_WWW_HOST, + 'PLC_WWW_SSL_PORT': self.api.config.PLC_WWW_SSL_PORT, + 'person_id': person['person_id'], + # Will be used in a URL, so must quote appropriately + 'verification_key': urllib.quote_plus(random_key), + 'password': random_key, + 'email': person['email']} + + sendmail(self.api, + To = ("%s %s" % (person['first_name'], person['last_name']), person['email']), + Subject = message['subject'] % params, + Body = message['template'] % params) + else: + logger.warning("No message template '%s'" % message_id) + + # Logging variables + self.event_objects = {'Person': [person['person_id']]} + self.message = message_id + + return 1 diff --git a/PLC/Methods/ResolveSlices.py b/PLC/Methods/ResolveSlices.py new file mode 100644 index 0000000..6eec238 --- /dev/null +++ b/PLC/Methods/ResolveSlices.py @@ -0,0 +1,42 @@ +from PLC.Method import Method +from PLC.Parameter import Parameter, Mixed +from PLC.Filter import Filter +from PLC.Auth import Auth +from PLC.Slices import Slice, Slices + +class ResolveSlices(Method): + """ + This method is similar to GetSlices, except that (1) the returned + columns are restricted to 'name', 'slice_id' and 'expires', and + (2) it returns expired slices too. This method is designed to help + third-party software solve slice names from their slice_id + (e.g. PlanetFlow Central). For this reason it is accessible with + anonymous authentication (among others). 
+ """ + + roles = ['admin', 'pi', 'user', 'tech', 'anonymous' ] + + applicable_fields = { + 'slice_id' : Slice.fields['slice_id'], + 'name' : Slice.fields['name'], + 'expires': Slice.fields['expires'], + } + + accepts = [ + Auth(), + Mixed([Mixed(Slice.fields['slice_id'], + Slice.fields['name'])], + Parameter(str,"name"), + Parameter(int,"slice_id"), + Filter(applicable_fields)) + ] + + returns = [applicable_fields] + + def call(self, auth, slice_filter = None): + + # Must query at least slice_id (see below) + return_fields = self.applicable_fields.keys() + # pass expires=0 + slices = Slices(self.api, slice_filter, return_fields, 0) + return slices diff --git a/PLC/Methods/RetrieveSlicePersonKeys.py b/PLC/Methods/RetrieveSlicePersonKeys.py new file mode 100644 index 0000000..86a927d --- /dev/null +++ b/PLC/Methods/RetrieveSlicePersonKeys.py @@ -0,0 +1,71 @@ +from types import StringTypes + +from PLC.Method import Method +from PLC.Parameter import Parameter, Mixed +from PLC.Filter import Filter +from PLC.Auth import Auth +from PLC.Slices import Slice, Slices +from PLC.Persons import Person, Persons +from PLC.Keys import Key, Keys + +class RetrieveSlicePersonKeys(Method): + """ + This method exposes the public ssh keys for people in a slice + It expects a slice name or id, and returns a dictionary on emails. + This method is designed to help third-party software authenticate + users (e.g. the OMF Experiment Controller). + For this reason it is accessible with anonymous authentication. 
+ """ + + roles = ['admin', 'pi', 'user', 'tech', 'anonymous' ] + + applicable_fields = { + 'slice_id' : Slice.fields['slice_id'], + 'name' : Slice.fields['name'], + } + + accepts = [ + Auth(), + Mixed(Slice.fields['slice_id'], + Slice.fields['name']), + Filter(Person.fields), + ] + + returns = Parameter (dict, " ssh keys hashed on emails") + + def call(self, auth, slice_id_or_name, person_filter=None): + + if person_filter is None: person_filter = {} + + # the people in the slice + slice=Slices (self.api, slice_id_or_name, ['person_ids'])[0] + slice_person_ids = slice['person_ids'] + + # if caller has not specified person_id, use slice_person_ids + if 'person_id' not in person_filter: + person_filter['person_id']=slice_person_ids + # otherwise, compute intersection + else: + caller_provided = person_filter['person_id'] + if not isinstance (caller_provided,list): + caller_provided = [ caller_provided, ] + person_filter['person_id'] = list ( set(caller_provided).intersection(slice_person_ids) ) + + def merge (l1,l2): return l1+l2 + + persons = Persons (self.api, person_filter, ['email','key_ids'] ) + key_id_to_email_hash = \ + dict ( reduce ( merge , [ [ (kid,p['email']) for kid in p['key_ids']] for p in persons ] ) ) + + all_key_ids = reduce (merge, [ p['key_ids'] for p in persons ] ) + + all_keys = Keys (self.api, all_key_ids) + + result={} + for key in all_keys: + key_id=key['key_id'] + email = key_id_to_email_hash[key_id] + if email not in result: result[email]=[] + result[email].append (key['key']) + + return result diff --git a/PLC/Methods/RetrieveSliceSliverKeys.py b/PLC/Methods/RetrieveSliceSliverKeys.py new file mode 100644 index 0000000..6cdf2a0 --- /dev/null +++ b/PLC/Methods/RetrieveSliceSliverKeys.py @@ -0,0 +1,61 @@ +from types import StringTypes + +from PLC.Method import Method +from PLC.Parameter import Parameter, Mixed +from PLC.Filter import Filter +from PLC.Auth import Auth +from PLC.Nodes import Node, Nodes +from PLC.SliceTags import SliceTag, 
from types import StringTypes

from PLC.Method import Method
from PLC.Parameter import Parameter, Mixed
from PLC.Filter import Filter
from PLC.Auth import Auth
from PLC.Nodes import Node, Nodes
from PLC.SliceTags import SliceTag, SliceTags
from PLC.Slices import Slice, Slices

class RetrieveSliceSliverKeys(Method):
    """
    This method exposes the public ssh keys for a slice's slivers.
    It expects a slice name or id, and returns a dictionary on hostnames.
    This method is designed to help third-party software authenticate
    slivers (e.g. the OMF Experiment Controller).
    For this reason it is accessible with anonymous authentication.
    """

    roles = ['admin', 'pi', 'user', 'tech', 'anonymous' ]

    applicable_fields = {
        'slice_id' : Slice.fields['slice_id'],
        'name' : Slice.fields['name'],
        }

    accepts = [
        Auth(),
        Mixed(Slice.fields['slice_id'],
              Slice.fields['name']),
        Filter(Node.fields),
        ]

    returns = Parameter (dict, " ssh keys hashed on hostnames")

    def call(self, auth, slice_id_or_name, node_filter=None):

        # Build the SliceTag filter: the requested slice, the 'ssh_key'
        # tagname, and sliver tags only (i.e. tags bound to some node).
        tag_filter = {'tagname': 'ssh_key',
                      '~node_id': None}
        if isinstance(slice_id_or_name, int):
            tag_filter['slice_id'] = slice_id_or_name
        elif isinstance(slice_id_or_name, StringTypes):
            tag_filter['name'] = slice_id_or_name

        if node_filter:
            # make sure we only deal with local nodes
            node_filter['peer_id'] = None
            selected_nodes = Nodes(self.api, node_filter, ['node_id'])
            tag_filter['node_id'] = [node['node_id'] for node in selected_nodes]

        # slice_tags don't expose hostname, sigh..
        sliver_tags = SliceTags(self.api, tag_filter, ['node_id', 'tagname', 'value'])
        tagged_node_ids = [tag['node_id'] for tag in sliver_tags]
        # fetch the matching nodes so we can translate node_id -> hostname
        tagged_nodes = Nodes(self.api, tagged_node_ids, ['node_id', 'hostname'])
        hostname_of = dict([(node['node_id'], node['hostname']) for node in tagged_nodes])
        # return values hashed on hostname
        return dict([(hostname_of[tag['node_id']], tag['value']) for tag in sliver_tags])
+ """ + + roles = ['admin', 'pi', 'user', 'tech'] + + accepts = [ + Auth(), + Mixed(Person.fields['person_id'], + Person.fields['email']), + Mixed(Site.fields['site_id'], + Site.fields['login_base']) + ] + + returns = Parameter(int, '1 if successful') + + object_type = 'Person' + + def call(self, auth, person_id_or_email, site_id_or_login_base): + # Get account information + persons = Persons(self.api, [person_id_or_email]) + if not persons: + raise PLCInvalidArgument, "No such account" + person = persons[0] + + if person['peer_id'] is not None: + raise PLCInvalidArgument, "Not a local account" + + # Authenticated function + assert self.caller is not None + + # Non-admins can only update their own primary site + if 'admin' not in self.caller['roles'] and \ + self.caller['person_id'] != person['person_id']: + raise PLCPermissionDenied, "Not allowed to update specified account" + + # Get site information + sites = Sites(self.api, [site_id_or_login_base]) + if not sites: + raise PLCInvalidArgument, "No such site" + site = sites[0] + + if site['peer_id'] is not None: + raise PLCInvalidArgument, "Not a local site" + + if site['site_id'] not in person['site_ids']: + raise PLCInvalidArgument, "Not a member of the specified site" + + person.set_primary_site(site) + + return 1 diff --git a/PLC/Methods/SliceCreate.py b/PLC/Methods/SliceCreate.py new file mode 100644 index 0000000..e752505 --- /dev/null +++ b/PLC/Methods/SliceCreate.py @@ -0,0 +1,25 @@ +from PLC.Faults import * +from PLC.Method import Method +from PLC.Parameter import Parameter, Mixed +from PLC.Auth import Auth +from PLC.Slices import Slice, Slices +from PLC.Methods.AddSlice import AddSlice + +class SliceCreate(AddSlice): + """ + Deprecated. See AddSlice. 
+ """ + + status = "deprecated" + + accepts = [ + Auth(), + Slice.fields['name'], + AddSlice.accepts[1] + ] + + returns = Parameter(int, 'New slice_id (> 0) if successful') + + def call(self, auth, name, slice_fields = {}): + slice_fields['name'] = name + return AddSlice.call(self, auth, slice_fields) diff --git a/PLC/Methods/SliceDelete.py b/PLC/Methods/SliceDelete.py new file mode 100644 index 0000000..3376821 --- /dev/null +++ b/PLC/Methods/SliceDelete.py @@ -0,0 +1,29 @@ +import re + +from PLC.Faults import * +from PLC.Method import Method +from PLC.Parameter import Parameter, Mixed +from PLC.Slices import Slice, Slices +from PLC.Auth import Auth +from PLC.Methods.DeleteSlice import DeleteSlice + +class SliceDelete(DeleteSlice): + """ + Deprecated. See DeleteSlice. + + """ + + status = "deprecated" + + roles = ['admin', 'pi'] + + accepts = [ + Auth(), + Slice.fields['name'] + ] + + returns = Parameter(int, 'Returns 1 if successful, a fault otherwise.') + + def call(self, auth, slice_name): + + return DeleteSlice.call(self, auth, slice_name) diff --git a/PLC/Methods/SliceExtendedInfo.py b/PLC/Methods/SliceExtendedInfo.py new file mode 100644 index 0000000..26fbd9a --- /dev/null +++ b/PLC/Methods/SliceExtendedInfo.py @@ -0,0 +1,84 @@ +from PLC.Method import Method +from PLC.Parameter import Parameter, Mixed +from PLC.Filter import Filter +from PLC.Auth import Auth +from PLC.Slices import Slice, Slices +from PLC.SliceTags import SliceTag, SliceTags +from PLC.Sites import Site, Sites +from PLC.Nodes import Node, Nodes +from PLC.Persons import Person, Persons + +class SliceExtendedInfo(Method): + """ + Deprecated. Can be implemented with GetSlices. + + Returns an array of structs containing details about slices. + The summary can optionally include the list of nodes in and + users of each slice. + + Users may only query slices of which they are members. PIs may + query any of the slices at their sites. Admins may query any + slice. 
If a slice that cannot be queried is specified in + slice_filter, details about that slice will not be returned. + """ + + status = "deprecated" + + roles = ['admin', 'pi', 'user'] + + accepts = [ + Auth(), + [Slice.fields['name']], + Parameter(bool, "Whether or not to return users for the slices", nullok = True), + Parameter(bool, "Whether or not to return nodes for the slices", nullok = True) + ] + + returns = [Slice.fields] + + + def call(self, auth, slice_name_list=None, return_users=None, return_nodes=None, return_attributes=None): + # If we are not admin, make sure to return only viewable + # slices. + slice_filter = slice_name_list + slices = Slices(self.api, slice_filter) + if not slices: + raise PLCInvalidArgument, "No such slice" + + if 'admin' not in self.caller['roles']: + # Get slices that we are able to view + valid_slice_ids = self.caller['slice_ids'] + if 'pi' in self.caller['roles'] and self.caller['site_ids']: + sites = Sites(self.api, self.caller['site_ids']) + for site in sites: + valid_slice_ids += site['slice_ids'] + + if not valid_slice_ids: + return [] + + slices = filter(lambda slice: slice['slice_id'] in valid_slice_ids, slices) + + for slice in slices: + index = slices.index(slice) + node_ids = slices[index].pop('node_ids') + person_ids = slices[index].pop('person_ids') + attribute_ids = slices[index].pop('slice_tag_ids') + if return_users or return_users is None: + persons = Persons(self.api, person_ids) + person_info = [{'email': person['email'], + 'person_id': person['person_id']} \ + for person in persons] + slices[index]['users'] = person_info + if return_nodes or return_nodes is None: + nodes = Nodes(self.api, node_ids) + node_info = [{'hostname': node['hostname'], + 'node_id': node['node_id']} \ + for node in nodes] + slices[index]['nodes'] = node_info + if return_attributes or return_attributes is None: + attributes = SliceTags(self.api, attribute_ids) + attribute_info = [{'name': attribute['name'], + 'value': attribute['value']} 
\ + for attribute in attributes] + slices[index]['attributes'] = attribute_info + + return slices diff --git a/PLC/Methods/SliceGetTicket.py b/PLC/Methods/SliceGetTicket.py new file mode 100644 index 0000000..27c0832 --- /dev/null +++ b/PLC/Methods/SliceGetTicket.py @@ -0,0 +1,249 @@ +import os +import sys +from subprocess import Popen, PIPE, call +from tempfile import NamedTemporaryFile +from xml.sax.saxutils import escape, quoteattr, XMLGenerator + +from PLC.Faults import * +from PLC.Slices import Slice, Slices +from PLC.Nodes import Node, Nodes +from PLC.Persons import Person, Persons +from PLC.SliceTags import SliceTag, SliceTags + +from PLC.Methods.GetSliceTicket import GetSliceTicket + +class PrettyXMLGenerator(XMLGenerator): + """ + Adds indentation to the beginning and newlines to the end of + opening and closing tags. + """ + + def __init__(self, out = sys.stdout, encoding = "utf-8", indent = "", addindent = "", newl = ""): + XMLGenerator.__init__(self, out, encoding) + # XMLGenerator does not export _write() + self.write = self.ignorableWhitespace + self.indents = [indent] + self.addindent = addindent + self.newl = newl + + def startDocument(self): + XMLGenerator.startDocument(self) + + def startElement(self, name, attrs, indent = True, newl = True): + if indent: + self.ignorableWhitespace("".join(self.indents)) + self.indents.append(self.addindent) + + XMLGenerator.startElement(self, name, attrs) + + if newl: + self.ignorableWhitespace(self.newl) + + def characters(self, content): + # " to " + # ' to ' + self.write(escape(content, { + '"': '"', + "'": ''', + })) + + def endElement(self, name, indent = True, newl = True): + self.indents.pop() + if indent: + self.ignorableWhitespace("".join(self.indents)) + + XMLGenerator.endElement(self, name) + + if newl: + self.ignorableWhitespace(self.newl) + + def simpleElement(self, name, attrs = {}, indent = True, newl = True): + if indent: + self.ignorableWhitespace("".join(self.indents)) + + self.write('<' + name) 
+ for (name, value) in attrs.items(): + self.write(' %s=%s' % (name, quoteattr(value))) + self.write('/>') + + if newl: + self.ignorableWhitespace(self.newl) + +class SliceGetTicket(GetSliceTicket): + """ + Deprecated. See GetSliceTicket. + + Warning: This function exists solely for backward compatibility + with the old public PlanetLab 3.0 Node Manager, which will be + removed from service by 2007. This call is not intended to be used + by any other PLC except the public PlanetLab. + """ + + status = "deprecated" + + def call(self, auth, slice_id_or_name): + slices = Slices(self.api, [slice_id_or_name]) + if not slices: + raise PLCInvalidArgument, "No such slice" + slice = slices[0] + + # Allow peers to obtain tickets for their own slices + if slice['peer_id'] is not None: + raise PLCInvalidArgument, "Not a local slice" + + if slice['instantiation'] != 'delegated': + raise PLCInvalidArgument, "Not in delegated state" + + nodes = Nodes(self.api, slice['node_ids']).dict() + persons = Persons(self.api, slice['person_ids']).dict() + slice_tags = SliceTags(self.api, slice['slice_tag_ids']).dict() + + ticket = NamedTemporaryFile() + + xml = PrettyXMLGenerator(out = ticket, encoding = self.api.encoding, indent = "", addindent = " ", newl = "\n") + xml.startDocument() + + # + xml.startElement('ticket', {}) + + # + xml.startElement('slice', + {'id': str(slice['slice_id']), + 'name': unicode(slice['name']), + 'expiry': unicode(int(slice['expires']))}) + + # + xml.startElement('nodes', {}) + for node_id in slice['node_ids']: + if not nodes.has_key(node_id): + continue + node = nodes[node_id] + # + xml.simpleElement('node', + {'id': str(node['node_id']), + 'hostname': unicode(node['hostname'])}) + # + xml.endElement('nodes') + + # + xml.startElement('users', {}) + for person_id in slice['person_ids']: + if not persons.has_key(person_id): + continue + user = persons[person_id] + # + xml.simpleElement('user', + {'person_id': unicode(user['person_id']), + 'email': 
unicode(user['email'])}) + # + xml.endElement('users') + + # + xml.startElement('rspec', {}) + for slice_tag_id in slice['slice_tag_ids']: + if not slice_tags.has_key(slice_tag_id): + continue + slice_tag = slice_tags[slice_tag_id] + + name = slice_tag['name'] + value = slice_tag['value'] + + def kbps_to_bps(kbps): + bps = int(kbps) * 1000 + return bps + + def max_kbyte_to_bps(max_kbyte): + bps = int(max_kbyte) * 1000 * 8 / 24 / 60 / 60 + return bps + + # XXX Used to support multiple named values for each attribute type + name_type_cast = { + 'cpu_share': ('nm_cpu_share', 'cpu_share', 'integer', int), + + 'net_share': ('nm_net_share', 'rate', 'integer', int), + 'net_min_rate': ('nm_net_min_rate', 'rate', 'integer', int), + 'net_max_rate': ('nm_net_max_rate', 'rate', 'integer', int), + 'net_max_kbyte': ('nm_net_avg_rate', 'rate', 'integer', max_kbyte_to_bps), + + 'net_i2_share': ('nm_net_exempt_share', 'rate', 'integer', int), + 'net_i2_min_rate': ('nm_net_exempt_min_rate', 'rate', 'integer', kbps_to_bps), + 'net_i2_max_rate': ('nm_net_exempt_max_rate', 'rate', 'integer', kbps_to_bps), + 'net_i2_max_kbyte': ('nm_net_exempt_avg_rate', 'rate', 'integer', max_kbyte_to_bps), + + 'disk_max': ('nm_disk_quota', 'quota', 'integer', int), + 'plc_agent_version': ('plc_agent_version', 'version', 'string', str), + 'plc_slice_type': ('plc_slice_type', 'type', 'string', str), + 'plc_ticket_pubkey': ('plc_ticket_pubkey', 'key', 'string', str), + } + + if name == 'initscript': + (attribute_name, value_name, type) = ('initscript', 'initscript_id', 'integer') + value = slice_tag['slice_tag_id'] + elif name in name_type_cast: + (attribute_name, value_name, type, cast) = name_type_cast[name] + value = cast(value) + else: + attribute_name = value_name = name + type = "string" + + # + xml.startElement('resource', {'name': unicode(attribute_name)}) + + # + xml.startElement('value', + {'name': unicode(value_name), + 'type': type}, + newl = False) + # element value + 
xml.characters(unicode(value)) + # + xml.endElement('value', indent = False) + + # + xml.endElement('resource') + # + xml.endElement('rspec') + + # + xml.endElement('slice') + + # Add signature template + xml.startElement('Signature', {'xmlns': "http://www.w3.org/2000/09/xmldsig#"}) + xml.startElement('SignedInfo', {}) + xml.simpleElement('CanonicalizationMethod', {'Algorithm': "http://www.w3.org/TR/2001/REC-xml-c14n-20010315"}) + xml.simpleElement('SignatureMethod', {'Algorithm': "http://www.w3.org/2000/09/xmldsig#rsa-sha1"}) + xml.startElement('Reference', {'URI': ""}) + xml.startElement('Transforms', {}) + xml.simpleElement('Transform', {'Algorithm': "http://www.w3.org/2000/09/xmldsig#enveloped-signature"}) + xml.endElement('Transforms') + xml.simpleElement('DigestMethod', {'Algorithm': "http://www.w3.org/2000/09/xmldsig#sha1"}) + xml.simpleElement('DigestValue', {}) + xml.endElement('Reference') + xml.endElement('SignedInfo') + xml.simpleElement('SignatureValue', {}) + xml.endElement('Signature') + + xml.endElement('ticket') + xml.endDocument() + + if not hasattr(self.api.config, 'PLC_API_TICKET_KEY') or \ + not os.path.exists(self.api.config.PLC_API_TICKET_KEY): + raise PLCAPIError, "Slice ticket signing key not found" + + ticket.flush() + + # Sign the ticket + p = Popen(["xmlsec1", "--sign", + "--privkey-pem", self.api.config.PLC_API_TICKET_KEY, + ticket.name], + stdin = PIPE, stdout = PIPE, stderr = PIPE, close_fds = True) + signed_ticket = p.stdout.read() + err = p.stderr.read() + rc = p.wait() + + ticket.close() + + if rc: + raise PLCAPIError, err + + return signed_ticket diff --git a/PLC/Methods/SliceInfo.py b/PLC/Methods/SliceInfo.py new file mode 100644 index 0000000..2182c6e --- /dev/null +++ b/PLC/Methods/SliceInfo.py @@ -0,0 +1,75 @@ +from PLC.Method import Method +from PLC.Parameter import Parameter, Mixed +from PLC.Faults import * +from PLC.Filter import Filter +from PLC.Auth import Auth +from PLC.Slices import Slice, Slices +from PLC.Sites import 
Site, Sites +from PLC.Persons import Person, Persons +from PLC.Nodes import Node, Nodes + +class SliceInfo(Method): + """ + Deprecated. Can be implemented with GetSlices. + + Returns an array of structs containing details about slices. + The summary can optionally include the list of nodes in and + users of each slice. + + Users may only query slices of which they are members. PIs may + query any of the slices at their sites. Admins may query any + slice. If a slice that cannot be queried is specified in + slice_filter, details about that slice will not be returned. + """ + + status = "deprecated" + + roles = ['admin', 'pi', 'user'] + + accepts = [ + Auth(), + [Mixed(Slice.fields['name'])], + Parameter(bool, "Whether or not to return users for the slices", nullok = True), + Parameter(bool, "Whether or not to return nodes for the slices", nullok = True) + ] + + returns = [Slice.fields] + + + def call(self, auth, slice_name_list=None, return_users=None, return_nodes=None): + # If we are not admin, make sure to return only viewable + # slices. 
+ slice_filter = slice_name_list + slices = Slices(self.api, slice_filter) + if not slices: + raise PLCInvalidArgument, "No such slice" + + if 'admin' not in self.caller['roles']: + # Get slices that we are able to view + valid_slice_ids = self.caller['slice_ids'] + if 'pi' in self.caller['roles'] and self.caller['site_ids']: + sites = Sites(self.api, self.caller['site_ids']) + for site in sites: + valid_slice_ids += site['slice_ids'] + + if not valid_slice_ids: + return [] + + slices = filter(lambda slice: slice['slice_id'] in valid_slice_ids, slices) + + + for slice in slices: + index = slices.index(slice) + node_ids = slices[index].pop('node_ids') + person_ids = slices[index].pop('person_ids') + if return_users or return_users is None: + persons = Persons(self.api, person_ids) + emails = [person['email'] for person in persons] + slices[index]['users'] = emails + if return_nodes or return_nodes is None: + nodes = Nodes(self.api, node_ids) + hostnames = [node['hostname'] for node in nodes] + slices[index]['nodes'] = hostnames + + + return slices diff --git a/PLC/Methods/SliceListNames.py b/PLC/Methods/SliceListNames.py new file mode 100644 index 0000000..01f6257 --- /dev/null +++ b/PLC/Methods/SliceListNames.py @@ -0,0 +1,45 @@ +from PLC.Method import Method +from PLC.Parameter import Parameter, Mixed +from PLC.Filter import Filter +from PLC.Auth import Auth +from PLC.Slices import Slice, Slices +from PLC.Methods.GetSlices import GetSlices + +class SliceListNames(GetSlices): + """ + Deprecated. Can be implemented with GetSlices. + + List the names of registered slices. + + Users may only query slices of which they are members. PIs may + query any of the slices at their sites. Admins may query any + slice. If a slice that cannot be queried is specified in + slice_filter, details about that slice will not be returned. 
+ """ + + status = "deprecated" + + roles = ['admin', 'pi', 'user'] + + accepts = [ + Auth(), + Parameter(str, "Slice prefix", nullok = True) + ] + + returns = [Slice.fields['name']] + + + def call(self, auth, prefix=None): + + slice_filter = None + if prefix: + slice_filter = {'name': prefix+'*'} + + slices = GetSlices.call(self, auth, slice_filter) + + if not slices: + raise PLCInvalidArgument, "No such slice" + + slice_names = [slice['name'] for slice in slices] + + return slice_names diff --git a/PLC/Methods/SliceListUserSlices.py b/PLC/Methods/SliceListUserSlices.py new file mode 100644 index 0000000..1635905 --- /dev/null +++ b/PLC/Methods/SliceListUserSlices.py @@ -0,0 +1,47 @@ +from PLC.Method import Method +from PLC.Parameter import Parameter, Mixed +from PLC.Filter import Filter +from PLC.Auth import Auth +from PLC.Slices import Slice, Slices +from PLC.Persons import Person, Persons +from PLC.Methods.GetSlices import GetSlices +from PLC.Methods.GetPersons import GetPersons + +class SliceListUserSlices(GetSlices, GetPersons): + """ + Deprecated. Can be implemented with GetPersons and GetSlices. + + Return the slices the specified user (by email address) is a member of. + + Users may only query slices of which they are members. PIs may + query any of the slices at their sites. Admins may query any + slice. If a slice that cannot be queried is specified in + slice_filter, details about that slice will not be returned. 
+ """ + + status = "deprecated" + + roles = ['admin', 'pi', 'user'] + + accepts = [ + Auth(), + Person.fields['email'] + ] + + returns = [Slice.fields['name']] + + + def call(self, auth, email): + + persons = GetPersons.call(self, auth, [email]) + if not persons: + return [] + person = persons[0] + slice_ids = person['slice_ids'] + if not slice_ids: + return [] + + slices = GetSlices.call(self, auth, slice_ids) + slice_names = [slice['name'] for slice in slices] + + return slice_names diff --git a/PLC/Methods/SliceNodesAdd.py b/PLC/Methods/SliceNodesAdd.py new file mode 100644 index 0000000..0eab37c --- /dev/null +++ b/PLC/Methods/SliceNodesAdd.py @@ -0,0 +1,29 @@ +from PLC.Faults import * +from PLC.Method import Method +from PLC.Parameter import Parameter, Mixed +from PLC.Nodes import Node, Nodes +from PLC.Slices import Slice, Slices +from PLC.Auth import Auth +from PLC.Methods.AddSliceToNodes import AddSliceToNodes + +class SliceNodesAdd(AddSliceToNodes): + """ + Deprecated. See AddSliceToNodes. + + """ + + status = "deprecated" + + roles = ['admin', 'pi', 'user'] + + accepts = [ + Auth(), + Slice.fields['name'], + [Node.fields['hostname']] + ] + + returns = Parameter(int, '1 if successful') + + def call(self, auth, slice_name, nodes_list): + + return AddSliceToNodes.call(self, auth, slice_name, nodes_list) diff --git a/PLC/Methods/SliceNodesDel.py b/PLC/Methods/SliceNodesDel.py new file mode 100644 index 0000000..c2abd71 --- /dev/null +++ b/PLC/Methods/SliceNodesDel.py @@ -0,0 +1,29 @@ +from PLC.Faults import * +from PLC.Method import Method +from PLC.Parameter import Parameter, Mixed +from PLC.Nodes import Node, Nodes +from PLC.Slices import Slice, Slices +from PLC.Auth import Auth +from PLC.Methods.DeleteSliceFromNodes import DeleteSliceFromNodes + +class SliceNodesDel(DeleteSliceFromNodes): + """ + Deprecated. See DeleteSliceFromNodes. 
+ + """ + + status = "deprecated" + + roles = ['admin', 'pi', 'user'] + + accepts = [ + Auth(), + Slice.fields['name'], + [Node.fields['hostname']] + ] + + returns = Parameter(int, '1 if successful') + + def call(self, auth, slice_name, nodes_list): + + return DeleteSliceFromNodes.call(self, auth, slice_name, nodes_list) diff --git a/PLC/Methods/SliceNodesList.py b/PLC/Methods/SliceNodesList.py new file mode 100644 index 0000000..1c74168 --- /dev/null +++ b/PLC/Methods/SliceNodesList.py @@ -0,0 +1,40 @@ +from PLC.Method import Method +from PLC.Parameter import Parameter, Mixed +from PLC.Filter import Filter +from PLC.Auth import Auth +from PLC.Slices import Slice, Slices +from PLC.Nodes import Node, Nodes +from PLC.Methods.GetSlices import GetSlices +from PLC.Methods.GetNodes import GetNodes + +class SliceNodesList(GetSlices, GetNodes): + """ + Deprecated. Can be implemented with GetSlices and GetNodes. + + """ + + status = "deprecated" + + roles = ['admin', 'pi', 'user'] + + accepts = [ + Auth(), + Slice.fields['name'] + ] + + returns = [Node.fields['hostname']] + + + def call(self, auth, slice_name): + slices = GetSlices.call(self, auth, [slice_name]) + if not slices: + return [] + + slice = slices[0] + nodes = GetNodes.call(self, auth, slice['node_ids']) + if not nodes: + return [] + + node_hostnames = [node['hostname'] for node in nodes] + + return node_hostnames diff --git a/PLC/Methods/SliceRenew.py b/PLC/Methods/SliceRenew.py new file mode 100644 index 0000000..531c4a7 --- /dev/null +++ b/PLC/Methods/SliceRenew.py @@ -0,0 +1,33 @@ +import time + +from PLC.Faults import * +from PLC.Method import Method +from PLC.Parameter import Parameter, Mixed +from PLC.Slices import Slice, Slices +from PLC.Auth import Auth +from PLC.Methods.UpdateSlice import UpdateSlice + +class SliceRenew(UpdateSlice): + """ + Deprecated. See UpdateSlice. 
+ + """ + + status = "deprecated" + + roles = ['admin', 'pi', 'user'] + + accepts = [ + Auth(), + Slice.fields['name'], + Slice.fields['expires'] + ] + + returns = Parameter(int, '1 if successful') + + def call(self, auth, slice_name, slice_expires): + + slice_fields = {} + slice_fields['expires'] = slice_expires + + return UpdateSlice.call(self, auth, slice_name, slice_fields) diff --git a/PLC/Methods/SliceTicketGet.py b/PLC/Methods/SliceTicketGet.py new file mode 100644 index 0000000..5b2b786 --- /dev/null +++ b/PLC/Methods/SliceTicketGet.py @@ -0,0 +1,13 @@ +from PLC.Methods.SliceGetTicket import SliceGetTicket + +class SliceTicketGet(SliceGetTicket): + """ + Deprecated. See GetSliceTicket. + + Warning: This function exists solely for backward compatibility + with the old public PlanetLab 3.0 Node Manager, which will be + removed from service by 2007. This call is not intended to be used + by any other PLC except the public PlanetLab. + """ + + status = "deprecated" diff --git a/PLC/Methods/SliceUpdate.py b/PLC/Methods/SliceUpdate.py new file mode 100644 index 0000000..7f6b754 --- /dev/null +++ b/PLC/Methods/SliceUpdate.py @@ -0,0 +1,37 @@ +import time + +from PLC.Faults import * +from PLC.Method import Method +from PLC.Parameter import Parameter, Mixed +from PLC.Slices import Slice, Slices +from PLC.Auth import Auth +from PLC.Methods.UpdateSlice import UpdateSlice + +class SliceUpdate(UpdateSlice): + """ + Deprecated. See UpdateSlice. 
+ + """ + + status = 'deprecated' + + roles = ['admin', 'pi', 'user'] + + accepts = [ + Auth(), + Slice.fields['name'], + Slice.fields['url'], + Slice.fields['description'], + ] + + returns = Parameter(int, '1 if successful') + + def call(self, auth, slice_name, url, description): + + slice_fields = {} + slice_fields['url'] = url + slice_fields['description'] = description + + return UpdateSlice.call(self, auth, slice_name, slice_fields) + + return 1 diff --git a/PLC/Methods/SliceUserAdd.py b/PLC/Methods/SliceUserAdd.py new file mode 100644 index 0000000..6b0c593 --- /dev/null +++ b/PLC/Methods/SliceUserAdd.py @@ -0,0 +1,32 @@ +from PLC.Faults import * +from PLC.Method import Method +from PLC.Parameter import Parameter, Mixed +from PLC.Persons import Person, Persons +from PLC.Slices import Slice, Slices +from PLC.Auth import Auth +from PLC.Methods.AddPersonToSlice import AddPersonToSlice + +class SliceUserAdd(AddPersonToSlice): + """ + Deprecated. See AddPersonToSlice. + + """ + + status = "deprecated" + + roles = ['admin', 'pi'] + + accepts = [ + Auth(), + Slice.fields['name'], + [Person.fields['email']], + ] + + returns = Parameter(int, '1 if successful') + + def call(self, auth, slice_name, user_list): + + for user in user_list: + AddPersonToSlice.call(self, auth, user, slice_name) + + return 1 diff --git a/PLC/Methods/SliceUserDel.py b/PLC/Methods/SliceUserDel.py new file mode 100644 index 0000000..9109b00 --- /dev/null +++ b/PLC/Methods/SliceUserDel.py @@ -0,0 +1,35 @@ +from PLC.Faults import * +from PLC.Method import Method +from PLC.Parameter import Parameter, Mixed +from PLC.Auth import Auth +from PLC.Persons import Person, Persons +from PLC.Slices import Slice, Slices +from PLC.Methods.DeletePersonFromSlice import DeletePersonFromSlice + +class SliceUserDel(Method): + """ + Deprecated. Can be implemented with DeletePersonFromSlice. + + Removes the specified users from the specified slice. 
If the person is + already a member of the slice, no errors are returned. + + Returns 1 if successful, faults otherwise. + """ + + status = "deprecated" + + roles = ['admin', 'pi'] + + accepts = [ + Auth(), + Slice.fields['name'], + [Person.fields['email']], + ] + + returns = Parameter(int, '1 if successful') + + def call(self, auth, slice_name, user_list): + for user in user_list: + DeletePersonFromSlice.call(self, auth, user, slice_name) + + return 1 diff --git a/PLC/Methods/SliceUsersList.py b/PLC/Methods/SliceUsersList.py new file mode 100644 index 0000000..ffea537 --- /dev/null +++ b/PLC/Methods/SliceUsersList.py @@ -0,0 +1,45 @@ +from PLC.Method import Method +from PLC.Parameter import Parameter, Mixed +from PLC.Filter import Filter +from PLC.Auth import Auth +from PLC.Slices import Slice, Slices +from PLC.Persons import Person, Persons +from PLC.Methods.GetSlices import GetSlices +from PLC.Methods.GetPersons import GetPersons + +class SliceUsersList(GetSlices, GetPersons): + """ + Deprecated. Can be implemented with GetSlices and GetPersons. + + List users that are members of the named slice. + + Users may only query slices of which they are members. PIs may + query any of the slices at their sites. Admins may query any + slice. If a slice that cannot be queried is specified details + about that slice will not be returned. 
+ """ + + status = "deprecated" + + roles = ['admin', 'pi', 'user'] + + accepts = [ + Auth(), + Slice.fields['name'] + ] + + returns = [Person.fields['email']] + + + def call(self, auth, slice_name): + + slice_filter = [slice_name] + slices = GetSlices.call(self, auth, slice_filter) + if not slices: + return [] + slice = slices[0] + + persons = GetPersons.call(self, auth, slice['person_ids']) + person_emails = [person['email'] for person in persons] + + return person_emails diff --git a/PLC/Methods/UnBindObjectFromPeer.py b/PLC/Methods/UnBindObjectFromPeer.py new file mode 100644 index 0000000..156f976 --- /dev/null +++ b/PLC/Methods/UnBindObjectFromPeer.py @@ -0,0 +1,67 @@ +from PLC.Method import Method +from PLC.Parameter import Parameter, Mixed +from PLC.Filter import Filter +from PLC.Auth import Auth +from PLC.Persons import Persons +from PLC.Sites import Sites +from PLC.Nodes import Nodes +from PLC.Slices import Slices +from PLC.Keys import Keys +from PLC.Peers import Peers +from PLC.Faults import * + +class UnBindObjectFromPeer(Method): + """ + This method is a hopefully temporary hack to let the sfa correctly + detach the objects it creates from a remote peer object. This is + needed so that the sfa federation link can work in parallel with + RefreshPeer, as RefreshPeer depends on remote objects being + correctly marked. + + UnBindObjectFromPeer is allowed to admins only. + """ + + roles = ['admin'] + + known_types = ['site','person','slice','node','key'] + types_doc = ",".join(["'%s'"%type for type in known_types]) + + accepts = [ + Auth(), + Parameter(str,"Object type, among "+types_doc), + Parameter(int,"object_id"), + Parameter(str,"peer shortname"), + Parameter(int,"remote object_id, set to 0 if unknown"), + ] + + returns = Parameter (int, '1 if successful') + + def locate_object (self, object_type, object_id): + # locate e.g. the Nodes symbol + class_obj = globals()[object_type.capitalize()+'s'] + id_name=object_type+'_id' + # invoke e.g. 
Nodes ({'node_id':node_id}) + objs=class_obj(self.api,{id_name:object_id}) + if len(objs) != 1: + raise PLCInvalidArgument,"Cannot locate object, type=%s id=%d"%\ + (type,object_id) + return objs[0] + + + def call(self, auth, object_type, object_id, shortname): + + object_type = object_type.lower() + if object_type not in self.known_types: + raise PLCInvalidArgument, 'Unrecognized object type %s'%object_type + + peers=Peers(self.api,{'shortname':shortname.upper()}) + if len(peers) !=1: + raise PLCInvalidArgument, 'No such peer with shortname %s'%shortname + + peer=peers[0] + object = self.locate_object (object_type, object_id) + remover_name = 'remove_'+object_type + remove_function = getattr(type(peer),remover_name) + remove_function(peer,object) + + return 1 diff --git a/PLC/Methods/UpdateAddress.py b/PLC/Methods/UpdateAddress.py new file mode 100644 index 0000000..03acb37 --- /dev/null +++ b/PLC/Methods/UpdateAddress.py @@ -0,0 +1,54 @@ +from PLC.Faults import * +from PLC.Method import Method +from PLC.Parameter import Parameter, Mixed +from PLC.Addresses import Address, Addresses +from PLC.Auth import Auth + +can_update = lambda (field, value): field in \ + ['line1', 'line2', 'line3', + 'city', 'state', 'postalcode', 'country'] + +class UpdateAddress(Method): + """ + Updates the parameters of an existing address with the values in + address_fields. + + PIs may only update addresses of their own sites. + + Returns 1 if successful, faults otherwise. 
+ """ + + roles = ['admin', 'pi'] + + address_fields = dict(filter(can_update, Address.fields.items())) + + accepts = [ + Auth(), + Address.fields['address_id'], + address_fields + ] + + returns = Parameter(int, '1 if successful') + + def call(self, auth, address_id, address_fields): + address_fields = dict(filter(can_update, address_fields.items())) + + # Get associated address details + addresses = Addresses(self.api, [address_id]) + if not addresses: + raise PLCInvalidArgument, "No such address" + address = addresses[0] + + if 'admin' not in self.caller['roles']: + if address['site_id'] not in self.caller['site_ids']: + raise PLCPermissionDenied, "Address must be associated with one of your sites" + + address.update(address_fields) + address.sync() + + # Logging variables + self.event_objects = {'Address': [address['address_id']]} + self.message = 'Address %d updated: %s' % \ + (address['address_id'], ", ".join(address_fields.keys())) + + return 1 diff --git a/PLC/Methods/UpdateAddressType.py b/PLC/Methods/UpdateAddressType.py new file mode 100644 index 0000000..2b42ade --- /dev/null +++ b/PLC/Methods/UpdateAddressType.py @@ -0,0 +1,42 @@ +from PLC.Faults import * +from PLC.Method import Method +from PLC.Parameter import Parameter, Mixed +from PLC.AddressTypes import AddressType, AddressTypes +from PLC.Auth import Auth + +can_update = lambda (field, value): field in ['name', 'description'] + +class UpdateAddressType(Method): + """ + Updates the parameters of an existing address type with the values + in address_type_fields. + + Returns 1 if successful, faults otherwise. 
+ """ + + roles = ['admin'] + + address_type_fields = dict(filter(can_update, AddressType.fields.items())) + + accepts = [ + Auth(), + Mixed(AddressType.fields['address_type_id'], + AddressType.fields['name']), + address_type_fields + ] + + returns = Parameter(int, '1 if successful') + + def call(self, auth, address_type_id_or_name, address_type_fields): + address_type_fields = dict(filter(can_update, address_type_fields.items())) + + address_types = AddressTypes(self.api, [address_type_id_or_name]) + if not address_types: + raise PLCInvalidArgument, "No such address type" + address_type = address_types[0] + + address_type.update(address_type_fields) + address_type.sync() + self.event_objects = {'AddressType': [address_type['address_type_id']]} + + return 1 diff --git a/PLC/Methods/UpdateConfFile.py b/PLC/Methods/UpdateConfFile.py new file mode 100644 index 0000000..5ae37fb --- /dev/null +++ b/PLC/Methods/UpdateConfFile.py @@ -0,0 +1,42 @@ +from PLC.Faults import * +from PLC.Method import Method +from PLC.Parameter import Parameter, Mixed +from PLC.ConfFiles import ConfFile, ConfFiles +from PLC.Auth import Auth + +can_update = lambda (field, value): field not in \ + ['conf_file_id', 'node_ids', 'nodegroup_ids'] + +class UpdateConfFile(Method): + """ + Updates a node configuration file. Only the fields specified in + conf_file_fields are updated, all other fields are left untouched. + + Returns 1 if successful, faults otherwise. 
+ """ + + roles = ['admin'] + + conf_file_fields = dict(filter(can_update, ConfFile.fields.items())) + + accepts = [ + Auth(), + ConfFile.fields['conf_file_id'], + conf_file_fields + ] + + returns = Parameter(int, '1 if successful') + + def call(self, auth, conf_file_id, conf_file_fields): + conf_file_fields = dict(filter(can_update, conf_file_fields.items())) + + conf_files = ConfFiles(self.api, [conf_file_id]) + if not conf_files: + raise PLCInvalidArgument, "No such configuration file" + + conf_file = conf_files[0] + conf_file.update(conf_file_fields) + conf_file.sync() + self.event_objects = {'ConfFile': [conf_file['conf_file_id']]} + + return 1 diff --git a/PLC/Methods/UpdateIlink.py b/PLC/Methods/UpdateIlink.py new file mode 100644 index 0000000..97b14b6 --- /dev/null +++ b/PLC/Methods/UpdateIlink.py @@ -0,0 +1,65 @@ +# +# Thierry Parmentelat - INRIA +# + +from PLC.Faults import * +from PLC.Method import Method +from PLC.Parameter import Parameter, Mixed +from PLC.Auth import Auth + +from PLC.Ilinks import Ilink, Ilinks +from PLC.Interfaces import Interface, Interfaces +from PLC.TagTypes import TagType, TagTypes +from PLC.Sites import Sites + +from PLC.AuthorizeHelpers import AuthorizeHelpers + +class UpdateIlink(Method): + """ + Updates the value of an existing ilink + + Access rights depend on the tag type. + + Returns 1 if successful, faults otherwise. 
+ """ + + roles = ['admin', 'pi', 'tech', 'user'] + + accepts = [ + Auth(), + Ilink.fields['ilink_id'], + Ilink.fields['value'] + ] + + returns = Parameter(int, '1 if successful') + + object_type = 'Interface' + + def call(self, auth, ilink_id, value): + ilinks = Ilinks(self.api, [ilink_id]) + if not ilinks: + raise PLCInvalidArgument, "No such ilink %r"%ilink_id + ilink = ilinks[0] + + src_if=Interfaces(self.api,ilink['src_interface_id'])[0] + dst_if=Interfaces(self.api,ilink['dst_interface_id'])[0] + tag_type_id = ilink['tag_type_id'] + tag_type = TagTypes (self.api,[tag_type_id])[0] + + # check authorizations + if 'admin' in self.caller['roles']: + pass + elif not AuthorizeHelpers.caller_may_access_tag_type (self.api, self.caller, tag_type): + raise PLCPermissionDenied, "%s, forbidden tag %s"%(self.name,tag_type['tagname']) + elif AuthorizeHelpers.interface_belongs_to_person (self.api, src_if, self.caller): + pass + elif src_if_id != dst_if_id and AuthorizeHelpers.interface_belongs_to_person (self.api, dst_if, self.caller): + pass + else: + raise PLCPermissionDenied, "%s: you must own either the src or dst interface"%self.name + + ilink['value'] = value + ilink.sync() + + self.object_ids = [ilink['src_interface_id'],ilink['dst_interface_id']] + return 1 diff --git a/PLC/Methods/UpdateInitScript.py b/PLC/Methods/UpdateInitScript.py new file mode 100644 index 0000000..f8c4eba --- /dev/null +++ b/PLC/Methods/UpdateInitScript.py @@ -0,0 +1,42 @@ +from PLC.Faults import * +from PLC.Method import Method +from PLC.Parameter import Parameter, Mixed +from PLC.InitScripts import InitScript, InitScripts +from PLC.Auth import Auth + +can_update = lambda (field, value): field not in \ + ['initscript_id'] + +class UpdateInitScript(Method): + """ + Updates an initscript. Only the fields specified in + initscript_fields are updated, all other fields are left untouched. + + Returns 1 if successful, faults otherwise. 
+ """ + + roles = ['admin'] + + initscript_fields = dict(filter(can_update, InitScript.fields.items())) + + accepts = [ + Auth(), + InitScript.fields['initscript_id'], + initscript_fields + ] + + returns = Parameter(int, '1 if successful') + + def call(self, auth, initscript_id, initscript_fields): + initscript_fields = dict(filter(can_update, initscript_fields.items())) + + initscripts = InitScripts(self.api, [initscript_id]) + if not initscripts: + raise PLCInvalidArgument, "No such initscript" + + initscript = initscripts[0] + initscript.update(initscript_fields) + initscript.sync() + self.event_objects = {'InitScript': [initscript['initscript_id']]} + + return 1 diff --git a/PLC/Methods/UpdateInterface.py b/PLC/Methods/UpdateInterface.py new file mode 100644 index 0000000..034780e --- /dev/null +++ b/PLC/Methods/UpdateInterface.py @@ -0,0 +1,94 @@ +from PLC.Faults import * +from PLC.Method import Method +from PLC.Parameter import Parameter, Mixed +from PLC.Table import Row +from PLC.Auth import Auth + +from PLC.Nodes import Node, Nodes +from PLC.TagTypes import TagTypes +from PLC.InterfaceTags import InterfaceTags +from PLC.Interfaces import Interface, Interfaces +from PLC.Methods.AddInterfaceTag import AddInterfaceTag +from PLC.Methods.UpdateInterfaceTag import UpdateInterfaceTag + +cannot_update = ['interface_id','node_id'] + +class UpdateInterface(Method): + """ + Updates an existing interface network. Any values specified in + interface_fields are used, otherwise defaults are + used. Acceptable values for method are dhcp and static. If type is + static, then ip, gateway, network, broadcast, netmask, and dns1 + must all be specified in interface_fields. If type is dhcp, + these parameters, even if specified, are ignored. + + PIs and techs may only update interfaces associated with their own + nodes. Admins may update any interface network. + + Returns 1 if successful, faults otherwise. 
+ """ + + roles = ['admin', 'pi', 'tech'] + + accepted_fields = Row.accepted_fields(cannot_update, Interface.fields,exclude=True) + accepted_fields.update(Interface.tags) + + accepts = [ + Auth(), + Interface.fields['interface_id'], + accepted_fields + ] + + returns = Parameter(int, '1 if successful') + + def call(self, auth, interface_id, interface_fields): + + [native,tags,rejected] = Row.split_fields(interface_fields,[Interface.fields,Interface.tags]) + + # type checking + native= Row.check_fields (native, self.accepted_fields) + if rejected: + raise PLCInvalidArgument, "Cannot update Interface column(s) %r"%rejected + + # Get interface information + interfaces = Interfaces(self.api, [interface_id]) + if not interfaces: + raise PLCInvalidArgument, "No such interface" + + interface = interfaces[0] + + # Authenticated function + assert self.caller is not None + + # If we are not an admin, make sure that the caller is a + # member of the site where the node exists. + if 'admin' not in self.caller['roles']: + nodes = Nodes(self.api, [interface['node_id']]) + if not nodes: + raise PLCPermissionDenied, "Interface is not associated with a node" + node = nodes[0] + if node['site_id'] not in self.caller['site_ids']: + raise PLCPermissionDenied, "Not allowed to update interface" + + interface.update(native) + interface.update_last_updated(commit=False) + interface.sync() + + for (tagname,value) in tags.iteritems(): + # the tagtype instance is assumed to exist, just check that + if not TagTypes(self.api,{'tagname':tagname}): + raise PLCInvalidArgument,"No such TagType %s"%tagname + interface_tags=InterfaceTags(self.api,{'tagname':tagname,'interface_id':interface['interface_id']}) + if not interface_tags: + AddInterfaceTag(self.api).__call__(auth,interface['interface_id'],tagname,value) + else: + UpdateInterfaceTag(self.api).__call__(auth,interface_tags[0]['interface_tag_id'],value) + + self.event_objects = {'Interface': [interface['interface_id']]} + if 'ip' in interface: 
+ self.message = "Interface %s updated"%interface['ip'] + else: + self.message = "Interface %d updated"%interface['interface_id'] + self.message += "[%s]." % ", ".join(interface_fields.keys()) + + return 1 diff --git a/PLC/Methods/UpdateInterfaceTag.py b/PLC/Methods/UpdateInterfaceTag.py new file mode 100644 index 0000000..bcd6fc0 --- /dev/null +++ b/PLC/Methods/UpdateInterfaceTag.py @@ -0,0 +1,60 @@ +# +# Thierry Parmentelat - INRIA +# +from PLC.Faults import * +from PLC.Method import Method +from PLC.Parameter import Parameter, Mixed +from PLC.Auth import Auth + +from PLC.Sites import Sites +from PLC.Nodes import Nodes +from PLC.Interfaces import Interface, Interfaces +from PLC.TagTypes import TagType, TagTypes +from PLC.InterfaceTags import InterfaceTag, InterfaceTags + +# need to import so the core classes get decorated with caller_may_write_tag +from PLC.AuthorizeHelpers import AuthorizeHelpers + +class UpdateInterfaceTag(Method): + """ + Updates the value of an existing interface setting + + Admins have full access. Non-admins need to + (1) have at least one of the roles attached to the tagtype, + and (2) belong in the same site as the tagged subject. + + Returns 1 if successful, faults otherwise. 
+ """ + + roles = ['admin', 'pi', 'tech', 'user'] + + accepts = [ + Auth(), + InterfaceTag.fields['interface_tag_id'], + InterfaceTag.fields['value'] + ] + + returns = Parameter(int, '1 if successful') + + def call(self, auth, interface_tag_id, value): + interface_tags = InterfaceTags(self.api, [interface_tag_id]) + if not interface_tags: + raise PLCInvalidArgument, "No such interface setting %r"%interface_tag_id + interface_tag = interface_tags[0] + + tag_type_id = interface_tag['tag_type_id'] + tag_type = TagTypes (self.api,[tag_type_id])[0] + + interfaces = Interfaces (self.api, interface_tag['interface_id']) + if not interfaces: + raise PLCInvalidArgument, "No such interface %d"%interface_tag['interface_id'] + interface=interfaces[0] + + # check authorizations + interface.caller_may_write_tag(self.api, self.caller, tag_type) + + interface_tag['value'] = value + interface_tag.sync() + + self.object_ids = [interface_tag['interface_tag_id']] + return 1 diff --git a/PLC/Methods/UpdateKey.py b/PLC/Methods/UpdateKey.py new file mode 100644 index 0000000..870fca7 --- /dev/null +++ b/PLC/Methods/UpdateKey.py @@ -0,0 +1,55 @@ +from PLC.Faults import * +from PLC.Method import Method +from PLC.Parameter import Parameter, Mixed +from PLC.Keys import Key, Keys +from PLC.Auth import Auth + +can_update = lambda (field, value): field in \ + ['key_type', 'key'] + +class UpdateKey(Method): + """ + Updates the parameters of an existing key with the values in + key_fields. + + Non-admins may only update their own keys. + + Returns 1 if successful, faults otherwise. 
+ """ + + roles = ['admin', 'pi', 'tech', 'user'] + + key_fields = dict(filter(can_update, Key.fields.items())) + + accepts = [ + Auth(), + Key.fields['key_id'], + key_fields + ] + + returns = Parameter(int, '1 if successful') + + def call(self, auth, key_id, key_fields): + key_fields = dict(filter(can_update, key_fields.items())) + + # Get key information + keys = Keys(self.api, [key_id]) + if not keys: + raise PLCInvalidArgument, "No such key" + key = keys[0] + + if key['peer_id'] is not None: + raise PLCInvalidArgument, "Not a local key" + + if 'admin' not in self.caller['roles']: + if key['key_id'] not in self.caller['key_ids']: + raise PLCPermissionDenied, "Key must be associated with one of your accounts" + + key.update(key_fields) + key.sync() + + # Logging variables + self.event_objects = {'Key': [key['key_id']]} + self.message = 'key %d updated: %s' % \ + (key['key_id'], ", ".join(key_fields.keys())) + return 1 diff --git a/PLC/Methods/UpdateLeases.py b/PLC/Methods/UpdateLeases.py new file mode 100644 index 0000000..c760c5b --- /dev/null +++ b/PLC/Methods/UpdateLeases.py @@ -0,0 +1,140 @@ +from __future__ import print_function + +from PLC.Faults import * +from PLC.Method import Method +from PLC.Parameter import Parameter, Mixed +from PLC.Auth import Auth + +from PLC.Timestamp import Timestamp, Duration + +from PLC.Leases import Lease, Leases +from PLC.Slices import Slice, Slices + +can_update = lambda (field, value): field in ['t_from', 't_until', 'duration'] + + +class UpdateLeases(Method): + """ + Updates the parameters of a (set of) existing lease(s) with the values in + lease_fields; specifically this applies to the timeslot definition. + As a convenience you may, in addition to the t_from and t_until fields, + you can also set the 'duration' field. + + Users may only update leases attached to their slices. + PIs may update any of the leases for slices at their sites, or any + slices of which they are members. Admins may update any lease. 
+ + Returns a dict of successfully updated lease_ids and error messages. + """ + + roles = ['admin', 'pi', 'tech', 'user'] + + lease_fields = dict(filter(can_update, Lease.fields.items())) + + accepts = [ + Auth(), + Mixed(Lease.fields['lease_id'], + [Lease.fields['lease_id']]), + lease_fields + ] + + returns = Parameter( + dict, + " 'updated_ids' is the list ids updated," + "'errors' is a list of error strings") + + debug = False +# debug=True + + def call(self, auth, lease_ids, input_fields): + input_fields = dict(filter(can_update, input_fields.items())) + + if 'duration' in input_fields: + if 't_from' in input_fields and 't_until' in input_fields: + raise PLCInvalidArgument( + "Cannot set t_from AND t_until AND duration") + # specify 'duration':0 to keep duration unchanged + if input_fields['duration']: + input_fields['duration'] = Duration.validate( + input_fields['duration']) + + # Get lease information + leases = Leases(self.api, lease_ids) + if not leases: + raise PLCInvalidArgument("No such leases {}".format(lease_ids)) + + # fetch related slices + slices = Slices(self.api, + [lease['slice_id'] for lease in leases], + ['slice_id', 'person_ids']) + # create hash on slice_id + slice_map = dict([(slice['slice_id'], slice) for slice in slices]) + + updated_ids = [] + errors = [] + + lease_ids = [lease['lease_id'] for lease in leases] + for lease in leases: + + if 'admin' not in self.caller['roles']: + slice = slice_map[lease['slice_id']] + # check slices only once + if 'verified' not in slice: + if self.caller['person_id'] in slice['person_ids']: + pass + elif 'pi' not in self.caller['roles']: + raise PLCPermissionDenied( + "Not a member of slice {}".format(slice['name'])) + elif slice['site_id'] not in self.caller['site_ids']: + raise PLCPermissionDenied( + "Slice {} not associated with any of your sites" + .format(slice['name'])) + slice['verified'] = True + + try: + # we've ruled out already the case where all 3 (from, to, + # duration) where specified + if 
'duration' not in input_fields: + lease_fields = input_fields + else: + # all arithmetics on longs.. + duration = Duration.validate(input_fields['duration']) + # specify 'duration':0 to keep duration unchanged + if not duration: + duration = Timestamp.cast_long( + lease['t_until']) \ + - Timestamp.cast_long(lease['t_from']) + if 't_from' in input_fields: + lease_fields = { + 't_from': input_fields['t_from'], + 't_until': Timestamp.cast_long( + input_fields['from']) + duration} + elif 't_until' in input_fields: + lease_fields = { + 't_from': Timestamp.cast_long( + input_fields['t_until']) - duration, + 't_until': input_fields['t_until']} + else: + lease_fields = {'t_until': Timestamp.cast_long( + lease['t_from']) + duration} + if UpdateLeases.debug: + for k in ['t_from', 't_until']: + if k in lease_fields: + print(k, 'aka', Timestamp.sql_validate_utc( + lease_fields[k])) + + lease.update(lease_fields) + lease.sync() + updated_ids.append(lease['lease_id']) + except Exception, e: + errors.append( + "Could not update lease {} - check new time limits ? -- {}" + .format(lease['lease_id'], e)) + + # Logging variables + self.event_objects = {'Lease': updated_ids} + self.message = 'lease {} updated: {}'\ + .format(lease_ids, ", ".join(input_fields.keys())) + + return {'updated_ids': updated_ids, + 'errors': errors} diff --git a/PLC/Methods/UpdateMessage.py b/PLC/Methods/UpdateMessage.py new file mode 100644 index 0000000..7f96129 --- /dev/null +++ b/PLC/Methods/UpdateMessage.py @@ -0,0 +1,43 @@ +from PLC.Faults import * +from PLC.Method import Method +from PLC.Parameter import Parameter, Mixed +from PLC.Messages import Message, Messages +from PLC.Auth import Auth + +can_update = lambda (field, value): field in \ + ['template', 'enabled'] + +class UpdateMessage(Method): + """ + Updates the parameters of an existing message template with the + values in message_fields. + + Returns 1 if successful, faults otherwise. 
+ """ + + roles = ['admin'] + + message_fields = dict(filter(can_update, Message.fields.items())) + + accepts = [ + Auth(), + Message.fields['message_id'], + message_fields + ] + + returns = Parameter(int, '1 if successful') + + def call(self, auth, message_id, message_fields): + message_fields = dict(filter(can_update, message_fields.items())) + + # Get message information + messages = Messages(self.api, [message_id]) + if not messages: + raise PLCInvalidArgument, "No such message" + message = messages[0] + + message.update(message_fields) + message.sync() + self.event_objects = {'Message': [message['message_id']]} + + return 1 diff --git a/PLC/Methods/UpdateNode.py b/PLC/Methods/UpdateNode.py new file mode 100644 index 0000000..973dbb9 --- /dev/null +++ b/PLC/Methods/UpdateNode.py @@ -0,0 +1,124 @@ +from PLC.Faults import * +from PLC.Method import Method +from PLC.Parameter import Parameter, Mixed +from PLC.Table import Row +from PLC.Auth import Auth +from PLC.Namespace import hostname_to_hrn +from PLC.Peers import Peers +from PLC.Sites import Sites +from PLC.Nodes import Node, Nodes +from PLC.TagTypes import TagTypes +from PLC.NodeTags import NodeTags, NodeTag + +admin_only = [ 'key', 'session', 'boot_nonce', 'site_id'] +can_update = ['hostname', 'node_type', 'boot_state', 'model', 'version'] + admin_only + +class UpdateNode(Method): + """ + Updates a node. Only the fields specified in node_fields are + updated, all other fields are left untouched. + + PIs and techs can update only the nodes at their sites. Only + admins can update the key, session, and boot_nonce fields. + + Returns 1 if successful, faults otherwise. 
+ """ + + roles = ['admin', 'pi', 'tech'] + + accepted_fields = Row.accepted_fields(can_update,Node.fields) + # xxx check the related_fields feature + accepted_fields.update(Node.related_fields) + accepted_fields.update(Node.tags) + + accepts = [ + Auth(), + Mixed(Node.fields['node_id'], + Node.fields['hostname']), + accepted_fields + ] + + returns = Parameter(int, '1 if successful') + + def call(self, auth, node_id_or_hostname, node_fields): + + # split provided fields + [native,related,tags,rejected] = Row.split_fields(node_fields,[Node.fields,Node.related_fields,Node.tags]) + + # type checking + native = Row.check_fields (native, self.accepted_fields) + if rejected: + raise PLCInvalidArgument, "Cannot update Node column(s) %r"%rejected + + # Authenticated function + assert self.caller is not None + + # Remove admin only fields + if 'admin' not in self.caller['roles']: + for key in admin_only: + if native.has_key(key): + del native[key] + + # Get account information + nodes = Nodes(self.api, [node_id_or_hostname]) + if not nodes: + raise PLCInvalidArgument, "No such node %r"%node_id_or_hostname + node = nodes[0] + + if node['peer_id'] is not None: + raise PLCInvalidArgument, "Not a local node %r"%node_id_or_hostname + + # If we are not an admin, make sure that the caller is a + # member of the site at which the node is located. 
+ if 'admin' not in self.caller['roles']: + if node['site_id'] not in self.caller['site_ids']: + raise PLCPermissionDenied, "Not allowed to delete nodes from specified site" + + # Make requested associations + for (k,v) in related.iteritems(): + node.associate(auth, k,v) + + node.update(native) + node.update_last_updated(commit=False) + node.sync(commit=True) + + # if hostname was modifed make sure to update the hrn + # tag + if 'hostname' in native: + root_auth = self.api.config.PLC_HRN_ROOT + # sub auth is the login base of this node's site + sites = Sites(self.api, node['site_id'], ['login_base']) + site = sites[0] + login_base = site['login_base'] + tags['hrn'] = hostname_to_hrn(root_auth, login_base, node['hostname']) + + for (tagname,value) in tags.iteritems(): + # the tagtype instance is assumed to exist, just check that + tag_types = TagTypes(self.api,{'tagname':tagname}) + if not tag_types: + raise PLCInvalidArgument,"No such TagType %s"%tagname + tag_type = tag_types[0] + node_tags=NodeTags(self.api,{'tagname':tagname,'node_id':node['node_id']}) + if not node_tags: + node_tag = NodeTag(self.api) + node_tag['node_id'] = node['node_id'] + node_tag['tag_type_id'] = tag_type['tag_type_id'] + node_tag['tagname'] = tagname + node_tag['value'] = value + node_tag.sync() + else: + node_tag = node_tags[0] + node_tag['value'] = value + node_tag.sync() + + # Logging variables + self.event_objects = {'Node': [node['node_id']]} + if 'hostname' in node: + self.message = 'Node %s updated'%node['hostname'] + else: + self.message = 'Node %d updated'%node['node_id'] + self.message += " [%s]." 
% (", ".join(node_fields.keys()),) + if 'boot_state' in node_fields.keys(): + self.message += ' boot_state updated to %s' % node_fields['boot_state'] + + return 1 diff --git a/PLC/Methods/UpdateNodeGroup.py b/PLC/Methods/UpdateNodeGroup.py new file mode 100644 index 0000000..6b80187 --- /dev/null +++ b/PLC/Methods/UpdateNodeGroup.py @@ -0,0 +1,45 @@ +from PLC.Faults import * +from PLC.Method import Method +from PLC.Parameter import Parameter, Mixed +from PLC.NodeGroups import NodeGroup, NodeGroups +from PLC.Auth import Auth + +can_update = lambda (field, value): field in ['groupname','value'] + +class UpdateNodeGroup(Method): + """ + Updates a custom node group. + + Returns 1 if successful, faults otherwise. + """ + + roles = ['admin'] + + nodegroup_fields = dict(filter(can_update, NodeGroup.fields.items())) + + accepts = [ + Auth(), + Mixed(NodeGroup.fields['nodegroup_id'], + NodeGroup.fields['groupname']), + nodegroup_fields + ] + + returns = Parameter(int, '1 if successful') + + def call(self, auth, nodegroup_id_or_name, nodegroup_fields): + nodegroup_fields = dict(filter(can_update, nodegroup_fields.items())) + + # Get nodegroup information + nodegroups = NodeGroups(self.api, [nodegroup_id_or_name]) + if not nodegroups: + raise PLCInvalidArgument, "No such nodegroup %r"%nodegroup_id_or_name + nodegroup = nodegroups[0] + + nodegroup.update(nodegroup_fields) + nodegroup.sync() + + # Logging variables + self.event_objects = {'NodeGroup': [nodegroup['nodegroup_id']]} + self.message = 'Node group %d updated: %s' % \ + (nodegroup['nodegroup_id'], ", ".join(nodegroup_fields.keys())) + return 1 diff --git a/PLC/Methods/UpdateNodeTag.py b/PLC/Methods/UpdateNodeTag.py new file mode 100644 index 0000000..d0b3d8e --- /dev/null +++ b/PLC/Methods/UpdateNodeTag.py @@ -0,0 +1,60 @@ +# +# Thierry Parmentelat - INRIA +# + +from PLC.Faults import * +from PLC.Method import Method +from PLC.Parameter import Parameter, Mixed +from PLC.Auth import Auth + +from PLC.Sites import Sites 
+from PLC.Nodes import Node, Nodes +from PLC.TagTypes import TagType, TagTypes +from PLC.NodeTags import NodeTag, NodeTags + +# need to import so the core classes get decorated with caller_may_write_tag +from PLC.AuthorizeHelpers import AuthorizeHelpers + +class UpdateNodeTag(Method): + """ + Updates the value of an existing node tag + + Admins have full access. Non-admins need to + (1) have at least one of the roles attached to the tagtype, + and (2) belong in the same site as the tagged subject. + + Returns 1 if successful, faults otherwise. + """ + + roles = ['admin', 'pi', 'tech', 'user'] + + accepts = [ + Auth(), + NodeTag.fields['node_tag_id'], + NodeTag.fields['value'] + ] + + returns = Parameter(int, '1 if successful') + + def call(self, auth, node_tag_id, value): + node_tags = NodeTags(self.api, [node_tag_id]) + if not node_tags: + raise PLCInvalidArgument, "No such node tag %r"%node_tag_id + node_tag = node_tags[0] + + tag_type_id = node_tag['tag_type_id'] + tag_type = TagTypes (self.api,[tag_type_id])[0] + + nodes = Nodes (self.api, node_tag['node_id']) + if not nodes: + raise PLCInvalidArgument, "No such node %d"%node_tag['node_id'] + node=nodes[0] + + # check authorizations + node.caller_may_write_tag(self.api,self.caller,tag_type) + + node_tag['value'] = value + node_tag.sync() + + self.object_ids = [node_tag['node_tag_id']] + return 1 diff --git a/PLC/Methods/UpdatePCU.py b/PLC/Methods/UpdatePCU.py new file mode 100644 index 0000000..fb17176 --- /dev/null +++ b/PLC/Methods/UpdatePCU.py @@ -0,0 +1,53 @@ +from PLC.Faults import * +from PLC.Method import Method +from PLC.Parameter import Parameter, Mixed +from PLC.PCUs import PCU, PCUs +from PLC.Auth import Auth + +can_update = lambda (field, value): field not in \ + ['pcu_id', 'site_id'] + +class UpdatePCU(Method): + """ + Updates the parameters of an existing PCU with the values in + pcu_fields. + + Non-admins may only update PCUs at their sites. + + Returns 1 if successful, faults otherwise. 
+ """ + + roles = ['admin', 'pi', 'tech'] + + update_fields = dict(filter(can_update, PCU.fields.items())) + + accepts = [ + Auth(), + PCU.fields['pcu_id'], + update_fields + ] + + returns = Parameter(int, '1 if successful') + + def call(self, auth, pcu_id, pcu_fields): + pcu_fields = dict(filter(can_update, pcu_fields.items())) + + # Get associated PCU details + pcus = PCUs(self.api, [pcu_id]) + if not pcus: + raise PLCInvalidArgument, "No such PCU" + pcu = pcus[0] + + if 'admin' not in self.caller['roles']: + if pcu['site_id'] not in self.caller['site_ids']: + raise PLCPermissionDenied, "Not allowed to update that PCU" + + pcu.update(pcu_fields) + pcu.update_last_updated(commit=False) + pcu.sync() + + # Logging variables + self.event_objects = {'PCU': [pcu['pcu_id']]} + self.message = 'PCU %d updated: %s' % \ + (pcu['pcu_id'], ", ".join(pcu_fields.keys())) + return 1 diff --git a/PLC/Methods/UpdatePCUProtocolType.py b/PLC/Methods/UpdatePCUProtocolType.py new file mode 100644 index 0000000..b5d915e --- /dev/null +++ b/PLC/Methods/UpdatePCUProtocolType.py @@ -0,0 +1,41 @@ +from PLC.Faults import * +from PLC.Method import Method +from PLC.Parameter import Parameter, Mixed +from PLC.PCUProtocolTypes import PCUProtocolType, PCUProtocolTypes +from PLC.Auth import Auth + +can_update = lambda (field, value): field in \ + ['pcu_type_id', 'port', 'protocol', 'supported'] + +class UpdatePCUProtocolType(Method): + """ + Updates a pcu protocol type. Only the fields specified in + port_typee_fields are updated, all other fields are left untouched. + + Returns 1 if successful, faults otherwise. 
+ """ + + roles = ['admin'] + + protocol_type_fields = dict(filter(can_update, PCUProtocolType.fields.items())) + + accepts = [ + Auth(), + PCUProtocolType.fields['pcu_protocol_type_id'], + protocol_type_fields + ] + + returns = Parameter(int, '1 if successful') + + def call(self, auth, protocol_type_id, protocol_type_fields): + protocol_type_fields = dict(filter(can_update, protocol_type_fields.items())) + + protocol_types = PCUProtocolTypes(self.api, [protocol_type_id]) + if not protocol_types: + raise PLCInvalidArgument, "No such pcu protocol type" + + protocol_type = protocol_types[0] + protocol_type.update(protocol_type_fields) + protocol_type.sync() + self.event_objects = {'PCUProtocolType': [protocol_type['pcu_protocol_type_id']]} + return 1 diff --git a/PLC/Methods/UpdatePCUType.py b/PLC/Methods/UpdatePCUType.py new file mode 100644 index 0000000..2850fdb --- /dev/null +++ b/PLC/Methods/UpdatePCUType.py @@ -0,0 +1,42 @@ +from PLC.Faults import * +from PLC.Method import Method +from PLC.Parameter import Parameter, Mixed +from PLC.PCUTypes import PCUType, PCUTypes +from PLC.Auth import Auth + +can_update = lambda (field, value): field in \ + ['model', 'name'] + +class UpdatePCUType(Method): + """ + Updates a PCU type. Only the fields specified in + pcu_typee_fields are updated, all other fields are left untouched. + + Returns 1 if successful, faults otherwise. 
+ """ + + roles = ['admin'] + + pcu_type_fields = dict(filter(can_update, PCUType.fields.items())) + + accepts = [ + Auth(), + PCUType.fields['pcu_type_id'], + pcu_type_fields + ] + + returns = Parameter(int, '1 if successful') + + def call(self, auth, pcu_type_id, pcu_type_fields): + pcu_type_fields = dict(filter(can_update, pcu_type_fields.items())) + + pcu_types = PCUTypes(self.api, [pcu_type_id]) + if not pcu_types: + raise PLCInvalidArgument, "No such pcu type" + + pcu_type = pcu_types[0] + pcu_type.update(pcu_type_fields) + pcu_type.sync() + self.event_objects = {'PCUType': [pcu_type['pcu_type_id']]} + + return 1 diff --git a/PLC/Methods/UpdatePeer.py b/PLC/Methods/UpdatePeer.py new file mode 100644 index 0000000..cd4c1ec --- /dev/null +++ b/PLC/Methods/UpdatePeer.py @@ -0,0 +1,50 @@ +from PLC.Faults import * +from PLC.Method import Method +from PLC.Parameter import Parameter, Mixed +from PLC.Auth import Auth +from PLC.Peers import Peer, Peers + +can_update = lambda (field, value): field in \ + ['peername', 'peer_url', 'key', 'cacert', 'shortname', 'hrn_root'] + +class UpdatePeer(Method): + """ + Updates a peer. Only the fields specified in peer_fields are + updated, all other fields are left untouched. + + Returns 1 if successful, faults otherwise. 
+ """ + + roles = ['admin'] + + peer_fields = dict(filter(can_update, Peer.fields.items())) + + accepts = [ + Auth(), + Mixed(Peer.fields['peer_id'], + Peer.fields['peername']), + peer_fields + ] + + returns = Parameter(int, "1 if successful") + + def call(self, auth, peer_id_or_name, peer_fields): + peer_fields = dict(filter(can_update, peer_fields.items())) + + # Get account information + peers = Peers(self.api, [peer_id_or_name]) + if not peers: + raise PLCInvalidArgument, "No such peer" + peer = peers[0] + + if isinstance(self.caller, Peer): + if self.caller['peer_id'] != peer['peer_id']: + raise PLCPermissionDenied, "Not allowed to update specified peer" + + peer.update(peer_fields) + peer.sync() + + # Log affected objects + self.event_objects = {'Peer': [peer['peer_id']]} + + return 1 diff --git a/PLC/Methods/UpdatePerson.py b/PLC/Methods/UpdatePerson.py new file mode 100644 index 0000000..3212ef5 --- /dev/null +++ b/PLC/Methods/UpdatePerson.py @@ -0,0 +1,129 @@ +from PLC.Faults import * +from PLC.Method import Method +from PLC.Parameter import Parameter, Mixed +from PLC.Auth import Auth +from PLC.Table import Row +from PLC.Persons import Person, Persons +from PLC.sendmail import sendmail +from PLC.TagTypes import TagTypes +from PLC.PersonTags import PersonTags, PersonTag +from PLC.Namespace import email_to_hrn + +related_fields = Person.related_fields.keys() +can_update = ['first_name', 'last_name', 'title', 'email', + 'password', 'phone', 'url', 'bio', 'accepted_aup', + 'enabled'] + related_fields + +class UpdatePerson(Method): + """ + Updates a person. Only the fields specified in person_fields are + updated, all other fields are left untouched. + + Users and techs can only update themselves. PIs can only update + themselves and other non-PIs at their sites. + + Returns 1 if successful, faults otherwise. 
+ """ + + roles = ['admin', 'pi', 'user', 'tech'] + + accepted_fields = Row.accepted_fields(can_update,Person.fields) + # xxx check the related_fields feature + accepted_fields.update(Person.related_fields) + accepted_fields.update(Person.tags) + + accepts = [ + Auth(), + Mixed(Person.fields['person_id'], + Person.fields['email']), + accepted_fields + ] + + returns = Parameter(int, '1 if successful') + + def call(self, auth, person_id_or_email, person_fields): + # split provided fields + [native,related,tags,rejected] = Row.split_fields(person_fields,[Person.fields,Person.related_fields,Person.tags]) + + # type checking + native = Row.check_fields (native, self.accepted_fields) + if rejected: + raise PLCInvalidArgument, "Cannot update Person column(s) %r"%rejected + + # Authenticated function + assert self.caller is not None + + # Get account information + persons = Persons(self.api, [person_id_or_email]) + if not persons: + raise PLCInvalidArgument, "No such account %s"%person_id_or_email + person = persons[0] + + if person['peer_id'] is not None: + raise PLCInvalidArgument, "Not a local account %s"%person_id_or_email + + # Check if we can update this account + if not self.caller.can_update(person): + raise PLCPermissionDenied, "Not allowed to update specified account" + + # Make requested associations + for k,v in related.iteritems(): + person.associate (auth, k, v) + + person.update(native) + person.update_last_updated(False) + person.sync(commit=True) + + # send a mail + if 'enabled' in person_fields: + To = [("%s %s" % (person['first_name'], person['last_name']), person['email'])] + Cc = [] + if person['enabled']: + Subject = "%s account enabled" % (self.api.config.PLC_NAME) + Body = "Your %s account has been enabled. Please visit %s to access your account." % (self.api.config.PLC_NAME, self.api.config.PLC_WWW_HOST) + else: + Subject = "%s account disabled" % (self.api.config.PLC_NAME) + Body = "Your %s account has been disabled. 
Please contact your PI or PlanetLab support for more information" % (self.api.config.PLC_NAME) + sendmail(self.api, To = To, Cc = Cc, Subject = Subject, Body = Body) + + # if email was modifed make sure to update the hrn tag + if 'email' in native: + hrn_tag=PersonTags(self.api,{'tagname':'hrn','person_id':person['person_id']}) + if hrn_tag: + old_hrn = hrn_tag[0]['value'] + root_auth = self.api.config.PLC_HRN_ROOT + login_base = old_hrn.split('.')[-2] + hrn=email_to_hrn("%s.%s"%(root_auth,login_base),person['email']) + tags['hrn'] = hrn + + for (tagname,value) in tags.iteritems(): + # the tagtype instance is assumed to exist, just check that + tag_types = TagTypes(self.api,{'tagname':tagname}) + if not tag_types: + raise PLCInvalidArgument,"No such TagType %s"%tagname + tag_type = tag_types[0] + person_tags=PersonTags(self.api,{'tagname':tagname,'person_id':person['person_id']}) + if not person_tags: + person_tag = PersonTag(self.api) + person_tag['person_id'] = person['person_id'] + person_tag['tag_type_id'] = tag_type['tag_type_id'] + person_tag['tagname'] = tagname + person_tag['value'] = value + person_tag.sync() + else: + person_tag = person_tags[0] + person_tag['value'] = value + person_tag.sync() + + # Logging variables + self.event_objects = {'Person': [person['person_id']]} + + # Redact password + if 'password' in person_fields: + person_fields['password'] = "Removed by API" + self.message = 'Person %d updated: %s.' 
% \ + (person['person_id'], person_fields.keys()) + if 'enabled' in person_fields: + self.message += ' Person enabled' + + return 1 diff --git a/PLC/Methods/UpdatePersonTag.py b/PLC/Methods/UpdatePersonTag.py new file mode 100644 index 0000000..23ab6ab --- /dev/null +++ b/PLC/Methods/UpdatePersonTag.py @@ -0,0 +1,56 @@ +# +# Thierry Parmentelat - INRIA +# +from PLC.Faults import * +from PLC.Method import Method +from PLC.Parameter import Parameter, Mixed +from PLC.Auth import Auth + +from PLC.Persons import Person, Persons +from PLC.TagTypes import TagType, TagTypes +from PLC.PersonTags import PersonTag, PersonTags + +# need to import so the core classes get decorated with caller_may_write_tag +from PLC.AuthorizeHelpers import AuthorizeHelpers + +class UpdatePersonTag(Method): + """ + Updates the value of an existing person setting + + Admins have full access. Non-admins can change their own tags. + + Returns 1 if successful, faults otherwise. + """ + + roles = ['admin', 'pi', 'tech', 'user'] + + accepts = [ + Auth(), + PersonTag.fields['person_tag_id'], + PersonTag.fields['value'] + ] + + returns = Parameter(int, '1 if successful') + + def call(self, auth, person_tag_id, value): + person_tags = PersonTags(self.api, [person_tag_id]) + if not person_tags: + raise PLCInvalidArgument, "No such person setting %r"%person_tag_id + person_tag = person_tags[0] + + tag_type_id = person_tag['tag_type_id'] + tag_type = TagTypes (self.api,[tag_type_id])[0] + + persons = Persons (self.api, person_tag['person_id']) + if not persons: + raise PLCInvalidArgument, "No such person %d"%person_tag['person_id'] + person=persons[0] + + # check authorizations + person.caller_may_write_tag(self.api,self.caller,tag_type) + + person_tag['value'] = value + person_tag.sync() + + self.object_ids = [person_tag['person_tag_id']] + return 1 diff --git a/PLC/Methods/UpdateSite.py b/PLC/Methods/UpdateSite.py new file mode 100644 index 0000000..86ae121 --- /dev/null +++ b/PLC/Methods/UpdateSite.py @@ 
-0,0 +1,99 @@ +from PLC.Faults import * +from PLC.Method import Method +from PLC.Parameter import Parameter, Mixed +from PLC.Sites import Site, Sites +from PLC.Auth import Auth + +from PLC.TagTypes import TagTypes +from PLC.SiteTags import SiteTags +from PLC.Methods.AddSiteTag import AddSiteTag +from PLC.Methods.UpdateSiteTag import UpdateSiteTag + +related_fields = Site.related_fields.keys() +can_update = lambda (field, value): field in \ + ['name', 'abbreviated_name', 'login_base', + 'is_public', 'latitude', 'longitude', 'url', + 'max_slices', 'max_slivers', 'enabled', 'ext_consortium_id'] + \ + related_fields + +class UpdateSite(Method): + """ + Updates a site. Only the fields specified in update_fields are + updated, all other fields are left untouched. + + PIs can only update sites they are a member of. Only admins can + update max_slices, max_slivers, and login_base. + + Returns 1 if successful, faults otherwise. + """ + + roles = ['admin', 'pi'] + + site_fields = dict(filter(can_update, Site.fields.items() + Site.related_fields.items())) + + accepts = [ + Auth(), + Mixed(Site.fields['site_id'], + Site.fields['login_base']), + site_fields + ] + + returns = Parameter(int, '1 if successful') + + def call(self, auth, site_id_or_login_base, site_fields): + site_fields = dict(filter(can_update, site_fields.items())) + + # Get site information + sites = Sites(self.api, [site_id_or_login_base]) + if not sites: + raise PLCInvalidArgument, "No such site" + site = sites[0] + + if site['peer_id'] is not None: + raise PLCInvalidArgument, "Not a local site" + + # Authenticated function + assert self.caller is not None + + # If we are not an admin, make sure that the caller is a + # member of the site. 
+ if 'admin' not in self.caller['roles']: + if site['site_id'] not in self.caller['site_ids']: + raise PLCPermissionDenied, "Not allowed to modify specified site" + + # Remove admin only fields + for key in 'max_slices', 'max_slivers', 'login_base': + if key in site_fields: + del site_fields[key] + + # Make requested associations + for field in related_fields: + if field in site_fields: + site.associate(auth, field, site_fields[field]) + site_fields.pop(field) + + site.update(site_fields) + site.update_last_updated(False) + site.sync() + + # Logging variables + self.event_objects = {'Site': [site['site_id']]} + self.message = 'Site %d updated: %s' % \ + (site['site_id'], ", ".join(site_fields.keys())) + + # Update Site HRN if login_base changed + if 'login_base' in site_fields: + root_auth = self.api.config.PLC_HRN_ROOT + tagname = 'hrn' + tagvalue = '.'.join([root_auth, site['login_base']]) + # check if the tagtype instance exists + tag_types = TagTypes(self.api,{'tagname':tagname}) + if tag_types: + site_tags=SiteTags(self.api,{'tagname':tagname,'site_id':site['site_id']}) + if not site_tags: + AddSiteTag(self.api).__call__(auth,int(site['site_id']),tagname,tagvalue) + else: + UpdateSiteTag(self.api).__call__(auth,site_tags[0]['site_tag_id'],tagvalue) + + + return 1 diff --git a/PLC/Methods/UpdateSiteTag.py b/PLC/Methods/UpdateSiteTag.py new file mode 100644 index 0000000..a5d69f4 --- /dev/null +++ b/PLC/Methods/UpdateSiteTag.py @@ -0,0 +1,58 @@ +# +# Thierry Parmentelat - INRIA +# +from PLC.Faults import * +from PLC.Method import Method +from PLC.Parameter import Parameter, Mixed +from PLC.Auth import Auth + +from PLC.Sites import Site, Sites +from PLC.TagTypes import TagType, TagTypes +from PLC.SiteTags import SiteTag, SiteTags + +# need to import so the core classes get decorated with caller_may_write_tag +from PLC.AuthorizeHelpers import AuthorizeHelpers + +class UpdateSiteTag(Method): + """ + Updates the value of an existing site setting + + Admins have full 
access. Non-admins need to + (1) have at least one of the roles attached to the tagtype, + and (2) belong in the same site as the tagged subject. + + Returns 1 if successful, faults otherwise. + """ + + roles = ['admin', 'pi', 'tech', 'user'] + + accepts = [ + Auth(), + SiteTag.fields['site_tag_id'], + SiteTag.fields['value'] + ] + + returns = Parameter(int, '1 if successful') + + def call(self, auth, site_tag_id, value): + site_tags = SiteTags(self.api, [site_tag_id]) + if not site_tags: + raise PLCInvalidArgument, "No such site setting %r"%site_tag_id + site_tag = site_tags[0] + + tag_type_id = site_tag['tag_type_id'] + tag_type = TagTypes (self.api,[tag_type_id])[0] + + sites = Sites (self.api, site_tag['site_id']) + if not sites: + raise PLCInvalidArgument, "No such site %d"%site_tag['site_id'] + site=sites[0] + + # check authorizations + site.caller_may_write_tag(self.api,self.caller,tag_type) + + site_tag['value'] = value + site_tag.sync() + + self.object_ids = [site_tag['site_tag_id']] + return 1 diff --git a/PLC/Methods/UpdateSlice.py b/PLC/Methods/UpdateSlice.py new file mode 100644 index 0000000..9129ba0 --- /dev/null +++ b/PLC/Methods/UpdateSlice.py @@ -0,0 +1,143 @@ +import time + +from PLC.Faults import * +from PLC.Method import Method +from PLC.Parameter import Parameter, Mixed +from PLC.Table import Row +from PLC.Auth import Auth + +from PLC.Slices import Slice, Slices +from PLC.Sites import Site, Sites +from PLC.TagTypes import TagTypes +from PLC.SliceTags import SliceTags +from PLC.Methods.AddSliceTag import AddSliceTag +from PLC.Methods.UpdateSliceTag import UpdateSliceTag + +can_update = ['instantiation', 'url', 'description', 'max_nodes', 'expires'] + +class UpdateSlice(Method): + """ + Updates the parameters of an existing slice with the values in + slice_fields. + + Users may only update slices of which they are members. PIs may + update any of the slices at their sites, or any slices of which + they are members. Admins may update any slice. 
+ + Only PIs and admins may update max_nodes. Slices cannot be renewed + (by updating the expires parameter) more than 8 weeks into the + future. + + Returns 1 if successful, faults otherwise. + """ + + roles = ['admin', 'pi', 'user'] + + accepted_fields = Row.accepted_fields(can_update, Slice.fields) + # xxx check the related_fields feature + accepted_fields.update(Slice.related_fields) + accepted_fields.update(Slice.tags) + + accepts = [ + Auth(), + Mixed(Slice.fields['slice_id'], + Slice.fields['name']), + accepted_fields + ] + + returns = Parameter(int, '1 if successful') + + def call(self, auth, slice_id_or_name, slice_fields): + + # split provided fields + [native,related,tags,rejected] = Row.split_fields(slice_fields,[Slice.fields,Slice.related_fields,Slice.tags]) + + # type checking + native = Row.check_fields (native, self.accepted_fields) + if rejected: + raise PLCInvalidArgument, "Cannot update Slice column(s) %r"%rejected + + slices = Slices(self.api, [slice_id_or_name]) + if not slices: + raise PLCInvalidArgument, "No such slice %r"%slice_id_or_name + slice = slices[0] + + if slice['peer_id'] is not None: + raise PLCInvalidArgument, "Not a local slice" + + # Authenticated function + assert self.caller is not None + + if 'admin' not in self.caller['roles']: + if self.caller['person_id'] in slice['person_ids']: + pass + elif 'pi' not in self.caller['roles']: + raise PLCPermissionDenied, "Not a member of the specified slice" + elif slice['site_id'] not in self.caller['site_ids']: + raise PLCPermissionDenied, "Specified slice not associated with any of your sites" + + # Renewing + renewing=False + if 'expires' in slice_fields and slice_fields['expires'] > slice['expires']: + sites = Sites(self.api, [slice['site_id']]) + assert sites + site = sites[0] + + if site['max_slices'] <= 0: + raise PLCInvalidArgument, "Slice creation and renewal have been disabled for the site" + + # Maximum expiration date is 8 weeks from now + # XXX Make this configurable + 
max_expires = time.time() + (8 * 7 * 24 * 60 * 60) + + if 'admin' not in self.caller['roles'] and slice_fields['expires'] > max_expires: + raise PLCInvalidArgument, "Cannot renew a slice beyond 8 weeks from now" + + # XXX Make this a configurable policy + if slice['description'] is None or not slice['description'].strip(): + if 'description' not in slice_fields or slice_fields['description'] is None or \ + not slice_fields['description'].strip(): + raise PLCInvalidArgument, "Cannot renew a slice with an empty description or URL" + + if slice['url'] is None or not slice['url'].strip(): + if 'url' not in slice_fields or slice_fields['url'] is None or \ + not slice_fields['url'].strip(): + raise PLCInvalidArgument, "Cannot renew a slice with an empty description or URL" + renewing=True + + if 'max_nodes' in slice_fields and slice_fields['max_nodes'] != slice['max_nodes']: + if 'admin' not in self.caller['roles'] and \ + 'pi' not in self.caller['roles']: + raise PLCInvalidArgument, "Only admins and PIs may update max_nodes" + + # Make requested associations + for (k,v) in related.iteritems(): + slice.associate(auth,k,v) + + slice.update(slice_fields) + slice.sync(commit=True) + + for (tagname,value) in tags.iteritems(): + # the tagtype instance is assumed to exist, just check that + if not TagTypes(self.api,{'tagname':tagname}): + raise PLCInvalidArgument,"No such TagType %s"%tagname + slice_tags=SliceTags(self.api,{'tagname':tagname,'slice_id':slice['slice_id']}) + if not slice_tags: + AddSliceTag(self.api).__call__(auth,slice['slice_id'],tagname,value) + else: + UpdateSliceTag(self.api).__call__(auth,slice_tags[0]['slice_tag_id'],value) + + self.event_objects = {'Slice': [slice['slice_id']]} + if 'name' in slice: + self.message='Slice %s updated'%slice['name'] + else: + self.message='Slice %d updated'%slice['slice_id'] + if renewing: + # it appears that slice['expires'] may be either an int, or a formatted string + try: + 
expire_date=time.strftime('%Y-%m-%d:%H:%M',time.localtime(float(slice['expires']))) + except: + expire_date=slice['expires'] + self.message += ' renewed until %s'%expire_date + + return 1 diff --git a/PLC/Methods/UpdateSliceTag.py b/PLC/Methods/UpdateSliceTag.py new file mode 100644 index 0000000..5eff0c3 --- /dev/null +++ b/PLC/Methods/UpdateSliceTag.py @@ -0,0 +1,72 @@ +# +# Thierry Parmentelat - INRIA +# +from PLC.Faults import * +from PLC.Method import Method +from PLC.Parameter import Parameter, Mixed +from PLC.Auth import Auth + +from PLC.TagTypes import TagTypes, TagType +from PLC.Nodes import Node +from PLC.Slices import Slice, Slices +from PLC.SliceTags import SliceTag, SliceTags +from PLC.InitScripts import InitScript, InitScripts + +from PLC.AuthorizeHelpers import AuthorizeHelpers + +# need to import so the core classes get decorated with caller_may_write_tag +from PLC.AuthorizeHelpers import AuthorizeHelpers + +class UpdateSliceTag(Method): + """ + Updates the value of an existing slice or sliver attribute. + + Users may only update attributes of slices or slivers of which + they are members. PIs may only update attributes of slices or + slivers at their sites, or of which they are members. Admins may + update attributes of any slice or sliver. + + Returns 1 if successful, faults otherwise. 
+ """ + + roles = ['admin', 'pi', 'user', 'node'] + + accepts = [ + Auth(), + SliceTag.fields['slice_tag_id'], + Mixed(SliceTag.fields['value'], + InitScript.fields['name']) + ] + + returns = Parameter(int, '1 if successful') + + def call(self, auth, slice_tag_id, value): + slice_tags = SliceTags(self.api, [slice_tag_id]) + if not slice_tags: + raise PLCInvalidArgument, "No such slice attribute" + slice_tag = slice_tags[0] + + tag_type_id = slice_tag['tag_type_id'] + tag_type = TagTypes (self.api,[tag_type_id])[0] + + slices = Slices(self.api, [slice_tag['slice_id']]) + if not slices: + raise PLCInvalidArgument, "No such slice %d"%slice_tag['slice_id'] + slice = slices[0] + + assert slice_tag['slice_tag_id'] in slice['slice_tag_ids'] + + # check authorizations + node_id_or_hostname=slice_tag['node_id'] + nodegroup_id_or_name=slice_tag['nodegroup_id'] + slice.caller_may_write_tag(self.api,self.caller,tag_type,node_id_or_hostname,nodegroup_id_or_name) + + if slice_tag['tagname'] in ['initscript']: + initscripts = InitScripts(self.api, {'enabled': True, 'name': value}) + if not initscripts: + raise PLCInvalidArgument, "No such plc initscript" + + slice_tag['value'] = unicode(value) + slice_tag.sync() + self.event_objects = {'SliceTag': [slice_tag['slice_tag_id']]} + return 1 diff --git a/PLC/Methods/UpdateTagType.py b/PLC/Methods/UpdateTagType.py new file mode 100644 index 0000000..8fc868c --- /dev/null +++ b/PLC/Methods/UpdateTagType.py @@ -0,0 +1,53 @@ +# +# Thierry Parmentelat - INRIA +# +from PLC.Faults import * +from PLC.Method import Method +from PLC.Parameter import Parameter, Mixed +from PLC.TagTypes import TagType, TagTypes +from PLC.Auth import Auth + +can_update = lambda (field, value): field in \ + ['tagname', 'description', 'category'] + +class UpdateTagType(Method): + """ + Updates the parameters of an existing tag type + with the values in tag_type_fields. + + Returns 1 if successful, faults otherwise. 
+ """ + + roles = ['admin'] + + tag_type_fields = dict(filter(can_update, TagType.fields.items())) + + accepts = [ + Auth(), + Mixed(TagType.fields['tag_type_id'], + TagType.fields['tagname']), + tag_type_fields + ] + + returns = Parameter(int, '1 if successful') + + def call(self, auth, tag_type_id_or_name, tag_type_fields): + + accepted_type_fields = dict(filter(can_update, tag_type_fields.items())) + rejected_keys = [ k for k in tag_type_fields if k not in accepted_type_fields ] + if rejected_keys: + error="Cannot update TagType column(s) %r"%rejected_keys + if 'roles' in rejected_keys or 'role_ids' in rejected_keys: + error += " see AddRoleToTagType DeleteRoleFromTagType" + raise PLCInvalidArgument, error + + tag_types = TagTypes(self.api, [tag_type_id_or_name]) + if not tag_types: + raise PLCInvalidArgument, "No such tag type" + tag_type = tag_types[0] + + tag_type.update(accepted_type_fields) + tag_type.sync() + self.object_ids = [tag_type['tag_type_id']] + + return 1 diff --git a/PLC/Methods/VerifyPerson.py b/PLC/Methods/VerifyPerson.py new file mode 100644 index 0000000..b34506a --- /dev/null +++ b/PLC/Methods/VerifyPerson.py @@ -0,0 +1,156 @@ +import random +import base64 +import time +import urllib + +from PLC.Logger import logger +from PLC.Faults import * +from PLC.Method import Method +from PLC.Parameter import Parameter, Mixed +from PLC.Persons import Person, Persons +from PLC.Sites import Site, Sites +from PLC.Messages import Message, Messages +from PLC.Auth import Auth +from PLC.sendmail import sendmail + +class VerifyPerson(Method): + """ + Verify a new (must be disabled) user's e-mail address and registration. + + If verification_key is not specified, then a new verification_key + will be generated and stored with the user's account. The key will + be e-mailed to the user in the form of a link to a web page. + + The web page should verify the key by calling this function again + and specifying verification_key. 
If the key matches what has been + stored in the user's account, then an e-mail will be sent to the + user's PI (and support if the user is requesting a PI role), + asking the PI (or support) to enable the account. + + Returns 1 if the verification key if valid. + """ + + roles = ['admin'] + + accepts = [ + Auth(), + Mixed(Person.fields['person_id'], + Person.fields['email']), + Person.fields['verification_key'], + Person.fields['verification_expires'] + ] + + returns = Parameter(int, '1 if verification_key is valid') + + def call(self, auth, person_id_or_email, verification_key = None, verification_expires = None): + # Get account information + persons = Persons(self.api, [person_id_or_email]) + if not persons: + raise PLCInvalidArgument, "No such account %r"%person_id_or_email + person = persons[0] + + if person['peer_id'] is not None: + raise PLCInvalidArgument, "Not a local account %r"%person_id_or_email + + if person['enabled']: + raise PLCInvalidArgument, "Account %r must be new (disabled)"%person_id_or_email + + # Get the primary site name + person_sites = Sites(self.api, person['site_ids']) + if person_sites: + site_name = person_sites[0]['name'] + else: + site_name = "No Site" + + # Generate 32 random bytes + bytes = random.sample(xrange(0, 256), 32) + # Base64 encode their string representation + random_key = base64.b64encode("".join(map(chr, bytes))) + + if verification_key is None or \ + (verification_key is not None and person['verification_expires'] and \ + person['verification_expires'] < time.time()): + # Only allow one verification at a time + if person['verification_expires'] is not None and \ + person['verification_expires'] > time.time(): + raise PLCPermissionDenied, "Verification request already pending" + + if verification_expires is None: + verification_expires = int(time.time() + (24 * 60 * 60)) + + person['verification_key'] = random_key + person['verification_expires'] = verification_expires + person.sync() + + # Send e-mail to user + To = 
("%s %s" % (person['first_name'], person['last_name']), person['email']) + Cc = None + + message_id = 'Verify account' + + + elif verification_key is not None: + if person['verification_key'] is None or \ + person['verification_expires'] is None: + raise PLCPermissionDenied, "Invalid Verification key" + elif person['verification_key'] != verification_key: + raise PLCPermissionDenied, "Verification key incorrect" + else: + person['verification_key'] = None + person['verification_expires'] = None + person.sync() + + # Get the PI(s) of each site that the user is registering with + person_ids = set() + for site in person_sites: + person_ids.update(site['person_ids']) + persons = Persons(self.api, person_ids) + pis = filter(lambda person: 'pi' in person['roles'] and person['enabled'], persons) + + # Send e-mail to PI(s) and copy the user + To = [("%s %s" % (pi['first_name'], pi['last_name']), pi['email']) for pi in pis] + Cc = ("%s %s" % (person['first_name'], person['last_name']), person['email']) + + if 'pi' in person['roles']: + # And support if user is requesting a PI role + To.append(("%s Support" % self.api.config.PLC_NAME, + self.api.config.PLC_MAIL_SUPPORT_ADDRESS)) + message_id = 'New PI account' + else: + message_id = 'New account' + + messages = Messages(self.api, [message_id]) + if messages: + # Send message to user + message = messages[0] + + params = {'PLC_NAME': self.api.config.PLC_NAME, + 'PLC_MAIL_SUPPORT_ADDRESS': self.api.config.PLC_MAIL_SUPPORT_ADDRESS, + 'PLC_WWW_HOST': self.api.config.PLC_WWW_HOST, + 'PLC_WWW_SSL_PORT': self.api.config.PLC_WWW_SSL_PORT, + 'person_id': person['person_id'], + # Will be used in a URL, so must quote appropriately + 'verification_key': urllib.quote_plus(random_key), + 'site_name': site_name, + 'first_name': person['first_name'], + 'last_name': person['last_name'], + 'email': person['email'], + 'roles': ", ".join(person['roles'])} + + sendmail(self.api, + To = To, + Cc = Cc, + Subject = message['subject'] % params, + 
Body = message['template'] % params) + else: + logger.warning("No message template '%s'" % message_id) + + # Logging variables + self.event_objects = {'Person': [person['person_id']]} + self.message = message_id + + if verification_key is not None and person['verification_expires'] and \ + person['verification_expires'] < time.time(): + raise PLCPermissionDenied, "Verification key has expired. Another email has been sent." + + return 1 diff --git a/PLC/Methods/__init__.py b/PLC/Methods/__init__.py new file mode 100644 index 0000000..d72755e --- /dev/null +++ b/PLC/Methods/__init__.py @@ -0,0 +1,20 @@ +#!/usr/bin/python -tt + +import os +native_methods = [] +toppath = os.path.dirname(__file__) +for path, dirs, methods in os.walk(toppath): + remove_dirs = [] + for dir in dirs: + if dir.startswith("."): + remove_dirs.append(dir) + for dir in remove_dirs: + dirs.remove(dir) + prefix = path + "/" + prefix = prefix[len(toppath) + 1:].replace("/", ".") + for method in methods: + if method == "__init__.py": + continue + if not method.endswith(".py"): + continue + native_methods.append(prefix + method[:-3]) diff --git a/PLC/Methods/system/__init__.py b/PLC/Methods/system/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/PLC/Methods/system/listMethods.py b/PLC/Methods/system/listMethods.py new file mode 100644 index 0000000..fe5a9a3 --- /dev/null +++ b/PLC/Methods/system/listMethods.py @@ -0,0 +1,20 @@ +from PLC.Method import Method +from PLC.Parameter import Parameter +import PLC.Methods + +class listMethods(Method): + """ + This method lists all the methods that the XML-RPC server knows + how to dispatch. 
+ """ + + roles = [] + accepts = [] + returns = Parameter(list, 'List of methods') + + def __init__(self, api): + Method.__init__(self, api) + self.name = "system.listMethods" + + def call(self): + return self.api.all_methods diff --git a/PLC/Methods/system/methodHelp.py b/PLC/Methods/system/methodHelp.py new file mode 100644 index 0000000..22a0dc1 --- /dev/null +++ b/PLC/Methods/system/methodHelp.py @@ -0,0 +1,20 @@ +from PLC.Method import Method +from PLC.Parameter import Parameter + +class methodHelp(Method): + """ + Returns help text if defined for the method passed, otherwise + returns an empty string. + """ + + roles = [] + accepts = [Parameter(str, 'Method name')] + returns = Parameter(str, 'Method help') + + def __init__(self, api): + Method.__init__(self, api) + self.name = "system.methodHelp" + + def call(self, method): + function = self.api.callable(method) + return function.help() diff --git a/PLC/Methods/system/methodSignature.py b/PLC/Methods/system/methodSignature.py new file mode 100644 index 0000000..4b049a1 --- /dev/null +++ b/PLC/Methods/system/methodSignature.py @@ -0,0 +1,60 @@ +from PLC.Parameter import Parameter, Mixed +from PLC.Method import Method, xmlrpc_type + +class methodSignature(Method): + """ + Returns an array of known signatures (an array of arrays) for the + method name passed. If no signatures are known, returns a + none-array (test for type != array to detect missing signature). + """ + + roles = [] + accepts = [Parameter(str, "Method name")] + returns = [Parameter([str], "Method signature")] + + def __init__(self, api): + Method.__init__(self, api) + self.name = "system.methodSignature" + + def possible_signatures(self, signature, arg): + """ + Return a list of the possible new signatures given a current + signature and the next argument. 
+ """ + + if isinstance(arg, Mixed): + arg_types = [xmlrpc_type(mixed_arg) for mixed_arg in arg] + else: + arg_types = [xmlrpc_type(arg)] + + return [signature + [arg_type] for arg_type in arg_types] + + def signatures(self, returns, args): + """ + Returns a list of possible signatures given a return value and + a set of arguments. + """ + + signatures = [[xmlrpc_type(returns)]] + + for arg in args: + # Create lists of possible new signatures for each current + # signature. Reduce the list of lists back down to a + # single list. + signatures = reduce(lambda a, b: a + b, + [self.possible_signatures(signature, arg) \ + for signature in signatures]) + + return signatures + + def call(self, method): + function = self.api.callable(method) + (min_args, max_args, defaults) = function.args() + + signatures = [] + + assert len(max_args) >= len(min_args) + for num_args in range(len(min_args), len(max_args) + 1): + signatures += self.signatures(function.returns, function.accepts[:num_args]) + + return signatures diff --git a/PLC/Methods/system/multicall.py b/PLC/Methods/system/multicall.py new file mode 100644 index 0000000..64563ef --- /dev/null +++ b/PLC/Methods/system/multicall.py @@ -0,0 +1,54 @@ +import sys +import xmlrpclib + +from PLC.Parameter import Parameter, Mixed +from PLC.Method import Method + +class multicall(Method): + """ + Process an array of calls, and return an array of results. Calls + should be structs of the form + + {'methodName': string, 'params': array} + + Each result will either be a single-item array containg the result + value, or a struct of the form + + {'faultCode': int, 'faultString': string} + + This is useful when you need to make lots of small calls without + lots of round trips. 
+ """ + + roles = [] + accepts = [[{'methodName': Parameter(str, "Method name"), + 'params': Parameter(list, "Method arguments")}]] + returns = Mixed([Mixed()], + {'faultCode': Parameter(int, "XML-RPC fault code"), + 'faultString': Parameter(int, "XML-RPC fault detail")}) + + def __init__(self, api): + Method.__init__(self, api) + self.name = "system.multicall" + + def call(self, calls): + # Some error codes, borrowed from xmlrpc-c. + REQUEST_REFUSED_ERROR = -507 + + results = [] + for call in calls: + try: + name = call['methodName'] + params = call['params'] + if name == 'system.multicall': + errmsg = "Recursive system.multicall forbidden" + raise xmlrpclib.Fault(REQUEST_REFUSED_ERROR, errmsg) + result = [self.api.call(self.source, name, *params)] + except xmlrpclib.Fault, fault: + result = {'faultCode': fault.faultCode, + 'faultString': fault.faultString} + except: + errmsg = "%s:%s" % (sys.exc_type, sys.exc_value) + result = {'faultCode': 1, 'faultString': errmsg} + results.append(result) + return results diff --git a/PLC/Namespace.py b/PLC/Namespace.py new file mode 100644 index 0000000..e61ff2c --- /dev/null +++ b/PLC/Namespace.py @@ -0,0 +1,46 @@ +# +# Thierry, April 2013 +# +# This file here is the connection to the SFA code +# at some point in time we had duplicated this from sfa +# but of course both versions have entirely diverged since then +# in addition we really only need 2 hepler functions here, that allow to maintain +# hrn for nodes and persons and that is all +# +# So in order to avoid such situations in the future, +# we try to import and re-use the SFA code +# assumption being that if these hrn's are of importance then it makes sense to +# require people to have sfa-plc installed as well +# however we do not want this requirement to break myplc in case it is not fulfilled +# + +#################### try to import from sfa +try: + from sfa.planetlab.plxrn import hostname_to_hrn +except: + hostname_to_hrn=None + +try: + from sfa.planetlab.plxrn 
import email_to_hrn +except: + email_to_hrn=None + +try: + from sfa.planetlab.plxrn import slicename_to_hrn +except: + slicename_to_hrn=None +#################### if not found, bring our own local version +import re +def escape(token): return re.sub(r'([^\\])\.', r'\1\.', token) + +if hostname_to_hrn is None: + def hostname_to_hrn (auth_hrn, login_base, hostname): + return ".".join( [ auth_hrn, login_base, escape(hostname) ] ) + +if email_to_hrn is None: + def email_to_hrn (auth_hrn, email): + return '.'.join([auth_hrn,email.split('@')[0].replace(".", "_").replace("+", "_")]) + +if slicename_to_hrn is None: + def slicename_to_hrn (auth_hrn, slicename): + return ".".join([auth_hrn] + slicename.split("_",1)) diff --git a/PLC/NetworkMethods.py b/PLC/NetworkMethods.py new file mode 100644 index 0000000..a54934d --- /dev/null +++ b/PLC/NetworkMethods.py @@ -0,0 +1,51 @@ +# +# Functions for interacting with the network_methods table in the database +# +# Mark Huang +# Copyright (C) 2006 The Trustees of Princeton University +# + +from PLC.Faults import * +from PLC.Parameter import Parameter +from PLC.Table import Row, Table + +class NetworkMethod(Row): + """ + Representation of a row in the network_methods table. To use, + instantiate with a dict of values. + """ + + table_name = 'network_methods' + primary_key = 'method' + join_tables = ['interfaces'] + fields = { + 'method': Parameter(str, "Network method", max = 20), + } + + def validate_method(self, name): + # Make sure name is not blank + if not len(name): + raise PLCInvalidArgument, "Network method must be specified" + + # Make sure network method does not alredy exist + conflicts = NetworkMethods(self.api, [name]) + if conflicts: + raise PLCInvalidArgument, "Network method name already in use" + + return name + +class NetworkMethods(Table): + """ + Representation of the network_methods table in the database. 
+ """ + + def __init__(self, api, methods = None): + Table.__init__(self, api, NetworkMethod) + + sql = "SELECT %s FROM network_methods" % \ + ", ".join(NetworkMethod.fields) + + if methods: + sql += " WHERE method IN (%s)" % ", ".join( [ api.db.quote (m) for m in methods ] ) + + self.selectall(sql) diff --git a/PLC/NetworkTypes.py b/PLC/NetworkTypes.py new file mode 100644 index 0000000..eb34e7c --- /dev/null +++ b/PLC/NetworkTypes.py @@ -0,0 +1,51 @@ +# +# Functions for interacting with the network_types table in the database +# +# Mark Huang +# Copyright (C) 2006 The Trustees of Princeton University +# + +from PLC.Faults import * +from PLC.Parameter import Parameter +from PLC.Table import Row, Table + +class NetworkType(Row): + """ + Representation of a row in the network_types table. To use, + instantiate with a dict of values. + """ + + table_name = 'network_types' + primary_key = 'type' + join_tables = ['interfaces'] + fields = { + 'type': Parameter(str, "Network type", max = 20), + } + + def validate_type(self, name): + # Make sure name is not blank + if not len(name): + raise PLCInvalidArgument, "Network type must be specified" + + # Make sure network type does not alredy exist + conflicts = NetworkTypes(self.api, [name]) + if conflicts: + raise PLCInvalidArgument, "Network type name already in use" + + return name + +class NetworkTypes(Table): + """ + Representation of the network_types table in the database. 
+ """ + + def __init__(self, api, types = None): + Table.__init__(self, api, NetworkType) + + sql = "SELECT %s FROM network_types" % \ + ", ".join(NetworkType.fields) + + if types: + sql += " WHERE type IN (%s)" % ", ".join( [ api.db.quote (t) for t in types ] ) + + self.selectall(sql) diff --git a/PLC/NodeGroups.py b/PLC/NodeGroups.py new file mode 100644 index 0000000..bbbb6b3 --- /dev/null +++ b/PLC/NodeGroups.py @@ -0,0 +1,110 @@ +# +# Functions for interacting with the nodegroups table in the database +# +# Mark Huang +# Copyright (C) 2006 The Trustees of Princeton University +# + +from types import StringTypes + +from PLC.Faults import * +from PLC.Parameter import Parameter, Mixed +from PLC.Filter import Filter +from PLC.Debug import profile +from PLC.Table import Row, Table +from PLC.Nodes import Node, Nodes + +class NodeGroup(Row): + """ + Representation of a row in the nodegroups table. To use, optionally + instantiate with a dict of values. Update as you would a + dict. Commit to the database with sync(). 
+ """ + + table_name = 'nodegroups' + primary_key = 'nodegroup_id' + join_tables = ['conf_file_nodegroup'] + primary_field = 'nodegroup_id' + fields = { + 'nodegroup_id': Parameter(int, "Node group identifier"), + 'groupname': Parameter(str, "Node group name", max = 50), + 'tag_type_id': Parameter (int, "Node tag type id"), + 'value' : Parameter(str, "value that the nodegroup definition is based upon"), + 'tagname' : Parameter(str, "Tag name that the nodegroup definition is based upon"), + 'conf_file_ids': Parameter([int], "List of configuration files specific to this node group"), + 'node_ids' : Parameter([int], "List of node_ids that belong to this nodegroup"), + } + related_fields = { + } + + def validate_name(self, name): + # Make sure name is not blank + if not len(name): + raise PLCInvalidArgument, "Invalid node group name" + + # Make sure node group does not alredy exist + conflicts = NodeGroups(self.api, [name]) + for nodegroup in conflicts: + if 'nodegroup_id' not in self or self['nodegroup_id'] != nodegroup['nodegroup_id']: + raise PLCInvalidArgument, "Node group name already in use" + + return name + + def associate_conf_files(self, auth, field, value): + """ + Add conf_files found in value list (AddConfFileToNodeGroup) + Delets conf_files not found in value list (DeleteConfFileFromNodeGroup) + """ + + assert 'conf_file_ids' in self + assert 'nodegroup_id' in self + assert isinstance(value, list) + + conf_file_ids = self.separate_types(value)[0] + + if self['conf_file_ids'] != conf_file_ids: + from PLC.Methods.AddConfFileToNodeGroup import AddConfFileToNodeGroup + from PLC.Methods.DeleteConfFileFromNodeGroup import DeleteConfFileFromNodeGroup + new_conf_files = set(conf_file_ids).difference(self['conf_file_ids']) + stale_conf_files = set(self['conf_file_ids']).difference(conf_file_ids) + + for new_conf_file in new_conf_files: + AddConfFileToNodeGroup.__call__(AddConfFileToNodeGroup(self.api), + auth, new_conf_file, self['nodegroup_id']) + for 
stale_conf_file in stale_conf_files: + DeleteConfFileFromNodeGroup.__call__(DeleteConfFileFromNodeGroup(self.api), + auth, stale_conf_file, self['nodegroup_id']) + + +class NodeGroups(Table): + """ + Representation of row(s) from the nodegroups table in the + database. + """ + + def __init__(self, api, nodegroup_filter = None, columns = None): + Table.__init__(self, api, NodeGroup, columns) + + sql = "SELECT %s FROM view_nodegroups WHERE True" % \ + ", ".join(self.columns) + + if nodegroup_filter is not None: + if isinstance(nodegroup_filter, (list, tuple, set)): + # Separate the list into integers and strings + ints = filter(lambda x: isinstance(x, (int, long)), nodegroup_filter) + strs = filter(lambda x: isinstance(x, StringTypes), nodegroup_filter) + nodegroup_filter = Filter(NodeGroup.fields, {'nodegroup_id': ints, 'groupname': strs}) + sql += " AND (%s) %s" % nodegroup_filter.sql(api, "OR") + elif isinstance(nodegroup_filter, dict): + nodegroup_filter = Filter(NodeGroup.fields, nodegroup_filter) + sql += " AND (%s) %s" % nodegroup_filter.sql(api, "AND") + elif isinstance(nodegroup_filter, (int, long)): + nodegroup_filter = Filter(NodeGroup.fields, {'nodegroup_id': nodegroup_filter}) + sql += " AND (%s) %s" % nodegroup_filter.sql(api, "AND") + elif isinstance(nodegroup_filter, StringTypes): + nodegroup_filter = Filter(NodeGroup.fields, {'groupname': nodegroup_filter}) + sql += " AND (%s) %s" % nodegroup_filter.sql(api, "AND") + else: + raise PLCInvalidArgument, "Wrong node group filter %r"%nodegroup_filter + + self.selectall(sql) diff --git a/PLC/NodeTags.py b/PLC/NodeTags.py new file mode 100644 index 0000000..7f69e32 --- /dev/null +++ b/PLC/NodeTags.py @@ -0,0 +1,52 @@ +# +# Thierry Parmentelat - INRIA +# +from PLC.Faults import * +from PLC.Parameter import Parameter +from PLC.Filter import Filter +from PLC.Table import Row, Table +from PLC.Nodes import Node, Nodes +from PLC.TagTypes import TagType, TagTypes + +class NodeTag(Row): + """ + Representation of a 
row in the node_tag. + To use, instantiate with a dict of values. + """ + + table_name = 'node_tag' + primary_key = 'node_tag_id' + fields = { + 'node_tag_id': Parameter(int, "Node tag identifier"), + 'node_id': Node.fields['node_id'], + 'hostname' : Node.fields['hostname'], + 'tag_type_id': TagType.fields['tag_type_id'], + 'value': Parameter(str, "Node tag value"), + 'tagname': TagType.fields['tagname'], + 'description': TagType.fields['description'], + 'category': TagType.fields['category'], + } + +class NodeTags(Table): + """ + Representation of row(s) from the node_tag table in the + database. + """ + + def __init__(self, api, node_tag_filter = None, columns = None): + Table.__init__(self, api, NodeTag, columns) + + sql = "SELECT %s FROM view_node_tags WHERE True" % \ + ", ".join(self.columns) + + if node_tag_filter is not None: + if isinstance(node_tag_filter, (list, tuple, set, int, long)): + node_tag_filter = Filter(NodeTag.fields, {'node_tag_id': node_tag_filter}) + elif isinstance(node_tag_filter, dict): + node_tag_filter = Filter(NodeTag.fields, node_tag_filter) + else: + raise PLCInvalidArgument, "Wrong node tag filter %r"%node_tag_filter + sql += " AND (%s) %s" % node_tag_filter.sql(api) + + + self.selectall(sql) diff --git a/PLC/NodeTypes.py b/PLC/NodeTypes.py new file mode 100644 index 0000000..00d4bf7 --- /dev/null +++ b/PLC/NodeTypes.py @@ -0,0 +1,49 @@ +# +# Functions for interacting with the node_types table in the database +# +# + +from PLC.Faults import * +from PLC.Parameter import Parameter +from PLC.Table import Row, Table + +class NodeType(Row): + """ + Representation of a row in the node_types table. To use, + instantiate with a dict of values. 
+ """ + + table_name = 'node_types' + primary_key = 'node_type' + join_tables = ['nodes'] + fields = { + 'node_type': Parameter(str, "Node type", max = 20), + } + + def validate_node_type(self, name): + # Make sure name is not blank + if not len(name): + raise PLCInvalidArgument, "Node type must be specified" + + # Make sure node type does not alredy exist + conflicts = NodeTypes(self.api, [name]) + if conflicts: + raise PLCInvalidArgument, "Node type name already in use" + + return name + +class NodeTypes(Table): + """ + Representation of the node_types table in the database. + """ + + def __init__(self, api, node_types = None): + Table.__init__(self, api, NodeType) + + sql = "SELECT %s FROM node_types" % \ + ", ".join(NodeType.fields) + + if node_types: + sql += " WHERE node_type IN (%s)" % ", ".join( [ api.db.quote (t) for t in node_types ] ) + + self.selectall(sql) diff --git a/PLC/Nodes.py b/PLC/Nodes.py new file mode 100644 index 0000000..7e27ed4 --- /dev/null +++ b/PLC/Nodes.py @@ -0,0 +1,344 @@ +# +# Functions for interacting with the nodes table in the database +# +# Mark Huang +# Copyright (C) 2006 The Trustees of Princeton University +# + +from types import StringTypes +import re + +from PLC.Faults import * +from PLC.Parameter import Parameter, Mixed +from PLC.Filter import Filter +from PLC.Debug import profile +from PLC.Table import Row, Table +from PLC.NodeTypes import NodeTypes +from PLC.BootStates import BootStates +from PLC.Interfaces import Interface, Interfaces +from PLC.TagTypes import TagType, TagTypes + +def valid_hostname(hostname): + # 1. Each part begins and ends with a letter or number. + # 2. Each part except the last can contain letters, numbers, or hyphens. + # 3. Each part is between 1 and 64 characters, including the trailing dot. + # 4. At least two parts. + # 5. Last part can only contain between 2 and 6 letters. 
+ good_hostname = r'^([a-z0-9]([a-z0-9-]{0,61}[a-z0-9])?\.)+' \ + r'[a-z]{2,6}$' + return hostname and \ + re.match(good_hostname, hostname, re.IGNORECASE) + +class Node(Row): + """ + Representation of a row in the nodes table. To use, optionally + instantiate with a dict of values. Update as you would a + dict. Commit to the database with sync(). + """ + + table_name = 'nodes' + primary_key = 'node_id' + join_tables = [ 'slice_node', 'peer_node', 'slice_tag', + 'node_session', 'node_slice_whitelist', + 'node_tag', 'conf_file_node', 'pcu_node', 'leases', ] + fields = { + 'node_id': Parameter(int, "Node identifier"), + 'node_type': Parameter(str,"Node type",max=20), + 'hostname': Parameter(str, "Fully qualified hostname", max = 255), + 'site_id': Parameter(int, "Site at which this node is located"), + 'boot_state': Parameter(str, "Boot state", max = 20), + 'run_level': Parameter(str, "Run level", max = 20), + 'model': Parameter(str, "Make and model of the actual machine", max = 255, nullok = True), + 'boot_nonce': Parameter(str, "(Admin only) Random value generated by the node at last boot", max = 128), + 'version': Parameter(str, "Apparent Boot CD version", max = 64), + 'ssh_rsa_key': Parameter(str, "Last known SSH host key", max = 1024), + 'date_created': Parameter(int, "Date and time when node entry was created", ro = True), + 'last_updated': Parameter(int, "Date and time when node entry was created", ro = True), + 'last_contact': Parameter(int, "Date and time when node last contacted plc", ro = True), + 'last_boot': Parameter(int, "Date and time when node last booted", ro = True), + 'last_download': Parameter(int, "Date and time when node boot image was created", ro = True), + 'last_pcu_reboot': Parameter(int, "Date and time when PCU reboot was attempted", ro = True), + 'last_pcu_confirmation': Parameter(int, "Date and time when PCU reboot was confirmed", ro = True), + 'last_time_spent_online': Parameter(int, "Length of time the node was last online before 
shutdown/failure", ro = True), + 'last_time_spent_offline': Parameter(int, "Length of time the node was last offline after failure and before reboot", ro = True), + 'verified': Parameter(bool, "Whether the node configuration is verified correct", ro=False), + 'key': Parameter(str, "(Admin only) Node key", max = 256), + 'session': Parameter(str, "(Admin only) Node session value", max = 256, ro = True), + 'interface_ids': Parameter([int], "List of network interfaces that this node has"), + 'conf_file_ids': Parameter([int], "List of configuration files specific to this node"), + # 'root_person_ids': Parameter([int], "(Admin only) List of people who have root access to this node"), + 'slice_ids': Parameter([int], "List of slices on this node"), + 'slice_ids_whitelist': Parameter([int], "List of slices allowed on this node"), + 'pcu_ids': Parameter([int], "List of PCUs that control this node"), + 'ports': Parameter([int], "List of PCU ports that this node is connected to"), + 'peer_id': Parameter(int, "Peer to which this node belongs", nullok = True), + 'peer_node_id': Parameter(int, "Foreign node identifier at peer", nullok = True), + 'node_tag_ids' : Parameter ([int], "List of tags attached to this node"), + 'nodegroup_ids': Parameter([int], "List of node groups that this node is in"), + } + related_fields = { + 'interfaces': [Mixed(Parameter(int, "Interface identifier"), + Filter(Interface.fields))], + 'conf_files': [Parameter(int, "ConfFile identifier")], + 'slices': [Mixed(Parameter(int, "Slice identifier"), + Parameter(str, "Slice name"))], + 'slices_whitelist': [Mixed(Parameter(int, "Slice identifier"), + Parameter(str, "Slice name"))] + } + + view_tags_name = "view_node_tags" + # tags are used by the Add/Get/Update methods to expose tags + # this is initialized here and updated by the accessors factory + tags = { } + + def validate_hostname(self, hostname): + hostname = hostname.lower() + if not valid_hostname(hostname): + raise PLCInvalidArgument, "Invalid 
hostname" + + conflicts = Nodes(self.api, [hostname]) + for node in conflicts: + if 'node_id' not in self or self['node_id'] != node['node_id']: + raise PLCInvalidArgument, "Hostname already in use" + + return hostname + + def validate_node_type(self, node_type): + node_types = [row['node_type'] for row in NodeTypes(self.api)] + if node_type not in node_types: + raise PLCInvalidArgument, "Invalid node type %r"%node_type + return node_type + + def validate_boot_state(self, boot_state): + boot_states = [row['boot_state'] for row in BootStates(self.api)] + if boot_state not in boot_states: + raise PLCInvalidArgument, "Invalid boot state %r"%boot_state + return boot_state + + validate_date_created = Row.validate_timestamp + validate_last_updated = Row.validate_timestamp + validate_last_contact = Row.validate_timestamp + validate_last_boot = Row.validate_timestamp + validate_last_download = Row.validate_timestamp + validate_last_pcu_reboot = Row.validate_timestamp + validate_last_pcu_confirmation = Row.validate_timestamp + + def update_readonly_int(self, col_name, commit = True): + + assert 'node_id' in self + assert self.table_name + + self.api.db.do("UPDATE %s SET %s = %s" % (self.table_name, col_name, self[col_name]) + \ + " where node_id = %d" % (self['node_id']) ) + self.sync(commit) + + def update_timestamp(self, col_name, commit = True): + """ + Update col_name field with current time + """ + + assert 'node_id' in self + assert self.table_name + + self.api.db.do("UPDATE %s SET %s = CURRENT_TIMESTAMP " % (self.table_name, col_name) + \ + " where node_id = %d" % (self['node_id']) ) + self.sync(commit) + + def update_last_boot(self, commit = True): + self.update_timestamp('last_boot', commit) + def update_last_download(self, commit = True): + self.update_timestamp('last_download', commit) + def update_last_pcu_reboot(self, commit = True): + self.update_timestamp('last_pcu_reboot', commit) + def update_last_pcu_confirmation(self, commit = True): + 
self.update_timestamp('last_pcu_confirmation', commit) + + def update_last_contact(self, commit = True): + self.update_timestamp('last_contact', commit) + def update_last_updated(self, commit = True): + self.update_timestamp('last_updated', commit) + + def update_tags(self, tags): + from PLC.Shell import Shell + from PLC.NodeTags import NodeTags + from PLC.Methods.AddNodeTag import AddNodeTag + from PLC.Methods.UpdateNodeTag import UpdateNodeTag + shell = Shell() + for (tagname,value) in tags.iteritems(): + # the tagtype instance is assumed to exist, just check that + if not TagTypes(self.api,{'tagname':tagname}): + raise PLCInvalidArgument,"No such TagType %s"%tagname + node_tags=NodeTags(self.api,{'tagname':tagname,'node_id':node['node_id']}) + if not node_tags: + AddNodeTag(self.api).__call__(shell.auth,node['node_id'],tagname,value) + else: + UpdateNodeTag(self.api).__call__(shell.auth,node_tags[0]['node_tag_id'],value) + + def associate_interfaces(self, auth, field, value): + """ + Delete interfaces not found in value list (using DeleteInterface) + Add interfaces found in value list (using AddInterface) + Updates interfaces found w/ interface_id in value list (using UpdateInterface) + """ + + assert 'interface_ids' in self + assert 'node_id' in self + assert isinstance(value, list) + + (interface_ids, blank, interfaces) = self.separate_types(value) + + if self['interface_ids'] != interface_ids: + from PLC.Methods.DeleteInterface import DeleteInterface + + stale_interfaces = set(self['interface_ids']).difference(interface_ids) + + for stale_interface in stale_interfaces: + DeleteInterface.__call__(DeleteInterface(self.api), auth, stale_interface['interface_id']) + + def associate_conf_files(self, auth, field, value): + """ + Add conf_files found in value list (AddConfFileToNode) + Delets conf_files not found in value list (DeleteConfFileFromNode) + """ + + assert 'conf_file_ids' in self + assert 'node_id' in self + assert isinstance(value, list) + + 
conf_file_ids = self.separate_types(value)[0] + + if self['conf_file_ids'] != conf_file_ids: + from PLC.Methods.AddConfFileToNode import AddConfFileToNode + from PLC.Methods.DeleteConfFileFromNode import DeleteConfFileFromNode + new_conf_files = set(conf_file_ids).difference(self['conf_file_ids']) + stale_conf_files = set(self['conf_file_ids']).difference(conf_file_ids) + + for new_conf_file in new_conf_files: + AddConfFileToNode.__call__(AddConfFileToNode(self.api), auth, new_conf_file, self['node_id']) + for stale_conf_file in stale_conf_files: + DeleteConfFileFromNode.__call__(DeleteConfFileFromNode(self.api), auth, stale_conf_file, self['node_id']) + + def associate_slices(self, auth, field, value): + """ + Add slices found in value list to (AddSliceToNode) + Delete slices not found in value list (DeleteSliceFromNode) + """ + + from PLC.Slices import Slices + + assert 'slice_ids' in self + assert 'node_id' in self + assert isinstance(value, list) + + (slice_ids, slice_names) = self.separate_types(value)[0:2] + + if slice_names: + slices = Slices(self.api, slice_names, ['slice_id']).dict('slice_id') + slice_ids += slices.keys() + + if self['slice_ids'] != slice_ids: + from PLC.Methods.AddSliceToNodes import AddSliceToNodes + from PLC.Methods.DeleteSliceFromNodes import DeleteSliceFromNodes + new_slices = set(slice_ids).difference(self['slice_ids']) + stale_slices = set(self['slice_ids']).difference(slice_ids) + + for new_slice in new_slices: + AddSliceToNodes.__call__(AddSliceToNodes(self.api), auth, new_slice, [self['node_id']]) + for stale_slice in stale_slices: + DeleteSliceFromNodes.__call__(DeleteSliceFromNodes(self.api), auth, stale_slice, [self['node_id']]) + + def associate_slices_whitelist(self, auth, field, value): + """ + Add slices found in value list to whitelist (AddSliceToNodesWhitelist) + Delete slices not found in value list from whitelist (DeleteSliceFromNodesWhitelist) + """ + + from PLC.Slices import Slices + + assert 'slice_ids_whitelist' in 
self + assert 'node_id' in self + assert isinstance(value, list) + + (slice_ids, slice_names) = self.separate_types(value)[0:2] + + if slice_names: + slices = Slices(self.api, slice_names, ['slice_id']).dict('slice_id') + slice_ids += slices.keys() + + if self['slice_ids_whitelist'] != slice_ids: + from PLC.Methods.AddSliceToNodesWhitelist import AddSliceToNodesWhitelist + from PLC.Methods.DeleteSliceFromNodesWhitelist import DeleteSliceFromNodesWhitelist + new_slices = set(slice_ids).difference(self['slice_ids_whitelist']) + stale_slices = set(self['slice_ids_whitelist']).difference(slice_ids) + + for new_slice in new_slices: + AddSliceToNodesWhitelist.__call__(AddSliceToNodesWhitelist(self.api), auth, new_slice, [self['node_id']]) + for stale_slice in stale_slices: + DeleteSliceFromNodesWhitelist.__call__(DeleteSliceFromNodesWhitelist(self.api), auth, stale_slice, [self['node_id']]) + + + def delete(self, commit = True): + """ + Delete existing node. + """ + + assert 'node_id' in self + + # we need to clean up InterfaceTags, so handling interfaces as part of join_tables does not work + # federated nodes don't have interfaces though so for smooth transition from 4.2 to 4.3 + if 'peer_id' in self and self['peer_id']: + pass + else: + assert 'interface_ids' in self + for interface in Interfaces(self.api,self['interface_ids']): + interface.delete() + + # Clean up miscellaneous join tables + for table in self.join_tables: + self.api.db.do("DELETE FROM %s WHERE node_id = %d" % \ + (table, self['node_id'])) + + # Mark as deleted + self['deleted'] = True + self.sync(commit) + +class Nodes(Table): + """ + Representation of row(s) from the nodes table in the + database. 
+ """ + + def __init__(self, api, node_filter = None, columns = None): + Table.__init__(self, api, Node, columns) + + # the view that we're selecting upon: start with view_nodes + view = "view_nodes" + # as many left joins as requested tags + for tagname in self.tag_columns: + view= "%s left join %s using (%s)"%(view,Node.tagvalue_view_name(tagname), + Node.primary_key) + + sql = "SELECT %s FROM %s WHERE deleted IS False" % \ + (", ".join(self.columns.keys()+self.tag_columns.keys()),view) + + if node_filter is not None: + if isinstance(node_filter, (list, tuple, set)): + # Separate the list into integers and strings + ints = filter(lambda x: isinstance(x, (int, long)), node_filter) + strs = filter(lambda x: isinstance(x, StringTypes), node_filter) + node_filter = Filter(Node.fields, {'node_id': ints, 'hostname': strs}) + sql += " AND (%s) %s" % node_filter.sql(api, "OR") + elif isinstance(node_filter, dict): + allowed_fields=dict(Node.fields.items()+Node.tags.items()) + node_filter = Filter(allowed_fields, node_filter) + sql += " AND (%s) %s" % node_filter.sql(api, "AND") + elif isinstance (node_filter, StringTypes): + node_filter = Filter(Node.fields, {'hostname':node_filter}) + sql += " AND (%s) %s" % node_filter.sql(api, "AND") + elif isinstance (node_filter, (int, long)): + node_filter = Filter(Node.fields, {'node_id':node_filter}) + sql += " AND (%s) %s" % node_filter.sql(api, "AND") + else: + raise PLCInvalidArgument, "Wrong node filter %r"%node_filter + + self.selectall(sql) diff --git a/PLC/PCUProtocolTypes.py b/PLC/PCUProtocolTypes.py new file mode 100644 index 0000000..9281c46 --- /dev/null +++ b/PLC/PCUProtocolTypes.py @@ -0,0 +1,67 @@ +# +# Functions for interacting with the pcu_type_port table in the database +# +# Mark Huang +# Copyright (C) 2006 The Trustees of Princeton University +# + +from PLC.Faults import * +from PLC.Parameter import Parameter +from PLC.Table import Row, Table +from PLC.Filter import Filter + +class PCUProtocolType(Row): + """ + 
Representation of a row in the pcu_protocol_type table. To use, + instantiate with a dict of values. + """ + + table_name = 'pcu_protocol_type' + primary_key = 'pcu_protocol_type_id' + join_tables = [] + fields = { + 'pcu_protocol_type_id': Parameter(int, "PCU protocol type identifier"), + 'pcu_type_id': Parameter(int, "PCU type identifier"), + 'port': Parameter(int, "PCU port"), + 'protocol': Parameter(str, "Protocol"), + 'supported': Parameter(bool, "Is the port/protocol supported by PLC") + } + + def validate_port(self, port): + # make sure port is not blank + + if not port: + raise PLCInvalidArgument, "Port must be specified" + + return port + + def validate_protocol(self, protocol): + # make sure port is not blank + if not len(protocol): + raise PLCInvalidArgument, "protocol must be specified" + + return protocol + +class PCUProtocolTypes(Table): + """ + Representation of the pcu_protocol_types table in the database. + """ + + def __init__(self, api, protocol_type_filter = None, columns = None): + Table.__init__(self, api, PCUProtocolType, columns) + + sql = "SELECT %s FROM pcu_protocol_type WHERE True" % \ + ", ".join(self.columns) + + if protocol_type_filter is not None: + if isinstance(protocol_type_filter, (list, tuple, set, int, long)): + protocol_type_filter = Filter(PCUProtocolType.fields, {'pcu_protocol_type_id': protocol_type_filter}) + sql += " AND (%s) %s" % protocol_type_filter.sql(api, "OR") + elif isinstance(protocol_type_filter, dict): + protocol_type_filter = Filter(PCUProtocolType.fields, protocol_type_filter) + sql += " AND (%s) %s" % protocol_type_filter.sql(api, "AND") + else: + raise PLCInvalidArgument, "Wrong pcu_protocol_type filter %r"%protocol_type_filter + + + self.selectall(sql) diff --git a/PLC/PCUTypes.py b/PLC/PCUTypes.py new file mode 100644 index 0000000..78a3353 --- /dev/null +++ b/PLC/PCUTypes.py @@ -0,0 +1,103 @@ +# +# Functions for interacting with the pcu_types table in the database +# +# Mark Huang +# Copyright (C) 2006 
class PCUType(Row):
    """
    Representation of a row in the pcu_types table. To use,
    instantiate with a dict of values.
    """

    table_name = 'pcu_types'
    primary_key = 'pcu_type_id'
    join_tables = ['pcu_protocol_type']
    fields = {
        'pcu_type_id': Parameter(int, "PCU Type Identifier"),
        'model': Parameter(str, "PCU model", max = 254),
        'name': Parameter(str, "PCU full name", max = 254),
        'pcu_protocol_type_ids': Parameter([int], "PCU Protocol Type Identifiers"),
        'pcu_protocol_types': Parameter([dict], "PCU Protocol Type List")
    }

    def validate_model(self, model):
        # Make sure model is not blank
        if not len(model):
            raise PLCInvalidArgument("Model must be specified")

        # Make sure the model is not already in use by another pcu_type
        # (fixed copy-pasted comment that said "boot state ... alredy")
        conflicts = PCUTypes(self.api, [model])
        for pcu_type in conflicts:
            if 'pcu_type_id' not in self or self['pcu_type_id'] != pcu_type['pcu_type_id']:
                raise PLCInvalidArgument("Model already in use")

        return model

class PCUTypes(Table):
    """
    Representation of the pcu_types table in the database.
    """

    def __init__(self, api, pcu_type_filter = None, columns = None):

        # Remove pcu_protocol_types from query since its not really a field
        # in the db.  We will add it later
        if columns == None:
            columns = PCUType.fields.keys()
        if 'pcu_protocol_types' in columns:
            removed_fields = ['pcu_protocol_types']
            columns.remove('pcu_protocol_types')
        else:
            removed_fields = []

        Table.__init__(self, api, PCUType, columns)

        sql = "SELECT %s FROM view_pcu_types WHERE True" % \
              ", ".join(self.columns)

        if pcu_type_filter is not None:
            if isinstance(pcu_type_filter, (list, tuple, set)):
                # Separate the list into integers (ids) and strings (models)
                ints = filter(lambda x: isinstance(x, (int, long)), pcu_type_filter)
                strs = filter(lambda x: isinstance(x, StringTypes), pcu_type_filter)
                pcu_type_filter = Filter(PCUType.fields, {'pcu_type_id': ints, 'model': strs})
                sql += " AND (%s) %s" % pcu_type_filter.sql(api, "OR")
            elif isinstance(pcu_type_filter, dict):
                pcu_type_filter = Filter(PCUType.fields, pcu_type_filter)
                sql += " AND (%s) %s" % pcu_type_filter.sql(api, "AND")
            elif isinstance(pcu_type_filter, StringTypes):
                pcu_type_filter = Filter(PCUType.fields, {'model': pcu_type_filter})
                sql += " AND (%s) %s" % pcu_type_filter.sql(api, "AND")
            elif isinstance(pcu_type_filter, (int, long)):
                # CONSISTENCY FIX: also accept long ids, as every sibling
                # table class (Nodes, Peers, ...) does
                pcu_type_filter = Filter(PCUType.fields, {'pcu_type_id': pcu_type_filter})
                sql += " AND (%s) %s" % pcu_type_filter.sql(api, "AND")
            else:
                raise PLCInvalidArgument("Wrong pcu_type filter %r" % pcu_type_filter)

        self.selectall(sql)

        # return a list of protocol type objects for each port type
        if 'pcu_protocol_types' in removed_fields:
            from PLC.PCUProtocolTypes import PCUProtocolTypes
            protocol_type_ids = set()
            for pcu_type in self:
                protocol_type_ids.update(pcu_type['pcu_protocol_type_ids'])

            protocol_return_fields = ['pcu_protocol_type_id', 'port', 'protocol', 'supported']
            all_protocol_types = PCUProtocolTypes(self.api, list(protocol_type_ids), \
                                                  protocol_return_fields).dict('pcu_protocol_type_id')

            for pcu_type in self:
                pcu_type['pcu_protocol_types'] = []
                for protocol_type_id in pcu_type['pcu_protocol_type_ids']:
                    pcu_type['pcu_protocol_types'].append(all_protocol_types[protocol_type_id])
class PCU(Row):
    """
    Representation of a row in the pcus table. To use,
    instantiate with a dict of values.
    """

    table_name = 'pcus'
    primary_key = 'pcu_id'
    join_tables = ['pcu_node']
    fields = {
        'pcu_id': Parameter(int, "PCU identifier"),
        'site_id': Parameter(int, "Identifier of site where PCU is located"),
        'hostname': Parameter(str, "PCU hostname", max = 254),
        'ip': Parameter(str, "PCU IP address", max = 254),
        'protocol': Parameter(str, "PCU protocol, e.g. ssh, https, telnet", max = 16, nullok = True),
        'username': Parameter(str, "PCU username", max = 254, nullok = True),
        # FIX: description was a copy-paste of "PCU username"
        'password': Parameter(str, "PCU password", max = 254, nullok = True),
        'notes': Parameter(str, "Miscellaneous notes", max = 254, nullok = True),
        'model': Parameter(str, "PCU model string", max = 32, nullok = True),
        'node_ids': Parameter([int], "List of nodes that this PCU controls"),
        'ports': Parameter([int], "List of the port numbers that each node is connected to"),
        # FIX: description wrongly said "when node entry was created"
        'last_updated': Parameter(int, "Date and time of last update", ro = True),
    }

    def validate_ip(self, ip):
        """Reject syntactically invalid IP addresses."""
        if not valid_ip(ip):
            raise PLCInvalidArgument("Invalid IP address " + ip)
        return ip

    validate_last_updated = Row.validate_timestamp

    def update_timestamp(self, col_name, commit = True):
        """
        Update col_name field with current time
        """
        assert 'pcu_id' in self
        assert self.table_name

        self.api.db.do("UPDATE %s SET %s = CURRENT_TIMESTAMP " % (self.table_name, col_name) + \
                       " where pcu_id = %d" % (self['pcu_id']) )
        self.sync(commit)

    def update_last_updated(self, commit = True):
        self.update_timestamp('last_updated', commit)

    def add_node(self, node, port, commit = True):
        """
        Add node to existing PCU.  No-op when the node or the port is
        already associated with this PCU.
        """
        assert 'pcu_id' in self
        assert isinstance(node, Node)
        assert isinstance(port, (int, long))
        assert 'node_id' in node

        pcu_id = self['pcu_id']
        node_id = node['node_id']

        if node_id not in self['node_ids'] and port not in self['ports']:
            self.api.db.do("INSERT INTO pcu_node (pcu_id, node_id, port)" \
                           " VALUES(%(pcu_id)d, %(node_id)d, %(port)d)",
                           locals())

            if commit:
                self.api.db.commit()

            # keep the cached row in step with the database
            self['node_ids'].append(node_id)
            self['ports'].append(port)

    def remove_node(self, node, commit = True):
        """
        Remove node from existing PCU.
        """
        assert 'pcu_id' in self
        assert isinstance(node, Node)
        assert 'node_id' in node

        pcu_id = self['pcu_id']
        node_id = node['node_id']

        if node_id in self['node_ids']:
            # the port at the same index belongs to this node
            i = self['node_ids'].index(node_id)
            port = self['ports'][i]

            self.api.db.do("DELETE FROM pcu_node" \
                           " WHERE pcu_id = %(pcu_id)d" \
                           " AND node_id = %(node_id)d",
                           locals())

            if commit:
                self.api.db.commit()

            self['node_ids'].remove(node_id)
            self['ports'].remove(port)

class PCUs(Table):
    """
    Representation of row(s) from the pcus table in the
    database.
    """

    def __init__(self, api, pcu_filter = None, columns = None):
        Table.__init__(self, api, PCU, columns)

        sql = "SELECT %s FROM view_pcus WHERE True" % \
              ", ".join(self.columns)

        if pcu_filter is not None:
            if isinstance(pcu_filter, (list, tuple, set, int, long)):
                pcu_filter = Filter(PCU.fields, {'pcu_id': pcu_filter})
            elif isinstance(pcu_filter, dict):
                pcu_filter = Filter(PCU.fields, pcu_filter)
            else:
                raise PLCInvalidArgument("Wrong pcu filter %r" % pcu_filter)
            sql += " AND (%s) %s" % pcu_filter.sql(api)

        self.selectall(sql)
+ """ + + # add byte if not dividable by 2 + if len(packet) & 1: + packet = packet + '\0' + + # split into 16-bit word and insert into a binary array + words = array.array('h', packet) + sum = 0 + + # perform ones complement arithmetic on 16-bit words + for word in words: + sum += (word & 0xffff) + + hi = sum >> 16 + lo = sum & 0xffff + sum = hi + lo + sum = sum + (sum >> 16) + + return (~sum) & 0xffff # return ones complement + +def _construct(id, data): + """Constructs a ICMP IPOD packet + """ + ICMP_TYPE = 6 # ping of death code used by PLK + ICMP_CODE = 0 + ICMP_CHECKSUM = 0 + ICMP_ID = 0 + ICMP_SEQ_NR = 0 + + header = struct.pack('bbHHh', ICMP_TYPE, ICMP_CODE, ICMP_CHECKSUM, \ + ICMP_ID, ICMP_SEQ_NR+id) + + packet = header + data # ping packet without checksum + checksum = _in_cksum(packet) # make checksum + + # construct header with correct checksum + header = struct.pack('bbHHh', ICMP_TYPE, ICMP_CODE, checksum, ICMP_ID, \ + ICMP_SEQ_NR+id) + + # ping packet *with* checksum + packet = header + data + + # a perfectly formatted ICMP echo packet + return packet + +def icmp_pod(host,key): + uid = os.getuid() + if uid <> 0: + print "must be root to send icmp pod" + return + + s = socket(AF_INET, SOCK_RAW, getprotobyname("icmp")) + packet = _construct(0, key) # make a ping packet + addr = (host,1) + print 'pod sending icmp-based reboot request to %s' % host + for i in range(1,10): + s.sendto(packet, addr) + +def udp_pod(host,key,fromaddr=('', 0)): + addr = host, UPOD_PORT + s = socket(AF_INET, SOCK_DGRAM) + s.bind(fromaddr) + packet = key + print 'pod sending udp-based reboot request to %s' % host + for i in range(1,10): + s.sendto(packet, addr) + +def noop_pod(host,key): + pass diff --git a/PLC/Parameter.py b/PLC/Parameter.py new file mode 100644 index 0000000..6268fce --- /dev/null +++ b/PLC/Parameter.py @@ -0,0 +1,102 @@ +# +# Shared type definitions +# +# Mark Huang +# Copyright (C) 2006 The Trustees of Princeton University +# + +from types import * +from 
class Parameter:
    """
    Typed value wrapper. Use in accepts and returns to document method
    parameters. Set the optional and default attributes for
    sub-parameters (i.e., dict fields).
    """

    def __init__(self, typeval, doc = "",
                 min = None, max = None,
                 optional = None,
                 ro = False,
                 nullok = False):
        # Basic type of the parameter. Must be a builtin type
        # that can be marshalled by XML-RPC.
        self.type = typeval

        # Documentation string for the parameter
        self.doc = doc

        # Basic value checking. For numeric types, the minimum and
        # maximum possible values, inclusive. For string types, the
        # minimum and maximum possible UTF-8 encoded byte lengths.
        self.min = min
        self.max = max

        # Whether the sub-parameter is optional or not. If None,
        # unknown whether it is optional.
        self.optional = optional

        # Whether the DB field is read-only.
        self.ro = ro

        # Whether the DB field can be NULL.
        self.nullok = nullok

    # NOTE(review): this method is dead code — the instance attribute
    # self.type assigned in __init__ shadows it, so instances always
    # resolve .type to the stored type object, never to this function.
    # Kept for interface compatibility.
    def type(self):
        return self.type

    def __repr__(self):
        return repr(self.type)

class Mixed(tuple):
    """
    A list (technically, a tuple) of types. Use in accepts and returns
    to document method parameters that may return mixed types.
    """

    def __new__(cls, *types):
        return tuple.__new__(cls, types)

def python_type(arg):
    """
    Returns the Python type of the specified argument, which may be a
    Python type, a typed value, or a Parameter.
    """

    if isinstance(arg, Parameter):
        arg = arg.type

    if isinstance(arg, type):
        return arg
    else:
        return type(arg)

def xmlrpc_type(arg):
    """
    Returns the XML-RPC type of the specified argument, which may be a
    Python type, a typed value, or a Parameter.
    """

    arg_type = python_type(arg)

    if arg_type == NoneType:
        return "nil"
    elif arg_type == IntType or arg_type == LongType:
        return "int"
    elif arg_type == bool:
        return "boolean"
    elif arg_type == FloatType:
        return "double"
    elif arg_type in StringTypes:
        return "string"
    elif arg_type == ListType or arg_type == TupleType:
        return "array"
    elif arg_type == DictType:
        return "struct"
    elif arg_type == Mixed:
        # Not really an XML-RPC type but return "mixed" for
        # documentation purposes.
        return "mixed"
    else:
        raise PLCAPIError("XML-RPC cannot marshal %s objects" % arg_type)
class Peer(Row):
    """
    Stores the list of peering PLCs in the peers table.
    See the Row class for more details
    """

    table_name = 'peers'
    primary_key = 'peer_id'
    join_tables = ['peer_site', 'peer_person', 'peer_key', 'peer_node', 'peer_slice']
    fields = {
        'peer_id': Parameter (int, "Peer identifier"),
        'peername': Parameter (str, "Peer name"),
        'peer_url': Parameter (str, "Peer API URL"),
        'key': Parameter(str, "Peer GPG public key"),
        'cacert': Parameter(str, "Peer SSL public certificate"),
        'shortname' : Parameter(str, "Peer short name"),
        'hrn_root' : Parameter(str, "Root of this peer in a hierarchical naming space"),
        ### cross refs
        'site_ids': Parameter([int], "List of sites for which this peer is authoritative"),
        'person_ids': Parameter([int], "List of users for which this peer is authoritative"),
        'key_ids': Parameter([int], "List of keys for which this peer is authoritative"),
        'node_ids': Parameter([int], "List of nodes for which this peer is authoritative"),
        'slice_ids': Parameter([int], "List of slices for which this peer is authoritative"),
    }

    def validate_peername(self, peername):
        """Require a non-empty, unique peer name."""
        if not len(peername):
            raise PLCInvalidArgument("Peer name must be specified")

        conflicts = Peers(self.api, [peername])
        for peer in conflicts:
            if 'peer_id' not in self or self['peer_id'] != peer['peer_id']:
                raise PLCInvalidArgument("Peer name already in use")

        return peername

    def validate_peer_url(self, url):
        """
        Validate URL. Must be HTTPS and end with a slash.
        """
        (scheme, netloc, path, params, query, fragment) = urlparse(url)
        if scheme != "https":
            raise PLCInvalidArgument("Peer URL scheme must be https")
        # BUG FIX: an empty path used to raise a bare IndexError on
        # path[-1]; report it as the same argument error instead
        if not path or path[-1] != '/':
            raise PLCInvalidArgument("Peer URL should end with /")

        return url

    def delete(self, commit = True):
        """
        Deletes this peer and all related entities.
        """
        assert 'peer_id' in self

        # Remove all related entities
        for obj in \
            Slices(self.api, self['slice_ids']) + \
            Keys(self.api, self['key_ids']) + \
            Persons(self.api, self['person_ids']) + \
            Nodes(self.api, self['node_ids']) + \
            Sites(self.api, self['site_ids']):
            assert obj['peer_id'] == self['peer_id']
            obj.delete(commit = False)

        # Mark as deleted
        self['deleted'] = True
        self.sync(commit)

    def add_site(self, site, peer_site_id, commit = True):
        """
        Associate a local site entry with this peer.
        """
        add = Row.add_object(Site, 'peer_site')
        add(self, site,
            {'peer_id': self['peer_id'],
             'site_id': site['site_id'],
             'peer_site_id': peer_site_id},
            commit = commit)

    def remove_site(self, site, commit = True):
        """
        Unassociate a site with this peer.
        """
        remove = Row.remove_object(Site, 'peer_site')
        remove(self, site, commit)

    def add_person(self, person, peer_person_id, commit = True):
        """
        Associate a local user entry with this peer.
        """
        add = Row.add_object(Person, 'peer_person')
        add(self, person,
            {'peer_id': self['peer_id'],
             'person_id': person['person_id'],
             'peer_person_id': peer_person_id},
            commit = commit)

    def remove_person(self, person, commit = True):
        """
        Unassociate a user with this peer.
        """
        remove = Row.remove_object(Person, 'peer_person')
        remove(self, person, commit)

    def add_key(self, key, peer_key_id, commit = True):
        """
        Associate a local key entry with this peer.
        """
        add = Row.add_object(Key, 'peer_key')
        add(self, key,
            {'peer_id': self['peer_id'],
             'key_id': key['key_id'],
             'peer_key_id': peer_key_id},
            commit = commit)

    def remove_key(self, key, commit = True):
        """
        Unassociate a key with this peer.
        """
        remove = Row.remove_object(Key, 'peer_key')
        remove(self, key, commit)

    def add_node(self, node, peer_node_id, commit = True):
        """
        Associate a local node entry with this peer and try to update
        its 'hrn' tag with the remote namespace prefix.
        """
        add = Row.add_object(Node, 'peer_node')
        add(self, node,
            {'peer_id': self['peer_id'],
             'node_id': node['node_id'],
             'peer_node_id': peer_node_id},
            commit = commit)

        sites = Sites(self.api, node['site_id'], ['login_base'])
        site = sites[0]
        login_base = site['login_base']
        try:
            # attempt to manually update the 'hrn' tag with the remote prefix
            hrn_root = self['hrn_root']
            hrn = hostname_to_hrn(hrn_root, login_base, node['hostname'])
            tags = {'hrn': hrn}
            Node(self.api, node).update_tags(tags)
        except:
            # best-effort: tag update failure must not abort the association
            logger.exception("Could not find out hrn on hostname=%s" % node['hostname'])

    def remove_node(self, node, commit = True):
        """
        Unassociate a node with this peer.
        """
        remove = Row.remove_object(Node, 'peer_node')
        remove(self, node, commit)
        # attempt to manually update the 'hrn' tag now that the node is local
        root_auth = self.api.config.PLC_HRN_ROOT
        sites = Sites(self.api, node['site_id'], ['login_base'])
        site = sites[0]
        login_base = site['login_base']
        hrn = hostname_to_hrn(root_auth, login_base, node['hostname'])
        tags = {'hrn': hrn}
        Node(self.api, node).update_tags(tags)

    def add_slice(self, slice, peer_slice_id, commit = True):
        """
        Associate a local slice entry with this peer.
        """
        add = Row.add_object(Slice, 'peer_slice')
        add(self, slice,
            {'peer_id': self['peer_id'],
             'slice_id': slice['slice_id'],
             'peer_slice_id': peer_slice_id},
            commit = commit)

    def remove_slice(self, slice, commit = True):
        """
        Unassociate a slice with this peer.
        """
        remove = Row.remove_object(Slice, 'peer_slice')
        remove(self, slice, commit)

    def connect(self, **kwds):
        """
        Connect to this peer via XML-RPC.
        """
        import xmlrpclib
        from PLC.PyCurl import PyCurlTransport
        self.server = xmlrpclib.ServerProxy(self['peer_url'],
                                            PyCurlTransport(self['peer_url'], self['cacert']),
                                            allow_none = 1, **kwds)

    def add_auth(self, function, methodname, **kwds):
        """
        Sign the specified XML-RPC call and add an auth struct as the
        first argument of the call.
        """
        def wrapper(*args, **kwds):
            from PLC.GPG import gpg_sign
            signature = gpg_sign(args,
                                 self.api.config.PLC_ROOT_GPG_KEY,
                                 self.api.config.PLC_ROOT_GPG_KEY_PUB,
                                 methodname)

            auth = {'AuthMethod': "gpg",
                    'name': self.api.config.PLC_NAME,
                    'signature': signature}

            # Automagically add auth struct to every call
            args = (auth,) + args

            return function(*args)

        return wrapper

    def __getattr__(self, attr):
        """
        Returns a callable API function if attr is the name of a
        PLCAPI function; otherwise, returns the specified attribute.
        """
        try:
            # Figure out if the specified attribute is the name of a
            # PLCAPI function. If so and the function requires an
            # authentication structure as its first argument, return a
            # callable that automagically adds an auth struct to the
            # call.
            methodname = attr
            api_function = self.api.callable(methodname)
            # BUG FIX: the Mixed branch referenced a bare, undefined
            # 'Auth' (only PLC.Auth is imported); the resulting
            # NameError was silently swallowed below, so Mixed-auth
            # methods never got an auth struct added
            if api_function.accepts and \
               (isinstance(api_function.accepts[0], PLC.Auth.Auth) or \
                (isinstance(api_function.accepts[0], Mixed) and \
                 filter(lambda param: isinstance(param, PLC.Auth.Auth), api_function.accepts[0]))):
                function = getattr(self.server, methodname)
                return self.add_auth(function, methodname)
        except Exception:
            # not an API method (or introspection failed); fall through
            pass

        if hasattr(self, attr):
            return getattr(self, attr)
        else:
            raise AttributeError("type object 'Peer' has no attribute '%s'" % attr)

class Peers (Table):
    """
    Maps to the peers table in the database
    """

    def __init__ (self, api, peer_filter = None, columns = None):
        Table.__init__(self, api, Peer, columns)

        sql = "SELECT %s FROM view_peers WHERE deleted IS False" % \
              ", ".join(self.columns)

        if peer_filter is not None:
            if isinstance(peer_filter, (list, tuple, set)):
                # Separate the list into integers (ids) and strings (names)
                ints = filter(lambda x: isinstance(x, (int, long)), peer_filter)
                strs = filter(lambda x: isinstance(x, StringTypes), peer_filter)
                peer_filter = Filter(Peer.fields, {'peer_id': ints, 'peername': strs})
                sql += " AND (%s) %s" % peer_filter.sql(api, "OR")
            elif isinstance(peer_filter, dict):
                peer_filter = Filter(Peer.fields, peer_filter)
                sql += " AND (%s) %s" % peer_filter.sql(api, "AND")
            elif isinstance(peer_filter, (int, long)):
                peer_filter = Filter(Peer.fields, {'peer_id': peer_filter})
                sql += " AND (%s) %s" % peer_filter.sql(api, "AND")
            elif isinstance(peer_filter, StringTypes):
                peer_filter = Filter(Peer.fields, {'peername': peer_filter})
                sql += " AND (%s) %s" % peer_filter.sql(api, "AND")
            else:
                raise PLCInvalidArgument("Wrong peer filter %r" % peer_filter)

        self.selectall(sql)
class PersonTag(Row):
    """
    One row of the person_tag table: the value of a single tag attached
    to a single person, joined with the owning person and tag type.
    Instantiate with a dict of values.
    """

    table_name = 'person_tag'
    primary_key = 'person_tag_id'
    fields = {
        'person_tag_id': Parameter(int, "Person setting identifier"),
        'person_id': Person.fields['person_id'],
        'email': Person.fields['email'],
        'tag_type_id': TagType.fields['tag_type_id'],
        'tagname': TagType.fields['tagname'],
        'description': TagType.fields['description'],
        'category': TagType.fields['category'],
        'value': Parameter(str, "Person setting value"),
        ### relations

    }

class PersonTags(Table):
    """
    Representation of row(s) from the person_tag table in the
    database.
    """

    def __init__(self, api, person_tag_filter = None, columns = None):
        Table.__init__(self, api, PersonTag, columns)

        # select from the view that joins in person/tagtype columns
        sql = "SELECT %s FROM view_person_tags WHERE True" % ", ".join(self.columns)

        if person_tag_filter is not None:
            # scalars and sequences are treated as primary-key lookups;
            # dicts are generic field filters
            if isinstance(person_tag_filter, (list, tuple, set, int, long)):
                where = Filter(PersonTag.fields, {'person_tag_id': person_tag_filter})
            elif isinstance(person_tag_filter, dict):
                where = Filter(PersonTag.fields, person_tag_filter)
            else:
                raise PLCInvalidArgument("Wrong person setting filter %r" % person_tag_filter)
            sql += " AND (%s) %s" % where.sql(api)

        self.selectall(sql)
import crypt

from PLC.Faults import *
from PLC.Parameter import Parameter, Mixed
from PLC.Filter import Filter
from PLC.Table import Row, Table
from PLC.Roles import Role, Roles
from PLC.Keys import Key, Keys
from PLC.Messages import Message, Messages

class Person(Row):
    """
    Representation of a row in the persons table. To use, optionally
    instantiate with a dict of values. Update as you would a
    dict. Commit to the database with sync().
    """

    table_name = 'persons'
    primary_key = 'person_id'
    join_tables = ['person_key', 'person_role', 'person_site', 'slice_person', 'person_session', 'peer_person']
    fields = {
        'person_id': Parameter(int, "User identifier"),
        'first_name': Parameter(str, "Given name", max = 128),
        'last_name': Parameter(str, "Surname", max = 128),
        'title': Parameter(str, "Title", max = 128, nullok = True),
        'email': Parameter(str, "Primary e-mail address", max = 254),
        'phone': Parameter(str, "Telephone number", max = 64, nullok = True),
        'url': Parameter(str, "Home page", max = 254, nullok = True),
        'bio': Parameter(str, "Biography", max = 254, nullok = True),
        'enabled': Parameter(bool, "Has been enabled"),
        'password': Parameter(str, "Account password in crypt() form", max = 254),
        'verification_key': Parameter(str, "Reset password key", max = 254, nullok = True),
        'verification_expires': Parameter(int, "Date and time when verification_key expires", nullok = True),
        'last_updated': Parameter(int, "Date and time of last update", ro = True),
        'date_created': Parameter(int, "Date and time when account was created", ro = True),
        'role_ids': Parameter([int], "List of role identifiers"),
        'roles': Parameter([str], "List of roles"),
        'site_ids': Parameter([int], "List of site identifiers"),
        'key_ids': Parameter([int], "List of key identifiers"),
        'slice_ids': Parameter([int], "List of slice identifiers"),
        'peer_id': Parameter(int, "Peer to which this user belongs", nullok = True),
        'peer_person_id': Parameter(int, "Foreign user identifier at peer", nullok = True),
        'person_tag_ids' : Parameter ([int], "List of tags attached to this person"),
    }
    related_fields = {
        'roles': [Mixed(Parameter(int, "Role identifier"),
                        Parameter(str, "Role name"))],
        'sites': [Mixed(Parameter(int, "Site identifier"),
                        Parameter(str, "Site name"))],
        'keys': [Mixed(Parameter(int, "Key identifier"),
                       Filter(Key.fields))],
        'slices': [Mixed(Parameter(int, "Slice identifier"),
                         Parameter(str, "Slice name"))]
    }
    view_tags_name = "view_person_tags"
    # tags are used by the Add/Get/Update methods to expose tags
    # this is initialized here and updated by the accessors factory
    tags = { }

    def validate_email(self, email):
        """
        Validate email address. Stolen from Mailman.
        """
        email = email.lower()
        invalid_email = PLCInvalidArgument("Invalid e-mail address %s" % email)

        if not email:
            raise invalid_email

        # FIX: raw string — the pattern contains \A, \Z and \- which are
        # invalid escapes in a plain string literal (DeprecationWarning,
        # future SyntaxError); the matched pattern is unchanged
        email_re = re.compile(r'\A[a-zA-Z0-9._%+\-]+@[a-zA-Z0-9._\-]+\.[a-zA-Z]+\Z')
        if not email_re.match(email):
            raise invalid_email

        # check only against users on the same peer
        if 'peer_id' in self:
            namespace_peer_id = self['peer_id']
        else:
            namespace_peer_id = None

        conflicts = Persons(self.api, {'email': email, 'peer_id': namespace_peer_id})

        for person in conflicts:
            if 'person_id' not in self or self['person_id'] != person['person_id']:
                raise PLCInvalidArgument("E-mail address already in use")

        return email

    def validate_password(self, password):
        """
        Encrypt password if necessary before committing to the
        database.  Passwords already in MD5-crypt form ($1$...) are
        stored as-is.
        """
        magic = "$1$"

        if len(password) > len(magic) and \
           password[0:len(magic)] == magic:
            return password
        else:
            # Generate a somewhat unique 8 character salt string
            salt = str(time.time()) + str(Random().random())
            salt = md5(salt).hexdigest()[:8]
            return crypt.crypt(password.encode(self.api.encoding), magic + salt + "$")

    validate_date_created = Row.validate_timestamp
    validate_last_updated = Row.validate_timestamp
    validate_verification_expires = Row.validate_timestamp

    def can_update(self, person):
        """
        Returns true if we can update the specified person. We can
        update a person if:

        1. We are the person.
        2. We are an admin.
        3. We are a PI and the person is a user or tech or at
        one of our sites.
        """
        assert isinstance(person, Person)

        if self['person_id'] == person['person_id']:
            return True

        if 'admin' in self['roles']:
            return True

        if 'pi' in self['roles']:
            if set(self['site_ids']).intersection(person['site_ids']):
                # non-admin users cannot update a person who is neither a PI or ADMIN
                return (not set(['pi', 'admin']).intersection(person['roles']))

        return False

    def can_view(self, person):
        """
        Returns true if we can view the specified person. We can
        view a person if:

        1. We are the person.
        2. We are an admin.
        3. We are a PI or Tech and the person is at one of our sites.
        """
        assert isinstance(person, Person)

        if self.can_update(person):
            return True

        # pis and techs can see all people on their site
        if set(['pi', 'tech']).intersection(self['roles']):
            if set(self['site_ids']).intersection(person['site_ids']):
                return True

        return False

    # generic join-table helpers from Row
    add_role = Row.add_object(Role, 'person_role')
    remove_role = Row.remove_object(Role, 'person_role')

    add_key = Row.add_object(Key, 'person_key')
    remove_key = Row.remove_object(Key, 'person_key')
+ """ + + assert 'person_id' in self + assert 'site_id' in site + + person_id = self['person_id'] + site_id = site['site_id'] + self.api.db.do("UPDATE person_site SET is_primary = False" \ + " WHERE person_id = %(person_id)d", + locals()) + self.api.db.do("UPDATE person_site SET is_primary = True" \ + " WHERE person_id = %(person_id)d" \ + " AND site_id = %(site_id)d", + locals()) + + if commit: + self.api.db.commit() + + assert 'site_ids' in self + assert site_id in self['site_ids'] + + # Make sure that the primary site is first in the list + self['site_ids'].remove(site_id) + self['site_ids'].insert(0, site_id) + + def update_last_updated(self, commit = True): + """ + Update last_updated field with current time + """ + + assert 'person_id' in self + assert self.table_name + + self.api.db.do("UPDATE %s SET last_updated = CURRENT_TIMESTAMP " % (self.table_name) + \ + " where person_id = %d" % (self['person_id']) ) + self.sync(commit) + + def associate_roles(self, auth, field, value): + """ + Adds roles found in value list to this person (using AddRoleToPerson). + Deletes roles not found in value list from this person (using DeleteRoleFromPerson). 
+ """ + + assert 'role_ids' in self + assert 'person_id' in self + assert isinstance(value, list) + + (role_ids, role_names) = self.separate_types(value)[0:2] + + # Translate roles into role_ids + if role_names: + roles = Roles(self.api, role_names).dict('role_id') + role_ids += roles.keys() + + # Add new ids, remove stale ids + if self['role_ids'] != role_ids: + from PLC.Methods.AddRoleToPerson import AddRoleToPerson + from PLC.Methods.DeleteRoleFromPerson import DeleteRoleFromPerson + new_roles = set(role_ids).difference(self['role_ids']) + stale_roles = set(self['role_ids']).difference(role_ids) + + for new_role in new_roles: + AddRoleToPerson.__call__(AddRoleToPerson(self.api), auth, new_role, self['person_id']) + for stale_role in stale_roles: + DeleteRoleFromPerson.__call__(DeleteRoleFromPerson(self.api), auth, stale_role, self['person_id']) + + + def associate_sites(self, auth, field, value): + """ + Adds person to sites found in value list (using AddPersonToSite). + Deletes person from site not found in value list (using DeletePersonFromSite). 
+ """ + + from PLC.Sites import Sites + + assert 'site_ids' in self + assert 'person_id' in self + assert isinstance(value, list) + + (site_ids, site_names) = self.separate_types(value)[0:2] + + # Translate roles into role_ids + if site_names: + sites = Sites(self.api, site_names, ['site_id']).dict('site_id') + site_ids += sites.keys() + + # Add new ids, remove stale ids + if self['site_ids'] != site_ids: + from PLC.Methods.AddPersonToSite import AddPersonToSite + from PLC.Methods.DeletePersonFromSite import DeletePersonFromSite + new_sites = set(site_ids).difference(self['site_ids']) + stale_sites = set(self['site_ids']).difference(site_ids) + + for new_site in new_sites: + AddPersonToSite.__call__(AddPersonToSite(self.api), auth, self['person_id'], new_site) + for stale_site in stale_sites: + DeletePersonFromSite.__call__(DeletePersonFromSite(self.api), auth, self['person_id'], stale_site) + + + def associate_keys(self, auth, field, value): + """ + Deletes key_ids not found in value list (using DeleteKey). + Adds key if key_fields w/o key_id is found (using AddPersonKey). + Updates key if key_fields w/ key_id is found (using UpdateKey). 
+ """ + assert 'key_ids' in self + assert 'person_id' in self + assert isinstance(value, list) + + (key_ids, blank, keys) = self.separate_types(value) + + if self['key_ids'] != key_ids: + from PLC.Methods.DeleteKey import DeleteKey + stale_keys = set(self['key_ids']).difference(key_ids) + + for stale_key in stale_keys: + DeleteKey.__call__(DeleteKey(self.api), auth, stale_key) + + if keys: + from PLC.Methods.AddPersonKey import AddPersonKey + from PLC.Methods.UpdateKey import UpdateKey + updated_keys = filter(lambda key: 'key_id' in key, keys) + added_keys = filter(lambda key: 'key_id' not in key, keys) + + for key in added_keys: + AddPersonKey.__call__(AddPersonKey(self.api), auth, self['person_id'], key) + for key in updated_keys: + key_id = key.pop('key_id') + UpdateKey.__call__(UpdateKey(self.api), auth, key_id, key) + + + def associate_slices(self, auth, field, value): + """ + Adds person to slices found in value list (using AddPersonToSlice). + Deletes person from slices found in value list (using DeletePersonFromSlice). 
+ """ + + from PLC.Slices import Slices + + assert 'slice_ids' in self + assert 'person_id' in self + assert isinstance(value, list) + + (slice_ids, slice_names) = self.separate_types(value)[0:2] + + # Translate roles into role_ids + if slice_names: + slices = Slices(self.api, slice_names, ['slice_id']).dict('slice_id') + slice_ids += slices.keys() + + # Add new ids, remove stale ids + if self['slice_ids'] != slice_ids: + from PLC.Methods.AddPersonToSlice import AddPersonToSlice + from PLC.Methods.DeletePersonFromSlice import DeletePersonFromSlice + new_slices = set(slice_ids).difference(self['slice_ids']) + stale_slices = set(self['slice_ids']).difference(slice_ids) + + for new_slice in new_slices: + AddPersonToSlice.__call__(AddPersonToSlice(self.api), auth, self['person_id'], new_slice) + for stale_slice in stale_slices: + DeletePersonFromSlice.__call__(DeletePersonFromSlice(self.api), auth, self['person_id'], stale_slice) + + + def delete(self, commit = True): + """ + Delete existing user. + """ + + # Delete all keys + keys = Keys(self.api, self['key_ids']) + for key in keys: + key.delete(commit = False) + + # Clean up miscellaneous join tables + for table in self.join_tables: + self.api.db.do("DELETE FROM %s WHERE person_id = %d" % \ + (table, self['person_id'])) + + # Mark as deleted + self['deleted'] = True + + # delete will fail if timestamp fields aren't validated, so lets remove them + for field in ['verification_expires', 'date_created', 'last_updated']: + if field in self: + self.pop(field) + + # don't validate, so duplicates can be consistently removed + self.sync(commit, validate=False) + +class Persons(Table): + """ + Representation of row(s) from the persons table in the + database. 
+ """ + + def __init__(self, api, person_filter = None, columns = None): + Table.__init__(self, api, Person, columns) + + view = "view_persons" + for tagname in self.tag_columns: + view= "%s left join %s using (%s)"%(view,Person.tagvalue_view_name(tagname), + Person.primary_key) + + sql = "SELECT %s FROM %s WHERE deleted IS False" % \ + (", ".join(self.columns.keys()+self.tag_columns.keys()),view) + + if person_filter is not None: + if isinstance(person_filter, (list, tuple, set)): + # Separate the list into integers and strings + ints = filter(lambda x: isinstance(x, (int, long)), person_filter) + strs = filter(lambda x: isinstance(x, StringTypes), person_filter) + person_filter = Filter(Person.fields, {'person_id': ints, 'email': strs}) + sql += " AND (%s) %s" % person_filter.sql(api, "OR") + elif isinstance(person_filter, dict): + allowed_fields=dict(Person.fields.items()+Person.tags.items()) + person_filter = Filter(allowed_fields, person_filter) + sql += " AND (%s) %s" % person_filter.sql(api, "AND") + elif isinstance (person_filter, StringTypes): + person_filter = Filter(Person.fields, {'email':person_filter}) + sql += " AND (%s) %s" % person_filter.sql(api, "AND") + elif isinstance (person_filter, (int, long)): + person_filter = Filter(Person.fields, {'person_id':person_filter}) + sql += " AND (%s) %s" % person_filter.sql(api, "AND") + else: + raise PLCInvalidArgument, "Wrong person filter %r"%person_filter + + self.selectall(sql) diff --git a/PLC/PostgreSQL.py b/PLC/PostgreSQL.py new file mode 100644 index 0000000..e04b02b --- /dev/null +++ b/PLC/PostgreSQL.py @@ -0,0 +1,266 @@ +# +# PostgreSQL database interface. +# Sort of like DBI(3) (Database independent interface for Perl). 
+# +# Mark Huang +# Copyright (C) 2006 The Trustees of Princeton University +# + +import psycopg2 +import psycopg2.extensions +psycopg2.extensions.register_type(psycopg2.extensions.UNICODE) +# UNICODEARRAY not exported yet +psycopg2.extensions.register_type(psycopg2._psycopg.UNICODEARRAY) + +import types +from types import StringTypes, NoneType +import traceback +import commands +import re +from pprint import pformat + +from PLC.Logger import logger +from PLC.Debug import profile +from PLC.Faults import * +from datetime import datetime as DateTimeType + +class PostgreSQL: + def __init__(self, api): + self.api = api + self.debug = False +# self.debug = True + self.connection = None + + def cursor(self): + if self.connection is None: + # (Re)initialize database connection + try: + # Try UNIX socket first + self.connection = psycopg2.connect(user = self.api.config.PLC_DB_USER, + password = self.api.config.PLC_DB_PASSWORD, + database = self.api.config.PLC_DB_NAME) + except psycopg2.OperationalError: + # Fall back on TCP + self.connection = psycopg2.connect(user = self.api.config.PLC_DB_USER, + password = self.api.config.PLC_DB_PASSWORD, + database = self.api.config.PLC_DB_NAME, + host = self.api.config.PLC_DB_HOST, + port = self.api.config.PLC_DB_PORT) + self.connection.set_client_encoding("UNICODE") + + (self.rowcount, self.description, self.lastrowid) = \ + (None, None, None) + + return self.connection.cursor() + + def close(self): + if self.connection is not None: + self.connection.close() + self.connection = None + + @staticmethod + # From pgdb, and simplify code + def _quote(x): + if isinstance(x, DateTimeType): + x = str(x) + elif isinstance(x, unicode): + x = x.encode( 'utf-8' ) + + if isinstance(x, types.StringType): + x = "'%s'" % str(x).replace("\\", "\\\\").replace("'", "''") + elif isinstance(x, (types.IntType, types.LongType, types.FloatType)): + pass + elif x is None: + x = 'NULL' + elif isinstance(x, (types.ListType, types.TupleType, set)): + x = 
'ARRAY[%s]' % ', '.join(map(lambda x: str(_quote(x)), x)) + elif hasattr(x, '__pg_repr__'): + x = x.__pg_repr__() + else: + raise PLCDBError, 'Cannot quote type %s' % type(x) + return x + + + def quote(self, value): + """ + Returns quoted version of the specified value. + """ + return PostgreSQL._quote (value) + +# following is an unsuccessful attempt to re-use lib code as much as possible +# def quote(self, value): +# # The pgdb._quote function is good enough for general SQL +# # quoting, except for array types. +# if isinstance (value, (types.ListType, types.TupleType, set)): +# 'ARRAY[%s]' % ', '.join( [ str(self.quote(x)) for x in value ] ) +# else: +# try: +# # up to PyGreSQL-3.x, function was pgdb._quote +# import pgdb +# return pgdb._quote(value) +# except: +# # with PyGreSQL-4.x, use psycopg2's adapt +# from psycopg2.extensions import adapt +# return adapt (value) + + @classmethod + def param(self, name, value): + # None is converted to the unquoted string NULL + if isinstance(value, NoneType): + conversion = "s" + # True and False are also converted to unquoted strings + elif isinstance(value, bool): + conversion = "s" + elif isinstance(value, float): + conversion = "f" + elif not isinstance(value, StringTypes): + conversion = "d" + else: + conversion = "s" + + return '%(' + name + ')' + conversion + + def begin_work(self): + # Implicit in pgdb.connect() + pass + + def commit(self): + self.connection.commit() + + def rollback(self): + self.connection.rollback() + + def do(self, query, params = None): + cursor = self.execute(query, params) + cursor.close() + return self.rowcount + + def next_id(self, table_name, primary_key): + sequence = "%(table_name)s_%(primary_key)s_seq" % locals() + sql = "SELECT nextval('%(sequence)s')" % locals() + rows = self.selectall(sql, hashref = False) + if rows: + return rows[0][0] + + return None + + def last_insert_id(self, table_name, primary_key): + if isinstance(self.lastrowid, int): + sql = "SELECT %s FROM %s WHERE oid = 
%d" % \ + (primary_key, table_name, self.lastrowid) + rows = self.selectall(sql, hashref = False) + if rows: + return rows[0][0] + + return None + + # modified for psycopg2-2.0.7 + # executemany is undefined for SELECT's + # see http://www.python.org/dev/peps/pep-0249/ + # accepts either None, a single dict, a tuple of single dict - in which case it execute's + # or a tuple of several dicts, in which case it executemany's + def execute(self, query, params = None): + + cursor = self.cursor() + try: + + # psycopg2 requires %()s format for all parameters, + # regardless of type. + # this needs to be done carefully though as with pattern-based filters + # we might have percents embedded in the query + # so e.g. GetPersons({'email':'*fake*'}) was resulting in .. LIKE '%sake%' + if psycopg2: + query = re.sub(r'(%\([^)]*\)|%)[df]', r'\1s', query) + # rewrite wildcards set by Filter.py as '***' into '%' + query = query.replace ('***','%') + + if not params: + if self.debug: + logger.debug('execute0: {}'.format(query)) + cursor.execute(query) + elif isinstance(params, dict): + if self.debug: + logger.debug('execute-dict: params {} query {}' + .format(params, query%params)) + cursor.execute(query, params) + elif isinstance(params,tuple) and len(params)==1: + if self.debug: + logger.debug('execute-tuple {}'.format(query%params[0])) + cursor.execute(query,params[0]) + else: + param_seq=(params,) + if self.debug: + for params in param_seq: + logger.debug('executemany {}'.format(query%params)) + cursor.executemany(query, param_seq) + (self.rowcount, self.description, self.lastrowid) = \ + (cursor.rowcount, cursor.description, cursor.lastrowid) + except Exception, e: + try: + self.rollback() + except: + pass + uuid = commands.getoutput("uuidgen") + message = "Database error {}: - Query {} - Params {}".format(uuid, query, pformat(params)) + logger.exception(message) + raise PLCDBError("Please contact " + \ + self.api.config.PLC_NAME + " Support " + \ + "<" + 
self.api.config.PLC_MAIL_SUPPORT_ADDRESS + ">" + \ + " and reference " + uuid) + + return cursor + + def selectall(self, query, params = None, hashref = True, key_field = None): + """ + Return each row as a dictionary keyed on field name (like DBI + selectrow_hashref()). If key_field is specified, return rows + as a dictionary keyed on the specified field (like DBI + selectall_hashref()). + + If params is specified, the specified parameters will be bound + to the query. + """ + + cursor = self.execute(query, params) + rows = cursor.fetchall() + cursor.close() + self.commit() + if hashref or key_field is not None: + # Return each row as a dictionary keyed on field name + # (like DBI selectrow_hashref()). + labels = [column[0] for column in self.description] + rows = [dict(zip(labels, row)) for row in rows] + + if key_field is not None and key_field in labels: + # Return rows as a dictionary keyed on the specified field + # (like DBI selectall_hashref()). + return dict([(row[key_field], row) for row in rows]) + else: + return rows + + def fields(self, table, notnull = None, hasdef = None): + """ + Return the names of the fields of the specified table. 
+ """ + + if hasattr(self, 'fields_cache'): + if self.fields_cache.has_key((table, notnull, hasdef)): + return self.fields_cache[(table, notnull, hasdef)] + else: + self.fields_cache = {} + + sql = "SELECT attname FROM pg_attribute, pg_class" \ + " WHERE pg_class.oid = attrelid" \ + " AND attnum > 0 AND relname = %(table)s" + + if notnull is not None: + sql += " AND attnotnull is %(notnull)s" + + if hasdef is not None: + sql += " AND atthasdef is %(hasdef)s" + + rows = self.selectall(sql, locals(), hashref = False) + + self.fields_cache[(table, notnull, hasdef)] = [row[0] for row in rows] + + return self.fields_cache[(table, notnull, hasdef)] diff --git a/PLC/PyCurl.py b/PLC/PyCurl.py new file mode 100644 index 0000000..4ae2fdc --- /dev/null +++ b/PLC/PyCurl.py @@ -0,0 +1,81 @@ +# +# Replacement for xmlrpclib.SafeTransport, which does not validate +# SSL certificates. Requires PyCurl. +# +# Mark Huang +# Copyright (C) 2006 The Trustees of Princeton University +# + +import os +import xmlrpclib +import pycurl +from tempfile import NamedTemporaryFile + +class PyCurlTransport(xmlrpclib.Transport): + def __init__(self, uri, cert = None, timeout = 300): + if hasattr(xmlrpclib.Transport,'__init__'): + xmlrpclib.Transport.__init__(self) + self.curl = pycurl.Curl() + + # Suppress signals + self.curl.setopt(pycurl.NOSIGNAL, 1) + + # Follow redirections + self.curl.setopt(pycurl.FOLLOWLOCATION, 1) + + # Set URL + self.url = uri + self.curl.setopt(pycurl.URL, str(uri)) + + # Set certificate path + if cert is not None: + if os.path.exists(cert): + cert_path = str(cert) + else: + # Keep a reference so that it does not get deleted + self.cert = NamedTemporaryFile(prefix = "cert") + self.cert.write(cert) + self.cert.flush() + cert_path = self.cert.name + self.curl.setopt(pycurl.CAINFO, cert_path) + self.curl.setopt(pycurl.SSL_VERIFYPEER, 2) + + # Set connection timeout + if timeout: + self.curl.setopt(pycurl.CONNECTTIMEOUT, timeout) + self.curl.setopt(pycurl.TIMEOUT, timeout) + + 
# Set request callback + self.body = "" + def body(buf): + self.body += buf + self.curl.setopt(pycurl.WRITEFUNCTION, body) + + def request(self, host, handler, request_body, verbose = 1): + # Set verbosity + self.curl.setopt(pycurl.VERBOSE, verbose) + + # Post request + self.curl.setopt(pycurl.POST, 1) + self.curl.setopt(pycurl.POSTFIELDS, request_body) + + try: + self.curl.perform() + errcode = self.curl.getinfo(pycurl.HTTP_CODE) + response = self.body + self.body = "" + errmsg="" + except pycurl.error, err: + (errcode, errmsg) = err + + if errcode == 60: + raise Exception, "PyCurl: SSL certificate validation failed" + elif errcode != 200: + raise Exception, "PyCurl: HTTP error %d -- %r" % (errcode,errmsg) + + # Parse response + p, u = self.getparser() + p.feed(response) + p.close() + + return u.close() diff --git a/PLC/Roles.py b/PLC/Roles.py new file mode 100644 index 0000000..fcc05f4 --- /dev/null +++ b/PLC/Roles.py @@ -0,0 +1,78 @@ +# +# Functions for interacting with the roles table in the database +# +# Mark Huang +# Copyright (C) 2006 The Trustees of Princeton University +# + +from types import StringTypes +from PLC.Faults import * +from PLC.Parameter import Parameter +from PLC.Filter import Filter +from PLC.Table import Row, Table + +class Role(Row): + """ + Representation of a row in the roles table. To use, + instantiate with a dict of values. 
+ """ + + table_name = 'roles' + primary_key = 'role_id' + join_tables = ['person_role', 'tag_type_role' ] + fields = { + 'role_id': Parameter(int, "Role identifier"), + 'name': Parameter(str, "Role", max = 100), + } + + def validate_role_id(self, role_id): + # Make sure role does not already exist + conflicts = Roles(self.api, [role_id]) + if conflicts: + raise PLCInvalidArgument, "Role ID already in use" + + return role_id + + def validate_name(self, name): + # Make sure name is not blank + if not len(name): + raise PLCInvalidArgument, "Role must be specified" + + # Make sure role does not already exist + conflicts = Roles(self.api, [name]) + if conflicts: + raise PLCInvalidArgument, "Role name already in use" + + return name + +class Roles(Table): + """ + Representation of the roles table in the database. + """ + + def __init__(self, api, role_filter = None): + Table.__init__(self, api, Role) + + sql = "SELECT %s FROM roles WHERE True" % \ + ", ".join(Role.fields) + + if role_filter is not None: + if isinstance(role_filter, (list, tuple, set)): + # Separate the list into integers and strings + ints = filter(lambda x: isinstance(x, (int, long)), role_filter) + strs = filter(lambda x: isinstance(x, StringTypes), role_filter) + role_filter = Filter(Role.fields, {'role_id': ints, 'name': strs}) + sql += " AND (%s) %s" % role_filter.sql(api, "OR") + elif isinstance(role_filter, dict): + role_filter = Filter(Role.fields, role_filter) + sql += " AND (%s) %s" % role_filter.sql(api, "AND") + elif isinstance(role_filter, (int, long)): + role_filter = Filter(Role.fields, {'role_id': role_filter}) + sql += " AND (%s) %s" % role_filter.sql(api, "AND") + elif isinstance(role_filter, StringTypes): + role_filter = Filter(Role.fields, {'name': role_filter}) + sql += " AND (%s) %s" % role_filter.sql(api, "AND") + else: + raise PLCInvalidArgument, "Wrong role filter %r"%role_filter + + self.selectall(sql) diff --git a/PLC/Sessions.py b/PLC/Sessions.py new file mode 100644 index 
0000000..6a03068 --- /dev/null +++ b/PLC/Sessions.py @@ -0,0 +1,99 @@ +from types import StringTypes +import random +import base64 +import time + +from PLC.Faults import * +from PLC.Parameter import Parameter +from PLC.Filter import Filter +from PLC.Debug import profile +from PLC.Table import Row, Table +from PLC.Persons import Person, Persons +from PLC.Nodes import Node, Nodes + +class Session(Row): + """ + Representation of a row in the sessions table. To use, instantiate + with a dict of values. + """ + + table_name = 'sessions' + primary_key = 'session_id' + join_tables = ['person_session', 'node_session'] + fields = { + 'session_id': Parameter(str, "Session key"), + 'person_id': Parameter(int, "Account identifier, if applicable"), + 'node_id': Parameter(int, "Node identifier, if applicable"), + 'expires': Parameter(int, "Date and time when session expires, in seconds since UNIX epoch"), + } + + def validate_expires(self, expires): + if expires < time.time(): + raise PLCInvalidArgument, "Expiration date must be in the future" + + return time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime(expires)) + + add_person = Row.add_object(Person, 'person_session') + + def add_node(self, node, commit = True): + # Nodes can have only one session at a time + self.api.db.do("DELETE FROM node_session WHERE node_id = %d" % \ + node['node_id']) + + add = Row.add_object(Node, 'node_session') + add(self, node, commit = commit) + + def sync(self, commit = True, insert = None): + if not self.has_key('session_id'): + # Before a new session is added, delete expired sessions + expired = Sessions(self.api, expires = -int(time.time())) + for session in expired: + session.delete(commit) + + # Generate 32 random bytes + bytes = random.sample(xrange(0, 256), 32) + # Base64 encode their string representation + self['session_id'] = base64.b64encode("".join(map(chr, bytes))) + # Force insert + insert = True + + Row.sync(self, commit, insert) + +class Sessions(Table): + """ + Representation of 
row(s) from the session table in the database. + """ + + def __init__(self, api, session_filter = None, expires = int(time.time())): + Table.__init__(self, api, Session) + + sql = "SELECT %s FROM view_sessions WHERE True" % \ + ", ".join(Session.fields) + + if session_filter is not None: + if isinstance(session_filter, (list, tuple, set)): + # Separate the list into integers and strings + ints = filter(lambda x: isinstance(x, (int, long)), session_filter) + strs = filter(lambda x: isinstance(x, StringTypes), session_filter) + session_filter = Filter(Session.fields, {'person_id': ints, 'session_id': strs}) + sql += " AND (%s) %s" % session_filter.sql(api, "OR") + elif isinstance(session_filter, dict): + session_filter = Filter(Session.fields, session_filter) + sql += " AND (%s) %s" % session_filter.sql(api, "AND") + elif isinstance(session_filter, (int, long)): + session_filter = Filter(Session.fields, {'person_id': session_filter}) + sql += " AND (%s) %s" % session_filter.sql(api, "AND") + elif isinstance(session_filter, StringTypes): + session_filter = Filter(Session.fields, {'session_id': session_filter}) + sql += " AND (%s) %s" % session_filter.sql(api, "AND") + else: + raise PLCInvalidArgument, "Wrong session filter"%session_filter + + if expires is not None: + if expires >= 0: + sql += " AND expires > %(expires)d" + else: + expires = -expires + sql += " AND expires < %(expires)d" + + self.selectall(sql, locals()) diff --git a/PLC/Shell.py b/PLC/Shell.py new file mode 100644 index 0000000..df87e1c --- /dev/null +++ b/PLC/Shell.py @@ -0,0 +1,258 @@ +#!/usr/bin/python +# +# Interactive shell for testing PLCAPI +# +# Mark Huang +# Copyright (C) 2005 The Trustees of Princeton University +# + +import os +import pydoc +import xmlrpclib + +from PLC.API import PLCAPI +from PLC.Parameter import Mixed +from PLC.Auth import Auth +from PLC.Config import Config +from PLC.Method import Method +from PLC.PyCurl import PyCurlTransport +import PLC.Methods + +class Callable: + 
""" + Wrapper to call a method either directly or remotely and + automagically add the authentication structure if necessary. + """ + + def __init__(self, shell, name, func, auth = None): + self.shell = shell + self.name = name + self.func = func + self.auth = auth + + def __call__(self, *args, **kwds): + """ + Automagically add the authentication structure if the function + requires it and it has not been specified. + """ + + if self.auth and \ + (not args or not isinstance(args[0], dict) or \ + (not args[0].has_key('AuthMethod') and \ + not args[0].has_key('session'))): + args = (self.auth,) + args + + if self.shell.multi: + self.shell.calls.append({'methodName': self.name, 'params': list(args)}) + return None + else: + return self.func(*args, **kwds) + +class Shell: + def __init__(self, + # Add API functions to global scope + globals = None, + # Configuration file + config = None, + # XML-RPC server + url = None, xmlrpc = False, cacert = None, + # API authentication method + method = None, + # Password authentication + role = None, user = None, password = None, + # Session authentication + session = None): + """ + Initialize a new shell instance. Re-initializes globals. + """ + + try: + # If any XML-RPC options have been specified, do not try + # connecting directly to the DB. + if (url, method, user, password, role, cacert, xmlrpc) != \ + (None, None, None, None, None, None, False): + raise Exception + + # Otherwise, first try connecting directly to the DB. This + # absolutely requires a configuration file; the API + # instance looks for one in a default location if one is + # not specified. If this fails, try connecting to the API + # server via XML-RPC. 
+ if config is None: + self.api = PLCAPI() + else: + self.api = PLCAPI(config) + self.config = self.api.config + self.url = None + self.server = None + except Exception, err: + # Try connecting to the API server via XML-RPC + self.api = PLCAPI(None) + + try: + if config is None: + self.config = Config() + else: + self.config = Config(config) + except Exception, err: + # Try to continue if no configuration file is available + self.config = None + + if url is None: + if self.config is None: + raise Exception, "Must specify API URL" + + url = "https://" + self.config.PLC_API_HOST + \ + ":" + str(self.config.PLC_API_PORT) + \ + "/" + self.config.PLC_API_PATH + "/" + + if cacert is None: + cacert = self.config.PLC_API_CA_SSL_CRT + + self.url = url + if cacert is not None: + self.server = xmlrpclib.ServerProxy(url, PyCurlTransport(url, cacert), allow_none = 1) + else: + self.server = xmlrpclib.ServerProxy(url, allow_none = 1) + + # Set up authentication structure + + # Default is to use session or capability authentication + if (method, user, password) == (None, None, None): + if session is not None or os.path.exists("/etc/planetlab/session"): + method = "session" + if session is None: + session = "/etc/planetlab/session" + else: + method = "capability" + + if method == "capability": + # Load defaults from configuration file if using capability + # authentication. 
+ if user is None and self.config is not None: + user = self.config.PLC_API_MAINTENANCE_USER + if password is None and self.config is not None: + password = self.config.PLC_API_MAINTENANCE_PASSWORD + if role is None: + role = "admin" + elif method is None: + # Otherwise, default to password authentication + method = "password" + + if role == "anonymous" or method == "anonymous": + self.auth = {'AuthMethod': "anonymous"} + elif method == "session": + if session is None: + raise Exception, "Must specify session" + + if os.path.exists(session): + session = file(session).read() + + self.auth = {'AuthMethod': "session", 'session': session} + else: + if user is None: + raise Exception, "Must specify username" + + if password is None: + raise Exception, "Must specify password" + + self.auth = {'AuthMethod': method, + 'Username': user, + 'AuthString': password} + + if role is not None: + self.auth['Role'] = role + + for method in PLC.API.PLCAPI.all_methods: + api_function = self.api.callable(method) + + if self.server is None: + # Can just call it directly + func = api_function + else: + func = getattr(self.server, method) + + # If the function requires an authentication structure as + # its first argument, automagically add an auth struct to + # the call. + if api_function.accepts and \ + (isinstance(api_function.accepts[0], Auth) or \ + (isinstance(api_function.accepts[0], Mixed) and \ + filter(lambda param: isinstance(param, Auth), api_function.accepts[0]))): + auth = self.auth + else: + auth = None + + callable = Callable(self, method, func, auth) + + # Add to ourself and the global environment. Add dummy + # subattributes to support tab completion of methods with + # dots in their names (e.g., system.listMethods). 
+ class Dummy: pass + paths = method.split(".") + if len(paths) > 1: + first = paths.pop(0) + + if not hasattr(self, first): + obj = Dummy() + setattr(self, first, obj) + # Also add to global environment if specified + if globals is not None: + globals[first] = obj + + obj = getattr(self, first) + + for path in paths: + if not hasattr(obj, path): + if path == paths[-1]: + setattr(obj, path, callable) + else: + setattr(obj, path, Dummy()) + obj = getattr(obj, path) + else: + setattr(self, method, callable) + # Also add to global environment if specified + if globals is not None: + globals[method] = callable + + # Override help(), begin(), and commit() + if globals is not None: + globals['help'] = self.help + globals['begin'] = self.begin + globals['commit'] = self.commit + + # Multicall support + self.calls = [] + self.multi = False + + def help(self, topic = None): + if isinstance(topic, Callable): + pydoc.pager(self.system.methodHelp(topic.name)) + else: + pydoc.help(topic) + + def begin(self): + if self.calls: + raise Exception, "multicall already in progress" + + self.multi = True + + def commit(self): + if self.calls: + ret = [] + self.multi = False + results = self.system.multicall(self.calls) + for result in results: + if type(result) == type({}): + raise xmlrpclib.Fault(result['faultCode'], result['faultString']) + elif type(result) == type([]): + ret.append(result[0]) + else: + raise ValueError, "unexpected type in multicall result" + else: + ret = None + + self.calls = [] + self.multi = False + + return ret diff --git a/PLC/SiteTags.py b/PLC/SiteTags.py new file mode 100644 index 0000000..d04b947 --- /dev/null +++ b/PLC/SiteTags.py @@ -0,0 +1,54 @@ +# +# Thierry Parmentelat - INRIA +# +from PLC.Faults import * +from PLC.Parameter import Parameter +from PLC.Filter import Filter +from PLC.Table import Row, Table +from PLC.TagTypes import TagType, TagTypes +from PLC.Sites import Site + +class SiteTag(Row): + """ + Representation of a row in the site_tag. 
+ To use, instantiate with a dict of values. + """ + + table_name = 'site_tag' + primary_key = 'site_tag_id' + fields = { + 'site_tag_id': Parameter(int, "Site setting identifier"), + 'site_id': Site.fields['site_id'], + 'login_base': Site.fields['login_base'], + 'tag_type_id': TagType.fields['tag_type_id'], + 'tagname': TagType.fields['tagname'], + 'description': TagType.fields['description'], + 'category': TagType.fields['category'], + 'value': Parameter(str, "Site setting value"), + ### relations + + } + +class SiteTags(Table): + """ + Representation of row(s) from the site_tag table in the + database. + """ + + def __init__(self, api, site_tag_filter = None, columns = None): + Table.__init__(self, api, SiteTag, columns) + + sql = "SELECT %s FROM view_site_tags WHERE True" % \ + ", ".join(self.columns) + + if site_tag_filter is not None: + if isinstance(site_tag_filter, (list, tuple, set, int, long)): + site_tag_filter = Filter(SiteTag.fields, {'site_tag_id': site_tag_filter}) + elif isinstance(site_tag_filter, dict): + site_tag_filter = Filter(SiteTag.fields, site_tag_filter) + else: + raise PLCInvalidArgument, "Wrong site setting filter %r"%site_tag_filter + sql += " AND (%s) %s" % site_tag_filter.sql(api) + + + self.selectall(sql) diff --git a/PLC/Sites.py b/PLC/Sites.py new file mode 100644 index 0000000..2075409 --- /dev/null +++ b/PLC/Sites.py @@ -0,0 +1,273 @@ +from types import StringTypes +import string + +from PLC.Faults import * +from PLC.Parameter import Parameter, Mixed +from PLC.Filter import Filter +from PLC.Debug import profile +from PLC.Table import Row, Table +from PLC.Slices import Slice, Slices +from PLC.PCUs import PCU, PCUs +from PLC.Nodes import Node, Nodes +from PLC.Addresses import Address, Addresses +from PLC.Persons import Person, Persons + +class Site(Row): + """ + Representation of a row in the sites table. To use, optionally + instantiate with a dict of values. Update as you would a + dict. Commit to the database with sync(). 
+ """ + + table_name = 'sites' + primary_key = 'site_id' + join_tables = ['person_site', 'site_address', 'peer_site'] + fields = { + 'site_id': Parameter(int, "Site identifier"), + 'name': Parameter(str, "Full site name", max = 254), + 'abbreviated_name': Parameter(str, "Abbreviated site name", max = 50), + 'login_base': Parameter(str, "Site slice prefix", max = 32), + 'is_public': Parameter(bool, "Publicly viewable site"), + 'enabled': Parameter(bool, "Has been enabled"), + 'latitude': Parameter(float, "Decimal latitude of the site", min = -90.0, max = 90.0, nullok = True), + 'longitude': Parameter(float, "Decimal longitude of the site", min = -180.0, max = 180.0, nullok = True), + 'url': Parameter(str, "URL of a page that describes the site", max = 254, nullok = True), + 'date_created': Parameter(int, "Date and time when site entry was created, in seconds since UNIX epoch", ro = True), + 'last_updated': Parameter(int, "Date and time when site entry was last updated, in seconds since UNIX epoch", ro = True), + 'max_slices': Parameter(int, "Maximum number of slices that the site is able to create"), + 'max_slivers': Parameter(int, "Maximum number of slivers that the site is able to create"), + 'person_ids': Parameter([int], "List of account identifiers"), + 'slice_ids': Parameter([int], "List of slice identifiers"), + 'address_ids': Parameter([int], "List of address identifiers"), + 'pcu_ids': Parameter([int], "List of PCU identifiers"), + 'node_ids': Parameter([int], "List of site node identifiers"), + 'peer_id': Parameter(int, "Peer to which this site belongs", nullok = True), + 'peer_site_id': Parameter(int, "Foreign site identifier at peer", nullok = True), + 'site_tag_ids' : Parameter ([int], "List of tags attached to this site"), + 'ext_consortium_id': Parameter(int, "external consortium id", nullok = True) + } + related_fields = { + 'persons': [Mixed(Parameter(int, "Person identifier"), + Parameter(str, "Email address"))], + 'addresses': 
[Mixed(Parameter(int, "Address identifer"), + Filter(Address.fields))] + } + view_tags_name = "view_site_tags" + # tags are used by the Add/Get/Update methods to expose tags + # this is initialized here and updated by the accessors factory + tags = { } + + def validate_name(self, name): + if not len(name): + raise PLCInvalidArgument, "Name must be specified" + + return name + + validate_abbreviated_name = validate_name + + def validate_login_base(self, login_base): + if not len(login_base): + raise PLCInvalidArgument, "Login base must be specified" + + if not set(login_base).issubset(string.lowercase + string.digits + '.'): + raise PLCInvalidArgument, "Login base must consist only of lowercase ASCII letters or numbers or dots" + + conflicts = Sites(self.api, [login_base]) + for site in conflicts: + if 'site_id' not in self or self['site_id'] != site['site_id']: + raise PLCInvalidArgument, "login_base already in use" + + return login_base + + def validate_latitude(self, latitude): + if not self.has_key('longitude') or \ + self['longitude'] is None: + raise PLCInvalidArgument, "Longitude must also be specified" + + return latitude + + def validate_longitude(self, longitude): + if not self.has_key('latitude') or \ + self['latitude'] is None: + raise PLCInvalidArgument, "Latitude must also be specified" + + return longitude + + validate_date_created = Row.validate_timestamp + validate_last_updated = Row.validate_timestamp + + add_person = Row.add_object(Person, 'person_site') + remove_person = Row.remove_object(Person, 'person_site') + + add_address = Row.add_object(Address, 'site_address') + remove_address = Row.remove_object(Address, 'site_address') + + def update_last_updated(self, commit = True): + """ + Update last_updated field with current time + """ + + assert 'site_id' in self + assert self.table_name + + self.api.db.do("UPDATE %s SET last_updated = CURRENT_TIMESTAMP " % (self.table_name) + \ + " where site_id = %d" % (self['site_id']) ) + self.sync(commit) + 
+ + def associate_persons(self, auth, field, value): + """ + Adds persons found in value list to this site (using AddPersonToSite). + Deletes persons not found in value list from this site (using DeletePersonFromSite). + """ + + assert 'person_ids' in self + assert 'site_id' in self + assert isinstance(value, list) + + (person_ids, emails) = self.separate_types(value)[0:2] + + # Translate emails into person_ids + if emails: + persons = Persons(self.api, emails, ['person_id']).dict('person_id') + person_ids += persons.keys() + + # Add new ids, remove stale ids + if self['person_ids'] != person_ids: + from PLC.Methods.AddPersonToSite import AddPersonToSite + from PLC.Methods.DeletePersonFromSite import DeletePersonFromSite + new_persons = set(person_ids).difference(self['person_ids']) + stale_persons = set(self['person_ids']).difference(person_ids) + + for new_person in new_persons: + AddPersonToSite.__call__(AddPersonToSite(self.api), auth, new_person, self['site_id']) + for stale_person in stale_persons: + DeletePersonFromSite.__call__(DeletePersonFromSite(self.api), auth, stale_person, self['site_id']) + + def associate_addresses(self, auth, field, value): + """ + Deletes addresses_ids not found in value list (using DeleteAddress). + Adds address if slice_fields w/o address_id found in value list (using AddSiteAddress). + Update address if slice_fields w/ address_id found in value list (using UpdateAddress). 
+ """ + + assert 'address_ids' in self + assert 'site_id' in self + assert isinstance(value, list) + + (address_ids, blank, addresses) = self.separate_types(value) + + for address in addresses: + if 'address_id' in address: + address_ids.append(address['address_id']) + + # Add new ids, remove stale ids + if self['address_ids'] != address_ids: + from PLC.Methods.DeleteAddress import DeleteAddress + stale_addresses = set(self['address_ids']).difference(address_ids) + + for stale_address in stale_addresses: + DeleteAddress.__call__(DeleteAddress(self.api), auth, stale_address) + + if addresses: + from PLC.Methods.AddSiteAddress import AddSiteAddress + from PLC.Methods.UpdateAddress import UpdateAddress + + updated_addresses = filter(lambda address: 'address_id' in address, addresses) + added_addresses = filter(lambda address: 'address_id' not in address, addresses) + + for address in added_addresses: + AddSiteAddress.__call__(AddSiteAddress(self.api), auth, self['site_id'], address) + for address in updated_addresses: + address_id = address.pop('address_id') + UpdateAddress.__call__(UpdateAddress(self.api), auth, address_id, address) + + def delete(self, commit = True): + """ + Delete existing site. + """ + + assert 'site_id' in self + + # Delete accounts of all people at the site who are not + # members of at least one other non-deleted site. 
+ persons = Persons(self.api, self['person_ids']) + for person in persons: + delete = True + + person_sites = Sites(self.api, person['site_ids']) + for person_site in person_sites: + if person_site['site_id'] != self['site_id']: + delete = False + break + + if delete: + person.delete(commit = False) + + # Delete all site addresses + addresses = Addresses(self.api, self['address_ids']) + for address in addresses: + address.delete(commit = False) + + # Delete all site slices + slices = Slices(self.api, self['slice_ids']) + for slice in slices: + slice.delete(commit = False) + + # Delete all site PCUs + pcus = PCUs(self.api, self['pcu_ids']) + for pcu in pcus: + pcu.delete(commit = False) + + # Delete all site nodes + nodes = Nodes(self.api, self['node_ids']) + for node in nodes: + node.delete(commit = False) + + # Clean up miscellaneous join tables + for table in self.join_tables: + self.api.db.do("DELETE FROM %s WHERE site_id = %d" % \ + (table, self['site_id'])) + + # Mark as deleted + self['deleted'] = True + self.sync(commit) + +class Sites(Table): + """ + Representation of row(s) from the sites table in the + database. 
+ """ + + def __init__(self, api, site_filter = None, columns = None): + Table.__init__(self, api, Site, columns) + + view = "view_sites" + for tagname in self.tag_columns: + view= "%s left join %s using (%s)"%(view,Site.tagvalue_view_name(tagname), + Site.primary_key) + + sql = "SELECT %s FROM %s WHERE deleted IS False" % \ + (", ".join(self.columns.keys()+self.tag_columns.keys()),view) + + if site_filter is not None: + if isinstance(site_filter, (list, tuple, set)): + # Separate the list into integers and strings + ints = filter(lambda x: isinstance(x, (int, long)), site_filter) + strs = filter(lambda x: isinstance(x, StringTypes), site_filter) + site_filter = Filter(Site.fields, {'site_id': ints, 'login_base': strs}) + sql += " AND (%s) %s" % site_filter.sql(api, "OR") + elif isinstance(site_filter, dict): + allowed_fields=dict(Site.fields.items()+Site.tags.items()) + site_filter = Filter(allowed_fields, site_filter) + sql += " AND (%s) %s" % site_filter.sql(api, "AND") + elif isinstance (site_filter, StringTypes): + site_filter = Filter(Site.fields, {'login_base':site_filter}) + sql += " AND (%s) %s" % site_filter.sql(api, "AND") + elif isinstance (site_filter, (int, long)): + site_filter = Filter(Site.fields, {'site_id':site_filter}) + sql += " AND (%s) %s" % site_filter.sql(api, "AND") + else: + raise PLCInvalidArgument, "Wrong site filter %r"%site_filter + + self.selectall(sql) diff --git a/PLC/SliceInstantiations.py b/PLC/SliceInstantiations.py new file mode 100644 index 0000000..c0658d4 --- /dev/null +++ b/PLC/SliceInstantiations.py @@ -0,0 +1,51 @@ +# +# Functions for interacting with the slice_instantiations table in the database +# +# Mark Huang +# Copyright (C) 2006 The Trustees of Princeton University +# + +from PLC.Faults import * +from PLC.Parameter import Parameter +from PLC.Table import Row, Table + +class SliceInstantiation(Row): + """ + Representation of a row in the slice_instantiations table. To use, + instantiate with a dict of values. 
+ """ + + table_name = 'slice_instantiations' + primary_key = 'instantiation' + join_tables = ['slices'] + fields = { + 'instantiation': Parameter(str, "Slice instantiation state", max = 100), + } + + def validate_instantiation(self, instantiation): + # Make sure name is not blank + if not len(instantiation): + raise PLCInvalidArgument, "Slice instantiation state name must be specified" + + # Make sure slice instantiation does not alredy exist + conflicts = SliceInstantiations(self.api, [instantiation]) + if conflicts: + raise PLCInvalidArgument, "Slice instantiation state name already in use" + + return instantiation + +class SliceInstantiations(Table): + """ + Representation of the slice_instantiations table in the database. + """ + + def __init__(self, api, instantiations = None): + Table.__init__(self, api, SliceInstantiation) + + sql = "SELECT %s FROM slice_instantiations" % \ + ", ".join(SliceInstantiation.fields) + + if instantiations: + sql += " WHERE instantiation IN (%s)" % ", ".join( [ api.db.quote (i) for i in instantiations ] ) + + self.selectall(sql) diff --git a/PLC/SliceTags.py b/PLC/SliceTags.py new file mode 100644 index 0000000..e5070fd --- /dev/null +++ b/PLC/SliceTags.py @@ -0,0 +1,56 @@ +# +# Thierry Parmentelat - INRIA +# +from PLC.Faults import * +from PLC.Parameter import Parameter +from PLC.Filter import Filter +from PLC.Table import Row, Table +# seems to cause import loops +#from PLC.Slices import Slice, Slices +from PLC.Nodes import Node, Nodes +from PLC.NodeGroups import NodeGroup, NodeGroups +from PLC.TagTypes import TagType, TagTypes + +class SliceTag(Row): + """ + Representation of a row in the slice_tag table. To use, + instantiate with a dict of values. 
+ """ + + table_name = 'slice_tag' + primary_key = 'slice_tag_id' + fields = { + 'slice_tag_id': Parameter(int, "Slice tag identifier"), + 'slice_id': Parameter(int, "Slice identifier"), + 'name': Parameter(str, "Slice name"), + 'node_id': Node.fields['node_id'], + 'nodegroup_id': NodeGroup.fields['nodegroup_id'], + 'tag_type_id': TagType.fields['tag_type_id'], + 'tagname': TagType.fields['tagname'], + 'description': TagType.fields['description'], + 'category': TagType.fields['category'], + 'value': Parameter(str, "Slice attribute value"), + } + +class SliceTags(Table): + """ + Representation of row(s) from the slice_tag table in the + database. + """ + + def __init__(self, api, slice_tag_filter = None, columns = None): + Table.__init__(self, api, SliceTag, columns) + + sql = "SELECT %s FROM view_slice_tags WHERE True" % \ + ", ".join(self.columns) + + if slice_tag_filter is not None: + if isinstance(slice_tag_filter, (list, tuple, set, int, long)): + slice_tag_filter = Filter(SliceTag.fields, {'slice_tag_id': slice_tag_filter}) + elif isinstance(slice_tag_filter, dict): + slice_tag_filter = Filter(SliceTag.fields, slice_tag_filter) + else: + raise PLCInvalidArgument, "Wrong slice tag filter %r"%slice_tag_filter + sql += " AND (%s) %s" % slice_tag_filter.sql(api) + + self.selectall(sql) diff --git a/PLC/Slices.py b/PLC/Slices.py new file mode 100644 index 0000000..e5a51eb --- /dev/null +++ b/PLC/Slices.py @@ -0,0 +1,296 @@ +from types import StringTypes +import time +import re + +from PLC.Faults import * +from PLC.Parameter import Parameter, Mixed +from PLC.Filter import Filter +from PLC.Debug import profile +from PLC.Table import Row, Table +from PLC.SliceInstantiations import SliceInstantiation, SliceInstantiations +from PLC.Nodes import Node +from PLC.Persons import Person, Persons +from PLC.SliceTags import SliceTag +from PLC.Timestamp import Timestamp + +class Slice(Row): + """ + Representation of a row in the slices table. 
To use, optionally + instantiate with a dict of values. Update as you would a + dict. Commit to the database with sync().To use, instantiate + with a dict of values. + """ + + table_name = 'slices' + primary_key = 'slice_id' + join_tables = ['slice_node', 'slice_person', 'slice_tag', 'peer_slice', 'node_slice_whitelist', 'leases', ] + fields = { + 'slice_id': Parameter(int, "Slice identifier"), + 'site_id': Parameter(int, "Identifier of the site to which this slice belongs"), + 'name': Parameter(str, "Slice name", max = 64), + 'instantiation': Parameter(str, "Slice instantiation state"), + 'url': Parameter(str, "URL further describing this slice", max = 254, nullok = True), + 'description': Parameter(str, "Slice description", max = 2048, nullok = True), + 'max_nodes': Parameter(int, "Maximum number of nodes that can be assigned to this slice"), + 'creator_person_id': Parameter(int, "Identifier of the account that created this slice"), + 'created': Parameter(int, "Date and time when slice was created, in seconds since UNIX epoch", ro = True), + 'expires': Parameter(int, "Date and time when slice expires, in seconds since UNIX epoch"), + 'node_ids': Parameter([int], "List of nodes in this slice", ro = True), + 'person_ids': Parameter([int], "List of accounts that can use this slice", ro = True), + 'slice_tag_ids': Parameter([int], "List of slice attributes", ro = True), + 'peer_id': Parameter(int, "Peer to which this slice belongs", nullok = True), + 'peer_slice_id': Parameter(int, "Foreign slice identifier at peer", nullok = True), + } + related_fields = { + 'persons': [Mixed(Parameter(int, "Person identifier"), + Parameter(str, "Email address"))], + 'nodes': [Mixed(Parameter(int, "Node identifier"), + Parameter(str, "Fully qualified hostname"))] + } + + view_tags_name="view_slice_tags" + tags = {} + + def validate_name(self, name): + # N.B.: Responsibility of the caller to ensure that login_base + # portion of the slice name corresponds to a valid site, if + # 
desired. + + # 1. Lowercase. + # 2. Begins with login_base (letters or numbers). + # 3. Then single underscore after login_base. + # 4. Then letters, numbers, or underscores. + good_name = r'^[a-z0-9\.]+_[a-zA-Z0-9_\.]+$' + if not name or \ + not re.match(good_name, name): + raise PLCInvalidArgument, "Invalid slice name" + + conflicts = Slices(self.api, [name]) + for slice in conflicts: + if 'slice_id' not in self or self['slice_id'] != slice['slice_id']: + raise PLCInvalidArgument, "Slice name already in use, %s"%name + + return name + + def validate_instantiation(self, instantiation): + instantiations = [row['instantiation'] for row in SliceInstantiations(self.api)] + if instantiation not in instantiations: + raise PLCInvalidArgument, "No such instantiation state" + + return instantiation + + validate_created = Row.validate_timestamp + + def validate_expires(self, expires): + # N.B.: Responsibility of the caller to ensure that expires is + # not too far into the future. + check_future = not ('is_deleted' in self and self['is_deleted']) + return Timestamp.sql_validate( expires, check_future = check_future) + + add_person = Row.add_object(Person, 'slice_person') + remove_person = Row.remove_object(Person, 'slice_person') + + add_node = Row.add_object(Node, 'slice_node') + remove_node = Row.remove_object(Node, 'slice_node') + + add_to_node_whitelist = Row.add_object(Node, 'node_slice_whitelist') + delete_from_node_whitelist = Row.remove_object(Node, 'node_slice_whitelist') + + def associate_persons(self, auth, field, value): + """ + Adds persons found in value list to this slice (using AddPersonToSlice). + Deletes persons not found in value list from this slice (using DeletePersonFromSlice). 
+ """ + + assert 'person_ids' in self + assert 'slice_id' in self + assert isinstance(value, list) + + (person_ids, emails) = self.separate_types(value)[0:2] + + # Translate emails into person_ids + if emails: + persons = Persons(self.api, emails, ['person_id']).dict('person_id') + person_ids += persons.keys() + + # Add new ids, remove stale ids + if self['person_ids'] != person_ids: + from PLC.Methods.AddPersonToSlice import AddPersonToSlice + from PLC.Methods.DeletePersonFromSlice import DeletePersonFromSlice + new_persons = set(person_ids).difference(self['person_ids']) + stale_persons = set(self['person_ids']).difference(person_ids) + + for new_person in new_persons: + AddPersonToSlice.__call__(AddPersonToSlice(self.api), auth, new_person, self['slice_id']) + for stale_person in stale_persons: + DeletePersonFromSlice.__call__(DeletePersonFromSlice(self.api), auth, stale_person, self['slice_id']) + + def associate_nodes(self, auth, field, value): + """ + Adds nodes found in value list to this slice (using AddSliceToNodes). + Deletes nodes not found in value list from this slice (using DeleteSliceFromNodes). 
+ """ + + from PLC.Nodes import Nodes + + assert 'node_ids' in self + assert 'slice_id' in self + assert isinstance(value, list) + + (node_ids, hostnames) = self.separate_types(value)[0:2] + + # Translate hostnames into node_ids + if hostnames: + nodes = Nodes(self.api, hostnames, ['node_id']).dict('node_id') + node_ids += nodes.keys() + + # Add new ids, remove stale ids + if self['node_ids'] != node_ids: + from PLC.Methods.AddSliceToNodes import AddSliceToNodes + from PLC.Methods.DeleteSliceFromNodes import DeleteSliceFromNodes + new_nodes = set(node_ids).difference(self['node_ids']) + stale_nodes = set(self['node_ids']).difference(node_ids) + + if new_nodes: + AddSliceToNodes.__call__(AddSliceToNodes(self.api), auth, self['slice_id'], list(new_nodes)) + if stale_nodes: + DeleteSliceFromNodes.__call__(DeleteSliceFromNodes(self.api), auth, self['slice_id'], list(stale_nodes)) + def associate_slice_tags(self, auth, fields, value): + """ + Deletes slice_tag_ids not found in value list (using DeleteSliceTag). + Adds slice_tags if slice_fields w/o slice_id is found (using AddSliceTag). + Updates slice_tag if slice_fields w/ slice_id is found (using UpdateSlceiAttribute). + """ + + assert 'slice_tag_ids' in self + assert isinstance(value, list) + + (attribute_ids, blank, attributes) = self.separate_types(value) + + # There is no way to add attributes by id. They are + # associated with a slice when they are created. + # So we are only looking to delete here + if self['slice_tag_ids'] != attribute_ids: + from PLC.Methods.DeleteSliceTag import DeleteSliceTag + stale_attributes = set(self['slice_tag_ids']).difference(attribute_ids) + + for stale_attribute in stale_attributes: + DeleteSliceTag.__call__(DeleteSliceTag(self.api), auth, stale_attribute['slice_tag_id']) + + # If dictionary exists, we are either adding new + # attributes or updating existing ones. 
+ if attributes: + from PLC.Methods.AddSliceTag import AddSliceTag + from PLC.Methods.UpdateSliceTag import UpdateSliceTag + + added_attributes = filter(lambda x: 'slice_tag_id' not in x, attributes) + updated_attributes = filter(lambda x: 'slice_tag_id' in x, attributes) + + for added_attribute in added_attributes: + if 'tag_type' in added_attribute: + type = added_attribute['tag_type'] + elif 'tag_type_id' in added_attribute: + type = added_attribute['tag_type_id'] + else: + raise PLCInvalidArgument, "Must specify tag_type or tag_type_id" + + if 'value' in added_attribute: + value = added_attribute['value'] + else: + raise PLCInvalidArgument, "Must specify a value" + + if 'node_id' in added_attribute: + node_id = added_attribute['node_id'] + else: + node_id = None + + if 'nodegroup_id' in added_attribute: + nodegroup_id = added_attribute['nodegroup_id'] + else: + nodegroup_id = None + + AddSliceTag.__call__(AddSliceTag(self.api), auth, self['slice_id'], type, value, node_id, nodegroup_id) + for updated_attribute in updated_attributes: + attribute_id = updated_attribute.pop('slice_tag_id') + if attribute_id not in self['slice_tag_ids']: + raise PLCInvalidArgument, "Attribute doesnt belong to this slice" + else: + UpdateSliceTag.__call__(UpdateSliceTag(self.api), auth, attribute_id, updated_attribute) + + def sync(self, commit = True): + """ + Add or update a slice. + """ + + # Before a new slice is added, delete expired slices + if 'slice_id' not in self: + expired = Slices(self.api, expires = -int(time.time())) + for slice in expired: + slice.delete(commit) + + Row.sync(self, commit) + + def delete(self, commit = True): + """ + Delete existing slice. 
+ """ + + assert 'slice_id' in self + + # Clean up miscellaneous join tables + for table in self.join_tables: + self.api.db.do("DELETE FROM %s WHERE slice_id = %d" % \ + (table, self['slice_id'])) + + # Mark as deleted + self['is_deleted'] = True + self.sync(commit) + + +class Slices(Table): + """ + Representation of row(s) from the slices table in the + database. + """ + + def __init__(self, api, slice_filter = None, columns = None, expires = int(time.time())): + Table.__init__(self, api, Slice, columns) + + # the view that we're selecting upon: start with view_slices + view = "view_slices" + # as many left joins as requested tags + for tagname in self.tag_columns: + view= "%s left join %s using (%s)"%(view,Slice.tagvalue_view_name(tagname), + Slice.primary_key) + + sql = "SELECT %s FROM %s WHERE is_deleted IS False" % \ + (", ".join(self.columns.keys()+self.tag_columns.keys()),view) + + if expires is not None: + if expires >= 0: + sql += " AND expires > %d" % expires + else: + expires = -expires + sql += " AND expires < %d" % expires + + if slice_filter is not None: + if isinstance(slice_filter, (list, tuple, set)): + # Separate the list into integers and strings + ints = filter(lambda x: isinstance(x, (int, long)), slice_filter) + strs = filter(lambda x: isinstance(x, StringTypes), slice_filter) + slice_filter = Filter(Slice.fields, {'slice_id': ints, 'name': strs}) + sql += " AND (%s) %s" % slice_filter.sql(api, "OR") + elif isinstance(slice_filter, dict): + allowed_fields=dict(Slice.fields.items()+Slice.tags.items()) + slice_filter = Filter(allowed_fields, slice_filter) + sql += " AND (%s) %s" % slice_filter.sql(api, "AND") + elif isinstance (slice_filter, StringTypes): + slice_filter = Filter(Slice.fields, {'name':slice_filter}) + sql += " AND (%s) %s" % slice_filter.sql(api, "AND") + elif isinstance (slice_filter, (int, long)): + slice_filter = Filter(Slice.fields, {'slice_id':slice_filter}) + sql += " AND (%s) %s" % slice_filter.sql(api, "AND") + else: + 
raise PLCInvalidArgument, "Wrong slice filter %r"%slice_filter + + self.selectall(sql) diff --git a/PLC/Table.py b/PLC/Table.py new file mode 100644 index 0000000..2365965 --- /dev/null +++ b/PLC/Table.py @@ -0,0 +1,439 @@ +from types import StringTypes, IntType, LongType +import time +import calendar + +from PLC.Timestamp import Timestamp +from PLC.Faults import * +from PLC.Parameter import Parameter + + +class Row(dict): + """ + Representation of a row in a database table. To use, optionally + instantiate with a dict of values. Update as you would a + dict. Commit to the database with sync(). + """ + + # Set this to the name of the table that stores the row. + # e.g. table_name = "nodes" + table_name = None + + # Set this to the name of the primary key of the table. It is + # assumed that the this key is a sequence if it is not set when + # sync() is called. + # e.g. primary_key="node_id" + primary_key = None + + # Set this to the names of tables that reference this table's + # primary key. + join_tables = [] + + # Set this to a dict of the valid fields of this object and their + # types. Not all fields (e.g., joined fields) may be updated via + # sync(). + fields = {} + + # The name of the view that extends objects with tags + # e.g. view_tags_name = "view_node_tags" + view_tags_name = None + + # Set this to the set of tags that can be returned by the Get function + tags = {} + + def __init__(self, api, fields = {}): + dict.__init__(self, fields) + self.api = api + # run the class_init initializer once + cls=self.__class__ + if not hasattr(cls,'class_inited'): + cls.class_init (api) + cls.class_inited=True # actual value does not matter + + def validate(self): + """ + Validates values. Will validate a value with a custom function + if a function named 'validate_[key]' exists. 
+ """ + + # Warn about mandatory fields + mandatory_fields = self.api.db.fields(self.table_name, notnull = True, hasdef = False) + for field in mandatory_fields: + if not self.has_key(field) or self[field] is None: + raise PLCInvalidArgument, field + " must be specified and cannot be unset in class %s"%self.__class__.__name__ + + # Validate values before committing + for key, value in self.iteritems(): + if value is not None and hasattr(self, 'validate_' + key): + validate = getattr(self, 'validate_' + key) + self[key] = validate(value) + + def separate_types(self, items): + """ + Separate a list of different typed objects. + Return a list for each type (ints, strs and dicts) + """ + + if isinstance(items, (list, tuple, set)): + ints = filter(lambda x: isinstance(x, (int, long)), items) + strs = filter(lambda x: isinstance(x, StringTypes), items) + dicts = filter(lambda x: isinstance(x, dict), items) + return (ints, strs, dicts) + else: + raise PLCInvalidArgument, "Can only separate list types" + + + def associate(self, *args): + """ + Provides a means for high level api calls to associate objects + using low level calls. + """ + + if len(args) < 3: + raise PLCInvalidArgumentCount, "auth, field, value must be specified" + elif hasattr(self, 'associate_' + args[1]): + associate = getattr(self, 'associate_'+args[1]) + associate(*args) + else: + raise PLCInvalidArguemnt, "No such associate function associate_%s" % args[1] + + def validate_timestamp (self, timestamp): + return Timestamp.sql_validate(timestamp) + + def add_object(self, classobj, join_table, columns = None): + """ + Returns a function that can be used to associate this object + with another. + """ + + def add(self, obj, columns = None, commit = True): + """ + Associate with the specified object. 
+ """ + + # Various sanity checks + assert isinstance(self, Row) + assert self.primary_key in self + assert join_table in self.join_tables + assert isinstance(obj, classobj) + assert isinstance(obj, Row) + assert obj.primary_key in obj + assert join_table in obj.join_tables + + # By default, just insert the primary keys of each object + # into the join table. + if columns is None: + columns = {self.primary_key: self[self.primary_key], + obj.primary_key: obj[obj.primary_key]} + + params = [] + for name, value in columns.iteritems(): + params.append(self.api.db.param(name, value)) + + self.api.db.do("INSERT INTO %s (%s) VALUES(%s)" % \ + (join_table, ", ".join(columns), ", ".join(params)), + columns) + + if commit: + self.api.db.commit() + + return add + + add_object = classmethod(add_object) + + def remove_object(self, classobj, join_table): + """ + Returns a function that can be used to disassociate this + object with another. + """ + + def remove(self, obj, commit = True): + """ + Disassociate from the specified object. + """ + + assert isinstance(self, Row) + assert self.primary_key in self + assert join_table in self.join_tables + assert isinstance(obj, classobj) + assert isinstance(obj, Row) + assert obj.primary_key in obj + assert join_table in obj.join_tables + + self_id = self[self.primary_key] + obj_id = obj[obj.primary_key] + + self.api.db.do("DELETE FROM %s WHERE %s = %s AND %s = %s" % \ + (join_table, + self.primary_key, self.api.db.param('self_id', self_id), + obj.primary_key, self.api.db.param('obj_id', obj_id)), + locals()) + + if commit: + self.api.db.commit() + + return remove + + remove_object = classmethod(remove_object) + + # convenience: check in dict (self.fields or self.tags) that a key is writable + @staticmethod + def is_writable (key,value,dict): + # if not mentioned, assume it's writable (e.g. deleted ...) 
+ if key not in dict: return True + # if mentioned but not linked to a Parameter object, idem + if not isinstance(dict[key], Parameter): return True + # if not marked ro, it's writable + if not dict[key].ro: return True + return False + + def db_fields(self, obj = None): + """ + Return only those fields that can be set or updated directly + (i.e., those fields that are in the primary table (table_name) + for this object, and are not marked as a read-only Parameter. + """ + + if obj is None: + obj = self + + db_fields = self.api.db.fields(self.table_name) + return dict ( [ (key, value) for (key, value) in obj.items() + if key in db_fields and + Row.is_writable(key, value, self.fields) ] ) + + def tag_fields (self, obj=None): + """ + Return the fields of obj that are mentioned in tags + """ + if obj is None: obj=self + + return dict ( [ (key,value) for (key,value) in obj.iteritems() + if key in self.tags and Row.is_writable(key,value,self.tags) ] ) + + # takes as input a list of columns, sort native fields from tags + # returns 2 dicts and one list : fields, tags, rejected + @classmethod + def parse_columns (cls, columns): + (fields,tags,rejected)=({},{},[]) + for column in columns: + if column in cls.fields: fields[column]=cls.fields[column] + elif column in cls.tags: tags[column]=cls.tags[column] + else: rejected.append(column) + return (fields,tags,rejected) + + # compute the 'accepts' part of a method, from a list of column names, and a fields dict + # use exclude=True to exclude the column names instead + # typically accepted_fields (Node.fields,['hostname','model',...]) + @staticmethod + def accepted_fields (update_columns, fields_dict, exclude=False): + result={} + for (k,v) in fields_dict.iteritems(): + if (not exclude and k in update_columns) or (exclude and k not in update_columns): + result[k]=v + return result + + # filter out user-provided fields that are not part of the declared acceptance list + # keep it separate from split_fields for simplicity + # 
typically check_fields (,{'hostname':Parameter(str,...),'model':Parameter(..)...}) + @staticmethod + def check_fields (user_dict, accepted_fields): +# avoid the simple, but silent, version +# return dict ([ (k,v) for (k,v) in user_dict.items() if k in accepted_fields ]) + result={} + for (k,v) in user_dict.items(): + if k in accepted_fields: result[k]=v + else: raise PLCInvalidArgument ('Trying to set/change unaccepted key %s'%k) + return result + + # given a dict (typically passed to an Update method), we check and sort + # them against a list of dicts, e.g. [Node.fields, Node.related_fields] + # return is a list that contains n+1 dicts, last one has the rejected fields + @staticmethod + def split_fields (fields, dicts): + result=[] + for x in dicts: result.append({}) + rejected={} + for (field,value) in fields.iteritems(): + found=False + for i in range(len(dicts)): + candidate_dict=dicts[i] + if field in candidate_dict.keys(): + result[i][field]=value + found=True + break + if not found: rejected[field]=value + result.append(rejected) + return result + + ### class initialization : create tag-dependent cross view if needed + @classmethod + def tagvalue_view_name (cls, tagname): + return "tagvalue_view_%s_%s"%(cls.primary_key,tagname) + + @classmethod + def tagvalue_view_create_sql (cls,tagname): + """ + returns a SQL sentence that creates a view named after the primary_key and tagname, + with 2 columns + (*) column 1: primary_key + (*) column 2: actual tag value, renamed into tagname + """ + + if not cls.view_tags_name: + raise Exception, 'WARNING: class %s needs to set view_tags_name'%cls.__name__ + + table_name=cls.table_name + primary_key=cls.primary_key + view_tags_name=cls.view_tags_name + tagvalue_view_name=cls.tagvalue_view_name(tagname) + return 'CREATE OR REPLACE VIEW %(tagvalue_view_name)s ' \ + 'as SELECT %(table_name)s.%(primary_key)s,%(view_tags_name)s.value as "%(tagname)s" ' \ + 'from %(table_name)s right join %(view_tags_name)s using 
(%(primary_key)s) ' \ + 'WHERE tagname = \'%(tagname)s\';'%locals() + + @classmethod + def class_init (cls,api): + cls.tagvalue_views_create (api) + + @classmethod + def tagvalue_views_create (cls,api): + if not cls.tags: return + for tagname in cls.tags.keys(): + api.db.do(cls.tagvalue_view_create_sql (tagname)) + api.db.commit() + + def __eq__(self, y): + """ + Compare two objects. + """ + + # Filter out fields that cannot be set or updated directly + # (and thus would not affect equality for the purposes of + # deciding if we should sync() or not). + x = self.db_fields() + y = self.db_fields(y) + return dict.__eq__(x, y) + + # validate becomes optional on sept. 2010 + # we find it useful to use DeletePerson on duplicated entries + def sync(self, commit = True, insert = None, validate=True): + """ + Flush changes back to the database. + """ + + # Validate all specified fields + if validate: self.validate() + + # Filter out fields that cannot be set or updated directly + db_fields = self.db_fields() + + # Parameterize for safety + keys = db_fields.keys() + values = [self.api.db.param(key, value) for (key, value) in db_fields.items()] + + # If the primary key (usually an auto-incrementing serial + # identifier) has not been specified, or the primary key is the + # only field in the table, or insert has been forced. 
    def delete(self, commit = True):
        """
        Delete row from its primary table, and from any tables that
        reference it.
        """

        # must know our own id to delete anything
        assert self.primary_key in self

        # join tables are processed before the primary table --
        # presumably so referencing rows disappear before the row they
        # point at (NOTE(review): ordering rationale inferred, confirm)
        for table in self.join_tables + [self.table_name]:
            # a join_tables entry may be a plain table name, or a
            # (table, key) tuple when the foreign column is not named
            # after our primary key
            if isinstance(table, tuple):
                key = table[1]
                table = table[0]
            else:
                key = self.primary_key

            sql = "DELETE FROM %s WHERE %s = %s" % \
                  (table, key,
                   self.api.db.param(self.primary_key, self[self.primary_key]))

            # self (a dict) supplies the named query parameter
            self.api.db.do(sql, self)

        if commit:
            self.api.db.commit()
+ """ + + def __init__(self, api, classobj, columns = None): + self.api = api + self.classobj = classobj + self.rows = {} + + if columns is None: + columns = classobj.fields + tag_columns={} + else: + (columns,tag_columns,rejected) = classobj.parse_columns(columns) + if not columns and not tag_columns: + raise PLCInvalidArgument, "No valid return fields specified for class %s"%classobj.__name__ + if rejected: + raise PLCInvalidArgument, "unknown column(s) specified %r in %s"%(rejected,classobj.__name__) + + self.columns = columns + self.tag_columns = tag_columns + + def sync(self, commit = True): + """ + Flush changes back to the database. + """ + + for row in self: + row.sync(commit) + + def selectall(self, sql, params = None): + """ + Given a list of rows from the database, fill ourselves with + Row objects. + """ + + for row in self.api.db.selectall(sql, params): + obj = self.classobj(self.api, row) + self.append(obj) + + def dict(self, key_field = None): + """ + Return ourself as a dict keyed on key_field. + """ + + if key_field is None: + key_field = self.classobj.primary_key + + return dict([(obj[key_field], obj) for obj in self]) diff --git a/PLC/TagTypes.py b/PLC/TagTypes.py new file mode 100644 index 0000000..f552435 --- /dev/null +++ b/PLC/TagTypes.py @@ -0,0 +1,79 @@ +# +# Thierry Parmentelat - INRIA +# +from types import StringTypes + +from PLC.Faults import * +from PLC.Parameter import Parameter +from PLC.Filter import Filter +from PLC.Table import Row, Table +from PLC.Roles import Role, Roles + +# xxx todo : deleting a tag type should delete the related nodegroup(s) + +class TagType (Row): + + """ + Representation of a row in the tag_types table. 
+ """ + + table_name = 'tag_types' + primary_key = 'tag_type_id' + join_tables = ['tag_type_role', 'node_tag', 'interface_tag', 'slice_tag', 'site_tag', 'person_tag' ] + fields = { + 'tag_type_id': Parameter(int, "Node tag type identifier"), + 'tagname': Parameter(str, "Node tag type name", max = 100), + 'description': Parameter(str, "Node tag type description", max = 254), + 'category' : Parameter (str, "Node tag category", max=64, optional=True), + 'role_ids': Parameter([int], "List of role identifiers"), + 'roles': Parameter([str], "List of roles"), + } + + def validate_name(self, name): + if not len(name): + raise PLCInvalidArgument, "tag type name must be set" + + conflicts = TagTypes(self.api, [name]) + for tag_type in conflicts: + if 'tag_type_id' not in self or \ + self['tag_type_id'] != tag_type['tag_type_id']: + raise PLCInvalidArgument, "tag type name already in use" + + return name + + add_role = Row.add_object(Role, 'tag_type_role') + remove_role = Row.remove_object(Role, 'tag_type_role') + + +class TagTypes(Table): + """ + Representation of row(s) from the tag_types table + in the database. 
    def __init__(self, api, tag_type_filter = None, columns = None):
        """
        Fetch tag type rows from view_tag_types.

        tag_type_filter may be:
        (*) a list/tuple/set mixing ids and names -- OR-matched,
        (*) a dict -- handed to Filter and AND-matched,
        (*) a single id, or
        (*) a single name.
        columns restricts the returned fields (see Table.__init__).
        """
        Table.__init__(self, api, TagType, columns)

        # WHERE True lets every later clause start with AND
        sql = "SELECT %s FROM view_tag_types WHERE True" % \
              ", ".join(self.columns)

        if tag_type_filter is not None:
            if isinstance(tag_type_filter, (list, tuple, set)):
                # Separate the list into integers and strings
                ints = filter(lambda x: isinstance(x, (int, long)), tag_type_filter)
                strs = filter(lambda x: isinstance(x, StringTypes), tag_type_filter)
                tag_type_filter = Filter(TagType.fields, {'tag_type_id': ints, 'tagname': strs})
                # the two %s placeholders expect Filter.sql() to return a pair
                sql += " AND (%s) %s" % tag_type_filter.sql(api, "OR")
            elif isinstance(tag_type_filter, dict):
                tag_type_filter = Filter(TagType.fields, tag_type_filter)
                sql += " AND (%s) %s" % tag_type_filter.sql(api, "AND")
            elif isinstance(tag_type_filter, (int, long)):
                tag_type_filter = Filter(TagType.fields, {'tag_type_id':tag_type_filter})
                sql += " AND (%s) %s" % tag_type_filter.sql(api, "AND")
            elif isinstance(tag_type_filter, StringTypes):
                tag_type_filter = Filter(TagType.fields, {'tagname':tag_type_filter})
                sql += " AND (%s) %s" % tag_type_filter.sql(api, "AND")
            else:
                raise PLCInvalidArgument, "Wrong tag type filter %r"%tag_type_filter

        self.selectall(sql)
too but that's less readable - we support this input though + iso_format = "%Y-%m-%dT%H:%M:%S" + # sometimes it's convenient to understand more formats + input_formats = [ sql_format, + sql_format_utc, + iso_format, + "%Y-%m-%d %H:%M", + "%Y-%m-%d %H:%M UTC", + ] + + # for timestamps we usually accept either an int, or an ISO string, + # the datetime.datetime stuff can in general be used locally, + # but not sure it can be marshalled over xmlrpc though + + @staticmethod + def Parameter (doc): + return Mixed (Parameter (int, doc + " (unix timestamp)"), + Parameter (str, doc + " (formatted as %s)"%Timestamp.sql_format), + ) + + @staticmethod + def sql_validate (input, timezone=False, check_future = False): + """ + Validates the specified GMT timestamp, returns a + standardized string suitable for SQL input. + + Input may be a number (seconds since UNIX epoch back in 1970, + or a string (in one of the supported input formats). + + If timezone is True, the resulting string contains + timezone information, which is hard-wired as 'UTC' + + If check_future is True, raises an exception if timestamp is in + the past. + + Returns a GMT timestamp string suitable to feed SQL. 
+ """ + + if not timezone: output_format = Timestamp.sql_format + else: output_format = Timestamp.sql_format_utc + + if Timestamp.debug: print 'sql_validate, in:',input, + if isinstance(input, StringTypes): + sql='' + # calendar.timegm() is the inverse of time.gmtime() + for time_format in Timestamp.input_formats: + try: + timestamp = calendar.timegm(time.strptime(input, time_format)) + sql = time.strftime(output_format, time.gmtime(timestamp)) + break + # wrong format: ignore + except ValueError: pass + # could not parse it + if not sql: + raise PLCInvalidArgument, "Cannot parse timestamp %r - not in any of %r formats"%(input,Timestamp.input_formats) + elif isinstance (input,(int,long,float)): + try: + timestamp = long(input) + sql = time.strftime(output_format, time.gmtime(timestamp)) + except Exception,e: + raise PLCInvalidArgument, "Timestamp %r not recognized -- %r"%(input,e) + else: + raise PLCInvalidArgument, "Timestamp %r - unsupported type %r"%(input,type(input)) + + if check_future and input < time.time(): + raise PLCInvalidArgument, "'%s' not in the future" % sql + + if Timestamp.debug: print 'sql_validate, out:',sql + return sql + + @staticmethod + def sql_validate_utc (timestamp): + "For convenience, return sql_validate(intput, timezone=True, check_future=False)" + return Timestamp.sql_validate (timestamp, timezone=True, check_future=False) + + + @staticmethod + def cast_long (input): + """ + Translates input timestamp as a unix timestamp. + + Input may be a number (seconds since UNIX epoch, i.e., 1970-01-01 + 00:00:00 GMT), a string (in one of the supported input formats above). 
# utility for displaying durations
# be consistent in avoiding the datetime stuff
class Duration:
    """
    Render and validate durations expressed in seconds.
    """

    MINUTE = 60
    HOUR = 3600
    DAY = 3600*24

    @staticmethod
    def to_string(duration):
        """
        Render a duration in seconds as e.g. '1 d-2 h-3 m-4 s',
        omitting zero components; return 'void' for a zero duration.
        """
        result = []
        left = duration
        (days, left) = divmod(left, Duration.DAY)
        # Bug fix: this used to be '"%d d)" % td.days' -- a stray
        # parenthesis in the output, and a NameError on undefined 'td'
        # whenever the duration reached a full day.
        if days: result.append("%d d" % days)
        (hours, left) = divmod(left, Duration.HOUR)
        if hours: result.append("%d h" % hours)
        (minutes, seconds) = divmod(left, Duration.MINUTE)
        if minutes: result.append("%d m" % minutes)
        if seconds: result.append("%d s" % seconds)
        if not result: result = ['void']
        return "-".join(result)

    @staticmethod
    def validate(duration):
        """
        Coerce duration (int/long/str, in seconds) to an integer;
        raise PLCInvalidArgument on anything unparsable.
        """
        # support seconds only for now; int() auto-promotes to long
        # under Python 2, so this matches the former long() call
        try:
            return int(duration)
        except Exception:
            raise PLCInvalidArgument("Could not parse duration %r" % duration)
def sendmail(api, To, Subject, Body, From = None, Cc = None, Bcc = None):
    """
    Uses sendmail (must be installed and running locally) to send a
    message to the specified recipients. If the API is running under
    mod_python, the apache user must be listed in e.g.,
    /etc/mail/trusted-users.

    To, Cc, and Bcc may be addresses or lists of addresses. Each
    address may be either a plain text address or a tuple of (name,
    address).
    """

    # Fix up defaults: normalize every recipient argument to a list
    if not isinstance(To, list):
        To = [To]
    if Cc is not None and not isinstance(Cc, list):
        Cc = [Cc]
    if Bcc is not None and not isinstance(Bcc, list):
        Bcc = [Bcc]
    if From is None:
        # default sender is the configured support address
        From = ("%s Support" % api.config.PLC_NAME,
                api.config.PLC_MAIL_SUPPORT_ADDRESS)

    # Create a MIME-encoded UTF-8 message
    msg = MIMEText(Body.encode(api.encoding), _charset = api.encoding)

    # Unicode subject headers are automatically encoded correctly
    msg['Subject'] = Subject

    def encode_addresses(addresses, header_name = None):
        """
        Unicode address headers are automatically encoded by
        email.Header, but not correctly. The correct way is to put the
        textual name inside quotes and the address inside brackets:

        To: "=?utf-8?b?encoded"

        Each address in addrs may be a tuple of (name, address) or
        just an address. Returns a tuple of (header, addrlist)
        representing the encoded header text and the list of plain
        text addresses.
        """

        header = []
        addrs = []

        for addr in addresses:
            if isinstance(addr, tuple):
                (name, addr) = addr
                try:
                    # pure-ASCII display names can go in verbatim
                    name = name.encode('ascii')
                    header.append('%s <%s>' % (name, addr))
                except:
                    # non-ASCII: MIME-encode the display name only,
                    # keeping the address itself in plain text
                    h = Header(name, charset = api.encoding, header_name = header_name)
                    header.append('"%s" <%s>' % (h.encode(), addr))
            else:
                header.append(addr)
            addrs.append(addr)

        return (", ".join(header), addrs)

    (msg['From'], from_addrs) = encode_addresses([From], 'From')
    (msg['To'], to_addrs) = encode_addresses(To, 'To')

    if Cc is not None:
        (msg['Cc'], cc_addrs) = encode_addresses(Cc, 'Cc')
        to_addrs += cc_addrs

    if Bcc is not None:
        # Bcc recipients go on the SMTP envelope only, never in a header
        (unused, bcc_addrs) = encode_addresses(Bcc, 'Bcc')
        to_addrs += bcc_addrs

    # Needed to pass some spam filters
    msg['Reply-To'] = msg['From']
    msg['X-Mailer'] = "Python/" + sys.version.split(" ")[0]

    if not api.config.PLC_MAIL_ENABLED:
        # mail disabled: just log what would have been sent
        logger.info("PLC_MAIL_ENABLED not set")
        logger.info("From: %(From)s, To: %(To)s, Subject: %(Subject)s" % msg)
        return

    # hand the message to the local MTA; NOTIFY=NEVER suppresses
    # delivery status notifications
    s = SMTP()
    s.connect()
    rejected = s.sendmail(from_addrs[0], to_addrs, msg.as_string(), rcpt_options = ["NOTIFY=NEVER"])
    s.close()

    if rejected:
        raise PLCAPIError, "Error sending message to " + ", ".join(rejected.keys())
    def do_POST(self):
        """
        Handle an XML-RPC/SOAP POST: read the request body, hand it to
        the shared PLCAPI instance, and write the response back.
        """
        try:
            # Read request
            request = self.rfile.read(int(self.headers["Content-length"]))

            # Handle request
            response = self.server.api.handle(self.client_address, request)

            # Write response
            self.send_response(200)
            self.send_header("Content-type", "text/xml")
            self.send_header("Content-length", str(len(response)))
            self.end_headers()
            self.wfile.write(response)

            self.wfile.flush()
            # half-close the write side so the client sees EOF
            self.connection.shutdown(1)

        except Exception, e:
            # Log error to stderr; the client receives no response
            sys.stderr.write(traceback.format_exc())
            sys.stderr.flush()

PLCAPI XML-RPC/SOAP Interface

+

Please use XML-RPC or SOAP to access the PLCAPI.

class PLCAPIServer(BaseHTTPServer.HTTPServer):
    """
    Simple standalone HTTP server for testing PLCAPI.
    """

    def __init__(self, addr, config):
        # one shared PLCAPI instance, reachable from the request
        # handler via self.server.api
        self.api = PLCAPI(config)
        # allow quick restarts without 'address already in use' errors
        self.allow_reuse_address = 1
        BaseHTTPServer.HTTPServer.__init__(self, addr, PLCAPIRequestHandler)
Maybe implement it in the same way as node + authentication. + +* Anonymous functions + * Implement anonymous functions for now for backward compatibility, + but get rid of them as soon as possible + +* Hierarchical layout + * Probably need to organize the functions inside PLC/Methods/ + +* Deletion + * Need to come up with a sane, consistent principal deletion policy. + +* Validation + * Need to come up with a policy (truncation? fault?) for dealing with + variable length strings. diff --git a/apache/ModPython.py b/apache/ModPython.py new file mode 100644 index 0000000..c091894 --- /dev/null +++ b/apache/ModPython.py @@ -0,0 +1,61 @@ +# +# Apache mod_python interface +# +# Aaron Klingaman +# Mark Huang +# +# Copyright (C) 2004-2006 The Trustees of Princeton University +# + +import sys +import time +import traceback +import xmlrpclib +from mod_python import apache + +from PLC.Logger import logger + +from PLC.API import PLCAPI +api = PLCAPI() + +def handler(req): + try: + if req.method != "POST": + req.content_type = "text/html" + req.send_http_header() + req.write(""" + +PLCAPI XML-RPC/SOAP Interface + +

PLCAPI XML-RPC/SOAP Interface

+

Please use XML-RPC or SOAP to access the PLCAPI.

+ +""") + return apache.OK + + # Read request + request = req.read(int(req.headers_in['content-length'])) + + # mod_python < 3.2: The IP address portion of remote_addr is + # incorrect (always 0.0.0.0) when IPv6 is enabled. + # http://issues.apache.org/jira/browse/MODPYTHON-64?page=all + (remote_ip, remote_port) = req.connection.remote_addr + remote_addr = (req.connection.remote_ip, remote_port) + + # Handle request + response = api.handle(remote_addr, request) + + # Shut down database connection, otherwise up to MaxClients DB + # connections will remain open. + api.db.close() + + # Write response + req.content_type = "text/xml; charset=" + api.encoding + req.send_http_header() + req.write(response) + + return apache.OK + + except Exception as err: + logger.exception("INTERNAL ERROR !!") + return apache.HTTP_INTERNAL_SERVER_ERROR diff --git a/apache/ModPythonJson.py b/apache/ModPythonJson.py new file mode 100644 index 0000000..78cb363 --- /dev/null +++ b/apache/ModPythonJson.py @@ -0,0 +1,60 @@ +# +# Apache mod_python interface for JSON requests +# +# Aaron Klingaman +# Mark Huang +# +# Copyright (C) 2004-2006 The Trustees of Princeton University +# + +import sys +import traceback +import xmlrpclib +from mod_python import apache + +from PLC.Logger import logger + +from PLC.API import PLCAPI +api = PLCAPI() + +def handler(req): + try: + if req.method != "POST": + req.content_type = "text/html" + req.send_http_header() + req.write(""" + +PLCAPI JSON Interface + +

PLCAPI JSON Interface

+

Please POST JSON to access the PLCAPI.

+ +""") + return apache.OK + + # Read request + request = req.read(int(req.headers_in['content-length'])) + + # mod_python < 3.2: The IP address portion of remote_addr is + # incorrect (always 0.0.0.0) when IPv6 is enabled. + # http://issues.apache.org/jira/browse/MODPYTHON-64?page=all + (remote_ip, remote_port) = req.connection.remote_addr + remote_addr = (req.connection.remote_ip, remote_port) + + # Handle request + response = api.handle_json(remote_addr, request) + + # Shut down database connection, otherwise up to MaxClients DB + # connections will remain open. + api.db.close() + + # Write response + req.content_type = "text/json; charset=" + api.encoding + req.send_http_header() + req.write(response) + + return apache.OK + + except Exception, err: + logger.exception("INTERNAL ERROR !!") + return apache.HTTP_INTERNAL_SERVER_ERROR diff --git a/apache/__init__.py b/apache/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/apache/plc.wsgi b/apache/plc.wsgi new file mode 100644 index 0000000..3e0d51c --- /dev/null +++ b/apache/plc.wsgi @@ -0,0 +1,54 @@ +# -*- python -*- +# +# Apache mod_wsgi python interface +# +# Copyright (C) 2004-2006 The Trustees of Princeton University +# + +import sys +sys.path.append('/usr/share/plc_api') +sys.stdout = sys.stderr +import traceback +from PLC.Logger import logger +from PLC.API import PLCAPI + +def application(environ, start_response): + try: + status = '200 OK' + if environ.get('REQUEST_METHOD') != 'POST': + content_type = 'text/html' + output = """ + +PLCAPI WSGI XML-RPC/SOAP Interface + +

PLCAPI WSGI XML-RPC/SOAP Interface

+

Please use XML-RPC or SOAP to access the PLCAPI.

def apply_ratelimit_aspect():
    # weave rate limiting around every API method invocation
    weave_class_method(RateLimitAspect(), Method, "__call__")

def apply_debugger_aspect():
    # just log all method calls w/ their parameters
    from pyaspects.debuggeraspect import DebuggerAspect
    weave_class_method(DebuggerAspect(out=open("/tmp/all_method_calls.log", "a")), Method, "__call__")
    def before(self, wobj, data, *args, **kwargs):
        # Per-caller, per-source, per-minute counters live in memcache:
        # ratelimit_128.112.139.115_201011091532 = 1
        # ratelimit_128.112.139.115_201011091533 = 14
        # ratelimit_128.112.139.115_201011091534 = 11
        # Now, on every request we work out the keys for the past five minutes and use get_multi to retrieve them.
        # If the sum of those counters exceeds the maximum allowed for that time period, we block the request.

        api_method_name = wobj.name
        api_method_source = wobj.source

        # no AuthMethod -> nothing we can rate-limit on
        try:
            api_method = args[0]["AuthMethod"]
        except:
            return

        # decode api_method_caller: map the auth material to a stable
        # caller identity (email, hostname, session id, username, ...)
        if api_method == "session":
            api_method_caller = Sessions(wobj.api, {'session_id': args[0]["session"]})
            if api_method_caller == []:
                return
            elif api_method_caller[0]["person_id"] != None:
                api_method_caller = Persons(wobj.api, api_method_caller[0]["person_id"])[0]["email"]
            elif api_method_caller[0]["node_id"] != None:
                api_method_caller = Nodes(wobj.api, api_method_caller[0]["node_id"])[0]["hostname"]
            else:
                api_method_caller = args[0]["session"]
        elif api_method == "password" or api_method == "capability":
            api_method_caller = args[0]["Username"]
        elif api_method == "gpg":
            api_method_caller = args[0]["name"]
        elif api_method == "hmac" or api_method == "hmac_dummybox":
            api_method_caller = args[0]["node_id"]
        elif api_method == "anonymous":
            api_method_caller = "anonymous"
        else:
            api_method_caller = "unknown"

        # excludes: never throttle calls from the API host itself or
        # from whitelisted addresses
        if api_method_source == None or api_method_source[0] == socket.gethostbyname(self.config.PLC_API_HOST) or api_method_source[0] in self.whitelist:
            return

        # sanity check
        if api_method_caller == None:
            self.log("%s called from %s with Username = None?" % (api_method_name, api_method_source[0]))
            return

        # normalize unicode string otherwise memcache throws an exception
        api_method_caller = str(api_method_caller)

        mc = memcache.Client(["%s:11211" % self.config.PLC_API_HOST])
        now = datetime.now()

        # key for the current minute, plus the keys of the whole window
        current_key = "%s_%s_%s_%s" % (self.prefix, api_method_caller, api_method_source[0], now.strftime("%Y%m%d%H%M"))
        keys_to_check = ["%s_%s_%s_%s" % (self.prefix, api_method_caller, api_method_source[0], (now - timedelta(minutes = minute)).strftime("%Y%m%d%H%M")) for minute in range(self.minutes + 1)]

        # incr fails (ValueError / None) when the key does not exist yet
        try:
            value = mc.incr(current_key)
        except ValueError:
            value = None

        if value == None:
            mc.set(current_key, 1, time=self.expire_after)

        # sum the counters over the whole window
        results = mc.get_multi(keys_to_check)
        total_requests = 0
        for i in results:
            total_requests += results[i]

        if total_requests > self.requests:
            self.log("%s - %s" % (api_method_source[0], api_method_caller))

            # mail the offender at most once per expiry window
            # NOTE(review): nesting of the mail branch reconstructed from
            # a whitespace-mangled patch -- verify against upstream
            caller_key = "%s_%s" % (self.prefix, api_method_caller)
            if mc.get(caller_key) == None:
                mc.set(caller_key, 1, time = self.expire_after)
                if (api_method == "session" and api_method_caller.__contains__("@")) or (api_method == "password" or api_method == "capability"):
                    self.mail(api_method_caller)

            raise PLCPermissionDenied, "Maximum allowed number of API calls exceeded"
b/cache_utils/decorators.py new file mode 100644 index 0000000..4dd1d4d --- /dev/null +++ b/cache_utils/decorators.py @@ -0,0 +1,54 @@ +#coding: utf-8 +from django.core.cache import cache +from django.utils.functional import wraps +from cache_utils.utils import _cache_key, _func_info, _func_type + +def cached(timeout, group=None): + """ Caching decorator. Can be applied to function, method or classmethod. + Supports bulk cache invalidation and invalidation for exact parameter + set. Cache keys are human-readable because they are constructed from + callable's full name and arguments and then sanitized to make + memcached happy. + + It can be used with or without group_backend. Without group_backend + bulk invalidation is not supported. + + Wrapped callable gets `invalidate` methods. Call `invalidate` with + same arguments as function and the result for these arguments will be + invalidated. + """ + + backend_kwargs = {'group': group} if group else {} + + def _cached(func): + + func_type = _func_type(func) + + @wraps(func) + def wrapper(*args, **kwargs): + + # full name is stored as attribute on first call + if not hasattr(wrapper, '_full_name'): + name, _args = _func_info(func, args) + wrapper._full_name = name + + # try to get the value from cache + key = _cache_key(wrapper._full_name, func_type, args, kwargs) + value = cache.get(key, **backend_kwargs) + + # in case of cache miss recalculate the value and put it to the cache + if value is None: + value = func(*args, **kwargs) + cache.set(key, value, timeout, **backend_kwargs) + return value + + def invalidate(*args, **kwargs): + ''' invalidates cache result for function called with passed arguments ''' + if not hasattr(wrapper, '_full_name'): + return + key = _cache_key(wrapper._full_name, 'function', args, kwargs) + cache.delete(key, **backend_kwargs) + + wrapper.invalidate = invalidate + return wrapper + return _cached diff --git a/cache_utils/group_backend.py b/cache_utils/group_backend.py new file mode 100644 
index 0000000..8377477 --- /dev/null +++ b/cache_utils/group_backend.py @@ -0,0 +1,110 @@ +""" +Memcached cache backend with group O(1) invalidation ability, dog-pile +effect prevention using MintCache algorithm and project version support to allow +graceful updates and multiple django projects on same memcached instance. +Long keys (>250) are truncated and appended with md5 hash. +""" + +import uuid +import logging +import sys +import time +from django.core.cache.backends.memcached import CacheClass as MemcachedCacheClass +from django.conf import settings +from cache_utils.utils import sanitize_memcached_key + +# This prefix is appended to the group name to prevent cache key clashes. +_VERSION_PREFIX = getattr(settings, 'VERSION', "") +_KEY_PREFIX = "_group::" + +# MINT_DELAY is an upper bound on how long any value should take to +# be generated (in seconds) +MINT_DELAY = 30 + +class CacheClass(MemcachedCacheClass): + + def add(self, key, value, timeout=0, group=None): + key = self._make_key(group, key) + + refresh_time = timeout + time.time() + real_timeout = timeout + MINT_DELAY + packed_value = (value, refresh_time, False) + + return super(CacheClass, self).add(key, packed_value, real_timeout) + + def get(self, key, default=None, group=None): + key = self._make_key(group, key) + packed_value = super(CacheClass, self).get(key, default) + if packed_value is None: + return default + value, refresh_time, refreshed = packed_value + if (time.time() > refresh_time) and not refreshed: + # Store the stale value while the cache revalidates for another + # MINT_DELAY seconds. 
+ self.set(key, value, timeout=MINT_DELAY, group=group, refreshed=True) + return default + return value + + def set(self, key, value, timeout=0, group=None, refreshed=False): + key = self._make_key(group, key) + refresh_time = timeout + time.time() + real_timeout = timeout + MINT_DELAY + packed_value = (value, refresh_time, refreshed) + return super(CacheClass, self).set(key, packed_value, real_timeout) + + def delete(self, key, group=None): + key = self._make_key(group, key) + return super(CacheClass, self).delete(key) + + def invalidate_group(self, group): + """ Invalidates all cache keys belonging to group """ + key = "%s%s%s" % (_VERSION_PREFIX, _KEY_PREFIX, group) + super(CacheClass, self).delete(key) + + def _make_key(self, group, key, hashkey=None): + """ Generates a new cache key which belongs to a group, has + _VERSION_PREFIX prepended and is shorter than memcached key length + limit. + """ + key = _VERSION_PREFIX + key + if group: + if not hashkey: + hashkey = self._get_hashkey(group) + key = "%s:%s-%s" % (group, key, hashkey) + return sanitize_memcached_key(key) + + def _get_hashkey(self, group): + """ This can be useful sometimes if you're doing a very large number + of operations and you want to avoid all of the extra cache hits. + """ + key = "%s%s%s" % (_VERSION_PREFIX, _KEY_PREFIX, group) + hashkey = super(CacheClass, self).get(key) + if hashkey is None: + hashkey = str(uuid.uuid4()) + super(CacheClass, self).set(key, hashkey) + return hashkey + + def clear(self): + self._cache.flush_all() + +# ====================================== +# I didn't implement methods below to work with MintCache so raise +# NotImplementedError for them. 
+ + def incr(self, key, delta=1, group=None): +# if group: +# key = self._make_key(group, key) +# return super(CacheClass, self).incr(key, delta) + raise NotImplementedError + + def decr(self, key, delta=1, group=None): +# if group: +# key = self._make_key(group, key) +# return super(CacheClass, self).decr(key, delta) + raise NotImplementedError + + def get_many(self, keys, group=None): +# hashkey = self._get_hashkey(group) +# keys = [self._make_key(group, k, hashkey) for k in keys] +# return super(CacheClass, self).get_many(keys) + raise NotImplementedError diff --git a/cache_utils/models.py b/cache_utils/models.py new file mode 100644 index 0000000..da0c0d5 --- /dev/null +++ b/cache_utils/models.py @@ -0,0 +1 @@ +# Hello, testrunner! diff --git a/cache_utils/tests.py b/cache_utils/tests.py new file mode 100644 index 0000000..b55857e --- /dev/null +++ b/cache_utils/tests.py @@ -0,0 +1,150 @@ +#coding: utf-8 + +from unittest import TestCase + +from django.core.cache import cache +from cache_utils.decorators import cached +from cache_utils.utils import sanitize_memcached_key, _func_type, _func_info + +def foo(a,b): + pass + +class Foo(object): + def foo(self, a, b): + pass + @classmethod + def bar(cls, x): + pass + +class FuncTypeTest(TestCase): + def assertFuncType(self, func, tp): + self.assertEqual(_func_type(func), tp) + + def test_func(self): + self.assertFuncType(foo, 'function') + + def test_method(self): + self.assertFuncType(Foo.foo, 'method') + + def test_classmethod(self): + self.assertFuncType(Foo.bar, 'classmethod') + + +class FuncInfoTest(TestCase): + def assertFuncInfo(self, func, args_in, name, args_out): + info = _func_info(func, args_in) + self.assertEqual(info[0], name) + self.assertEqual(info[1], args_out) + + def test_func(self): + self.assertFuncInfo(foo, [1,2], 'cache_utils.tests.foo', [1,2]) + + def test_method(self): + foo_obj = Foo() + self.assertFuncInfo(Foo.foo, [foo_obj, 1, 2], + 'cache_utils.tests.Foo.foo', [1,2]) + + def 
test_classmethod(self): + self.assertFuncInfo(Foo.bar, [Foo, 1], + 'cache_utils.tests.Foo.bar', [1]) + + +class SanitizeTest(TestCase): + def test_sanitize_keys(self): + key = u"12345678901234567890123456789012345678901234567890" + self.assertTrue(len(key) >= 40) + key = sanitize_memcached_key(key, 40) + self.assertTrue(len(key) <= 40) + + +class ClearMemcachedTest(TestCase): + def tearDown(self): + cache._cache.flush_all() + + def setUp(self): + cache._cache.flush_all() + + +class InvalidationTest(ClearMemcachedTest): + + def test_group_invalidation(self): + cache.set('vasia', 'foo', 60, group='names') + cache.set('petya', 'bar', 60, group='names') + cache.set('red', 'good', 60, group='colors') + + self.assertEqual(cache.get('vasia', group='names'), 'foo') + self.assertEqual(cache.get('petya', group='names'), 'bar') + self.assertEqual(cache.get('red', group='colors'), 'good') + + cache.invalidate_group('names') + self.assertEqual(cache.get('petya', group='names'), None) + self.assertEqual(cache.get('vasia', group='names'), None) + self.assertEqual(cache.get('red', group='colors'), 'good') + + cache.set('vasia', 'foo', 60, group='names') + self.assertEqual(cache.get('vasia', group='names'), 'foo') + + def test_func_invalidation(self): + self.call_count = 0 + + @cached(60) + def my_func(a, b): + self.call_count += 1 + return self.call_count + + self.assertEqual(my_func(1,2), 1) + self.assertEqual(my_func(1,2), 1) + self.assertEqual(my_func(3,2), 2) + self.assertEqual(my_func(3,2), 2) + my_func.invalidate(3,2) + self.assertEqual(my_func(1,2), 1) + self.assertEqual(my_func(3,2), 3) + self.assertEqual(my_func(3,2), 3) + + def test_method_invalidation(self): + self.call_count = 0 + this = self + + class Foo(object): + @cached(60) + def bar(self, x): + this.call_count += 1 + return this.call_count + + foo = Foo() + self.assertEqual(foo.bar(1), 1) + self.assertEqual(foo.bar(1), 1) + Foo.bar.invalidate(1) + self.assertEqual(foo.bar(1), 2) + + def 
test_invalidate_nonexisting(self): + @cached(60) + def foo(x): + return 1 + foo.invalidate(5) # this shouldn't raise exception + + +class DecoratorTest(ClearMemcachedTest): + + def test_decorator(self): + self._x = 0 + + @cached(60, group='test-group') + def my_func(params=""): + self._x = self._x + 1 + return u"%d%s" % (self._x, params) + + self.assertEqual(my_func(), "1") + self.assertEqual(my_func(), "1") + + self.assertEqual(my_func("x"), u"2x") + self.assertEqual(my_func("x"), u"2x") + + self.assertEqual(my_func(u"Василий"), u"3Василий") + self.assertEqual(my_func(u"Василий"), u"3Василий") + + self.assertEqual(my_func(u"й"*240), u"4"+u"й"*240) + self.assertEqual(my_func(u"й"*240), u"4"+u"й"*240) + + self.assertEqual(my_func(u"Ы"*500), u"5"+u"Ы"*500) + self.assertEqual(my_func(u"Ы"*500), u"5"+u"Ы"*500) diff --git a/cache_utils/utils.py b/cache_utils/utils.py new file mode 100644 index 0000000..ed7474d --- /dev/null +++ b/cache_utils/utils.py @@ -0,0 +1,60 @@ +from hashlib import md5 + +CONTROL_CHARACTERS = set([chr(i) for i in range(0,33)]) +CONTROL_CHARACTERS.add(chr(127)) + +def sanitize_memcached_key(key, max_length=250): + """ Removes control characters and ensures that key will + not hit the memcached key length limit by replacing + the key tail with md5 hash if key is too long. 
+ """ + key = ''.join([c for c in key if c not in CONTROL_CHARACTERS]) + if len(key) > max_length: + hash = md5(key).hexdigest() + key = key[:max_length-33]+'-'+hash + return key + +def _args_to_unicode(args, kwargs): + key = "" + if args: + key += unicode(args) + if kwargs: + key += unicode(kwargs) + return key + + +def _func_type(func): + """ returns if callable is a function, method or a classmethod """ + argnames = func.func_code.co_varnames[:func.func_code.co_argcount] + if len(argnames) > 0: + if argnames[0] == 'self': + return 'method' + if argnames[0] == 'cls': + return 'classmethod' + return 'function' + + +def _func_info(func, args): + ''' introspect function's or method's full name. + Returns a tuple (name, normalized_args,) with + 'cls' and 'self' removed from normalized_args ''' + + func_type = _func_type(func) + + if func_type == 'function': + return ".".join([func.__module__, func.__name__]), args + + class_name = args[0].__class__.__name__ + if func_type == 'classmethod': + class_name = args[0].__name__ + + return ".".join([func.__module__, class_name, func.__name__]), args[1:] + + +def _cache_key(func_name, func_type, args, kwargs): + """ Construct readable cache key """ + if func_type == 'function': + args_string = _args_to_unicode(args, kwargs) + else: + args_string = _args_to_unicode(args[1:], kwargs) + return sanitize_memcached_key('[cached]%s(%s)' % (func_name, args_string,)) diff --git a/db-config.d/000-functions b/db-config.d/000-functions new file mode 100644 index 0000000..5779b0e --- /dev/null +++ b/db-config.d/000-functions @@ -0,0 +1,177 @@ +# -*-python-*- +#################### +import sys, os + +g_url = "" +def GetMyPLCURL(): return g_url +def SetMyPLCURL(url): + global g_url + g_url = url + +# Get all currently registered roles +g_role_names = [ role['name'] for role in GetRoles()] +g_role_names.sort() + +def SetRole(level, role): + global g_role_names + if role not in g_role_names: + AddRole(level, role) + g_role_names.append(role) + 
g_role_names.sort() + +# Get list of existing tag types +g_known_tag_types = [tag_type['tagname'] for tag_type in GetTagTypes()] +g_known_tag_types.sort() + +def AllPersonRoles (): return [ 'pi','user','tech' ] + +def SetTagType(tag_type): + try: + tagname=tag_type['tagname'] + global g_known_tag_types + # handle 'roles' field differently + if 'roles' in tag_type: + roles=tag_type['roles'] + del tag_type['roles'] + else: + roles=['admin'] + # just in case + if 'min_role_id' in tag_type: + print "WARNING: ignoring deprecated field min_role_id for tagtype %s"%tagname + del tag_type['min_role_id'] + # Create/update default slice tag types + if tagname not in g_known_tag_types: + AddTagType(tag_type) + g_known_tag_types.append(tagname) + g_known_tag_types.sort() + else: + UpdateTagType(tagname, tag_type) + # enforce provided roles if present + old_roles=GetTagTypes(tagname)[0]['roles'] + for minus_role in set(old_roles).difference(set(roles)): + DeleteRoleFromTagType(minus_role,tagname) + for plus_role in set(roles).difference(set(old_roles)): + AddRoleToTagType(plus_role,tagname) + except: + # something went wrong for that tagname, + # but don't want to break the whole startup sequence + print "Could not enforce tagtype %s --- beg"%tagname + import traceback + traceback.print_exc() + print "Could not enforce tagtype %s --- end"%tagname + +# Get list of existing (enabled, global) files +g_conf_files = GetConfFiles() +g_conf_files = filter(lambda conf_file: conf_file['enabled'] and \ + not conf_file['node_ids'] and \ + not conf_file['nodegroup_ids'], + g_conf_files) +g_dests = [conf_file['dest'] for conf_file in g_conf_files] +g_conf_files = dict(zip(g_dests, g_conf_files)) + +# Get list of existing initscripts +g_oldinitscripts = GetInitScripts() +g_oldinitscript_names = [script['name'] for script in g_oldinitscripts] +g_oldinitscripts = dict(zip(g_oldinitscript_names, g_oldinitscripts)) + +def SetInitScript(initscript): + global g_oldinitscripts, g_oldinitscript_names 
+ if initscript['name'] not in g_oldinitscript_names: + initscript_id = AddInitScript(initscript) + g_oldinitscript_names.append(initscript['name']) + initscript['initscript_id']=initscript_id + g_oldinitscripts[initscript['name']]=initscript + else: + orig_initscript = g_oldinitscripts[initscript['name']] + initscript_id = orig_initscript['initscript_id'] + UpdateInitScript(initscript_id, initscript) + +def SetConfFile(conf_file): + global g_conf_files, g_dests + if conf_file['dest'] not in g_dests: + AddConfFile(conf_file) + else: + orig_conf_file = g_conf_files[conf_file['dest']] + conf_file_id = orig_conf_file['conf_file_id'] + UpdateConfFile(conf_file_id, conf_file) + +def SetSlice(slice, tags): + try: + # Create or Update slice + slice_name = slice['name'] + slices = GetSlices([slice_name]) + if len(slices)==1: + slice_id = slices[0]['slice_id'] + if slice.has_key('name'): + del slice['name'] + UpdateSlice(slice_id, slice) + slice['name']=slice_name + else: + expires = None + if slice.has_key('expires'): + expires = slice['expires'] + del slice['expires'] + slice_id = AddSlice(slice) + if expires <> None: + UpdateSlice(slice_id, {'expires':expires}) + + # Get slice structure with all fields + slice = GetSlices([slice_name])[0] + + # Create/delete all tags + # NOTE: update is not needed, since unspecified tags are deleted, + # and new tags are added + slice_tags = [] + if slice['slice_tag_ids']: + # Delete unknown attributes + for slice_tag in GetSliceTags(slice['slice_tag_ids']): + # ignore sliver tags, as those are custom/run-time values + if slice_tag['node_id'] <> None: continue + if (slice_tag['tagname'], slice_tag['value']) not in tags: + DeleteSliceTag(slice_tag['slice_tag_id']) + else: + slice_tags.append((slice_tag['tagname'],slice_tag['value'])) + + # only add slice tags that are new + for (name, value) in tags: + if (name,value) not in slice_tags: + AddSliceTag(slice_name, name, value) + else: + # NOTE: this confirms that the user-specified tag is + 
# returned by GetSliceTags + pass + except: + # something went wrong for that tagname, + print "Could not create init slice %s --- beg"%slice['name'] + import traceback + traceback.print_exc() + print "Could not create init slice %s --- end"%slice['name'] + +def SetMessage(message): + messages = GetMessages([message['message_id']]) + if len(messages)==0: + AddMessage(message) + ### Thierry 2012-03 + # let people customize their messages if they want to + #else: + # UpdateMessage(message['message_id'],message) + +# Get all model names +g_pcu_models = [type['model'] for type in GetPCUTypes()] + +def SetPCUType(pcu_type): + global g_pcu_models + if 'pcu_protocol_types' in pcu_type: + protocol_types = pcu_type['pcu_protocol_types'] + # Take this value out of the struct. + del pcu_type['pcu_protocol_types'] + else: + protocol_types = [] + + if pcu_type['model'] not in g_pcu_models: + # Add the name/model info into DB + id = AddPCUType(pcu_type) + # for each protocol, also add this. + for ptype in protocol_types: + AddPCUProtocolType(id, ptype) + diff --git a/db-config.d/001-admin_user b/db-config.d/001-admin_user new file mode 100644 index 0000000..74a9723 --- /dev/null +++ b/db-config.d/001-admin_user @@ -0,0 +1,23 @@ +# -*-python-*- +#################### +# Create/update the default administrator account (should be person_id 2). + +the_admin_id=2 +admin = { 'first_name': "Default", + 'last_name': "Administrator", + 'email': plc['root_user'], + 'password': plc['root_password'] } +persons = GetPersons(the_admin_id) +if not persons: + # AddPerson won't let you pass a person_id + person_id = AddPerson(admin) + if person_id != the_admin_id: + # Huh? Someone deleted the account manually from the database. + DeletePerson(person_id) + raise Exception, "Someone deleted the \"%s %s\" account from the database!" 
% \ + (admin['first_name'], admin['last_name']) + UpdatePerson(person_id, { 'enabled': True }) +else: + person_id = persons[0]['person_id'] + UpdatePerson(person_id, admin) + diff --git a/db-config.d/002-system_site b/db-config.d/002-system_site new file mode 100644 index 0000000..4a60581 --- /dev/null +++ b/db-config.d/002-system_site @@ -0,0 +1,67 @@ +# -*-python-*- +#################### +# Create/update and populate the default site (should be site_id 1) + +### plc_www holds the contents of the PLC_WWW configuration category +if plc_www['port'] == '80': + url = "http://" + plc_www['host'] + "/" +elif plc_www['port'] == '443': + url = "https://" + plc_www['host'] + "/" +else: + url = "http://" + plc_www['host'] + ":" + plc_www['port'] + "/" + +SetMyPLCURL(url) + +site = { 'site_id': 1, + 'name': plc['name'] + " Central", + 'abbreviated_name': plc['name'], + 'login_base': plc['slice_prefix'], + 'is_public': False, + 'url': url, + 'max_slices': 100 } + +sites = GetSites([site['site_id']]) +if not sites: + site_id = AddSite(site['name'], site['abbreviated_name'], site['login_base'], site) + if site_id != site['site_id']: + DeleteSite(site_id) + raise Exception, "Someone deleted the \"%s\" site from the database!" 
% \ + site['name'] + sites = [site] + +# Must call UpdateSite() even after AddSite() to update max_slices +site_id = sites[0]['site_id'] +UpdateSite(site_id, site) + +# The default administrator account must be associated with a site +# in order to login - see 001-admin_user +AddPersonToSite(the_admin_id, site['site_id']) +SetPersonPrimarySite(the_admin_id, site['site_id']) + +# Grant admin and PI roles to the default administrator account +AddRoleToPerson(10, the_admin_id) +AddRoleToPerson(20, the_admin_id) + +# Associate root ssh key with the default administrator +keyfile=plc['root_ssh_key_pub'] +person = GetPersons(the_admin_id)[0] +keys = GetKeys(person['key_ids']) +if os.path.exists(keyfile): + sshkeyfp = file(keyfile,"r") + sshkey = sshkeyfp.read() + sshkeyfp.close() + + found=False + for key in keys: + if key['key_type']=='ssh': + if key['key'] == sshkey: + found=True + else: + # should we delete other keys? + pass + if not found: + key_id = AddPersonKey(the_admin_id,{'key_type':'ssh','key':sshkey}) +else: + if len(keys)==0: + print "WARNING: default administrator does not have an ssh key" + print "and the default ssh root pub key (%s) file does not exist." 
% keyfile diff --git a/db-config.d/003-accessors b/db-config.d/003-accessors new file mode 100644 index 0000000..6b43079 --- /dev/null +++ b/db-config.d/003-accessors @@ -0,0 +1,4 @@ +# -*-python-*- +from PLC.Accessor import AccessorSingleton + +AccessorSingleton(api).run_all_tag_locators() diff --git a/db-config.d/010-slice_tags b/db-config.d/010-slice_tags new file mode 100644 index 0000000..9a4184f --- /dev/null +++ b/db-config.d/010-slice_tags @@ -0,0 +1,157 @@ +# -*-python-*- +#################### slice tag types +# xxx this should move to PLC/Accessors + +# vref is now defined in an accessor +# initscript is now defined in an accessor + +# Setup default slice tag types +slicetag_types = \ +[ + +### this applies on Node, not on Slice +### # Slice type (only vserver is supported) +### {'tagname': "type", +### 'description': "Type of slice (e.g. vserver)", +### 'category' : 'slice/general', +### 'roles': ['admin','pi']}, + + # System slice + {'tagname': "system", + 'description': "Is a default system slice (1) or not (0 or unset)", + 'category' : 'slice/general'}, + + # Slice enabled (1) or suspended (0) + {'tagname': "enabled", + 'description': "Slice enabled (1 or unset) or suspended (0)", + 'category' : 'slice/general'}, + + # IP Addresses for a Slice + {'tagname': "ip_addresses", + 'description': "Add an ip address to a slice/sliver.", + 'category' : 'slice/rspec'}, + {'tagname': "isolate_loopback", + 'description': "Create an isolated loopback interface within the vserver rather than sharing with all vservers.", + 'category' : 'slice/rspec'}, + + # CPU share + {'tagname': "cpu_pct", + 'description': "Reserved CPU percent", + 'category' : 'slice/rspec'}, + {'tagname': "cpu_share", + 'description': "Number of CPU shares", + 'category' : 'slice/rspec'}, + {'tagname': "cpu_cores", + 'description': "Number of CPU cores", + 'category': 'slice/rspec'}, + {'tagname': "cpu_freezable", + 'description': "Slice processes should be frozen if cpu_cores is 0", + 
'category': 'slice/rspec'}, + + # Bandwidth limits + {'tagname': "net_min_rate", + 'description': "Minimum bandwidth (kbps)", + 'category' : 'slice/rspec'}, + {'tagname': "net_max_rate", + 'description': "Maximum bandwidth (kbps)", + 'category' : 'slice/rspec'}, + {'tagname': "net_i2_min_rate", + 'description': "Minimum bandwidth over I2 routes (kbps)", + 'category' : 'slice/rspec'}, + {'tagname': "net_i2_max_rate", + 'description': "Maximum bandwidth over I2 routes (kbps)", + 'category' : 'slice/rspec'}, + {'tagname': "net_max_kbyte", + 'description': "Maximum daily network Tx KByte limit.", + 'category' : 'slice/rspec'}, + {'tagname': "net_thresh_kbyte", + 'description': "KByte limit before warning and throttling.", + 'category' : 'slice/rspec'}, + {'tagname': "net_i2_max_kbyte", + 'description': "Maximum daily network Tx KByte limit to I2 hosts.", + 'category' : 'slice/rspec'}, + {'tagname': "net_i2_thresh_kbyte", + 'description': "KByte limit to I2 hosts before warning and throttling.", + 'category' : 'slice/rspec'}, + {'tagname': "net_share", + 'description': "Number of bandwidth shares", + 'category' : 'slice/rspec'}, + {'tagname': "net_i2_share", + 'description': "Number of bandwidth shares over I2 routes", + 'category' : 'slice/rspec'}, + + # Disk quota + {'tagname': "disk_max", + 'description': "Disk quota (1k disk blocks)", + 'category' : 'slice/rspec'}, + + # deprecated in nov. 2010 + # Proper operations + #{'tagname': "proper_op", + # 'description': "Proper operation (e.g. 
bind_socket)", + # 'category' : 'slice/rspec'}, + + # VServer capabilities + {'tagname': "capabilities", + 'description': "VServer bcapabilities (separate by commas)", + 'category' : 'slice/rspec'}, + + # Vsys + # need to allow this one so that slice can have that set from PLC_VSYS_DEFAULT + {'tagname': "vsys", + 'description': "Bind vsys script fd's to a slice's /vsys directory.", + 'category' : 'slice/rspec', + 'roles': AllPersonRoles()}, + {'tagname': "vsys_vnet", + 'description': """Specify the IP range that can be used in a given slice +for virtual devices involved in topologies, e.g. 192.168.100.0/24""", + 'category': 'slice/rspec'}, + + # CoDemux + {'tagname': "codemux", + 'description': "Demux HTTP between slices using localhost ports. Value in the form 'host, localhost port'.", + 'category' : 'slice/rspec'}, + + # Delegation + {'tagname': "delegations", + 'description': "Coma seperated list of slices to give delegation authority to.", + 'category' : 'slice/rspec', + 'roles' : ['admin','pi','user']}, + + # Capability to give a sliver access to unused raw disk + {'tagname': "rawdisk", + 'description': "map unused raw disk devices into the slice", + 'category' : 'slice/access', # we should get rid of this category thing + 'roles': ['admin','pi']}, + + { 'tagname' : 'exempt_slice_until', + 'description' : 'Exclude this slice from MyOps until given date (YYYYMMDD)', + 'category' : 'slice/myops'}, + + # DistributedRateLimiting slice + {'tagname': "drl", + 'description': "Is a default Distributed Rate Limiting slice (1) or not (0 or unset)", + 'category' : 'slice/general'}, + + {'tagname' : 'interface', + 'description' : 'The interface tag holds network configuration information until VirtualInterface objects are in PLCAPI', + 'category' : 'slice/network'}, + +] + +import resource +# add in the platform supported rlimits to the default_attribute_types +for entry in resource.__dict__.keys() + ["VLIMIT_OPENFD"]: + if entry.find("LIMIT_")==1: + rlim = 
entry[len("RLIMIT_"):] + rlim = rlim.lower() + for ty in ("min","soft","hard"): + attribute = { + 'tagname': "%s_%s"%(rlim,ty), + 'description': "Per sliver RLIMIT %s_%s."%(rlim,ty), + 'category': 'slice/limit', + } + slicetag_types.append(attribute) + +for slicetag_type in slicetag_types: + SetTagType(slicetag_type) diff --git a/db-config.d/020-boot_states b/db-config.d/020-boot_states new file mode 100644 index 0000000..0f7c768 --- /dev/null +++ b/db-config.d/020-boot_states @@ -0,0 +1,22 @@ +# -*-python-*- +#################### slice tag types +default_boot_states = [ + 'boot', + 'failboot', + 'safeboot', + 'install', + 'reinstall', + 'upgrade', + 'disabled', +] +current_boot_states = GetBootStates() +for state in default_boot_states: + if state not in current_boot_states: + AddBootState(state) + +# TODO: Delete old boot states. +if False:# NOTE: Only set to true if all federating peers have the new default boot states above. + for state in current_boot_states: + if state not in default_boot_states: + DeleteBootState(state) + diff --git a/db-config.d/030-interface_tags b/db-config.d/030-interface_tags new file mode 100644 index 0000000..6d3d46b --- /dev/null +++ b/db-config.d/030-interface_tags @@ -0,0 +1,26 @@ +# -*-python-*- +#################### interface tag types +# xxx this should move to PLC/Accessors + +interfacetag_types = \ +[ + {'category': u'interface/ovs', + 'description': u'Name of Open vSwitch bridge', + 'tagname': u'ovs_bridge'}, + + # Used by M-lab for IPv6 addresses + {'category': u'interface/config', + 'description': u'IPv6 gateway', + 'tagname': u'ipv6_defaultgw'}, + {'category': u'interface/config', + 'description': u'IPv6 address for the interface', + 'tagname': u'ipv6addr'}, + {'category': u'interface/config', + 'description': u'IPv6 slice addresses', + 'tagname': u'ipv6addr_secondaries'}, +] + +for interfacetag_type in interfacetag_types: + SetTagType(interfacetag_type) + AddRoleToTagType('admin', interfacetag_type['tagname']) + 
AddRoleToTagType('tech', interfacetag_type['tagname']) diff --git a/db-config.d/050-pcu_types b/db-config.d/050-pcu_types new file mode 100644 index 0000000..8db5bb3 --- /dev/null +++ b/db-config.d/050-pcu_types @@ -0,0 +1,62 @@ +# -*-python-*- +#################### PCUs +### Setup Initial PCU information + +pcu_types = [ + {'model': 'HPiLO', + 'name': 'HP iLO v1 or v2 (Integrated Lights-Out)', }, + + {'model': 'IntelAMT', + 'name': 'Intel AMT v2.5 or v3.0 (Active Management Technology)', }, + + {'model': 'DRAC', + 'name': 'DRAC - Dell Remote Access Control (not Modular Chassis (MC))', }, + + {'model': 'OpenIPMI', + 'name': 'OpenIPMI - Intelligent Platform Management Interface', }, + + {'model': 'APCControl12p3', + 'name': 'APC AP79xx or Masterswitch (sequence 1-2-port-3)', }, + {'model': 'APCControl1p4', + 'name': 'APC AP79xx or Masterswitch (sequence 1-port-4)', }, + {'model': 'APCControl121p3', + 'name': 'APC AP79xx or Masterswitch (sequence 1-2-1-port-3)', }, + {'model': 'APCControl121p1', + 'name': 'APC AP79xx or Masterswitch (sequence 1-2-1-port-1)', }, + {'model': 'APCControl13p13', + 'name': 'APC AP79xx or Masterswitch (sequence 1-3-port-1-3)', }, + + {'model': 'BayTechRPC3NC', + 'name': 'BayTech with prompt RPC3-NC>', }, + {'model': 'BayTechRPC16', + 'name': 'BayTech with prompt RPC-16>', }, + {'model': 'BayTech', + 'name': 'BayTech with prompt DS-RPC>', }, + {'model': 'BayTechCtrlC', + 'name': 'BayTech Ctrl-C, 5, then with prompt DS-RPC>', }, + {'model': 'BayTechCtrlCUnibe', + 'name': 'BayTech Ctrl-C, 3, then with prompt DS-RPC>', }, + + {'model': 'BlackBoxPSMaverick', + 'name': 'BlackBoxPSMaverick Web based controller'}, + + {'model': 'IPAL', + 'name': 'IPAL - Dataprobe IP-41x & IP-81x', }, + {'model': 'ePowerSwitchNew', + 'name': 'ePowerSwitch Newer Models 1/4/8x', }, + {'model': 'ePowerSwitchOld', + 'name': 'ePowerSwitch Older Models 1/4/8x', }, + + {'model': 'PM211MIP', + 'name': 'Infratec PM221-MIP', }, + + {'model': 'WTIIPS4', + 'name': 'Western 
Telematic (WTI IPS-4)', }, + + {'model': 'ManualPCU', + 'name': 'Manual Administrator Operation (choose if model unknown)', }, + ] + +for pcu_type in pcu_types: + SetPCUType(pcu_type) + diff --git a/db-config.d/060-messages b/db-config.d/060-messages new file mode 100644 index 0000000..bee865e --- /dev/null +++ b/db-config.d/060-messages @@ -0,0 +1,291 @@ +# -*-python-*- +#################### body for messages + +installfailed = """Once the node meets these requirements, please reinitiate the install +by visiting: + +https://%(PLC_WWW_HOST)s:%(PLC_WWW_SSL_PORT)d/db/nodes/?id=%(node_id)d + +Update the BootState to 'Reinstall', then reboot the node. + +If you have already performed this step and are still receiving this +message, please reply so that we may investigate the problem. +""" + +# Load default message templates +message_templates = [ + {'message_id': 'Verify account', + 'subject': "Verify account registration", + 'template': """ +Please verify that you registered for a %(PLC_NAME)s account with the +username %(email)s by visiting: + +https://%(PLC_WWW_HOST)s:%(PLC_WWW_SSL_PORT)d/db/persons/register.php?id=%(person_id)d&key=%(verification_key)s + +You must wait for this account to be approved before you can begin using it, please be patient. + +If you did not register for a %(PLC_NAME)s account, please ignore this +message, or contact %(PLC_NAME)s Support <%(PLC_MAIL_SUPPORT_ADDRESS)s>. +""" + }, + + {'message_id': 'New PI account', + 'subject': "New PI account registration from %(first_name)s %(last_name)s <%(email)s> at %(site_name)s", + 'template': """ +%(first_name)s %(last_name)s <%(email)s> has signed up for a new +%(PLC_NAME)s account at %(site_name)s and has requested a PI role. PIs +are responsible for enabling user accounts, creating slices, and +ensuring that all users abide by the %(PLC_NAME)s Acceptable Use +Policy. + +Only %(PLC_NAME)s administrators may enable new PI accounts. 
If you +are a PI at %(site_name)s, please respond and indicate whether this +registration is acceptable. + +To view the request, visit: + +https://%(PLC_WWW_HOST)s:%(PLC_WWW_SSL_PORT)d/db/persons/index.php?id=%(person_id)d +""" + }, + + {'message_id': 'New account', + 'subject': "New account registration from %(first_name)s %(last_name)s <%(email)s> at %(site_name)s", + 'template': """ +%(first_name)s %(last_name)s <%(email)s> has signed up for a new +%(PLC_NAME)s account at %(site_name)s and has requested the following +roles: %(roles)s. + +To deny the request or enable the account, visit: + +https://%(PLC_WWW_HOST)s:%(PLC_WWW_SSL_PORT)d/db/persons/index.php?id=%(person_id)d +""" + }, + + {'message_id': 'Password reset requested', + 'subject': "Password reset requested", + 'template': """ +Someone has requested that the password of your %(PLC_NAME)s account +%(email)s be reset. If this person was you, you may continue with the +reset by visiting: + +https://%(PLC_WWW_HOST)s:%(PLC_WWW_SSL_PORT)d/db/persons/reset_password.php?id=%(person_id)d&key=%(verification_key)s + +If you did not request that your password be reset, please contact +%(PLC_NAME)s Support <%(PLC_MAIL_SUPPORT_ADDRESS)s>. Do not quote or +otherwise include any of this text in any correspondence. +""" + }, + + {'message_id': 'Password reset', + 'subject': "Password reset", + 'template': """ +The password of your %(PLC_NAME)s account %(email)s has been +temporarily reset to: + +%(password)s + +Please change it at as soon as possible by visiting: + +https://%(PLC_WWW_HOST)s:%(PLC_WWW_SSL_PORT)d/db/persons/index.php?id=%(person_id)d + +If you did not request that your password be reset, please contact +%(PLC_NAME)s Support <%(PLC_MAIL_SUPPORT_ADDRESS)s>. Do not quote or +otherwise include any of this text in any correspondence. 
+""" + }, + + # Boot Manager messages + {'message_id': "installfinished", + 'subject': "%(hostname)s completed installation", + 'template': """ +%(hostname)s just completed installation. + +The node should be usable in a couple of minutes if installation was +successful. +""" + }, + + {'message_id': "insufficientdisk", + 'subject': "%(hostname)s does not have sufficient disk space", + 'template': """ +%(hostname)s failed to boot because it does not have sufficent disk +space, or because its disk controller was not recognized. + +Please replace the current disk or disk controller or install +additional disks to meet the current hardware requirements. +""" + installfailed + }, + + {'message_id': "insufficientmemory", + 'subject': "%(hostname)s does not have sufficient memory", + 'template': """ +%(hostname)s failed to boot because it does not have sufficent +memory. + +Please install additional memory to meet the current hardware +requirements. +""" + installfailed + }, + + {'message_id': "authfail", + 'subject': "%(hostname)s failed to authenticate", + 'template': +""" +%(hostname)s failed to authenticate for the following reason: + +%(fault)s + +The most common reason for authentication failure is that the +authentication key stored in the node configuration file, does not +match the key on record. + +There are two possible steps to resolve the problem. + +1. If you have used an All-in-one BootCD that includes the plnode.txt file, + then please check your machine for any old boot media, either in the + floppy drive, or on a USB stick. It is likely that an old configuration + is being used instead of the new configuration stored on the BootCD. +Or, +2. 
If you are using Generic BootCD image, then regenerate the node + configuration file by visiting: + + https://%(PLC_WWW_HOST)s:%(PLC_WWW_SSL_PORT)d/db/nodes/?id=%(node_id)d + + Under 'Download', follow the 'Download plnode.txt file for %(hostname)s' + option, and save the downloaded file as plnode.txt on either a floppy + disk or a USB flash drive. Be sure the 'Boot State' is set to 'Boot', + and, then reboot the node. + +If you have already performed this step and are still receiving this +message, please reply so that we can help investigate the problem. +""" + }, + + {'message_id': "notinstalled", + 'subject': "%(hostname)s is not installed", + 'template': +""" +%(hostname)s failed to boot because it has either never been +installed, or the installation is corrupt. + +Please check if the hard drive has failed, and replace it if so. After +doing so, visit: + +https://%(PLC_WWW_HOST)s:%(PLC_WWW_SSL_PORT)d/db/nodes/?id=%(node_id)d + +Change the 'Boot State' to 'Reinstall', and then reboot the node. + +If you have already performed this step and are still receiving this +message, please reply so that we may investigate the problem. +""" + }, + + {'message_id': "missingkernel", + 'subject': "%(hostname)s is missing its production kernel", + 'template': +""" +%(hostname)s failed to boot because the filesystem is missing its production +kernel. + +No action is needed from you at this time; this message is merely +informational. + +https://%(PLC_WWW_HOST)s:%(PLC_WWW_SSL_PORT)d/db/nodes/?id=%(node_id)d + +We will investigate the problem shortly. +""" + }, + + {'message_id': "filesystemcorrupted", + 'subject': "%(hostname)s may have corrupt filesystem", + 'template': +""" +%(hostname)s failed to boot because the filesystem appears to be corrupted. + +No action is needed from you at this time; this message is merely +informational. + +https://%(PLC_WWW_HOST)s:%(PLC_WWW_SSL_PORT)d/db/nodes/?id=%(node_id)d + +We will investigate the problem shortly. 
+""" + }, + + {'message_id': "mountfailed", + 'subject': "%(hostname)s could not mount filesystem", + 'template': +""" +%(hostname)s failed to boot because the boot scripts could not mount the +filesystem. + +This could be for a number of reasons. No action is needed from you at this +time; this message is merely informational. + +https://%(PLC_WWW_HOST)s:%(PLC_WWW_SSL_PORT)d/db/nodes/?id=%(node_id)d + +We will investigate the problem shortly. +""" + }, + + {'message_id': "hostnamenotresolve", + 'subject': "%(hostname)s does not resolve", + 'template': +""" +%(hostname)s failed to boot because its hostname does not resolve, or +does resolve but does not match its configured IP address. + +Please check the network settings for the node, especially its +hostname, IP address, and DNS servers, by visiting: + +https://%(PLC_WWW_HOST)s:%(PLC_WWW_SSL_PORT)d/db/nodes/?id=%(node_id)d + +Correct any errors, and change the 'Boot State' to 'Reinstall', and then +reboot the node. + +If you have already performed this step and are still receiving this +message, please reply so that we may investigate the problem. +""" + }, + + # XXX N.B. I don't think these are necessary, since there's no + # way that the Boot Manager would even be able to contact the + # API to send these messages. + + {'message_id': "noconfig", + 'subject': "%(hostname)s does not have a configuration file", + 'template': """ +%(hostname)s failed to boot because it could not find a PlanetLab +configuration file. To create this file, visit: + +https://%(PLC_WWW_HOST)s:%(PLC_WWW_SSL_PORT)d/db/nodes/?id=%(node_id)d + +Click the Configuration File link, and save the downloaded file as +plnode.txt on either a floppy disk or a USB flash drive. Change the +'Boot State' to 'Reinstall', and then reboot the node. + +If you have already performed this step and are still receiving this +message, please reply so that we may investigate the problem. 
+""" + }, + + {'message_id': "nodetectednetwork", + 'subject': "%(hostname)s has unsupported network hardware", + 'template': +""" + +%(hostname)s failed to boot because it has network hardware that is +unsupported by the current production kernel. If it has booted +successfully in the past, please try re-installing it by visiting: + +https://%(PLC_WWW_HOST)s:%(PLC_WWW_SSL_PORT)d/db/nodes/?id=%(node_id)d + +Change the 'Boot State' to 'Reinstall', and then reboot the node. + +If you have already performed this step and are still receiving this +message, please reply so that we may investigate the problem. +""" + }, +] + +for message in message_templates: + SetMessage(message) diff --git a/db-config.d/099-hrns b/db-config.d/099-hrns new file mode 100644 index 0000000..879c9d0 --- /dev/null +++ b/db-config.d/099-hrns @@ -0,0 +1,6 @@ +# -*-python-*- +#################### +# quick and dirty, make sure all hrns are set on local nodes +# could/should get trashed somedy + +for node in GetNodes({'peer_id':None}): UpdateNode(node['node_id'],{'hostname':node['hostname']}) diff --git a/doc/DocBook.py b/doc/DocBook.py new file mode 100755 index 0000000..90b384d --- /dev/null +++ b/doc/DocBook.py @@ -0,0 +1,151 @@ +#!/usr/bin/python +# +# Generates a DocBook section documenting all PLCAPI methods on +# stdout. +# +# Mark Huang +# Copyright (C) 2006 The Trustees of Princeton University +# + +import xml.dom.minidom +from xml.dom.minidom import Element, Text +import codecs + +from PLC.Parameter import Parameter, Mixed, xmlrpc_type, python_type + +# xml.dom.minidom.Text.writexml adds surrounding whitespace to textual +# data when pretty-printing. Override this behavior. 
+class TrimText(Text): + """text""" + def __init__(self, text = None): + self.data = unicode(text) + + def writexml(self, writer, indent="", addindent="", newl=""): + Text.writexml(self, writer, "", "", "") + +class TrimTextElement(Element): + """text""" + def __init__(self, tagName, text = None): + Element.__init__(self, tagName) + if text is not None: + self.appendChild(TrimText(text)) + + def writexml(self, writer, indent="", addindent="", newl=""): + writer.write(indent) + Element.writexml(self, writer, "", "", "") + writer.write(newl) + +class simpleElement(TrimTextElement): pass + +class paraElement(simpleElement): + """text""" + def __init__(self, text = None): + simpleElement.__init__(self, 'para', text) + +class blockquoteElement(Element): + """
text......text
""" + def __init__(self, text = None): + Element.__init__(self, 'blockquote') + if text is not None: + # Split on blank lines + lines = [line.strip() for line in text.strip().split("\n")] + lines = "\n".join(lines) + paragraphs = lines.split("\n\n") + + for paragraph in paragraphs: + self.appendChild(paraElement(paragraph)) + +def param_type(param): + """Return the XML-RPC type of a parameter.""" + if isinstance(param, Mixed) and len(param): + subtypes = [param_type(subparam) for subparam in param] + return " or ".join(subtypes) + elif isinstance(param, (list, tuple, set)) and len(param): + return "array of " + " or ".join([param_type(subparam) for subparam in param]) + else: + return xmlrpc_type(python_type(param)) + +class paramElement(Element): + """An optionally named parameter.""" + def __init__(self, name, param): + # + Element.__init__(self, 'listitem') + + description = Element('para') + + if name: + description.appendChild(simpleElement('parameter', name)) + description.appendChild(TrimText(": ")) + + description.appendChild(TrimText(param_type(param))) + + if isinstance(param, (list, tuple, set)) and len(param) == 1: + param = param[0] + + if isinstance(param, Parameter): + description.appendChild(TrimText(", " + param.doc)) + param = param.type + + self.appendChild(description) + + if isinstance(param, dict): + itemizedlist = Element('itemizedlist') + self.appendChild(itemizedlist) + for name, subparam in param.iteritems(): + itemizedlist.appendChild(paramElement(name, subparam)) + + elif isinstance(param, (list, tuple, set)) and len(param): + itemizedlist = Element('itemizedlist') + self.appendChild(itemizedlist) + for subparam in param: + itemizedlist.appendChild(paramElement(None, subparam)) + +class DocBook: + + def __init__ (self,functions_list): + self.functions_list = functions_list + + def Process (self): + + for func in self.functions_list: + method = func.name + + if func.status == "deprecated": + continue + + (min_args, max_args, defaults) = 
func.args() + + section = Element('section') + section.setAttribute('id', func.name) + section.appendChild(simpleElement('title', func.name)) + + prototype = "%s (%s)" % (method, ", ".join(max_args)) + para = paraElement('Prototype:') + para.appendChild(blockquoteElement(prototype)) + section.appendChild(para) + + para = paraElement('Description:') + para.appendChild(blockquoteElement(func.__doc__)) + section.appendChild(para) + + para = paraElement('Allowed Roles:') + para.appendChild(blockquoteElement(", ".join(func.roles))) + section.appendChild(para) + + section.appendChild(paraElement('Parameters:')) + params = Element('itemizedlist') + if func.accepts: + for name, param, default in zip(max_args, func.accepts, defaults): + params.appendChild(paramElement(name, param)) + else: + listitem = Element('listitem') + listitem.appendChild(paraElement('None')) + params.appendChild(listitem) + section.appendChild(params) + + section.appendChild(paraElement('Returns:')) + returns = Element('itemizedlist') + returns.appendChild(paramElement(None, func.returns)) + section.appendChild(returns) + + print section.toprettyxml(encoding = "UTF-8") diff --git a/doc/DocBookLocal.py b/doc/DocBookLocal.py new file mode 100755 index 0000000..317330e --- /dev/null +++ b/doc/DocBookLocal.py @@ -0,0 +1,26 @@ +#!/usr/bin/env python + +from PLC.API import PLCAPI +from PLC.Faults import PLCInvalidAPIMethod +from DocBook import DocBook +import sys + +api = PLCAPI(None) +methods = api.all_methods +methods.sort() +good_apis = [] +bad_apis = [] +for method in methods: + try: + good_api = api.callable(method) + good_apis.append(good_api) + except PLCInvalidAPIMethod, e: + bad_apis.append((method,e)) + +DocBook(good_apis).Process() + +if len(bad_apis): + sys.stderr.write("UNEXPECTED: There are %d non-callable methods:\n"%(len(bad_apis))) + for bad_api,e in bad_apis: + sys.stderr.write("\t%s:%s\n" % (bad_api,e)) + sys.exit(-1) diff --git a/doc/Makefile b/doc/Makefile new file mode 100644 index 
0000000..508d3ac --- /dev/null +++ b/doc/Makefile @@ -0,0 +1,59 @@ +# +# (Re)builds API documentation +# +# Mark Huang +# Copyright (C) 2006 The Trustees of Princeton University +# + +all: PLCAPI.html + +# XML - as opposed to SGML - requires an identifier - see +# http://www.docbook.org/tdg/en/html/appb.html +# and, openjade opens http connections when using the official URL +# as an identifier; this is slow, and sometimes fails and breaks the build + +# locating locally installed docbook43 dtd - fedora-specific +remote-docbook-43 = http://www.oasis-open.org/docbook/xml/4.3/docbookx.dtd +local-docbook-43 = $(wildcard /usr/share/sgml/docbook/xml-dtd-4.3*/docbookx.dtd) +docbook-43=$(if $(local-docbook-43),$(local-docbook-43),$(remote-docbook-43)) + +PLCAPI.xml: PLCAPI.xml.in + $(if $(local-docbook-43), \ + echo Using locally installed DTD $(local-docbook-43), \ + echo WARNING - could not locate local DTD - using remote $(remote-docbook-43)) + sed -e "s,@DOCBOOK-43@,$(docbook-43)," $< > $@ + +.PLCAPI.xml.valid: Methods.xml + +API_SOURCES = ../PLC/__init__.py ../PLC/Methods/__init__.py + +Methods.xml: DocBook.py DocBookLocal.py $(API_SOURCES) + PYTHONPATH=.. 
./DocBookLocal.py > $@ + +# +# Documentation +# + +# Validate the XML +.%.xml.valid: %.xml + xmllint --valid --output $@ $< + +# Remove the temporary output file after compilation +.SECONDARY: .%.xml.valid + +# Compile it into other formats +FORMATS := dvi html man ps pdf rtf tex texi txt + +DOCBOOK2FLAGS := -V biblio-number=1 + +define docbook2 +%.$(1): %.xml .%.xml.valid + docbook2$(1) --nochunks $$(DOCBOOK2FLAGS) $$< +endef + +$(foreach format,$(FORMATS),$(eval $(call docbook2,$(format)))) + +clean: + rm -f $(patsubst %,*.%,$(FORMATS)) .*.xml.valid Methods.xml + +.PHONY: clean all diff --git a/doc/PLCAPI.xml.in b/doc/PLCAPI.xml.in new file mode 100644 index 0000000..02ed262 --- /dev/null +++ b/doc/PLCAPI.xml.in @@ -0,0 +1,585 @@ + + + +]> + + + + PlanetLab Central API Documentation + + + + Introduction + + The PlanetLab Central API (PLCAPI) is the interface through + which the PlanetLab Central database should be accessed and + maintained. The API is used by the website, by nodes, by automated + scripts, and by users to access and update information about + users, nodes, sites, slices, and other entities maintained by the + database. + +
+ Authentication + + The API should be accessed via XML-RPC over HTTPS. The API + supports the standard introspection calls system.listMethods, system.methodSignature, + and system.methodHelp, + and the standard batching call system.multicall. With the + exception of these calls, all PLCAPI calls take an + authentication structure as their first argument. All + authentication structures require the specification of + AuthMethod. If the documentation for a + call does not further specify the authentication structure, then + any of (but only) the following authentication structures may be + used: + + + + Session authentication. User sessions are typically + valid for 24 hours. Node sessions are valid until the next + reboot. Obtain a session key with GetSession using another form of + authentication, such as password or GnuPG + authentication. + + + + AuthMethodsession + sessionSession key + + + + + + Password authentication. + + + + AuthMethodpassword + UsernameUsername, typically an e-mail address + AuthStringAuthentication string, typically a password + + + + + + GnuPG authentication. Users may upload a GPG public key + using AddPersonKey. Peer + GPG keys should be added with AddPeer or UpdatePeer. + + + + + AuthMethodgpg + namePeer or user name + signatureGnuPG signature of + the canonicalized + XML-RPC + representation of the rest of the arguments to the + call. + + + + + + Anonymous authentication. + + + + AuthMethodanonymous + + + + + +
+ +
+ Roles + + Some functions may only be called by users with certain + roles (see GetRoles), and others + may return different information to different callers depending + on the role(s) of the caller. + + The node and + anonymous roles are pseudo-roles. A function + that allows the node role may be called by + automated scripts running on a node, such as the Boot and Node + Managers. A function that allows the + anonymous role may be called by anyone; an + API authentication structure must still be specified (see ). +
+ +
+ Filters + + Most of the Get methods take a + filter argument. Filters may be arrays of integer (and sometimes + string) identifiers, or a struct representing a filter on the + attributes of the entities being queried. For example, + + +>>> GetNodes([1,2,3]) +>>> GetNodes({'node_id': [1,2,3]}) + + + + Would be equivalent queries. Attributes that are + themselves arrays (such as interface_ids + and slice_ids for nodes) cannot be used in + filters. + + Filters support a few extra features illustrated in the following examples. + +
+ Pattern Matching + * can be used in a text value and have the usual meaning, so all nodes in the fr domain can be obtained with: + GetNodes ( { 'hostname' : '*.fr' } ) + 
+ +
+ Negation + Fields starting with a ~ are negated, so non-local nodes can be fetched with: + GetNodes( { '~peer_id' : None } ) + +
+ +
+ Numeric comparisons + Strictly greater/smaller operations are achieved by prepending the field name like in: + GetEvents( { '>time' : 1178531418 } ) + + Greater/smaller or equal: + GetEvents( { ']event_id' : 2305 } ) + +
+ +
+ Filtering on a sequence field + A field starting with '&' or '|' should refer to a sequence type; + the semantics is then that the object's value (expected to be a list) + should contain all (&) or any (|) value specified in the corresponding + filter value. + GetPersons ( { '|role_ids' : [ 20, 40 ] } ) + GetPersons ( { '|roles' : ['tech', 'pi'] } ) + GetPersons ( { '&roles' : ['admin', 'tech'] } ) + GetPersons ( { '&roles' : 'tech' } ) + +
+ +
+ Sorting and Clipping + The following 3 special fields can be used to extract only a subset of the results for pagination: + GetNodes( { '-SORT' : 'hostname' , '-OFFSET' : 30 , '-LIMIT' : 25 } ) + 
+
+ +
+ All criteria / Any criteria + The default in the vast majority of the code is to select + objects that match ALL the criteria specified in the struct. It + is possible to search for objects that match ANY of these by + adding the special '-OR' key (the value is then ignored), as in: + GetPersons ( { '-OR' : 'anything', 'site_id':2, '&roles':['admin'] } ) + +
+ +
+ Tags + + The PLC API comes with a feature called + tags, that basically aims at supporting an + extensible data model. A few classes (as of this writing, Nodes, + Interfaces, Sites, Persons and Slices) are eligible for being dynamically + extended beyond the basic set of fields that are built into the + database schema. + + Historically, this is a generalization of the concept of + SliceAttribute , and the more recent + concept of InterfaceSetting , that with + release 5.0 have been renamed into SliceTag + and InterfaceTag , + respectively. + +
+ Low level + The low level interface to tags relies on the following items: + + + A TagType object basically models a + new column that needs to be added to other objects. In much + the same way as nodes are named through a + hostname , tagtypes are named with a + tagname, plus additional information + (category, + description). + + + + description is mostly informative, it + is used by the web interface to provide more details on + the meaning of that tag. + + + + + category is used in a variety of ways, + in the web interface again. Over time this has become a + means to attach various information to a tag type, so it is + used as some sort of a poorman's tag tag system :). + + + + + + The convention is to set in category a set of slash-separated + fields, like the following real examples demonstrate. + +>>> tagnames=['arch','fcdistro','hrn','hmac','exempt_node_until'] +>>> for tt in GetTagTypes(tagnames,['tagname','category']): +>>> ... print "tagname=%-18s category=%s"%(tt['tagname'], tt['category']) +tagname=hrn category=node/sfa +tagname=hmac category=slice/auth +tagname=exempt_node_until category=node/myops +tagname=fcdistro category=node/slice/config/ui/header=f/rank=w +tagname=arch category=node/slice/config/ui/header=A/rank=x + + + + + roles may also be + attached to a given tag_type (use AddRoleToTagType or + DeleteRoleFromTagType). This is an evolution over the former + system based on so-called 'min_role_id', and now any set of + roles may be mentioned. More importantly, each type (Node, + Person, ...) implements its own policy to let or not non-admin + callers change their tags. For example in the current + implementation, non-admin users can only change their own + person tags. See PLC/AuthorizeHelpers.py for that code. 
 + + + The low-level method for managing tags is then, once + the TagType is known to the system, to attach a value to, + say, a Node, by calling AddNodeTag , + and then as usual change this value with + UpdateNodeTag , or delete it with + DeleteNodeTag . + + + 
+ +
+ Accessors + A rather more convenient way to use tags is through + Accessors. This convenience is located in + PLC/Accessors , and allows you to easily define Get + or Set methods dedicated to a given tag. This is for instance + how the GetNodeArch and + SetNodeArch methods are implemented. These methods + greatly simplify tags manipulation as they take care of + + + Creating and enforcing TagTypes + ; each time you restart your plc, the tag_types + mentioned in accessor definitions are created and checked + (in terms of the category, description and roles defined in + the various calls to define_accessors). + + + Create or update the, say, NodeTag + object, as needed. + + In addition, an accessor definition mentions + get_roles (defaults to all_roles), and + set_roles . These values are used as + follows. get_roles is attached to the + Get accessor, so callers that do not have this role cannot run + the Get accessor. set_roles is attached + to the Set accessor, as well as to the corresponding TagType, + which in turn is used for checking write access to the tag + type. + + + + + Site-specific accessors can be + defined in + /usr/share/plc_api/PLC/Accessors/Accessors_site.py + and will be preserved across updates of the + plcapi rpm. + + + + The accessors mechanism does not currently support setting slice + tags that apply only on a given node or nodegroup. + +
+ +
+ Through regular Add/Get/Update methods + + Finally, tags may also get manipulated through the + AddNode, GetNodes, + and UpdateNode methods: + + + + The define_accessors function in the + Accessors factory has an optional argument named + expose_in_api . When this is set, the + corresponding tag becomes visible from the Add/Get/Update + methods almost as if it was a native tag. + + + + So for instance the following code would be legal and do as expected: + +# create a x86_64 node +>>> AddNode({'hostname':'pl1.foo.com','arch':'x86_64'}) +# get details for pl1.foo.com including tag 'arch' tag +>>> GetNodes(['pl1.foo.com'],['boot_state','node_type','arch']) +# set the 'deployment' tag +>>> UpdateNode('pl1.foo.com',{'deployment':'beta'}) +# get all alpha and beta nodes +>>> GetNodes({'deployment':'*a'},['hostname','deployment']) + + + + + The current limitations about tags, as opposed to native + fields, is that for performance, tags won't get returned + when using the implicit set of columns. So for instance: + +# get all details for 'pl1.foo.com' +>>> node=GetNodes(['pl1.foo.com'])[0] +# this did not return the 'arch' tag +>>> 'arch' in node +False + + + + + For a similar reason, any tag used in the filter argument will have to be mentioned in the list of returned columns as well. For example: + +# if 'hrn' is not part of the result, this does not work +>>> ns=GetNodes({'hrn':'ple.*'},['hostname']) +Database error b59e068c-589a-4ad5-9dd8-63cc38f2a2eb: +column "hrn" does not exist +LINE 1: ...M view_nodes WHERE deleted IS False AND (True AND hrn ILIKE ... +... abridged ... +# this can be worked around by just returning 'hrn' as well +>>> ns=GetNodes({'hrn':'ple.*'},['hrn','hostname']) + + + + + +
+
+ +
+ Nodegroups + + In earlier versions up to v4.2, NodeGroups + used to be defined extensively. So you would, + basically, create an empty nodegroup instance, and then use + AddNodeToNodeGroup or + DeleteNodeFromNodeGroup to manage the nodegroup's + contents. + + The new model has been redefined as follows. You now define + a nodegroup as the set of nodes for which a given Tag + has a given value, which are defined once and for good + when creating the NodeGroup object. + + So for instance for managing the set of nodes that are + running various levels of software code, PLC has defined two + NodeGroups named alpha + and beta . With the new model, we would now do + something like the following, using the built-in + deployment tag that is created for that purpose: + +### creating node groups +>>> AddNodeGroup('alphanodes','deployment','alpha') +21 +>>> AddNodeGroup('betanodes','deployment','beta') +22 +### checking contents (no node has 'deployment' set to either 'alpha' or 'beta' yet) +>>> for ng in GetNodeGroups(['alphanodes','betanodes'],['groupname','node_ids']): print ng +{'groupname': u'alphanodes', 'node_ids': []} +{'groupname': u'betanodes', 'node_ids': []} + +### displaying node ids +>>> for n in GetNodes({'hostname':'*.inria.fr'},['hostname','node_id']): print n +{'hostname': u'vnode01.inria.fr', 'node_id': 1} +{'hostname': u'vnode02.inria.fr', 'node_id': 2} + +### setting 'deployment' for these two nodes +>>> SetNodeDeployment('vnode01.inria.fr','alpha') +>>> for ng in GetNodeGroups(['alphanodes','betanodes'],['groupname','node_ids']): print ng +{'groupname': u'alphanodes', 'node_ids': [1]} +{'groupname': u'betanodes', 'node_ids': []} +>>> SetNodeDeployment('vnode02.inria.fr','beta') + +### checking contents again +>>> for ng in GetNodeGroups(['alphanodes','betanodes'],['groupname','node_ids']): print ng +{'groupname': u'alphanodes', 'node_ids': [1]} +{'groupname': u'betanodes', 'node_ids': [2]} + + + +
+ +
+ PlanetLab shell + + A command-line program called plcsh + simplifies authentication structure handling, and is useful for + scripting. This program is distributed as a Linux RPM called + PLCAPI and requires Python ≥2.4. + + +usage: plcsh [options] + +options: + -f CONFIG, --config=CONFIG + PLC configuration file + -h URL, --url=URL API URL + -c CACERT, --cacert=CACERT + API SSL certificate + -k INSECURE, --insecure=INSECURE + Do not check SSL certificate + -m METHOD, --method=METHOD + API authentication method + -s SESSION, --session=SESSION + API session key + -u USER, --user=USER API user name + -p PASSWORD, --password=PASSWORD + API password + -r ROLE, --role=ROLE API role + -x, --xmlrpc Use XML-RPC interface + --help show this help message and exit + + + Specify at least the API URL and your user name: + + +plcsh --url https://www.planet-lab.org/PLCAPI/ -u user@site.edu + + + You will be presented with a prompt. From here, you can + invoke API calls and omit the authentication structure, as it will + be filled in automatically. + + +user@site.edu connected using password authentication +Type "system.listMethods()" or "help(method)" for more information. +[user@site.edu]>>> AuthCheck() +1 +[user@site.edu]>>> GetNodes([121], ['node_id', 'hostname']) +[{'node_id': 121, 'hostname': 'planetlab-1.cs.princeton.edu'}] + + + As this program is actually a Python interpreter, you may + create variables, execute for loops, import other packages, etc., + directly on the command line as you would using the regular Python + shell. + + To use plcsh programmatically, import + the PLC.Shell module: + + +#!/usr/bin/python + +import sys + +# Default location that the PLCAPI RPM installs the PLC class +sys.path.append('/usr/share/plc_api') + +# Initialize shell environment. Shell() will define all PLCAPI methods +# in the specified namespace (specifying globals() will define them +# globally). 
+from PLC.Shell import Shell +plc = Shell(globals(), + url = "https://www.planet-lab.org/PLCAPI/", + user = "user@site.edu", + password = "password") + +# Both are equivalent +nodes = GetNodes([121], ['node_id', 'hostname']) +nodes = plc.GetNodes([121], ['node_id', 'hostname']) + +
+ +
+ Using regular python + + It is also possible to write simple regular-python scripts, + as illustrated in the example below. The only difference with the + examples above is that all API calls need to be passed a first + argument for authentication. This example would write in a file + the name of all the hosts attached to a given slice. + + +#!/usr/bin/env python + +import xmlrpclib + +plc_host='www.planet-lab.eu' + +slice_name='inria_heartbeat' + +auth = { 'AuthMethod' : 'password', + 'Username' : 'thierry.parmentelat@inria.fr', + 'AuthString' : 'xxxxxx', +} + +api_url="https://%s:443/PLCAPI/"%plc_host + +plc_api = xmlrpclib.ServerProxy(api_url,allow_none=True) + +# the slice's node ids +node_ids = plc_api.GetSlices(auth,slice_name,['node_ids'])[0]['node_ids'] + +# get hostname for these nodes +slice_nodes = plc_api.GetNodes(auth,node_ids,['hostname']) + +# store in a file; the 'with' statement closes the file automatically +with open('mynodes.txt','a') as f: + for node in slice_nodes: + f.write(node['hostname'] + "\n") + +
+ +
+ + + PlanetLab API Methods + + + &Methods; + + +
+ + + + diff --git a/extensions/README.txt b/extensions/README.txt new file mode 100644 index 0000000..f2ec3ac --- /dev/null +++ b/extensions/README.txt @@ -0,0 +1,14 @@ +Create a database extension by creating a tree like follows: + +- /usr/share/plc_api/extensions/-up* +Contains the SQL or script that sets up the extension's database needs. +This needs to execute +INSERT INTO plc_db_extensions VALUES ('', ); + +- /usr/share/plc_api/extensions/-down* +Contains the SQL or script that removes the extension from the database. + +- /usr/share/plc_api/extensions//migrations/[0-9][0-9][0-9]-{up,down}-* +Migration scripts for the extension. One of the scripts for each version +has to execute +UPDATE plc_db_extensions SET version = WHERE name = '' diff --git a/migrations/100-up-major-to-5.sql b/migrations/100-up-major-to-5.sql new file mode 100644 index 0000000..6c24f25 --- /dev/null +++ b/migrations/100-up-major-to-5.sql @@ -0,0 +1,9 @@ +-- myplc v5.0 starts with (5,100) +-- the expected former values would be (4,11) +-- +-- if you somehow start from a 4.3 not entirely up-dated to rc17, +-- then manually run +-- http://git.onelab.eu/?p=plcapi.git;a=blob;f=migrations/011-up-site-and-person-tags.sql;hb=refs/heads/4.3 +-- +UPDATE plc_db_version SET version = 5; +UPDATE plc_db_version SET subversion = 100; diff --git a/migrations/101-down-leases.sql b/migrations/101-down-leases.sql new file mode 100644 index 0000000..f72bbb0 --- /dev/null +++ b/migrations/101-down-leases.sql @@ -0,0 +1,15 @@ +-- revert cleanup on node_types +INSERT INTO node_types VALUES ('dummynet'); + +UPDATE nodes SET node_type='regular' WHERE node_type='reservable'; +DELETE FROM node_types WHERE node_type='reservable'; + +-- drop new tables +DROP VIEW view_leases; +DROP VIEW view_all_leases; +DROP TABLE leases; + +DROP FUNCTION IF EXISTS overlapping_trigger(); + +-------------------------------------------------- +UPDATE plc_db_version SET subversion = 100; diff --git a/migrations/101-up-leases.sql 
b/migrations/101-up-leases.sql new file mode 100644 index 0000000..0c9b538 --- /dev/null +++ b/migrations/101-up-leases.sql @@ -0,0 +1,80 @@ +-- we're using the 'lease' nodetype to model reservable nodes +INSERT INTO node_types VALUES ('reservable'); +-- also the dummynet node_type is obsolete +DELETE FROM node_types WHERE node_type='dummynet'; + +SET TIMEZONE TO 'UTC'; + +CREATE TABLE leases ( + lease_id serial PRIMARY KEY, -- id + t_from timestamp with time zone NOT NULL, -- from + t_until timestamp with time zone NOT NULL, -- until + node_id integer REFERENCES nodes NOT NULL, -- subject node + slice_id integer REFERENCES slices, -- slice owning the node +-- xxx for testing +-- CONSTRAINT future CHECK (t_from > CURRENT_TIMESTAMP), + CONSTRAINT start_before_end CHECK (t_until > t_from) +) WITH OIDS; + +-- +-- hook to check for overlapping time slots on a given node_id +-- xxx might use the builtin OVERLAPS feature +-- http://www.postgresql.org/docs/8.3/interactive/functions-datetime.html +-- +CREATE language plpgsql; +CREATE FUNCTION overlapping_trigger() RETURNS trigger AS $overlapping_trigger$ +BEGIN + PERFORM lease_id FROM leases WHERE + -- consider only leases on the same node + NEW.node_id = node_id + -- consider only non expired leases + AND t_until > CURRENT_TIMESTAMP + -- useful for updates + AND NEW.lease_id <> lease_id + -- new start date is in range + AND ( (NEW.t_from >= t_from AND NEW.t_from < t_until) + -- new end date is in range + OR (NEW.t_until > t_from AND NEW.t_until <= t_until) + -- complete overlap: new from before from, new until after until + OR (NEW.t_from <= t_from AND NEW.t_until >= t_until)); + IF FOUND THEN + RAISE EXCEPTION 'overlapping error: node % - slice %, % -> %', NEW.node_id, NEW.slice_id, NEW.t_from, NEW.t_until; + END IF; + RETURN NEW; +END; +$overlapping_trigger$ LANGUAGE plpgsql; + +CREATE + TRIGGER overlapping_trigger BEFORE INSERT OR UPDATE + ON leases FOR EACH ROW EXECUTE PROCEDURE overlapping_trigger(); + + +-- this is 
to let the API a chance to check for leases attached +-- to a node that is not 'reservable' +CREATE OR REPLACE VIEW view_all_leases AS +SELECT +leases.lease_id, +CAST(date_part('epoch', leases.t_from) AS bigint) AS t_from, +CAST(date_part('epoch', leases.t_until) AS bigint) AS t_until, +-- dbg +leases.t_from as s_from, +leases.t_until as s_until, +leases.node_id, +leases.slice_id, +nodes.hostname, +nodes.node_type, +slices.name, +slices.site_id, +CAST( date_part ('epoch',leases.t_until-leases.t_from) AS bigint) AS duration, +leases.t_until < CURRENT_TIMESTAMP as expired +FROM slices INNER JOIN leases USING (slice_id) +JOIN nodes USING (node_id); + +-- only the relevant leases +CREATE OR REPLACE VIEW view_leases AS +SELECT * FROM view_all_leases +WHERE node_type = 'reservable'; + + +-------------------------------------------------- +UPDATE plc_db_version SET subversion = 101; diff --git a/migrations/102-down-isvalid.sql b/migrations/102-down-isvalid.sql new file mode 100644 index 0000000..e147fd1 --- /dev/null +++ b/migrations/102-down-isvalid.sql @@ -0,0 +1,88 @@ +ALTER TABLE nodes DROP COLUMN last_download; +ALTER TABLE nodes DROP COLUMN last_pcu_reboot; +ALTER TABLE nodes DROP COLUMN last_pcu_confirmation; + +ALTER TABLE pcus DROP COLUMN last_updated timestamp without time zone; + +ALTER TABLE interfaces DROP COLUMN last_updated timestamp without time zone; + +DROP VIEW view_nodes; +CREATE OR REPLACE VIEW view_nodes AS +SELECT +nodes.node_id, +nodes.node_type, +nodes.hostname, +nodes.site_id, +nodes.boot_state, +nodes.run_level, +nodes.deleted, +nodes.model, +nodes.boot_nonce, +nodes.version, +nodes.verified, +nodes.ssh_rsa_key, +nodes.key, +CAST(date_part('epoch', nodes.date_created) AS bigint) AS date_created, +CAST(date_part('epoch', nodes.last_updated) AS bigint) AS last_updated, +CAST(date_part('epoch', nodes.last_contact) AS bigint) AS last_contact, +peer_node.peer_id, +peer_node.peer_node_id, +COALESCE((SELECT interface_ids FROM node_interfaces + WHERE 
node_interfaces.node_id = nodes.node_id), '{}') +AS interface_ids, +COALESCE((SELECT nodegroup_ids FROM node_nodegroups + WHERE node_nodegroups.node_id = nodes.node_id), '{}') +AS nodegroup_ids, +COALESCE((SELECT slice_ids FROM node_slices + WHERE node_slices.node_id = nodes.node_id), '{}') +AS slice_ids, +COALESCE((SELECT slice_ids_whitelist FROM node_slices_whitelist + WHERE node_slices_whitelist.node_id = nodes.node_id), '{}') +AS slice_ids_whitelist, +COALESCE((SELECT pcu_ids FROM node_pcus + WHERE node_pcus.node_id = nodes.node_id), '{}') +AS pcu_ids, +COALESCE((SELECT ports FROM node_pcus + WHERE node_pcus.node_id = nodes.node_id), '{}') +AS ports, +COALESCE((SELECT conf_file_ids FROM node_conf_files + WHERE node_conf_files.node_id = nodes.node_id), '{}') +AS conf_file_ids, +COALESCE((SELECT node_tag_ids FROM node_tags + WHERE node_tags.node_id = nodes.node_id), '{}') +AS node_tag_ids, +node_session.session_id AS session +FROM nodes +LEFT JOIN peer_node USING (node_id) +LEFT JOIN node_session USING (node_id); + +DROP VIEW view_pcus; +CREATE OR REPLACE VIEW view_pcus AS +SELECT +pcus.*, +COALESCE((SELECT node_ids FROM pcu_nodes WHERE pcu_nodes.pcu_id = pcus.pcu_id), '{}') AS node_ids, +COALESCE((SELECT ports FROM pcu_nodes WHERE pcu_nodes.pcu_id = pcus.pcu_id), '{}') AS ports +FROM pcus; + + +DROP VIEW view_interfaces; +CREATE OR REPLACE VIEW view_interfaces AS +SELECT +interfaces.interface_id, +interfaces.node_id, +interfaces.is_primary, +interfaces.type, +interfaces.method, +interfaces.ip, +interfaces.mac, +interfaces.gateway, +interfaces.network, +interfaces.broadcast, +interfaces.netmask, +interfaces.dns1, +interfaces.dns2, +interfaces.bwlimit, +interfaces.hostname, +COALESCE((SELECT interface_tag_ids FROM interface_tags WHERE interface_tags.interface_id = interfaces.interface_id), '{}') AS interface_tag_ids +FROM interfaces; + diff --git a/migrations/102-up-isvalid.sql b/migrations/102-up-isvalid.sql new file mode 100644 index 0000000..c1bd5c2 --- 
/dev/null +++ b/migrations/102-up-isvalid.sql @@ -0,0 +1,106 @@ +ALTER TABLE nodes ADD COLUMN last_boot timestamp without time zone; +ALTER TABLE nodes ADD COLUMN last_download timestamp without time zone; +ALTER TABLE nodes ADD COLUMN last_pcu_reboot timestamp without time zone; +ALTER TABLE nodes ADD COLUMN last_pcu_confirmation timestamp without time zone; + +ALTER TABLE pcus ADD COLUMN last_updated timestamp without time zone; + +ALTER TABLE interfaces ADD COLUMN last_updated timestamp without time zone; + +DROP VIEW view_nodes; +CREATE OR REPLACE VIEW view_nodes AS +SELECT +nodes.node_id, +nodes.node_type, +nodes.hostname, +nodes.site_id, +nodes.boot_state, +nodes.run_level, +nodes.deleted, +nodes.model, +nodes.boot_nonce, +nodes.version, +nodes.verified, +nodes.ssh_rsa_key, +nodes.key, +CAST(date_part('epoch', nodes.date_created) AS bigint) AS date_created, +CAST(date_part('epoch', nodes.last_updated) AS bigint) AS last_updated, +CAST(date_part('epoch', nodes.last_contact) AS bigint) AS last_contact, +CAST(date_part('epoch', nodes.last_boot) AS bigint) AS last_boot, +CAST(date_part('epoch', nodes.last_download) AS bigint) AS last_download, +CAST(date_part('epoch', nodes.last_pcu_reboot) AS bigint) AS last_pcu_reboot, +CAST(date_part('epoch', nodes.last_pcu_confirmation) AS bigint) AS last_pcu_confirmation, +peer_node.peer_id, +peer_node.peer_node_id, +COALESCE((SELECT interface_ids FROM node_interfaces + WHERE node_interfaces.node_id = nodes.node_id), '{}') +AS interface_ids, +COALESCE((SELECT nodegroup_ids FROM node_nodegroups + WHERE node_nodegroups.node_id = nodes.node_id), '{}') +AS nodegroup_ids, +COALESCE((SELECT slice_ids FROM node_slices + WHERE node_slices.node_id = nodes.node_id), '{}') +AS slice_ids, +COALESCE((SELECT slice_ids_whitelist FROM node_slices_whitelist + WHERE node_slices_whitelist.node_id = nodes.node_id), '{}') +AS slice_ids_whitelist, +COALESCE((SELECT pcu_ids FROM node_pcus + WHERE node_pcus.node_id = nodes.node_id), '{}') +AS 
pcu_ids, +COALESCE((SELECT ports FROM node_pcus + WHERE node_pcus.node_id = nodes.node_id), '{}') +AS ports, +COALESCE((SELECT conf_file_ids FROM node_conf_files + WHERE node_conf_files.node_id = nodes.node_id), '{}') +AS conf_file_ids, +COALESCE((SELECT node_tag_ids FROM node_tags + WHERE node_tags.node_id = nodes.node_id), '{}') +AS node_tag_ids, +node_session.session_id AS session +FROM nodes +LEFT JOIN peer_node USING (node_id) +LEFT JOIN node_session USING (node_id); + +-------------------------------------------------------------------------------- +DROP VIEW view_pcus; +CREATE OR REPLACE VIEW view_pcus AS +SELECT +pcus.pcu_id, +pcus.site_id, +pcus.hostname, +pcus.ip, +pcus.protocol, +pcus.username, +pcus.password, +pcus.model, +pcus.notes, +CAST(date_part('epoch', pcus.last_updated) AS bigint) AS last_updated, +COALESCE((SELECT node_ids FROM pcu_nodes WHERE pcu_nodes.pcu_id = pcus.pcu_id), '{}') AS node_ids, +COALESCE((SELECT ports FROM pcu_nodes WHERE pcu_nodes.pcu_id = pcus.pcu_id), '{}') AS ports +FROM pcus; + + +DROP VIEW view_interfaces; +CREATE OR REPLACE VIEW view_interfaces AS +SELECT +interfaces.interface_id, +interfaces.node_id, +interfaces.is_primary, +interfaces.type, +interfaces.method, +interfaces.ip, +interfaces.mac, +interfaces.gateway, +interfaces.network, +interfaces.broadcast, +interfaces.netmask, +interfaces.dns1, +interfaces.dns2, +interfaces.bwlimit, +interfaces.hostname, +CAST(date_part('epoch', interfaces.last_updated) AS bigint) AS last_updated, +COALESCE((SELECT interface_tag_ids FROM interface_tags WHERE interface_tags.interface_id = interfaces.interface_id), '{}') AS interface_tag_ids +FROM interfaces; + + +UPDATE plc_db_version SET subversion = 102; diff --git a/migrations/103-down-extensions.sql b/migrations/103-down-extensions.sql new file mode 100644 index 0000000..a0797df --- /dev/null +++ b/migrations/103-down-extensions.sql @@ -0,0 +1 @@ +DROP TABLE plc_db_extensions; diff --git a/migrations/103-up-extensions.sql 
b/migrations/103-up-extensions.sql new file mode 100644 index 0000000..c411049 --- /dev/null +++ b/migrations/103-up-extensions.sql @@ -0,0 +1,6 @@ +CREATE TABLE plc_db_extensions ( + name text NOT NULL PRIMARY KEY, + version integer NOT NULL +) WITH OIDS; + +UPDATE plc_db_version SET subversion = 103; diff --git a/migrations/104-down-noderole.sql b/migrations/104-down-noderole.sql new file mode 100644 index 0000000..b8b4adb --- /dev/null +++ b/migrations/104-down-noderole.sql @@ -0,0 +1,28 @@ +-- recreate the min_role_id column +ALTER TABLE tag_types ADD COLUMN min_role_id integer REFERENCES roles; + +-- compute the highest role available for each tag_type and store it as min_role_id +CREATE OR REPLACE VIEW tag_type_max_role_id AS +SELECT tag_type_id, max(role_id) from tag_type_role GROUP BY tag_type_id; + +-- tag_types that have at least one role in the new model get the max +UPDATE tag_types +SET min_role_id = tag_type_max_role_id.max +FROM tag_type_max_role_id WHERE tag_type_max_role_id.tag_type_id = tag_types.tag_type_id; + +-- the ones with no roles end up with min_role_id=10 +UPDATE tag_types +SET min_role_id=10 +WHERE min_role_id IS NULL; + +DROP VIEW tag_type_max_role_id; + +DROP TABLE tag_type_role CASCADE; +-- done by cascade +--DROP VIEW view_tag_types; +--DROP VIEW tag_type_roles; + +DELETE from roles WHERE name='node'; + +-------------------- +UPDATE plc_db_version SET subversion = 103; diff --git a/migrations/104-up-noderole.sql b/migrations/104-up-noderole.sql new file mode 100644 index 0000000..95c0e93 --- /dev/null +++ b/migrations/104-up-noderole.sql @@ -0,0 +1,124 @@ +-- changing the permission model on tags +-- we replace the single 'min_role_id' field attached to tag_types +-- with a set of roles + + +-- create a separate table to keep the tag-type x role relationship +CREATE TABLE tag_type_role ( + tag_type_id integer REFERENCES tag_types NOT NULL, -- tag_type ID + role_id integer REFERENCES roles NOT NULL, -- role ID + PRIMARY KEY
(tag_type_id, role_id) +); +CREATE INDEX tag_type_role_tag_type_id_idx ON tag_type_role (tag_type_id); +CREATE INDEX tag_type_role_role_id_idx ON tag_type_role (role_id); + +-- fill this from the former min_role_id field in the tag_types table +-- add all roles lower or equal to the min_role_id +INSERT INTO tag_type_role ("tag_type_id","role_id") SELECT tag_type_id,role_id FROM tag_types,roles where role_id<=min_role_id; + +-- we can now drop the min_role_id column +ALTER TABLE tag_types DROP COLUMN min_role_id CASCADE; + +-- create views to expose roles +CREATE OR REPLACE VIEW tag_type_roles AS +SELECT tag_type_id, +array_accum(role_id) AS role_ids, +array_accum(roles.name) AS roles +FROM tag_type_role +LEFT JOIN roles USING (role_id) +GROUP BY tag_type_id; + +CREATE OR REPLACE VIEW view_tag_types AS +SELECT +tag_types.tag_type_id, +tag_types.tagname, +tag_types.description, +tag_types.category, +COALESCE((SELECT role_ids FROM tag_type_roles WHERE tag_type_roles.tag_type_id = tag_types.tag_type_id), '{}') AS role_ids, +COALESCE((SELECT roles FROM tag_type_roles WHERE tag_type_roles.tag_type_id = tag_types.tag_type_id), '{}') AS roles +FROM tag_types; + + +-- remove min_role_id from the object views +CREATE OR REPLACE VIEW view_person_tags AS +SELECT +person_tag.person_tag_id, +person_tag.person_id, +persons.email, +tag_types.tag_type_id, +tag_types.tagname, +tag_types.description, +tag_types.category, +person_tag.value +FROM person_tag +INNER JOIN tag_types USING (tag_type_id) +INNER JOIN persons USING (person_id); + +CREATE OR REPLACE VIEW view_site_tags AS +SELECT +site_tag.site_tag_id, +site_tag.site_id, +sites.login_base, +tag_types.tag_type_id, +tag_types.tagname, +tag_types.description, +tag_types.category, +site_tag.value +FROM site_tag +INNER JOIN tag_types USING (tag_type_id) +INNER JOIN sites USING (site_id); + +CREATE OR REPLACE VIEW view_interface_tags AS +SELECT +interface_tag.interface_tag_id, +interface_tag.interface_id, +interfaces.ip, 
+tag_types.tag_type_id, +tag_types.tagname, +tag_types.description, +tag_types.category, +interface_tag.value +FROM interface_tag +INNER JOIN tag_types USING (tag_type_id) +INNER JOIN interfaces USING (interface_id); + +CREATE OR REPLACE VIEW view_node_tags AS +SELECT +node_tag.node_tag_id, +node_tag.node_id, +nodes.hostname, +tag_types.tag_type_id, +tag_types.tagname, +tag_types.description, +tag_types.category, +node_tag.value +FROM node_tag +INNER JOIN tag_types USING (tag_type_id) +INNER JOIN nodes USING (node_id); + +CREATE OR REPLACE VIEW view_slice_tags AS +SELECT +slice_tag.slice_tag_id, +slice_tag.slice_id, +slice_tag.node_id, +slice_tag.nodegroup_id, +tag_types.tag_type_id, +tag_types.tagname, +tag_types.description, +tag_types.category, +slice_tag.value, +slices.name +FROM slice_tag +INNER JOIN tag_types USING (tag_type_id) +INNER JOIN slices USING (slice_id); + +-- same for ilinks +CREATE OR REPLACE VIEW view_ilinks AS +SELECT * FROM tag_types +INNER JOIN ilink USING (tag_type_id); + +-- use this to allow nodes to set slice tags +INSERT INTO roles (role_id, name) VALUES (50, 'node'); + +-------------------- +UPDATE plc_db_version SET subversion = 104; diff --git a/migrations/105-down-timespent.sql b/migrations/105-down-timespent.sql new file mode 100644 index 0000000..d2d1f2c --- /dev/null +++ b/migrations/105-down-timespent.sql @@ -0,0 +1,60 @@ +ALTER TABLE nodes DROP COLUMN last_time_spent_online CASCADE; +ALTER TABLE nodes DROP COLUMN last_time_spent_offline CASCADE; + +DROP VIEW view_nodes; +CREATE OR REPLACE VIEW view_nodes AS +SELECT +nodes.node_id, +nodes.node_type, +nodes.hostname, +nodes.site_id, +nodes.boot_state, +nodes.run_level, +nodes.deleted, +nodes.model, +nodes.boot_nonce, +nodes.version, +nodes.verified, +nodes.ssh_rsa_key, +nodes.key, +CAST(date_part('epoch', nodes.date_created) AS bigint) AS date_created, +CAST(date_part('epoch', nodes.last_updated) AS bigint) AS last_updated, +CAST(date_part('epoch', nodes.last_contact) AS bigint) 
AS last_contact, +CAST(date_part('epoch', nodes.last_boot) AS bigint) AS last_boot, +CAST(date_part('epoch', nodes.last_download) AS bigint) AS last_download, +CAST(date_part('epoch', nodes.last_pcu_reboot) AS bigint) AS last_pcu_reboot, +CAST(date_part('epoch', nodes.last_pcu_confirmation) AS bigint) AS last_pcu_confirmation, +peer_node.peer_id, +peer_node.peer_node_id, +COALESCE((SELECT interface_ids FROM node_interfaces + WHERE node_interfaces.node_id = nodes.node_id), '{}') +AS interface_ids, +COALESCE((SELECT nodegroup_ids FROM node_nodegroups + WHERE node_nodegroups.node_id = nodes.node_id), '{}') +AS nodegroup_ids, +COALESCE((SELECT slice_ids FROM node_slices + WHERE node_slices.node_id = nodes.node_id), '{}') +AS slice_ids, +COALESCE((SELECT slice_ids_whitelist FROM node_slices_whitelist + WHERE node_slices_whitelist.node_id = nodes.node_id), '{}') +AS slice_ids_whitelist, +COALESCE((SELECT pcu_ids FROM node_pcus + WHERE node_pcus.node_id = nodes.node_id), '{}') +AS pcu_ids, +COALESCE((SELECT ports FROM node_pcus + WHERE node_pcus.node_id = nodes.node_id), '{}') +AS ports, +COALESCE((SELECT conf_file_ids FROM node_conf_files + WHERE node_conf_files.node_id = nodes.node_id), '{}') +AS conf_file_ids, +COALESCE((SELECT node_tag_ids FROM node_tags + WHERE node_tags.node_id = nodes.node_id), '{}') +AS node_tag_ids, +node_session.session_id AS session +FROM nodes +LEFT JOIN peer_node USING (node_id) +LEFT JOIN node_session USING (node_id); + +-------------------------------------------------------------------------------- + +UPDATE plc_db_version SET subversion = 104; diff --git a/migrations/105-up-timespent.sql b/migrations/105-up-timespent.sql new file mode 100644 index 0000000..2dbdfb9 --- /dev/null +++ b/migrations/105-up-timespent.sql @@ -0,0 +1,62 @@ +ALTER TABLE nodes ADD COLUMN last_time_spent_online integer; +ALTER TABLE nodes ADD COLUMN last_time_spent_offline integer; + +DROP VIEW view_nodes; +CREATE OR REPLACE VIEW view_nodes AS +SELECT 
+nodes.node_id, +nodes.node_type, +nodes.hostname, +nodes.site_id, +nodes.boot_state, +nodes.run_level, +nodes.deleted, +nodes.model, +nodes.boot_nonce, +nodes.version, +nodes.verified, +nodes.ssh_rsa_key, +nodes.key, +CAST(date_part('epoch', nodes.date_created) AS bigint) AS date_created, +CAST(date_part('epoch', nodes.last_updated) AS bigint) AS last_updated, +CAST(date_part('epoch', nodes.last_contact) AS bigint) AS last_contact, +CAST(date_part('epoch', nodes.last_boot) AS bigint) AS last_boot, +CAST(date_part('epoch', nodes.last_download) AS bigint) AS last_download, +CAST(date_part('epoch', nodes.last_pcu_reboot) AS bigint) AS last_pcu_reboot, +CAST(date_part('epoch', nodes.last_pcu_confirmation) AS bigint) AS last_pcu_confirmation, +nodes.last_time_spent_online, +nodes.last_time_spent_offline, +peer_node.peer_id, +peer_node.peer_node_id, +COALESCE((SELECT interface_ids FROM node_interfaces + WHERE node_interfaces.node_id = nodes.node_id), '{}') +AS interface_ids, +COALESCE((SELECT nodegroup_ids FROM node_nodegroups + WHERE node_nodegroups.node_id = nodes.node_id), '{}') +AS nodegroup_ids, +COALESCE((SELECT slice_ids FROM node_slices + WHERE node_slices.node_id = nodes.node_id), '{}') +AS slice_ids, +COALESCE((SELECT slice_ids_whitelist FROM node_slices_whitelist + WHERE node_slices_whitelist.node_id = nodes.node_id), '{}') +AS slice_ids_whitelist, +COALESCE((SELECT pcu_ids FROM node_pcus + WHERE node_pcus.node_id = nodes.node_id), '{}') +AS pcu_ids, +COALESCE((SELECT ports FROM node_pcus + WHERE node_pcus.node_id = nodes.node_id), '{}') +AS ports, +COALESCE((SELECT conf_file_ids FROM node_conf_files + WHERE node_conf_files.node_id = nodes.node_id), '{}') +AS conf_file_ids, +COALESCE((SELECT node_tag_ids FROM node_tags + WHERE node_tags.node_id = nodes.node_id), '{}') +AS node_tag_ids, +node_session.session_id AS session +FROM nodes +LEFT JOIN peer_node USING (node_id) +LEFT JOIN node_session USING (node_id); + 
+-------------------------------------------------------------------------------- + +UPDATE plc_db_version SET subversion = 105; diff --git a/migrations/README.txt b/migrations/README.txt new file mode 100644 index 0000000..0d14ca7 --- /dev/null +++ b/migrations/README.txt @@ -0,0 +1,13 @@ +Store here migration scripts, named +-up-.sql + handled as a sql script to be run against planetlab5, or +-up-.sh + which is assumed to be a shell script and is run as is + +Another assumption is that + * nnn-up- script will set subversion number to + * nnn-down script will set subversion number to -1 + +=== +See the migration script in plc.d/db for how this is used +=== diff --git a/migrations/extract-views.py b/migrations/extract-views.py new file mode 100755 index 0000000..4fd5090 --- /dev/null +++ b/migrations/extract-views.py @@ -0,0 +1,57 @@ +#!/usr/bin/env python + +import sys +import re + +class Schema: + + def __init__ (self,input,output=None): + self.input=input + self.output=output + + # left part is non-greedy + comment=re.compile("(.*?)--.*") + spaces=re.compile("^\s+(\S.*)") + view=re.compile("(?i)\s*create\s+(or\s+replace)?\s+view.*") + + def parse (self): + if self.output: + outfile = open(self.output, "a") + else: + outfile = sys.stdout + contents = file(self.input).read() + parts=contents.split(";") + for part in parts: + # normalize: remove comments, linebreaks, trailing spaces.. 
+ normalized='' + lines=part.split('\n'); + out_lines=[] + for line in lines: + # remove comment + match=Schema.comment.match(line) + if match: + line=match.group(1) + out_lines.append(line) + # get them together + out_line = " ".join(out_lines) + # remove trailing spaces + match=Schema.spaces.match(out_line) + if match: + out_line=match.group(1) + match=Schema.view.match(out_line) + if match: + outfile.write("{};\n".format(out_line)) + if outfile != sys.stdout: + outfile.close() + +if __name__ == '__main__': + if len(sys.argv) not in [2,3]: + print 'Usage:',sys.argv[0],'input [output]' + sys.exit(1) + input=sys.argv[1] + try: + output=sys.argv[2] + except: + output=None + Schema(input,output).parse() + diff --git a/php/phpxmlrpc/.gitignore b/php/phpxmlrpc/.gitignore new file mode 100644 index 0000000..1305331 --- /dev/null +++ b/php/phpxmlrpc/.gitignore @@ -0,0 +1,6 @@ +/.idea +composer.phar +composer.lock +/vendor/* +/tests/coverage/* +/build/* diff --git a/.travis.yml b/php/phpxmlrpc/.travis.yml similarity index 100% rename from .travis.yml rename to php/phpxmlrpc/.travis.yml diff --git a/ChangeLog b/php/phpxmlrpc/ChangeLog similarity index 100% rename from ChangeLog rename to php/phpxmlrpc/ChangeLog diff --git a/INSTALL.md b/php/phpxmlrpc/INSTALL.md similarity index 100% rename from INSTALL.md rename to php/phpxmlrpc/INSTALL.md diff --git a/NEWS b/php/phpxmlrpc/NEWS similarity index 100% rename from NEWS rename to php/phpxmlrpc/NEWS diff --git a/README.md b/php/phpxmlrpc/README.md similarity index 100% rename from README.md rename to php/phpxmlrpc/README.md diff --git a/composer.json b/php/phpxmlrpc/composer.json similarity index 100% rename from composer.json rename to php/phpxmlrpc/composer.json diff --git a/debugger/action.php b/php/phpxmlrpc/debugger/action.php similarity index 100% rename from debugger/action.php rename to php/phpxmlrpc/debugger/action.php diff --git a/debugger/common.php b/php/phpxmlrpc/debugger/common.php similarity index 100% rename 
from debugger/common.php rename to php/phpxmlrpc/debugger/common.php diff --git a/debugger/controller.php b/php/phpxmlrpc/debugger/controller.php similarity index 100% rename from debugger/controller.php rename to php/phpxmlrpc/debugger/controller.php diff --git a/debugger/index.php b/php/phpxmlrpc/debugger/index.php similarity index 100% rename from debugger/index.php rename to php/phpxmlrpc/debugger/index.php diff --git a/demo/client/agesort.php b/php/phpxmlrpc/demo/client/agesort.php similarity index 100% rename from demo/client/agesort.php rename to php/phpxmlrpc/demo/client/agesort.php diff --git a/demo/client/getstatename.php b/php/phpxmlrpc/demo/client/getstatename.php similarity index 100% rename from demo/client/getstatename.php rename to php/phpxmlrpc/demo/client/getstatename.php diff --git a/demo/client/introspect.php b/php/phpxmlrpc/demo/client/introspect.php similarity index 100% rename from demo/client/introspect.php rename to php/phpxmlrpc/demo/client/introspect.php diff --git a/demo/client/mail.php b/php/phpxmlrpc/demo/client/mail.php similarity index 100% rename from demo/client/mail.php rename to php/phpxmlrpc/demo/client/mail.php diff --git a/demo/client/proxy.php b/php/phpxmlrpc/demo/client/proxy.php similarity index 100% rename from demo/client/proxy.php rename to php/phpxmlrpc/demo/client/proxy.php diff --git a/demo/client/which.php b/php/phpxmlrpc/demo/client/which.php similarity index 100% rename from demo/client/which.php rename to php/phpxmlrpc/demo/client/which.php diff --git a/demo/client/wrap.php b/php/phpxmlrpc/demo/client/wrap.php similarity index 100% rename from demo/client/wrap.php rename to php/phpxmlrpc/demo/client/wrap.php diff --git a/demo/demo1.xml b/php/phpxmlrpc/demo/demo1.xml similarity index 100% rename from demo/demo1.xml rename to php/phpxmlrpc/demo/demo1.xml diff --git a/demo/demo2.xml b/php/phpxmlrpc/demo/demo2.xml similarity index 100% rename from demo/demo2.xml rename to php/phpxmlrpc/demo/demo2.xml diff --git 
a/demo/demo3.xml b/php/phpxmlrpc/demo/demo3.xml similarity index 100% rename from demo/demo3.xml rename to php/phpxmlrpc/demo/demo3.xml diff --git a/demo/server/discuss.php b/php/phpxmlrpc/demo/server/discuss.php similarity index 100% rename from demo/server/discuss.php rename to php/phpxmlrpc/demo/server/discuss.php diff --git a/demo/server/proxy.php b/php/phpxmlrpc/demo/server/proxy.php similarity index 100% rename from demo/server/proxy.php rename to php/phpxmlrpc/demo/server/proxy.php diff --git a/demo/server/server.php b/php/phpxmlrpc/demo/server/server.php similarity index 100% rename from demo/server/server.php rename to php/phpxmlrpc/demo/server/server.php diff --git a/demo/vardemo.php b/php/phpxmlrpc/demo/vardemo.php similarity index 100% rename from demo/vardemo.php rename to php/phpxmlrpc/demo/vardemo.php diff --git a/doc/api_changes_v4.md b/php/phpxmlrpc/doc/api_changes_v4.md similarity index 100% rename from doc/api_changes_v4.md rename to php/phpxmlrpc/doc/api_changes_v4.md diff --git a/doc/build/custom.fo.xsl b/php/phpxmlrpc/doc/build/custom.fo.xsl similarity index 100% rename from doc/build/custom.fo.xsl rename to php/phpxmlrpc/doc/build/custom.fo.xsl diff --git a/doc/build/custom.xsl b/php/phpxmlrpc/doc/build/custom.xsl similarity index 100% rename from doc/build/custom.xsl rename to php/phpxmlrpc/doc/build/custom.xsl diff --git a/doc/manual/images/debugger.gif b/php/phpxmlrpc/doc/manual/images/debugger.gif similarity index 100% rename from doc/manual/images/debugger.gif rename to php/phpxmlrpc/doc/manual/images/debugger.gif diff --git a/doc/manual/images/progxmlrpc.s.gif b/php/phpxmlrpc/doc/manual/images/progxmlrpc.s.gif similarity index 100% rename from doc/manual/images/progxmlrpc.s.gif rename to php/phpxmlrpc/doc/manual/images/progxmlrpc.s.gif diff --git a/doc/manual/phpxmlrpc_manual.adoc b/php/phpxmlrpc/doc/manual/phpxmlrpc_manual.adoc similarity index 100% rename from doc/manual/phpxmlrpc_manual.adoc rename to 
php/phpxmlrpc/doc/manual/phpxmlrpc_manual.adoc diff --git a/extras/rsakey.pem b/php/phpxmlrpc/extras/rsakey.pem similarity index 100% rename from extras/rsakey.pem rename to php/phpxmlrpc/extras/rsakey.pem diff --git a/extras/test.pl b/php/phpxmlrpc/extras/test.pl similarity index 100% rename from extras/test.pl rename to php/phpxmlrpc/extras/test.pl diff --git a/extras/test.py b/php/phpxmlrpc/extras/test.py similarity index 100% rename from extras/test.py rename to php/phpxmlrpc/extras/test.py diff --git a/extras/workspace.testPhpServer.fttb b/php/phpxmlrpc/extras/workspace.testPhpServer.fttb similarity index 100% rename from extras/workspace.testPhpServer.fttb rename to php/phpxmlrpc/extras/workspace.testPhpServer.fttb diff --git a/lib/xmlrpc.inc b/php/phpxmlrpc/lib/xmlrpc.inc similarity index 100% rename from lib/xmlrpc.inc rename to php/phpxmlrpc/lib/xmlrpc.inc diff --git a/lib/xmlrpc_wrappers.inc b/php/phpxmlrpc/lib/xmlrpc_wrappers.inc similarity index 100% rename from lib/xmlrpc_wrappers.inc rename to php/phpxmlrpc/lib/xmlrpc_wrappers.inc diff --git a/lib/xmlrpcs.inc b/php/phpxmlrpc/lib/xmlrpcs.inc similarity index 100% rename from lib/xmlrpcs.inc rename to php/phpxmlrpc/lib/xmlrpcs.inc diff --git a/license.txt b/php/phpxmlrpc/license.txt similarity index 100% rename from license.txt rename to php/phpxmlrpc/license.txt diff --git a/pakefile.php b/php/phpxmlrpc/pakefile.php similarity index 100% rename from pakefile.php rename to php/phpxmlrpc/pakefile.php diff --git a/src/Autoloader.php b/php/phpxmlrpc/src/Autoloader.php similarity index 100% rename from src/Autoloader.php rename to php/phpxmlrpc/src/Autoloader.php diff --git a/src/Client.php b/php/phpxmlrpc/src/Client.php similarity index 100% rename from src/Client.php rename to php/phpxmlrpc/src/Client.php diff --git a/src/Encoder.php b/php/phpxmlrpc/src/Encoder.php similarity index 100% rename from src/Encoder.php rename to php/phpxmlrpc/src/Encoder.php diff --git a/src/Helper/Charset.php 
b/php/phpxmlrpc/src/Helper/Charset.php similarity index 100% rename from src/Helper/Charset.php rename to php/phpxmlrpc/src/Helper/Charset.php diff --git a/src/Helper/Date.php b/php/phpxmlrpc/src/Helper/Date.php similarity index 100% rename from src/Helper/Date.php rename to php/phpxmlrpc/src/Helper/Date.php diff --git a/src/Helper/Http.php b/php/phpxmlrpc/src/Helper/Http.php similarity index 100% rename from src/Helper/Http.php rename to php/phpxmlrpc/src/Helper/Http.php diff --git a/src/Helper/Logger.php b/php/phpxmlrpc/src/Helper/Logger.php similarity index 100% rename from src/Helper/Logger.php rename to php/phpxmlrpc/src/Helper/Logger.php diff --git a/src/Helper/XMLParser.php b/php/phpxmlrpc/src/Helper/XMLParser.php similarity index 100% rename from src/Helper/XMLParser.php rename to php/phpxmlrpc/src/Helper/XMLParser.php diff --git a/src/PhpXmlRpc.php b/php/phpxmlrpc/src/PhpXmlRpc.php similarity index 100% rename from src/PhpXmlRpc.php rename to php/phpxmlrpc/src/PhpXmlRpc.php diff --git a/src/Request.php b/php/phpxmlrpc/src/Request.php similarity index 100% rename from src/Request.php rename to php/phpxmlrpc/src/Request.php diff --git a/src/Response.php b/php/phpxmlrpc/src/Response.php similarity index 100% rename from src/Response.php rename to php/phpxmlrpc/src/Response.php diff --git a/src/Server.php b/php/phpxmlrpc/src/Server.php similarity index 100% rename from src/Server.php rename to php/phpxmlrpc/src/Server.php diff --git a/src/Value.php b/php/phpxmlrpc/src/Value.php similarity index 100% rename from src/Value.php rename to php/phpxmlrpc/src/Value.php diff --git a/src/Wrapper.php b/php/phpxmlrpc/src/Wrapper.php similarity index 100% rename from src/Wrapper.php rename to php/phpxmlrpc/src/Wrapper.php diff --git a/tests/0CharsetTest.php b/php/phpxmlrpc/tests/0CharsetTest.php similarity index 100% rename from tests/0CharsetTest.php rename to php/phpxmlrpc/tests/0CharsetTest.php diff --git a/tests/1ParsingBugsTest.php 
b/php/phpxmlrpc/tests/1ParsingBugsTest.php similarity index 100% rename from tests/1ParsingBugsTest.php rename to php/phpxmlrpc/tests/1ParsingBugsTest.php diff --git a/tests/2InvalidHostTest.php b/php/phpxmlrpc/tests/2InvalidHostTest.php similarity index 100% rename from tests/2InvalidHostTest.php rename to php/phpxmlrpc/tests/2InvalidHostTest.php diff --git a/tests/3LocalhostTest.php b/php/phpxmlrpc/tests/3LocalhostTest.php similarity index 100% rename from tests/3LocalhostTest.php rename to php/phpxmlrpc/tests/3LocalhostTest.php diff --git a/tests/4LocalhostMultiTest.php b/php/phpxmlrpc/tests/4LocalhostMultiTest.php similarity index 100% rename from tests/4LocalhostMultiTest.php rename to php/phpxmlrpc/tests/4LocalhostMultiTest.php diff --git a/tests/5DemofilesTest.php b/php/phpxmlrpc/tests/5DemofilesTest.php similarity index 100% rename from tests/5DemofilesTest.php rename to php/phpxmlrpc/tests/5DemofilesTest.php diff --git a/tests/6DebuggerTest.php b/php/phpxmlrpc/tests/6DebuggerTest.php similarity index 100% rename from tests/6DebuggerTest.php rename to php/phpxmlrpc/tests/6DebuggerTest.php diff --git a/tests/7ExtraTest.php b/php/phpxmlrpc/tests/7ExtraTest.php similarity index 100% rename from tests/7ExtraTest.php rename to php/phpxmlrpc/tests/7ExtraTest.php diff --git a/tests/LocalFileTestCase.php b/php/phpxmlrpc/tests/LocalFileTestCase.php similarity index 100% rename from tests/LocalFileTestCase.php rename to php/phpxmlrpc/tests/LocalFileTestCase.php diff --git a/tests/benchmark.php b/php/phpxmlrpc/tests/benchmark.php similarity index 100% rename from tests/benchmark.php rename to php/phpxmlrpc/tests/benchmark.php diff --git a/tests/ci/travis/apache_vhost b/php/phpxmlrpc/tests/ci/travis/apache_vhost similarity index 100% rename from tests/ci/travis/apache_vhost rename to php/phpxmlrpc/tests/ci/travis/apache_vhost diff --git a/tests/ci/travis/apache_vhost_hhvm b/php/phpxmlrpc/tests/ci/travis/apache_vhost_hhvm similarity index 100% rename from 
tests/ci/travis/apache_vhost_hhvm rename to php/phpxmlrpc/tests/ci/travis/apache_vhost_hhvm diff --git a/tests/ci/travis/privoxy b/php/phpxmlrpc/tests/ci/travis/privoxy similarity index 100% rename from tests/ci/travis/privoxy rename to php/phpxmlrpc/tests/ci/travis/privoxy diff --git a/tests/ci/travis/setup_apache.sh b/php/phpxmlrpc/tests/ci/travis/setup_apache.sh similarity index 100% rename from tests/ci/travis/setup_apache.sh rename to php/phpxmlrpc/tests/ci/travis/setup_apache.sh diff --git a/tests/ci/travis/setup_apache_hhvm.sh b/php/phpxmlrpc/tests/ci/travis/setup_apache_hhvm.sh similarity index 100% rename from tests/ci/travis/setup_apache_hhvm.sh rename to php/phpxmlrpc/tests/ci/travis/setup_apache_hhvm.sh diff --git a/tests/ci/travis/setup_hhvm.sh b/php/phpxmlrpc/tests/ci/travis/setup_hhvm.sh similarity index 100% rename from tests/ci/travis/setup_hhvm.sh rename to php/phpxmlrpc/tests/ci/travis/setup_hhvm.sh diff --git a/tests/ci/travis/setup_php_fpm.sh b/php/phpxmlrpc/tests/ci/travis/setup_php_fpm.sh similarity index 100% rename from tests/ci/travis/setup_php_fpm.sh rename to php/phpxmlrpc/tests/ci/travis/setup_php_fpm.sh diff --git a/tests/ci/travis/setup_privoxy.sh b/php/phpxmlrpc/tests/ci/travis/setup_privoxy.sh similarity index 100% rename from tests/ci/travis/setup_privoxy.sh rename to php/phpxmlrpc/tests/ci/travis/setup_privoxy.sh diff --git a/tests/parse_args.php b/php/phpxmlrpc/tests/parse_args.php similarity index 100% rename from tests/parse_args.php rename to php/phpxmlrpc/tests/parse_args.php diff --git a/tests/phpunit_coverage.php b/php/phpxmlrpc/tests/phpunit_coverage.php similarity index 100% rename from tests/phpunit_coverage.php rename to php/phpxmlrpc/tests/phpunit_coverage.php diff --git a/tests/verify_compat.php b/php/phpxmlrpc/tests/verify_compat.php similarity index 100% rename from tests/verify_compat.php rename to php/phpxmlrpc/tests/verify_compat.php diff --git a/php/plc_api.php b/php/plc_api.php new file mode 100644 index 
0000000..9468c04 --- /dev/null +++ b/php/plc_api.php @@ -0,0 +1,375 @@ + +// Copyright (C) 2005-2006 The Trustees of Princeton University +// + +//ini_set('error_reporting', 1); + +/* + * May 2017 - Ciro Scognamiglio + * + * xmlrpc php module is not compatible anymore with the PLCAPI class, + * if the package phpxmlrpc is installed in the same dir it will be used instead + * + * https://github.com/gggeek/phpxmlrpc + * + * If the package is not found the php module XML-RPC is used if available + * + */ +if (file_exists(__DIR__ . '/phpxmlrpc/src/Autoloader.php')) { + include_once __DIR__ . '/phpxmlrpc/src/Autoloader.php'; + PhpXmlRpc\Autoloader::register(); +} + +require_once 'plc_config.php'; + +class PLCAPI +{ + var $auth; + var $server; + var $port; + var $path; + var $errors; + var $trace; + var $calls; + var $multicall; + + function PLCAPI($auth = NULL, + $server = PLC_API_HOST, + $port = PLC_API_PORT, + $path = PLC_API_PATH, + $cainfo = NULL) + { + $this->auth = $auth; + $this->server = $server; + $this->port = $port; + $this->path = $path; + $this->cainfo = $cainfo; + $this->errors = array(); + $this->trace = array(); + $this->calls = array(); + $this->multicall = false; + } + + function rec_join ($arg) { + if ( is_array($arg) ) { + $ret = ""; + foreach ( $arg as $i ) { + $l = $this->rec_join($i); + # ignore html code. + if ( $l[0] != "<" ) { $ret .= $l . ", "; } + } + return $ret; + } else { + settype($arg, "string"); + return $arg; + } + } + + function backtrace_php () { + $backtrace = debug_backtrace(); + $msg = ""; + $len = count($backtrace); + $cnt = 1; + foreach( array_reverse($backtrace) as $line ) { + $msg .= "File '". $line['file'] . "' line " . $line['line'] . "\n"; + $msg .= " " . $line['function'] . "( " . $this->rec_join($line['args']) . 
")\n"; + $cnt += 1; + if ( $cnt == $len ) { break; } + } + return $msg; + } + + function error_log($error_msg, $backtrace_level = 1) + { + $backtrace = debug_backtrace(); + $file = $backtrace[$backtrace_level]['file']; + $line = $backtrace[$backtrace_level]['line']; + + $error_line='PLCAPI error: ' . $error_msg ; + if ($file) $error_line .= ' in file ' . $file; + if ($line) $error_line .= ' on line ' . $line; + $this->errors[] = $error_line; + # TODO: setup a config variable for more detailed stack traces, for API errors. + if ( TRUE ){ + error_log($error_line); + } else { + error_log($this->backtrace_php()); + } + } + + function error() + { + if (empty($this->trace)) { + return NULL; + } else { + $last_trace = end($this->trace); + return implode("\\n", $last_trace['errors']); + } + } + + function trace() + { + return $this->trace; + } + + function microtime_float() + { + list($usec, $sec) = explode(" ", microtime()); + return ((float) $usec + (float) $sec); + } + + function call($method, $args = NULL) + { + if ($this->multicall) { + $this->calls[] = array ('methodName' => $method, + 'params' => $args); + return NULL; + } else { + return $this->internal_call($method, $args, 3); + } + } + + /* + * Use PhpXmlRpc\Value before encoding the request + */ + function xmlrpcValue($value) { + switch(gettype($value)) { + case 'array': + $members = array(); + foreach($value as $vk => $vv) { + $members[$vk] = $this->xmlrpcValue($vv); + } + + if ((array_key_exists(0, $value)) || (empty($value))) { + return new PhpXmlRpc\Value( + $members, + 'array' + ); + } else { + return new PhpXmlRpc\Value( + $members, + 'struct' + ); + } + + break; + case 'double': + return new PhpXmlRpc\Value($value, 'double'); + break; + case 'boolean': + return new PhpXmlRpc\Value($value, 'boolean'); + break; + case 'NULL': + case 'null': + return new PhpXmlRpc\Value(null, 'null'); + break; + case 'integer': + return new PhpXmlRpc\Value($value, 'int'); + break; + default: + return new 
PhpXmlRpc\Value($value); + break; + } + } + + function internal_call($method, $args = NULL, $backtrace_level = 2) + { + if (class_exists('PhpXmlRpc\\PhpXmlRpc')) { + return $this->internal_call_phpxmlrpc($method, $args, $backtrace_level); + } else { + return $this->internal_call_xmlrpc($method, $args, $backtrace_level); + } + } + + /* + * the new internal call, will use PhpXmlRpc + */ + function internal_call_phpxmlrpc($method, $args = NULL, $backtrace_level = 2) + { +// +// echo '
';
+//      var_dump($method);
+//      var_dump($args);
+//      echo '
'; + + PhpXmlRpc\PhpXmlRpc::$xmlrpc_null_extension = true; + + if ($this->port == 443) { + $url = 'https://'; + } else { + $url = 'http://'; + } + + // Set the URL for the request + $url .= $this->server . ':' . $this->port . '/' . $this->path; + + $client = new PhpXmlRpc\Client($url); + $client->setSSLVerifyPeer(false); + /* + * 1 -> not verify CN + * 2 -> verify CN (default) + */ + $client->setSSLVerifyHost(1); + + $values = $this->xmlrpcValue($args); + + $response = $client->send(new PhpXmlRpc\Request($method, $values)); + + + if (!$response->faultCode()) { + $encoder = new PhpXmlRpc\Encoder(); + $v = $encoder->decode($response->value()); + + return $v; + } else { + $this->error_log("An error occurred [" . $response->faultCode() . "] ". + $response->faultString()); + return NULL; + } + } + + /* + * The original internal call that uses php XML-RPC + */ + function internal_call_xmlrpc($method, $args = NULL, $backtrace_level = 2) + { + $curl = curl_init(); + + // Verify peer certificate if talking over SSL + if ($this->port == 443) { + curl_setopt($curl, CURLOPT_SSL_VERIFYPEER, 2); + if (!empty($this->cainfo)) { + curl_setopt($curl, CURLOPT_CAINFO, $this->cainfo); + } elseif (defined('PLC_API_CA_SSL_CRT')) { + curl_setopt($curl, CURLOPT_CAINFO, PLC_API_CA_SSL_CRT); + } + $url = 'https://'; + } else { + $url = 'http://'; + } + + // Set the URL for the request + $url .= $this->server . ':' . $this->port . '/' . $this->path; + curl_setopt($curl, CURLOPT_URL, $url); + + // Marshal the XML-RPC request as a POST variable. is an + // extension to the XML-RPC spec that is supported in our custom + // version of xmlrpc.so via the 'allow_null' output_encoding key. + $request = xmlrpc_encode_request($method, $args, array('null_extension')); + curl_setopt($curl, CURLOPT_POSTFIELDS, $request); + + // Construct the HTTP header + $header[] = 'Content-type: text/xml'; + $header[] = 'Content-length: ' . 
strlen($request); + curl_setopt($curl, CURLOPT_HTTPHEADER, $header); + + // Set some miscellaneous options + curl_setopt($curl, CURLOPT_TIMEOUT, 180); + + // Get the output of the request + curl_setopt($curl, CURLOPT_RETURNTRANSFER, 1); + $t0 = $this->microtime_float(); + $output = curl_exec($curl); + $t1 = $this->microtime_float(); + + if (curl_errno($curl)) { + $this->error_log('curl: ' . curl_error($curl), true); + $ret = NULL; + } else { + $ret = xmlrpc_decode($output); + if (is_array($ret) && xmlrpc_is_fault($ret)) { + $this->error_log('Fault Code ' . $ret['faultCode'] . ': ' . + $ret['faultString'], $backtrace_level, true); + $ret = NULL; + } + } + + curl_close($curl); + + $this->trace[] = array('method' => $method, + 'args' => $args, + 'runtime' => $t1 - $t0, + 'return' => $ret, + 'errors' => $this->errors); + $this->errors = array(); + + return $ret; + } + + function begin() + { + if (!empty($this->calls)) { + $this->error_log ('Warning: multicall already in progress'); + } + + $this->multicall = true; + } + + function xmlrpc_is_fault($arr) + { + // check if xmlrpc_is_fault exists + return is_array($arr) && array_key_exists('faultCode', $arr) && array_key_exists('faultString', $arr); + } + function commit() + { + if (!empty ($this->calls)) { + $ret = array(); + $results = $this->internal_call('system.multicall', array ($this->calls)); + foreach ($results as $result) { + if (is_array($result)) { + if ($this->xmlrpc_is_fault($result)) { + $this->error_log('Fault Code ' . $result['faultCode'] . ': ' . 
+ $result['faultString'], 1, true); + $ret[] = NULL; + // Thierry - march 30 2007 + // using $adm->error() is broken with begin/commit style + // this is because error() uses last item in trace and checks for ['errors'] + // when using begin/commit we do run internal_call BUT internal_call checks for + // multicall's result globally, not individual results, so ['errors'] comes empty + // I considered hacking internal_call + // to *NOT* maintain this->trace at all when invoked with multicall + // but it is too complex to get all values right + // so let's go for the hacky way, and just record individual errors at the right place + $this->trace[count($this->trace)-1]['errors'][] = end($this->errors); + } else { + $ret[] = $result[0]; + } + } else { + $ret[] = $result; + } + } + } else { + $ret = NULL; + } + + $this->calls = array(); + $this->multicall = false; + + return $ret; + } + + // + // PLCAPI Methods + // + + function __call($name, $args) + { + array_unshift($args, $this->auth); + return $this->call($name, $args); + } +} + +global $adm; + +$adm = new PLCAPI(array('AuthMethod' => "capability", + 'Username' => PLC_API_MAINTENANCE_USER, + 'AuthString' => PLC_API_MAINTENANCE_PASSWORD)); + +?> diff --git a/planetlab5.sql b/planetlab5.sql new file mode 100644 index 0000000..95eb02b --- /dev/null +++ b/planetlab5.sql @@ -0,0 +1,1369 @@ +-- +-- PlanetLab Central database schema +-- Version 5, PostgreSQL +-- +-- Aaron Klingaman +-- Reid Moran +-- Mark Huang +-- Tony Mack +-- Thierry Parmentelat +-- +-- Copyright (C) 2006 The Trustees of Princeton University +-- +-- NOTE: this file was first created for version 4.3, the filename might be confusing +-- + +SET client_encoding = 'UNICODE'; + +-------------------------------------------------------------------------------- +-- Version +-------------------------------------------------------------------------------- + +-- Database version +CREATE TABLE plc_db_version ( + version integer NOT NULL, + subversion integer NOT 
NULL DEFAULT 0 +) WITH OIDS; + +-- the migration scripts do not use the major 'version' number +-- so 5.0 sets subversion at 100 +-- in case your database misses the site and persons tags feature, +-- you might wish to first upgrade to 4.3-rc16 before moving to some 5.0 +-- or run the up script here +-- http://svn.planet-lab.org/svn/PLCAPI/branches/4.3/migrations/ + +INSERT INTO plc_db_version (version, subversion) VALUES (5, 100); + +-------------------------------------------------------------------------------- +-- Aggregates and store procedures +-------------------------------------------------------------------------------- + +-- Like MySQL GROUP_CONCAT(), this function aggregates values into a +-- PostgreSQL array. +CREATE AGGREGATE array_accum ( + sfunc = array_append, + basetype = anyelement, + stype = anyarray, + initcond = '{}' +); + +-------------------------------------------------------------------------------- +-- Roles +-------------------------------------------------------------------------------- + +-- Valid account roles +CREATE TABLE roles ( + role_id integer PRIMARY KEY, -- Role identifier + name text UNIQUE NOT NULL -- Role symbolic name +) WITH OIDS; +INSERT INTO roles (role_id, name) VALUES (10, 'admin'); +INSERT INTO roles (role_id, name) VALUES (20, 'pi'); +INSERT INTO roles (role_id, name) VALUES (30, 'user'); +INSERT INTO roles (role_id, name) VALUES (40, 'tech'); + +-------------------------------------------------------------------------------- +-- The building block for attaching tags +-------------------------------------------------------------------------------- +CREATE TABLE tag_types ( + + tag_type_id serial PRIMARY KEY, -- ID + tagname text UNIQUE NOT NULL, -- Tag Name + description text, -- Optional Description +-- this is deprecated -- see migrations/104* +-- starting with subversion 104, a tag type has a SET OF roles attached to it + min_role_id integer REFERENCES roles DEFAULT 10, -- set minimal role required + category 
text NOT NULL DEFAULT 'general' -- Free text for grouping tags together +) WITH OIDS; + +-------------------------------------------------------------------------------- +-- Accounts +-------------------------------------------------------------------------------- + +-- Accounts +CREATE TABLE persons ( + -- Mandatory + person_id serial PRIMARY KEY, -- Account identifier + email text NOT NULL, -- E-mail address + first_name text NOT NULL, -- First name + last_name text NOT NULL, -- Last name + deleted boolean NOT NULL DEFAULT false, -- Has been deleted + enabled boolean NOT NULL DEFAULT false, -- Has been disabled + + password text NOT NULL DEFAULT 'nopass', -- Password (md5crypted) + verification_key text, -- Reset password key + verification_expires timestamp without time zone, + + -- Optional + title text, -- Honorific + phone text, -- Telephone number + url text, -- Home page + bio text, -- Biography + + -- Timestamps + date_created timestamp without time zone NOT NULL DEFAULT CURRENT_TIMESTAMP, + last_updated timestamp without time zone NOT NULL DEFAULT CURRENT_TIMESTAMP +) WITH OIDS; +CREATE INDEX persons_email_idx ON persons (email); + +-------------------------------------------------------------------------------- +-- person tags +-------------------------------------------------------------------------------- +CREATE TABLE person_tag ( + person_tag_id serial PRIMARY KEY, -- ID + person_id integer REFERENCES persons NOT NULL, -- person id + tag_type_id integer REFERENCES tag_types, -- tag type id + value text -- value attached +) WITH OIDS; + +CREATE OR REPLACE VIEW person_tags AS +SELECT person_id, +array_accum(person_tag_id) AS person_tag_ids +FROM person_tag +GROUP BY person_id; + +CREATE OR REPLACE VIEW view_person_tags AS +SELECT +person_tag.person_tag_id, +person_tag.person_id, +persons.email, +tag_types.tag_type_id, +tag_types.tagname, +tag_types.description, +tag_types.category, +tag_types.min_role_id, +person_tag.value +FROM person_tag +INNER JOIN 
tag_types USING (tag_type_id) +INNER JOIN persons USING (person_id); + +-------------------------------------------------------------------------------- +-- Sites +-------------------------------------------------------------------------------- + +-- Sites +CREATE TABLE sites ( + -- Mandatory + site_id serial PRIMARY KEY, -- Site identifier + login_base text NOT NULL, -- Site slice prefix + name text NOT NULL, -- Site name + abbreviated_name text NOT NULL, -- Site abbreviated name + enabled boolean NOT NULL Default true, -- Is this site enabled + deleted boolean NOT NULL DEFAULT false, -- Has been deleted + is_public boolean NOT NULL DEFAULT true, -- Shows up in public lists + max_slices integer NOT NULL DEFAULT 0, -- Maximum number of slices + max_slivers integer NOT NULL DEFAULT 1000, -- Maximum number of instantiated slivers + + -- Optional + latitude real, + longitude real, + url text, + ext_consortium_id integer, -- external consortium id + + -- Timestamps + date_created timestamp without time zone NOT NULL DEFAULT CURRENT_TIMESTAMP, + last_updated timestamp without time zone NOT NULL DEFAULT CURRENT_TIMESTAMP +) WITH OIDS; +CREATE INDEX sites_login_base_idx ON sites (login_base); + +-- Account site membership +CREATE TABLE person_site ( + person_id integer REFERENCES persons NOT NULL, -- Account identifier + site_id integer REFERENCES sites NOT NULL, -- Site identifier + is_primary boolean NOT NULL DEFAULT false, -- Is the primary site for this account + PRIMARY KEY (person_id, site_id) +); +CREATE INDEX person_site_person_id_idx ON person_site (person_id); +CREATE INDEX person_site_site_id_idx ON person_site (site_id); + +-- Ordered by primary site first +CREATE OR REPLACE VIEW person_site_ordered AS +SELECT person_id, site_id +FROM person_site +ORDER BY is_primary DESC; + +-- Sites that each person is a member of +CREATE OR REPLACE VIEW person_sites AS +SELECT person_id, +array_accum(site_id) AS site_ids +FROM person_site_ordered +GROUP BY person_id; + +-- 
Accounts at each site +CREATE OR REPLACE VIEW site_persons AS +SELECT site_id, +array_accum(person_id) AS person_ids +FROM person_site +GROUP BY site_id; + +-------------------------------------------------------------------------------- +-- site tags +-------------------------------------------------------------------------------- + +CREATE TABLE site_tag ( + site_tag_id serial PRIMARY KEY, -- ID + site_id integer REFERENCES sites NOT NULL, -- site id + tag_type_id integer REFERENCES tag_types, -- tag type id + value text -- value attached +) WITH OIDS; + +CREATE OR REPLACE VIEW site_tags AS +SELECT site_id, +array_accum(site_tag_id) AS site_tag_ids +FROM site_tag +GROUP BY site_id; + +CREATE OR REPLACE VIEW view_site_tags AS +SELECT +site_tag.site_tag_id, +site_tag.site_id, +sites.login_base, +tag_types.tag_type_id, +tag_types.tagname, +tag_types.description, +tag_types.category, +tag_types.min_role_id, +site_tag.value +FROM site_tag +INNER JOIN tag_types USING (tag_type_id) +INNER JOIN sites USING (site_id); + +-------------------------------------------------------------------------------- +-- Mailing Addresses +-------------------------------------------------------------------------------- + +CREATE TABLE address_types ( + address_type_id serial PRIMARY KEY, -- Address type identifier + name text UNIQUE NOT NULL, -- Address type + description text -- Address type description +) WITH OIDS; + +-- Multi-rows insertion "insert .. 
values (row1), (row2)" is not supported by pgsql-8.1 +-- 'Billing' Used to be 'Site' +INSERT INTO address_types (name) VALUES ('Personal'); +INSERT INTO address_types (name) VALUES ('Shipping'); +INSERT INTO address_types (name) VALUES ('Billing'); + +-- Mailing addresses +CREATE TABLE addresses ( + address_id serial PRIMARY KEY, -- Address identifier + line1 text NOT NULL, -- Address line 1 + line2 text, -- Address line 2 + line3 text, -- Address line 3 + city text NOT NULL, -- City + state text NOT NULL, -- State or province + postalcode text NOT NULL, -- Postal code + country text NOT NULL -- Country +) WITH OIDS; + +-- Each mailing address can be one of several types +CREATE TABLE address_address_type ( + address_id integer REFERENCES addresses NOT NULL, -- Address identifier + address_type_id integer REFERENCES address_types NOT NULL, -- Address type + PRIMARY KEY (address_id, address_type_id) +) WITH OIDS; +CREATE INDEX address_address_type_address_id_idx ON address_address_type (address_id); +CREATE INDEX address_address_type_address_type_id_idx ON address_address_type (address_type_id); + +CREATE OR REPLACE VIEW address_address_types AS +SELECT address_id, +array_accum(address_type_id) AS address_type_ids, +array_accum(address_types.name) AS address_types +FROM address_address_type +LEFT JOIN address_types USING (address_type_id) +GROUP BY address_id; + +CREATE TABLE site_address ( + site_id integer REFERENCES sites NOT NULL, -- Site identifier + address_id integer REFERENCES addresses NOT NULL, -- Address identifier + PRIMARY KEY (site_id, address_id) +) WITH OIDS; +CREATE INDEX site_address_site_id_idx ON site_address (site_id); +CREATE INDEX site_address_address_id_idx ON site_address (address_id); + +CREATE OR REPLACE VIEW site_addresses AS +SELECT site_id, +array_accum(address_id) AS address_ids +FROM site_address +GROUP BY site_id; + +-------------------------------------------------------------------------------- +-- Authentication Keys 
+-------------------------------------------------------------------------------- + +-- Valid key types +CREATE TABLE key_types ( + key_type text PRIMARY KEY -- Key type +) WITH OIDS; +INSERT INTO key_types (key_type) VALUES ('ssh'); + +-- Authentication keys +CREATE TABLE keys ( + key_id serial PRIMARY KEY, -- Key identifier + key_type text REFERENCES key_types NOT NULL, -- Key type + key text NOT NULL, -- Key material + is_blacklisted boolean NOT NULL DEFAULT false -- Has been blacklisted +) WITH OIDS; + +-- Account authentication key(s) +CREATE TABLE person_key ( + key_id integer REFERENCES keys PRIMARY KEY, -- Key identifier + person_id integer REFERENCES persons NOT NULL -- Account identifier +) WITH OIDS; +CREATE INDEX person_key_person_id_idx ON person_key (person_id); + +CREATE OR REPLACE VIEW person_keys AS +SELECT person_id, +array_accum(key_id) AS key_ids +FROM person_key +GROUP BY person_id; + +-------------------------------------------------------------------------------- +-- Account roles +-------------------------------------------------------------------------------- + +CREATE TABLE person_role ( + person_id integer REFERENCES persons NOT NULL, -- Account identifier + role_id integer REFERENCES roles NOT NULL, -- Role identifier + PRIMARY KEY (person_id, role_id) +) WITH OIDS; +CREATE INDEX person_role_person_id_idx ON person_role (person_id); + +-- Account roles +CREATE OR REPLACE VIEW person_roles AS +SELECT person_id, +array_accum(role_id) AS role_ids, +array_accum(roles.name) AS roles +FROM person_role +LEFT JOIN roles USING (role_id) +GROUP BY person_id; + +-------------------------------------------------------------------------------- +-- Nodes +-------------------------------------------------------------------------------- + +-- Valid node boot states (Nodes.py expect max length to be 20) +CREATE TABLE boot_states ( + boot_state text PRIMARY KEY +) WITH OIDS; +INSERT INTO boot_states (boot_state) VALUES ('boot'); +INSERT INTO boot_states 
(boot_state) VALUES ('safeboot');
+INSERT INTO boot_states (boot_state) VALUES ('reinstall');
+INSERT INTO boot_states (boot_state) VALUES ('disabled');
+
+-- Valid node run levels
+CREATE TABLE run_levels (
+    run_level text PRIMARY KEY
+) WITH OIDS;
+INSERT INTO run_levels (run_level) VALUES ('boot');
+INSERT INTO run_levels (run_level) VALUES ('safeboot');
+INSERT INTO run_levels (run_level) VALUES ('failboot');
+INSERT INTO run_levels (run_level) VALUES ('reinstall');
+
+-- Known node types (Nodes.py expect max length to be 20)
+CREATE TABLE node_types (
+    node_type text PRIMARY KEY
+) WITH OIDS;
+INSERT INTO node_types (node_type) VALUES ('regular');
+-- old dummynet stuff, to be removed
+INSERT INTO node_types (node_type) VALUES ('dummynet');
+
+-- Nodes
+CREATE TABLE nodes (
+    -- Mandatory
+    node_id serial PRIMARY KEY,                 -- Node identifier
+    node_type text REFERENCES node_types        -- node type
+        DEFAULT 'regular',
+
+    hostname text NOT NULL,                     -- Node hostname
+    site_id integer REFERENCES sites NOT NULL,  -- At which site
+    boot_state text REFERENCES boot_states NOT NULL -- Node boot state
+        DEFAULT 'reinstall',
+    run_level text REFERENCES run_levels DEFAULT NULL, -- Node Run Level
+    deleted boolean NOT NULL DEFAULT false,     -- Is deleted
+
+    -- Optional
+    model text,                                 -- Hardware make and model
+    boot_nonce text,                            -- Random nonce updated by Boot Manager
+    version text,                               -- Boot CD version string updated by Boot Manager
+    ssh_rsa_key text,                           -- SSH host key updated by Boot Manager
+    key text,                                   -- Node key generated when boot file is downloaded
+    verified boolean NOT NULL DEFAULT false,    -- whether or not the node & pcu are verified
+
+    -- Timestamps
+    date_created timestamp without time zone NOT NULL DEFAULT CURRENT_TIMESTAMP,
+    last_updated timestamp without time zone NOT NULL DEFAULT CURRENT_TIMESTAMP,
+    -- BUGFIX: column added; view_nodes selects
+    -- CAST(date_part('epoch', nodes.last_boot) AS bigint) and would fail
+    -- to be created without it
+    last_boot timestamp without time zone,
+    last_download timestamp without time zone,
+    last_pcu_reboot timestamp without time zone,
+    last_pcu_confirmation timestamp without time zone,
+    last_contact timestamp without time zone
+) WITH OIDS;
+CREATE INDEX nodes_hostname_idx ON nodes (hostname);
+CREATE INDEX nodes_site_id_idx ON nodes (site_id);
+
+-- Nodes at each site
+CREATE OR REPLACE VIEW site_nodes AS
+SELECT site_id,
+array_accum(node_id) AS node_ids
+FROM nodes
+WHERE deleted IS false
+GROUP BY site_id;
+
+--------------------------------------------------------------------------------
+-- node tags
+--------------------------------------------------------------------------------
+
+CREATE TABLE node_tag (
+    node_tag_id serial PRIMARY KEY,             -- ID
+    node_id integer REFERENCES nodes NOT NULL,  -- node id
+    tag_type_id integer REFERENCES tag_types,   -- tag type id
+    value text                                  -- value attached
+) WITH OIDS;
+
+--------------------------------------------------------------------------------
+-- (network) interfaces
+--------------------------------------------------------------------------------
+
+-- Valid network addressing schemes
+CREATE TABLE network_types (
+    type text PRIMARY KEY                       -- Addressing scheme
+) WITH OIDS;
+INSERT INTO network_types (type) VALUES ('ipv4');
+
+-- Valid network configuration methods
+CREATE TABLE network_methods (
+    method text PRIMARY KEY                     -- Configuration method
+) WITH OIDS;
+
+INSERT INTO network_methods (method) VALUES ('static');
+INSERT INTO network_methods (method) VALUES ('dhcp');
+INSERT INTO network_methods (method) VALUES ('proxy');
+INSERT INTO network_methods (method) VALUES ('tap');
+INSERT INTO network_methods (method) VALUES ('ipmi');
+INSERT INTO network_methods (method) VALUES ('unknown');
+
+-- Network interfaces
+CREATE TABLE interfaces (
+    -- Mandatory
+    interface_id serial PRIMARY KEY,            -- Network interface identifier
+    node_id integer REFERENCES nodes NOT NULL,  -- Which node
+    is_primary boolean NOT NULL DEFAULT false,  -- Is the primary interface for this node
+    type text REFERENCES network_types NOT NULL,        -- Addressing scheme
+    method text REFERENCES network_methods NOT NULL,    -- Configuration method
+
+    -- Optional, depending on type and 
method + ip text, -- IP address + mac text, -- MAC address + gateway text, -- Default gateway address + network text, -- Network address + broadcast text, -- Network broadcast address + netmask text, -- Network mask + dns1 text, -- Primary DNS server + dns2 text, -- Secondary DNS server + bwlimit integer, -- Bandwidth limit in bps + hostname text, -- Hostname of this interface + last_updated timestamp without time zone -- When the interface was last updated +) WITH OIDS; +CREATE INDEX interfaces_node_id_idx ON interfaces (node_id); + +-- Ordered by primary interface first +CREATE OR REPLACE VIEW interfaces_ordered AS +SELECT node_id, interface_id +FROM interfaces +ORDER BY is_primary DESC; + +-- Network interfaces on each node +CREATE OR REPLACE VIEW node_interfaces AS +SELECT node_id, +array_accum(interface_id) AS interface_ids +FROM interfaces_ordered +GROUP BY node_id; + +-------------------------------------------------------------------------------- +-- Interface tags (formerly known as interface settings) +-------------------------------------------------------------------------------- + +CREATE TABLE interface_tag ( + interface_tag_id serial PRIMARY KEY, -- Interface Setting Identifier + interface_id integer REFERENCES interfaces NOT NULL,-- the interface this applies to + tag_type_id integer REFERENCES tag_types NOT NULL, -- the setting type + value text -- value attached +) WITH OIDS; + +CREATE OR REPLACE VIEW interface_tags AS +SELECT interface_id, +array_accum(interface_tag_id) AS interface_tag_ids +FROM interface_tag +GROUP BY interface_id; + +CREATE OR REPLACE VIEW view_interface_tags AS +SELECT +interface_tag.interface_tag_id, +interface_tag.interface_id, +interfaces.ip, +tag_types.tag_type_id, +tag_types.tagname, +tag_types.description, +tag_types.category, +tag_types.min_role_id, +interface_tag.value +FROM interface_tag +INNER JOIN tag_types USING (tag_type_id) +INNER JOIN interfaces USING (interface_id); + +CREATE OR REPLACE VIEW view_interfaces AS 
+SELECT +interfaces.interface_id, +interfaces.node_id, +interfaces.is_primary, +interfaces.type, +interfaces.method, +interfaces.ip, +interfaces.mac, +interfaces.gateway, +interfaces.network, +interfaces.broadcast, +interfaces.netmask, +interfaces.dns1, +interfaces.dns2, +interfaces.bwlimit, +interfaces.hostname, +CAST(date_part('epoch', interfaces.last_updated) AS bigint) AS last_updated, +COALESCE((SELECT interface_tag_ids FROM interface_tags WHERE interface_tags.interface_id = interfaces.interface_id), '{}') AS interface_tag_ids +FROM interfaces; + +-------------------------------------------------------------------------------- +-- ilinks : links between interfaces +-------------------------------------------------------------------------------- +CREATE TABLE ilink ( + ilink_id serial PRIMARY KEY, -- id + tag_type_id integer REFERENCES tag_types, -- id of the tag type + src_interface_id integer REFERENCES interfaces not NULL, -- id of src interface + dst_interface_id integer REFERENCES interfaces NOT NULL, -- id of dst interface + value text -- optional value on the link +) WITH OIDS; + +CREATE OR REPLACE VIEW view_ilinks AS +SELECT * FROM tag_types +INNER JOIN ilink USING (tag_type_id); + +-- xxx TODO : expose to view_interfaces the set of ilinks a given interface is part of +-- this is needed for properly deleting these ilinks when an interface gets deleted +-- as this is not done yet, it prevents DeleteInterface, thus DeleteNode, thus DeleteSite +-- from working correctly when an iLink is set + +-------------------------------------------------------------------------------- +-- Node groups +-------------------------------------------------------------------------------- + +-- Node groups +CREATE TABLE nodegroups ( + nodegroup_id serial PRIMARY KEY, -- Group identifier + groupname text UNIQUE NOT NULL, -- Group name + tag_type_id integer REFERENCES tag_types, -- node is in nodegroup if it has this tag defined + -- can be null, make management faster & easier 
+ value text -- with this value attached +) WITH OIDS; + +-- xxx - first rough implem. similar to former semantics but might be slow +CREATE OR REPLACE VIEW nodegroup_node AS +SELECT nodegroup_id, node_id +FROM tag_types +JOIN node_tag +USING (tag_type_id) +JOIN nodegroups +USING (tag_type_id,value); + +CREATE OR REPLACE VIEW nodegroup_nodes AS +SELECT nodegroup_id, +array_accum(node_id) AS node_ids +FROM nodegroup_node +GROUP BY nodegroup_id; + +-- Node groups that each node is a member of +CREATE OR REPLACE VIEW node_nodegroups AS +SELECT node_id, +array_accum(nodegroup_id) AS nodegroup_ids +FROM nodegroup_node +GROUP BY node_id; + +-------------------------------------------------------------------------------- +-- Node configuration files +-------------------------------------------------------------------------------- + +CREATE TABLE conf_files ( + conf_file_id serial PRIMARY KEY, -- Configuration file identifier + enabled bool NOT NULL DEFAULT true, -- Configuration file is active + source text NOT NULL, -- Relative path on the boot server + -- where file can be downloaded + dest text NOT NULL, -- Absolute path where file should be installed + file_permissions text NOT NULL DEFAULT '0644', -- chmod(1) permissions + file_owner text NOT NULL DEFAULT 'root', -- chown(1) owner + file_group text NOT NULL DEFAULT 'root', -- chgrp(1) owner + preinstall_cmd text, -- Shell command to execute prior to installing + postinstall_cmd text, -- Shell command to execute after installing + error_cmd text, -- Shell command to execute if any error occurs + ignore_cmd_errors bool NOT NULL DEFAULT false, -- Install file anyway even if an error occurs + always_update bool NOT NULL DEFAULT false -- Always attempt to install file even if unchanged +) WITH OIDS; + +CREATE TABLE conf_file_node ( + conf_file_id integer REFERENCES conf_files NOT NULL, -- Configuration file identifier + node_id integer REFERENCES nodes NOT NULL, -- Node identifier + PRIMARY KEY (conf_file_id, node_id) +); 
+CREATE INDEX conf_file_node_conf_file_id_idx ON conf_file_node (conf_file_id); +CREATE INDEX conf_file_node_node_id_idx ON conf_file_node (node_id); + +-- Nodes linked to each configuration file +CREATE OR REPLACE VIEW conf_file_nodes AS +SELECT conf_file_id, +array_accum(node_id) AS node_ids +FROM conf_file_node +GROUP BY conf_file_id; + +-- Configuration files linked to each node +CREATE OR REPLACE VIEW node_conf_files AS +SELECT node_id, +array_accum(conf_file_id) AS conf_file_ids +FROM conf_file_node +GROUP BY node_id; + +CREATE TABLE conf_file_nodegroup ( + conf_file_id integer REFERENCES conf_files NOT NULL, -- Configuration file identifier + nodegroup_id integer REFERENCES nodegroups NOT NULL, -- Node group identifier + PRIMARY KEY (conf_file_id, nodegroup_id) +); +CREATE INDEX conf_file_nodegroup_conf_file_id_idx ON conf_file_nodegroup (conf_file_id); +CREATE INDEX conf_file_nodegroup_nodegroup_id_idx ON conf_file_nodegroup (nodegroup_id); + +-- Node groups linked to each configuration file +CREATE OR REPLACE VIEW conf_file_nodegroups AS +SELECT conf_file_id, +array_accum(nodegroup_id) AS nodegroup_ids +FROM conf_file_nodegroup +GROUP BY conf_file_id; + +-- Configuration files linked to each node group +CREATE OR REPLACE VIEW nodegroup_conf_files AS +SELECT nodegroup_id, +array_accum(conf_file_id) AS conf_file_ids +FROM conf_file_nodegroup +GROUP BY nodegroup_id; + +-------------------------------------------------------------------------------- +-- Power control units (PCUs) +-------------------------------------------------------------------------------- + +CREATE TABLE pcus ( + -- Mandatory + pcu_id serial PRIMARY KEY, -- PCU identifier + site_id integer REFERENCES sites NOT NULL, -- Site identifier + hostname text, -- Hostname, not necessarily unique + -- (multiple logical sites could use the same PCU) + ip text NOT NULL, -- IP, not necessarily unique + + -- Optional + protocol text, -- Protocol, e.g. 
ssh or https or telnet + username text, -- Username, if applicable + "password" text, -- Password, if applicable + model text, -- Model, e.g. BayTech or iPal + last_updated timestamp without time zone, + notes text -- Random notes +) WITH OIDS; +CREATE INDEX pcus_site_id_idx ON pcus (site_id); + +CREATE OR REPLACE VIEW site_pcus AS +SELECT site_id, +array_accum(pcu_id) AS pcu_ids +FROM pcus +GROUP BY site_id; + +CREATE TABLE pcu_node ( + pcu_id integer REFERENCES pcus NOT NULL, -- PCU identifier + node_id integer REFERENCES nodes NOT NULL, -- Node identifier + port integer NOT NULL, -- Port number + PRIMARY KEY (pcu_id, node_id), -- The same node cannot be controlled by different ports + UNIQUE (pcu_id, port) -- The same port cannot control multiple nodes +); +CREATE INDEX pcu_node_pcu_id_idx ON pcu_node (pcu_id); +CREATE INDEX pcu_node_node_id_idx ON pcu_node (node_id); + +CREATE OR REPLACE VIEW node_pcus AS +SELECT node_id, +array_accum(pcu_id) AS pcu_ids, +array_accum(port) AS ports +FROM pcu_node +GROUP BY node_id; + +CREATE OR REPLACE VIEW pcu_nodes AS +SELECT pcu_id, +array_accum(node_id) AS node_ids, +array_accum(port) AS ports +FROM pcu_node +GROUP BY pcu_id; + +-------------------------------------------------------------------------------- +-- Slices +-------------------------------------------------------------------------------- + +CREATE TABLE slice_instantiations ( + instantiation text PRIMARY KEY +) WITH OIDS; +INSERT INTO slice_instantiations (instantiation) VALUES ('not-instantiated'); -- Placeholder slice +INSERT INTO slice_instantiations (instantiation) VALUES ('plc-instantiated'); -- Instantiated by Node Manager +INSERT INTO slice_instantiations (instantiation) VALUES ('delegated'); -- Manually instantiated +INSERT INTO slice_instantiations (instantiation) VALUES ('nm-controller'); -- NM Controller + +-- Slices +CREATE TABLE slices ( + slice_id serial PRIMARY KEY, -- Slice identifier + site_id integer REFERENCES sites NOT NULL, -- Site 
identifier + + name text NOT NULL, -- Slice name + instantiation text REFERENCES slice_instantiations -- Slice state, e.g. plc-instantiated + NOT NULL DEFAULT 'plc-instantiated', + url text, -- Project URL + description text, -- Project description + + max_nodes integer NOT NULL DEFAULT 100, -- Maximum number of nodes that can be assigned to this slice + + creator_person_id integer REFERENCES persons, -- Creator + created timestamp without time zone NOT NULL -- Creation date + DEFAULT CURRENT_TIMESTAMP, + expires timestamp without time zone NOT NULL -- Expiration date + DEFAULT CURRENT_TIMESTAMP + '2 weeks', + + is_deleted boolean NOT NULL DEFAULT false +) WITH OIDS; +CREATE INDEX slices_site_id_idx ON slices (site_id); +CREATE INDEX slices_name_idx ON slices (name); + +-- Slivers +CREATE TABLE slice_node ( + slice_id integer REFERENCES slices NOT NULL, -- Slice identifier + node_id integer REFERENCES nodes NOT NULL, -- Node identifier + PRIMARY KEY (slice_id, node_id) +) WITH OIDS; +CREATE INDEX slice_node_slice_id_idx ON slice_node (slice_id); +CREATE INDEX slice_node_node_id_idx ON slice_node (node_id); + +-- Synonym for slice_node +CREATE OR REPLACE VIEW slivers AS +SELECT * FROM slice_node; + +-- Nodes in each slice +CREATE OR REPLACE VIEW slice_nodes AS +SELECT slice_id, +array_accum(node_id) AS node_ids +FROM slice_node +GROUP BY slice_id; + +-- Slices on each node +CREATE OR REPLACE VIEW node_slices AS +SELECT node_id, +array_accum(slice_id) AS slice_ids +FROM slice_node +GROUP BY node_id; + +-- Slices at each site +CREATE OR REPLACE VIEW site_slices AS +SELECT site_id, +array_accum(slice_id) AS slice_ids +FROM slices +WHERE is_deleted is false +GROUP BY site_id; + +-- Slice membership +CREATE TABLE slice_person ( + slice_id integer REFERENCES slices NOT NULL, -- Slice identifier + person_id integer REFERENCES persons NOT NULL, -- Account identifier + PRIMARY KEY (slice_id, person_id) +) WITH OIDS; +CREATE INDEX slice_person_slice_id_idx ON slice_person 
(slice_id); +CREATE INDEX slice_person_person_id_idx ON slice_person (person_id); + +-- Members of the slice +CREATE OR REPLACE VIEW slice_persons AS +SELECT slice_id, +array_accum(person_id) AS person_ids +FROM slice_person +GROUP BY slice_id; + +-- Slices of which each person is a member +CREATE OR REPLACE VIEW person_slices AS +SELECT person_id, +array_accum(slice_id) AS slice_ids +FROM slice_person +GROUP BY person_id; + +-------------------------------------------------------------------------------- +-- Slice whitelist +-------------------------------------------------------------------------------- +-- slice whitelist on nodes +CREATE TABLE node_slice_whitelist ( + node_id integer REFERENCES nodes NOT NULL, -- Node id of whitelist + slice_id integer REFERENCES slices NOT NULL, -- Slice id thats allowd on this node + PRIMARY KEY (node_id, slice_id) +) WITH OIDS; +CREATE INDEX node_slice_whitelist_node_id_idx ON node_slice_whitelist (node_id); +CREATE INDEX node_slice_whitelist_slice_id_idx ON node_slice_whitelist (slice_id); + +-- Slices on each node +CREATE OR REPLACE VIEW node_slices_whitelist AS +SELECT node_id, +array_accum(slice_id) AS slice_ids_whitelist +FROM node_slice_whitelist +GROUP BY node_id; + +-------------------------------------------------------------------------------- +-- Slice tags (formerly known as slice attributes) +-------------------------------------------------------------------------------- + +-- Slice/sliver attributes +CREATE TABLE slice_tag ( + slice_tag_id serial PRIMARY KEY, -- Slice attribute identifier + slice_id integer REFERENCES slices NOT NULL, -- Slice identifier + node_id integer REFERENCES nodes, -- Sliver attribute if set + nodegroup_id integer REFERENCES nodegroups, -- Node group attribute if set + tag_type_id integer REFERENCES tag_types NOT NULL, -- Attribute type identifier + value text +) WITH OIDS; +CREATE INDEX slice_tag_slice_id_idx ON slice_tag (slice_id); +CREATE INDEX slice_tag_node_id_idx ON slice_tag 
(node_id);
+CREATE INDEX slice_tag_nodegroup_id_idx ON slice_tag (nodegroup_id);
+
+--------------------------------------------------------------------------------
+-- Initscripts
+--------------------------------------------------------------------------------
+
+-- Initscripts
+CREATE TABLE initscripts (
+    initscript_id serial PRIMARY KEY,           -- Initscript identifier
+    name text NOT NULL,                         -- Initscript name
+    enabled bool NOT NULL DEFAULT true,         -- Initscript is active
+    script text NOT NULL,                       -- Initscript code
+    UNIQUE (name)
+) WITH OIDS;
+CREATE INDEX initscripts_name_idx ON initscripts (name);
+
+
+--------------------------------------------------------------------------------
+-- Peers
+--------------------------------------------------------------------------------
+
+-- Peers
+CREATE TABLE peers (
+    peer_id serial PRIMARY KEY,                 -- Peer identifier
+    peername text NOT NULL,                     -- Peer name
+    peer_url text NOT NULL,                     -- (HTTPS) URL of the peer PLCAPI interface
+    cacert text,                                -- (SSL) Public certificate of peer API server
+    key text,                                   -- (GPG) Public key used for authentication
+    shortname text,                             -- abbreviated name for displaying foreign objects
+    hrn_root text,                              -- root for this peer domain
+    deleted boolean NOT NULL DEFAULT false
+) WITH OIDS;
+CREATE INDEX peers_peername_idx ON peers (peername) WHERE deleted IS false;
+CREATE INDEX peers_shortname_idx ON peers (shortname) WHERE deleted IS false;
+
+-- Objects at each peer
+CREATE TABLE peer_site (
+    site_id integer REFERENCES sites PRIMARY KEY,   -- Local site identifier
+    peer_id integer REFERENCES peers NOT NULL,      -- Peer identifier
+    peer_site_id integer NOT NULL,                  -- Foreign site identifier at peer
+    UNIQUE (peer_id, peer_site_id)                  -- The same foreign site should not be cached twice
+) WITH OIDS;
+-- BUGFIX: index the mapping table itself; this previously read
+-- "ON peers (peer_id)" (copy-paste slip) — every sibling peer_* table
+-- (peer_person, peer_key, peer_node, peer_slice) indexes its own peer_id
+CREATE INDEX peer_site_peer_id_idx ON peer_site (peer_id);
+
+CREATE OR REPLACE VIEW peer_sites AS
+SELECT peer_id,
+array_accum(site_id) AS site_ids,
+array_accum(peer_site_id) AS peer_site_ids
+FROM peer_site
+GROUP BY 
peer_id; + +CREATE TABLE peer_person ( + person_id integer REFERENCES persons PRIMARY KEY, -- Local user identifier + peer_id integer REFERENCES peers NOT NULL, -- Peer identifier + peer_person_id integer NOT NULL, -- Foreign user identifier at peer + UNIQUE (peer_id, peer_person_id) -- The same foreign user should not be cached twice +) WITH OIDS; +CREATE INDEX peer_person_peer_id_idx ON peer_person (peer_id); + +CREATE OR REPLACE VIEW peer_persons AS +SELECT peer_id, +array_accum(person_id) AS person_ids, +array_accum(peer_person_id) AS peer_person_ids +FROM peer_person +GROUP BY peer_id; + +CREATE TABLE peer_key ( + key_id integer REFERENCES keys PRIMARY KEY, -- Local key identifier + peer_id integer REFERENCES peers NOT NULL, -- Peer identifier + peer_key_id integer NOT NULL, -- Foreign key identifier at peer + UNIQUE (peer_id, peer_key_id) -- The same foreign key should not be cached twice +) WITH OIDS; +CREATE INDEX peer_key_peer_id_idx ON peer_key (peer_id); + +CREATE OR REPLACE VIEW peer_keys AS +SELECT peer_id, +array_accum(key_id) AS key_ids, +array_accum(peer_key_id) AS peer_key_ids +FROM peer_key +GROUP BY peer_id; + +CREATE TABLE peer_node ( + node_id integer REFERENCES nodes PRIMARY KEY, -- Local node identifier + peer_id integer REFERENCES peers NOT NULL, -- Peer identifier + peer_node_id integer NOT NULL, -- Foreign node identifier + UNIQUE (peer_id, peer_node_id) -- The same foreign node should not be cached twice +) WITH OIDS; +CREATE INDEX peer_node_peer_id_idx ON peer_node (peer_id); + +CREATE OR REPLACE VIEW peer_nodes AS +SELECT peer_id, +array_accum(node_id) AS node_ids, +array_accum(peer_node_id) AS peer_node_ids +FROM peer_node +GROUP BY peer_id; + +CREATE TABLE peer_slice ( + slice_id integer REFERENCES slices PRIMARY KEY, -- Local slice identifier + peer_id integer REFERENCES peers NOT NULL, -- Peer identifier + peer_slice_id integer NOT NULL, -- Slice identifier at peer + UNIQUE (peer_id, peer_slice_id) -- The same foreign slice should 
not be cached twice +) WITH OIDS; +CREATE INDEX peer_slice_peer_id_idx ON peer_slice (peer_id); + +CREATE OR REPLACE VIEW peer_slices AS +SELECT peer_id, +array_accum(slice_id) AS slice_ids, +array_accum(peer_slice_id) AS peer_slice_ids +FROM peer_slice +GROUP BY peer_id; + +-------------------------------------------------------------------------------- +-- Authenticated sessions +-------------------------------------------------------------------------------- + +-- Authenticated sessions +CREATE TABLE sessions ( + session_id text PRIMARY KEY, -- Session identifier + expires timestamp without time zone +) WITH OIDS; + +-- People can have multiple sessions +CREATE TABLE person_session ( + person_id integer REFERENCES persons NOT NULL, -- Account identifier + session_id text REFERENCES sessions NOT NULL, -- Session identifier + PRIMARY KEY (person_id, session_id), + UNIQUE (session_id) -- Sessions are unique +) WITH OIDS; +CREATE INDEX person_session_person_id_idx ON person_session (person_id); + +-- Nodes can have only one session +CREATE TABLE node_session ( + node_id integer REFERENCES nodes NOT NULL, -- Node identifier + session_id text REFERENCES sessions NOT NULL, -- Session identifier + UNIQUE (node_id), -- Nodes can have only one session + UNIQUE (session_id) -- Sessions are unique +) WITH OIDS; + +------------------------------------------------------------------------------- +-- PCU Types +------------------------------------------------------------------------------ +CREATE TABLE pcu_types ( + pcu_type_id serial PRIMARY KEY, + model text NOT NULL , -- PCU model name + name text -- Full PCU model name +) WITH OIDS; +CREATE INDEX pcu_types_model_idx ON pcu_types (model); + +CREATE TABLE pcu_protocol_type ( + pcu_protocol_type_id serial PRIMARY KEY, + pcu_type_id integer REFERENCES pcu_types NOT NULL, -- PCU type identifier + port integer NOT NULL, -- PCU port + protocol text NOT NULL, -- Protocol + supported boolean NOT NULL DEFAULT True -- Does PLC support 
+) WITH OIDS; +CREATE INDEX pcu_protocol_type_pcu_type_id ON pcu_protocol_type (pcu_type_id); + + +CREATE OR REPLACE VIEW pcu_protocol_types AS +SELECT pcu_type_id, +array_accum(pcu_protocol_type_id) as pcu_protocol_type_ids +FROM pcu_protocol_type +GROUP BY pcu_type_id; + +-------------------------------------------------------------------------------- +-- Message templates +-------------------------------------------------------------------------------- + +CREATE TABLE messages ( + message_id text PRIMARY KEY, -- Message name + subject text, -- Message summary + template text, -- Message template + enabled bool NOT NULL DEFAULT true -- Whether message is enabled +) WITH OIDS; + +-------------------------------------------------------------------------------- +-- Events +-------------------------------------------------------------------------------- + +-- Events +CREATE TABLE events ( + event_id serial PRIMARY KEY, -- Event identifier + person_id integer REFERENCES persons, -- Person responsible for event, if any + node_id integer REFERENCES nodes, -- Node responsible for event, if any + auth_type text, -- Type of auth used. i.e. 
AuthMethod + fault_code integer NOT NULL DEFAULT 0, -- Did this event result in error + call_name text NOT NULL, -- Call responsible for this event + call text NOT NULL, -- Call responsible for this event, including parameters + message text, -- High level description of this event + runtime float DEFAULT 0, -- Event run time + time timestamp without time zone NOT NULL -- Event timestamp + DEFAULT CURRENT_TIMESTAMP +) WITH OIDS; + +-- Database object(s) that may have been affected by a particular event +CREATE TABLE event_object ( + event_id integer REFERENCES events NOT NULL, -- Event identifier + object_id integer NOT NULL, -- Object identifier + object_type text NOT NULL Default 'Unknown' -- What type of object is this event affecting +) WITH OIDS; +CREATE INDEX event_object_event_id_idx ON event_object (event_id); +CREATE INDEX event_object_object_id_idx ON event_object (object_id); +CREATE INDEX event_object_object_type_idx ON event_object (object_type); + +CREATE OR REPLACE VIEW event_objects AS +SELECT event_id, +array_accum(object_id) AS object_ids, +array_accum(object_type) AS object_types +FROM event_object +GROUP BY event_id; + +-------------------------------------------------------------------------------- +-- Useful views +-------------------------------------------------------------------------------- +CREATE OR REPLACE VIEW view_pcu_types AS +SELECT +pcu_types.pcu_type_id, +pcu_types.model, +pcu_types.name, +COALESCE((SELECT pcu_protocol_type_ids FROM pcu_protocol_types + WHERE pcu_protocol_types.pcu_type_id = pcu_types.pcu_type_id), '{}') +AS pcu_protocol_type_ids +FROM pcu_types; + +-------------------------------------------------------------------------------- +CREATE OR REPLACE VIEW view_events AS +SELECT +events.event_id, +events.person_id, +events.node_id, +events.auth_type, +events.fault_code, +events.call_name, +events.call, +events.message, +events.runtime, +CAST(date_part('epoch', events.time) AS bigint) AS time, +COALESCE((SELECT 
object_ids FROM event_objects WHERE event_objects.event_id = events.event_id), '{}') AS object_ids, +COALESCE((SELECT object_types FROM event_objects WHERE event_objects.event_id = events.event_id), '{}') AS object_types +FROM events; + +CREATE OR REPLACE VIEW view_event_objects AS +SELECT +events.event_id, +events.person_id, +events.node_id, +events.fault_code, +events.call_name, +events.call, +events.message, +events.runtime, +CAST(date_part('epoch', events.time) AS bigint) AS time, +event_object.object_id, +event_object.object_type +FROM events LEFT JOIN event_object USING (event_id); + +-------------------------------------------------------------------------------- +CREATE OR REPLACE VIEW view_persons AS +SELECT +persons.person_id, +persons.email, +persons.first_name, +persons.last_name, +persons.deleted, +persons.enabled, +persons.password, +persons.verification_key, +CAST(date_part('epoch', persons.verification_expires) AS bigint) AS verification_expires, +persons.title, +persons.phone, +persons.url, +persons.bio, +CAST(date_part('epoch', persons.date_created) AS bigint) AS date_created, +CAST(date_part('epoch', persons.last_updated) AS bigint) AS last_updated, +peer_person.peer_id, +peer_person.peer_person_id, +COALESCE((SELECT role_ids FROM person_roles WHERE person_roles.person_id = persons.person_id), '{}') AS role_ids, +COALESCE((SELECT roles FROM person_roles WHERE person_roles.person_id = persons.person_id), '{}') AS roles, +COALESCE((SELECT site_ids FROM person_sites WHERE person_sites.person_id = persons.person_id), '{}') AS site_ids, +COALESCE((SELECT key_ids FROM person_keys WHERE person_keys.person_id = persons.person_id), '{}') AS key_ids, +COALESCE((SELECT slice_ids FROM person_slices WHERE person_slices.person_id = persons.person_id), '{}') AS slice_ids, +COALESCE((SELECT person_tag_ids FROM person_tags WHERE person_tags.person_id = persons.person_id), '{}') AS person_tag_ids +FROM persons +LEFT JOIN peer_person USING (person_id); + 
+-------------------------------------------------------------------------------- +CREATE OR REPLACE VIEW view_peers AS +SELECT +peers.*, +COALESCE((SELECT site_ids FROM peer_sites WHERE peer_sites.peer_id = peers.peer_id), '{}') AS site_ids, +COALESCE((SELECT peer_site_ids FROM peer_sites WHERE peer_sites.peer_id = peers.peer_id), '{}') AS peer_site_ids, +COALESCE((SELECT person_ids FROM peer_persons WHERE peer_persons.peer_id = peers.peer_id), '{}') AS person_ids, +COALESCE((SELECT peer_person_ids FROM peer_persons WHERE peer_persons.peer_id = peers.peer_id), '{}') AS peer_person_ids, +COALESCE((SELECT key_ids FROM peer_keys WHERE peer_keys.peer_id = peers.peer_id), '{}') AS key_ids, +COALESCE((SELECT peer_key_ids FROM peer_keys WHERE peer_keys.peer_id = peers.peer_id), '{}') AS peer_key_ids, +COALESCE((SELECT node_ids FROM peer_nodes WHERE peer_nodes.peer_id = peers.peer_id), '{}') AS node_ids, +COALESCE((SELECT peer_node_ids FROM peer_nodes WHERE peer_nodes.peer_id = peers.peer_id), '{}') AS peer_node_ids, +COALESCE((SELECT slice_ids FROM peer_slices WHERE peer_slices.peer_id = peers.peer_id), '{}') AS slice_ids, +COALESCE((SELECT peer_slice_ids FROM peer_slices WHERE peer_slices.peer_id = peers.peer_id), '{}') AS peer_slice_ids +FROM peers; + +-------------------------------------------------------------------------------- +CREATE OR REPLACE VIEW node_tags AS +SELECT node_id, +array_accum(node_tag_id) AS node_tag_ids +FROM node_tag +GROUP BY node_id; + +CREATE OR REPLACE VIEW view_node_tags AS +SELECT +node_tag.node_tag_id, +node_tag.node_id, +nodes.hostname, +tag_types.tag_type_id, +tag_types.tagname, +tag_types.description, +tag_types.category, +tag_types.min_role_id, +node_tag.value +FROM node_tag +INNER JOIN tag_types USING (tag_type_id) +INNER JOIN nodes USING (node_id); + +CREATE OR REPLACE VIEW view_nodes AS +SELECT +nodes.node_id, +nodes.node_type, +nodes.hostname, +nodes.site_id, +nodes.boot_state, +nodes.run_level, +nodes.deleted, +nodes.model, 
+nodes.boot_nonce, +nodes.version, +nodes.verified, +nodes.ssh_rsa_key, +nodes.key, +CAST(date_part('epoch', nodes.date_created) AS bigint) AS date_created, +CAST(date_part('epoch', nodes.last_updated) AS bigint) AS last_updated, +CAST(date_part('epoch', nodes.last_contact) AS bigint) AS last_contact, +CAST(date_part('epoch', nodes.last_boot) AS bigint) AS last_boot, +CAST(date_part('epoch', nodes.last_download) AS bigint) AS last_download, +CAST(date_part('epoch', nodes.last_pcu_reboot) AS bigint) AS last_pcu_reboot, +CAST(date_part('epoch', nodes.last_pcu_confirmation) AS bigint) AS last_pcu_confirmation, +peer_node.peer_id, +peer_node.peer_node_id, +COALESCE((SELECT interface_ids FROM node_interfaces + WHERE node_interfaces.node_id = nodes.node_id), '{}') +AS interface_ids, +COALESCE((SELECT nodegroup_ids FROM node_nodegroups + WHERE node_nodegroups.node_id = nodes.node_id), '{}') +AS nodegroup_ids, +COALESCE((SELECT slice_ids FROM node_slices + WHERE node_slices.node_id = nodes.node_id), '{}') +AS slice_ids, +COALESCE((SELECT slice_ids_whitelist FROM node_slices_whitelist + WHERE node_slices_whitelist.node_id = nodes.node_id), '{}') +AS slice_ids_whitelist, +COALESCE((SELECT pcu_ids FROM node_pcus + WHERE node_pcus.node_id = nodes.node_id), '{}') +AS pcu_ids, +COALESCE((SELECT ports FROM node_pcus + WHERE node_pcus.node_id = nodes.node_id), '{}') +AS ports, +COALESCE((SELECT conf_file_ids FROM node_conf_files + WHERE node_conf_files.node_id = nodes.node_id), '{}') +AS conf_file_ids, +COALESCE((SELECT node_tag_ids FROM node_tags + WHERE node_tags.node_id = nodes.node_id), '{}') +AS node_tag_ids, +node_session.session_id AS session +FROM nodes +LEFT JOIN peer_node USING (node_id) +LEFT JOIN node_session USING (node_id); + +-------------------------------------------------------------------------------- +CREATE OR REPLACE VIEW view_nodegroups AS +SELECT +nodegroups.*, +tag_types.tagname, +COALESCE((SELECT conf_file_ids FROM nodegroup_conf_files + WHERE 
nodegroup_conf_files.nodegroup_id = nodegroups.nodegroup_id), '{}') +AS conf_file_ids, +COALESCE((SELECT node_ids FROM nodegroup_nodes + WHERE nodegroup_nodes.nodegroup_id = nodegroups.nodegroup_id), '{}') +AS node_ids +FROM nodegroups INNER JOIN tag_types USING (tag_type_id); + +-------------------------------------------------------------------------------- +CREATE OR REPLACE VIEW view_conf_files AS +SELECT +conf_files.*, +COALESCE((SELECT node_ids FROM conf_file_nodes + WHERE conf_file_nodes.conf_file_id = conf_files.conf_file_id), '{}') +AS node_ids, +COALESCE((SELECT nodegroup_ids FROM conf_file_nodegroups + WHERE conf_file_nodegroups.conf_file_id = conf_files.conf_file_id), '{}') +AS nodegroup_ids +FROM conf_files; + +-------------------------------------------------------------------------------- +DROP VIEW view_pcus; +CREATE OR REPLACE VIEW view_pcus AS +SELECT +pcus.pcu_id, +pcus.site_id, +pcus.hostname, +pcus.ip, +pcus.protocol, +pcus.username, +pcus.password, +pcus.model, +pcus.notes, +CAST(date_part('epoch', pcus.last_updated) AS bigint) AS last_updated, +COALESCE((SELECT node_ids FROM pcu_nodes WHERE pcu_nodes.pcu_id = pcus.pcu_id), '{}') AS node_ids, +COALESCE((SELECT ports FROM pcu_nodes WHERE pcu_nodes.pcu_id = pcus.pcu_id), '{}') AS ports +FROM pcus; + + +-------------------------------------------------------------------------------- +CREATE OR REPLACE VIEW view_sites AS +SELECT +sites.site_id, +sites.login_base, +sites.name, +sites.abbreviated_name, +sites.deleted, +sites.enabled, +sites.is_public, +sites.max_slices, +sites.max_slivers, +sites.latitude, +sites.longitude, +sites.url, +sites.ext_consortium_id, +CAST(date_part('epoch', sites.date_created) AS bigint) AS date_created, +CAST(date_part('epoch', sites.last_updated) AS bigint) AS last_updated, +peer_site.peer_id, +peer_site.peer_site_id, +COALESCE((SELECT person_ids FROM site_persons WHERE site_persons.site_id = sites.site_id), '{}') AS person_ids, +COALESCE((SELECT node_ids FROM 
site_nodes WHERE site_nodes.site_id = sites.site_id), '{}') AS node_ids, +COALESCE((SELECT address_ids FROM site_addresses WHERE site_addresses.site_id = sites.site_id), '{}') AS address_ids, +COALESCE((SELECT slice_ids FROM site_slices WHERE site_slices.site_id = sites.site_id), '{}') AS slice_ids, +COALESCE((SELECT pcu_ids FROM site_pcus WHERE site_pcus.site_id = sites.site_id), '{}') AS pcu_ids, +COALESCE((SELECT site_tag_ids FROM site_tags WHERE site_tags.site_id = sites.site_id), '{}') AS site_tag_ids +FROM sites +LEFT JOIN peer_site USING (site_id); + +-------------------------------------------------------------------------------- +CREATE OR REPLACE VIEW view_addresses AS +SELECT +addresses.*, +COALESCE((SELECT address_type_ids FROM address_address_types WHERE address_address_types.address_id = addresses.address_id), '{}') AS address_type_ids, +COALESCE((SELECT address_types FROM address_address_types WHERE address_address_types.address_id = addresses.address_id), '{}') AS address_types +FROM addresses; + +-------------------------------------------------------------------------------- +CREATE OR REPLACE VIEW view_keys AS +SELECT +keys.*, +person_key.person_id, +peer_key.peer_id, +peer_key.peer_key_id +FROM keys +LEFT JOIN person_key USING (key_id) +LEFT JOIN peer_key USING (key_id); + +-------------------------------------------------------------------------------- +CREATE OR REPLACE VIEW slice_tags AS +SELECT slice_id, +array_accum(slice_tag_id) AS slice_tag_ids +FROM slice_tag +GROUP BY slice_id; + +CREATE OR REPLACE VIEW view_slices AS +SELECT +slices.slice_id, +slices.site_id, +slices.name, +slices.instantiation, +slices.url, +slices.description, +slices.max_nodes, +slices.creator_person_id, +slices.is_deleted, +CAST(date_part('epoch', slices.created) AS bigint) AS created, +CAST(date_part('epoch', slices.expires) AS bigint) AS expires, +peer_slice.peer_id, +peer_slice.peer_slice_id, +COALESCE((SELECT node_ids FROM slice_nodes WHERE slice_nodes.slice_id 
= slices.slice_id), '{}') AS node_ids, +COALESCE((SELECT person_ids FROM slice_persons WHERE slice_persons.slice_id = slices.slice_id), '{}') AS person_ids, +COALESCE((SELECT slice_tag_ids FROM slice_tags WHERE slice_tags.slice_id = slices.slice_id), '{}') AS slice_tag_ids +FROM slices +LEFT JOIN peer_slice USING (slice_id); + +CREATE OR REPLACE VIEW view_slice_tags AS +SELECT +slice_tag.slice_tag_id, +slice_tag.slice_id, +slice_tag.node_id, +slice_tag.nodegroup_id, +tag_types.tag_type_id, +tag_types.tagname, +tag_types.description, +tag_types.category, +tag_types.min_role_id, +slice_tag.value, +slices.name +FROM slice_tag +INNER JOIN tag_types USING (tag_type_id) +INNER JOIN slices USING (slice_id); + +-------------------------------------------------------------------------------- +CREATE OR REPLACE VIEW view_sessions AS +SELECT +sessions.session_id, +CAST(date_part('epoch', sessions.expires) AS bigint) AS expires, +person_session.person_id, +node_session.node_id +FROM sessions +LEFT JOIN person_session USING (session_id) +LEFT JOIN node_session USING (session_id); + +-------------------------------------------------------------------------------- +-- Built-in maintenance account and default site +-------------------------------------------------------------------------------- + +INSERT INTO persons (first_name, last_name, email, password, enabled) +VALUES ('Maintenance', 'Account', 'maint@localhost.localdomain', 'nopass', true); + +INSERT INTO person_role (person_id, role_id) VALUES (1, 10); +INSERT INTO person_role (person_id, role_id) VALUES (1, 20); +INSERT INTO person_role (person_id, role_id) VALUES (1, 30); +INSERT INTO person_role (person_id, role_id) VALUES (1, 40); + +INSERT INTO sites (login_base, name, abbreviated_name, max_slices) +VALUES ('pl', 'PlanetLab Central', 'PLC', 100); diff --git a/plc.d/api b/plc.d/api new file mode 100755 index 0000000..f54bfc8 --- /dev/null +++ b/plc.d/api @@ -0,0 +1,58 @@ +#!/bin/bash +# +# priority: 800 +# +# Configure 
the API. Must be done after SSL certificates are generated +# and before the API web server is brought up. +# +# Mark Huang +# Copyright (C) 2006 The Trustees of Princeton University +# + +# Source function library and configuration +. /etc/plc.d/functions +. /etc/planetlab/plc_config +local_config=/etc/planetlab/configs/site.xml + +# Be verbose +set -x + +case "$1" in + start) + if [ "$PLC_API_ENABLED" != "1" ] ; then + exit 0 + fi + + MESSAGE=$"Configuring the API" + dialog "$MESSAGE" + + # Make sure that the API maintenance account is protected by a + # password. + if [ -z "$PLC_API_MAINTENANCE_PASSWORD" ] ; then + PLC_API_MAINTENANCE_PASSWORD=$(uuidgen) + plc-config --category=plc_api --variable=maintenance_password --value="$PLC_API_MAINTENANCE_PASSWORD" --save=$local_config $local_config + #service plc reload + plc_reload force + fi + + # Make sure that all PLC servers are allowed to access the API + # through the maintenance account. + PLC_API_MAINTENANCE_SOURCES=($(( + for ip in $PLC_API_MAINTENANCE_SOURCES ; do + echo $ip + done + for server in API BOOT WWW ; do + hostname=PLC_${server}_HOST + gethostbyname ${!hostname} + done + ) | sort -u)) + PLC_API_MAINTENANCE_SOURCES=${PLC_API_MAINTENANCE_SOURCES[*]} + plc-config --category=plc_api --variable=maintenance_sources --value="$PLC_API_MAINTENANCE_SOURCES" --save=$local_config $local_config + #service plc reload + plc_reload force + + result "$MESSAGE" + ;; +esac + +exit $ERRORS diff --git a/plc.d/db b/plc.d/db new file mode 100755 index 0000000..3b84b59 --- /dev/null +++ b/plc.d/db @@ -0,0 +1,239 @@ +#!/bin/bash +# +# priority: 900 +# +# Bootstrap the database +# +# Mark Huang +# Copyright (C) 2006 The Trustees of Princeton University +# + +# Source function library and configuration +. /etc/plc.d/functions +. 
/etc/planetlab/plc_config + +# Be verbose +set -x + +# Export so that we do not have to specify -p to psql invocations +export PGPORT=$PLC_DB_PORT + +# Install extensions +function extend_db() +{ + shopt -s nullglob + for file in /usr/share/plc_api/extensions/*-up*; do + script=${file##*/} + name=${script%-up*} + extension=${script##*.} + version=$(psql -U $PLC_DB_USER --quiet --tuples-only --no-align -c \ + "SELECT version FROM plc_db_extensions WHERE name='$name' LIMIT 1" \ + $PLC_DB_NAME 2>/dev/null | awk 'BEGIN { ver=0 } /^[0-9]+$/ { ver=$1 } END { print ver }') + if [ $version -eq 0 ]; then + if [ "$extension" = "sql" ] ; then + dialog " - $script (dbdumped)" + dump_planetlab_db "before-$script" + psql -U $PLC_DB_USER -f $file $PLC_DB_NAME + elif [ -x $file ] ; then + dialog " - $script (dbdumped)" + dump_planetlab_db "before-$script" + $file + else + dialog "\nWarning: extension $file not executable" + fi + check + fi + for file in /usr/share/plc_api/extensions/$name/migrations/[0-9]*-up-*; do + script=${file##*/} + index=${script%-up-*} + extension=${script##*.} + if [ $index -gt $version ] ; then + if [ "$extension" = "sql" ] ; then + dialog " - $script (dbdumped)" + dump_planetlab_db "before-$script" + psql -U $PLC_DB_USER -f $file $PLC_DB_NAME + elif [ -x $file ] ; then + dialog " - $script (dbdumped)" + dump_planetlab_db "before-$script" + $file + else + dialog "\nWarning: migration $file not executable" + fi + check + fi + done + done +} + +# Updates the database by applying all migration scripts in +# /usr/share/plc_api/migrations/N-up-*, where N is greater than the +# current subversion. At least one of the migration scripts with the +# same N must update plc_db_version.subversion. 
+function migrate_db() +{ + subversion=$(psql -U $PLC_DB_USER --quiet --tuples-only --no-align -c \ + "SELECT subversion FROM plc_db_version LIMIT 1" \ + $PLC_DB_NAME 2>/dev/null || echo 0) + shopt -s nullglob + for file in /usr/share/plc_api/migrations/[0-9]*-up-* ; do + script=$(basename $file) + index=${script%-up*} + extension=${script##*.} + if [ $index -gt $subversion ] ; then + if [ "$extension" = "sql" ] ; then + dialog " - $script (dbdumped)" + dump_planetlab_db "before-$script" + psql -U $PLC_DB_USER -f $file $PLC_DB_NAME + elif [ -x $file ] ; then + dialog " - $script (dbdumped)" + dump_planetlab_db "before-$script" + $file + else + dialog "\nWarning: migration $file not executable" + fi + check + fi + done +} + +function checkpoint_planetlab_db() +{ + dumpfile=$1 + pg_dump -U $PLC_DB_USER $PLC_DB_NAME > $dumpfile + check +} + +function restore_planetlab_db() +{ + dumpfile=$1 + if [ -n "$dumpfile" ] ; then + [ -f "$dumpfile" ] && psql -a -U $PLC_DB_USER $PLC_DB_NAME < $dumpfile + check + fi +} + +# use a single date of this script invocation for the dump_*_db functions. 
+DATE=$(date +"%Y-%m-%d-%H-%M-%S") + +# Dumps the database - optional argument to specify filename suffix +function dump_planetlab_db() +{ + if [ -n "$1" ] ; then suffix="-$1" ; else suffix="" ; fi + dumpfile=/var/lib/pgsql/backups/$(date +"${PLC_DB_NAME}.${DATE}${suffix}.sql") + checkpoint_planetlab_db $dumpfile +} + +function restore_drupal_db() +{ + dumpfile=$1 + if [ -n "$dumpfile" ] ; then + [ -f "$dumpfile" ] && psql -a -U $PLC_DB_USER drupal < $1 + check + fi +} + +function checkpoint_drupal_db() +{ + dumpfile=$1 + pg_dump -U $PLC_DB_USER drupal > $dumpfile + check +} + +function dump_drupal_db() +{ + dumpfile=/var/lib/pgsql/backups/$(date +"drupal.${DATE}.sql") + checkpoint_drupal_db $dumpfile + check +} + +# Clean up old backups +function clean_dumps() +{ + find /var/lib/pgsql/backups '(' -name "$PLC_DB_NAME.*.sql" -o -name "drupal.*.sql" ')' -a -atime +15 | xargs rm -f + check +} + +[ $PLC_DB_ENABLED -ne 1 ] && exit 0 +case "$1" in + start) + MESSAGE=$"Bootstrapping the database" + dialog "$MESSAGE" + + # Apply schema updates + migrate_db + extend_db + + # Update the maintenance account username. This can't be + # done through the api-config script since it uses the + # maintenance account to access the API. The maintenance + # account should be person_id 1 since it is created by the + # DB schema itself. + psql -U $PLC_DB_USER -c "UPDATE persons SET email='$PLC_API_MAINTENANCE_USER' WHERE person_id=1" $PLC_DB_NAME + + # Update the Drupal site_name variable + # also turn off drupal native user registration + psql -U $PLC_DB_USER drupal < +# Copyright (C) 2006 The Trustees of Princeton University +# + +# Source function library and configuration +. /etc/plc.d/functions +. 
/etc/planetlab/plc_config +local_config=/etc/planetlab/configs/site.xml + +# Be verbose +set -x + +# Default locations +PGDATA=/var/lib/pgsql/data +postgresql_conf=$PGDATA/postgresql.conf +pghba_conf=$PGDATA/pg_hba.conf +postgresql_sysconfig=/etc/sysconfig/pgsql/postgresql + +# Export so that we do not have to specify -p to psql invocations +export PGPORT=$PLC_DB_PORT + +# can't trust the return of service postgresql start / nor status +function postgresql_check () { + + # wait until postmaster is up and running - or 10s max + if status postmaster && [ -f /var/lock/subsys/postgresql ] ; then + # The only way we can be sure is if we can access it + for i in $(seq 1 10) ; do + # Must do this as the postgres user initially (before we + # fix pg_hba.conf to passwordless localhost access). + su -c 'psql -U postgres -c "" template1' postgres && return 0 + sleep 1 + done + fi + + return 1 +} + +case "$1" in + start) + if [ "$PLC_DB_ENABLED" != "1" ] ; then + exit 0 + fi + + MESSAGE=$"Starting PostgreSQL server" + dialog "$MESSAGE" + + ######## sysconfig +# xxx on f16, the systemd init script won't read /etc/sysconfig/pgsql/postgresql any more +# need to find out how to perform this configuration, if still needed + # Set data directory and redirect startup output to /var/log/pgsql + mkdir -p $(dirname $postgresql_sysconfig) + touch $postgresql_sysconfig + tmp=${postgresql_sysconfig}.new + # remove any previous definitions and write ours + ( egrep -v '^(PGDATA=|PGLOG=|PGPORT=)' $postgresql_sysconfig + echo "PGDATA=$PGDATA" + echo "PGLOG=/var/log/pgsql" + echo "PGPORT=$PLC_DB_PORT" + ) > $tmp ; mv -f $tmp $postgresql_sysconfig + + ######## /var/lib/pgsql/data + # Fix ownership of /var/lib/pgsql (rpm installation may have changed it) + chown -R -H postgres:postgres $(dirname $PGDATA) + + # PostgreSQL must be started at least once to bootstrap + # /var/lib/pgsql/data + if [ ! 
-f $postgresql_conf ] ; then +# fedora 16 uses systemd +# http://docs.fedoraproject.org/en-US/Fedora/16/html/Release_Notes/sect-Release_Notes-Changes_for_Sysadmin.html + if type postgresql-setup >& /dev/null ; then + postgresql-setup initdb || : + check + else + service postgresql initdb &> /dev/null || postgresql : + check + fi + fi + + ######## /var/lib/pgsql/data/postgresql.conf + # Enable DB server. drop Postgresql<=7.x + # PostgreSQL >=8.0 defines listen_addresses + # listen on a specific IP + localhost, more robust when run within a vserver + sed -i -e '/^listen_addresses/d' $postgresql_conf + echo "listen_addresses = '${PLC_DB_HOST},localhost'" >> $postgresql_conf + # tweak timezone to be 'UTC' + sed -i -e '/^timezone=/d' $postgresql_conf + echo "timezone='UTC'" >> $postgresql_conf + + ######## /var/lib/pgsql/data/pg_hba.conf + # Disable access to MyPLC and drupal DBs from all hosts + sed -i -e '/^\(host\|local\)/d' $pghba_conf + + # Enable passwordless localhost access + echo "local all all trust" >>$pghba_conf + + # Enable access from the API, boot, and web servers + PLC_API_IP=$(gethostbyname $PLC_API_HOST) + PLC_BOOT_IP=$(gethostbyname $PLC_BOOT_HOST) + PLC_WWW_IP=$(gethostbyname $PLC_WWW_HOST) + ip_failure=0 + if [ -z "$PLC_API_IP" ] ; then + MESSAGE=$"PLC_API_IP is not set" + dialog "$MESSAGE" + ip_failure=1 + fi + if [ -z "$PLC_BOOT_IP" ] ; then + MESSAGE=$"PLC_BOOT_IP is not set" + dialog "$MESSAGE" + ip_failure=1 + fi + if [ -z "$PLC_WWW_IP" ] ; then + MESSAGE=$"PLC_WWW_IP is not set" + dialog "$MESSAGE" + ip_failure=1 + fi + if [ $ip_failure -eq 1 ] ; then + /bin/false + check + fi + + ( + echo "host $PLC_DB_NAME $PLC_DB_USER 127.0.0.1/32 password" + echo "host $PLC_DB_NAME $PLC_DB_USER $PLC_API_IP/32 password" + echo "host $PLC_DB_NAME $PLC_DB_USER $PLC_BOOT_IP/32 password" + echo "host $PLC_DB_NAME $PLC_DB_USER $PLC_WWW_IP/32 password" + # Drupal also uses PostgreSQL + echo "host drupal $PLC_DB_USER 127.0.0.1/32 password" + echo "host drupal 
$PLC_DB_USER $PLC_WWW_IP/32 password" + ) >>$pghba_conf + + # Append site-specific access rules + for file in $pghba_conf.d/*.conf ; do + cat "$file" >>$pghba_conf + done + + # Fix ownership (sed -i changes it) + chown postgres:postgres $postgresql_conf $pghba_conf + + ######## Start up the server - ignore retcod and check this our way + (exec 3>&- 4>&- ; service postgresql start) + postgresql_check + check + + ######## Create/update the unprivileged database user and password + if [ -z "$PLC_DB_PASSWORD" ] ; then + PLC_DB_PASSWORD=$(uuidgen) + plc-config --category=plc_db --variable=password --value="$PLC_DB_PASSWORD" --save=$local_config $local_config + #service plc reload + plc_reload force + fi + if ! psql -U $PLC_DB_USER -c "" template1 >/dev/null 2>&1 ; then + psql -U postgres -c "CREATE USER $PLC_DB_USER PASSWORD '$PLC_DB_PASSWORD'" template1 + else + psql -U postgres -c "ALTER USER $PLC_DB_USER WITH PASSWORD '$PLC_DB_PASSWORD'" template1 + fi + check + + ######## Create the databases if necessary + if ! psql -U $PLC_DB_USER -c "" $PLC_DB_NAME >/dev/null 2>&1 ; then + createdb -U postgres --template=template0 --encoding=UNICODE --owner=$PLC_DB_USER $PLC_DB_NAME + psql -U $PLC_DB_USER -f /usr/share/plc_api/$PLC_DB_NAME.sql $PLC_DB_NAME + fi + check + if ! psql -U $PLC_DB_USER -c "" drupal >/dev/null 2>&1 ; then + createdb -U postgres --template=template0 --encoding=UNICODE --owner=$PLC_DB_USER drupal + psql -U $PLC_DB_USER -f /var/www/html/database/database.pgsql drupal + fi + check + + result "$MESSAGE" + ;; + + stop) + MESSAGE=$"Stopping PostgreSQL server" + dialog "$MESSAGE" + + # Drop the current user in case the username changes + psql -U postgres -c "DROP USER $PLC_DB_USER" template1 + + # WARNING: If the DB name changes, the old DB will be left + # intact and a new one will be created. If it changes + # back, the old DB will not be re-created. 
+ + # Shut down the server + service postgresql stop + + # /etc/init.d/postgresql fails if it is not running + [ "$PLC_DB_ENABLED" = 1 ] && check + + result "$MESSAGE" + ;; +esac + +exit $ERRORS diff --git a/plcapi.spec b/plcapi.spec new file mode 100644 index 0000000..ce5701f --- /dev/null +++ b/plcapi.spec @@ -0,0 +1,631 @@ +%define name plcapi +%define version 5.3 +%define taglevel 11 + +%define release %{taglevel}%{?pldistro:.%{pldistro}}%{?date:.%{date}} + +Summary: PlanetLab Central API +Name: %{name} +Version: %{version} +Release: %{release} +License: PlanetLab +Group: System Environment/Daemons +Source0: %{name}-%{version}.tar.gz +BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-root + +Vendor: PlanetLab +Packager: PlanetLab Central +Distribution: PlanetLab %{plrelease} +URL: %{SCMURL} + +Provides: PLCAPI +Obsoletes: PLCAPI + +# requirement to mod_python or mod_wsgi: deferred to myplc +Requires: httpd mod_ssl +Requires: Django +Requires: postgresql >= 8.2, postgresql-server >= 8.2 +# We use set everywhere +Requires: python >= 2.7 +Requires: postgresql-python +Requires: python-psycopg2 +Requires: python-pycurl +# used in GPG.py as a replacement to PyXML's Canonicalize +Requires: python-lxml +# Fedora had support for SOAPpy up to fedora20 +# https://lists.fedoraproject.org/pipermail/devel/2014-June/199730.html +# https://lists.fedoraproject.org/pipermail/devel/2014-June/200379.html +%if ("%{distro}" == "Fedora" && %{distrorelease} <= 20) || ("%{distro}" != "Fedora") +Requires: SOAPpy +%endif +#Requires: python-simplejson +# for the RebootNodeWithPCU method +Requires: pcucontrol >= 1.0-6 +# for OMF integration +Requires: pyaspects >= 0.4 +Requires: python-twisted-words +Requires: python-twisted-web +# ldap +Requires: python-ldap +# for memcache +Requires: memcached python-memcached +### avoid having yum complain about updates, as stuff is moving around +# plc.d/api +Conflicts: MyPLC <= 4.3 + +# standard xmlrpc.so that ships with PHP does not marshal NULL 
+# prior to May 2017 we used to ship our own brew of xmlrpc but +# that does not build anymore on f25 +# So bottom line is: +# * don't use fedora's php-xmlrpc (no support for marshalling NULL) +# * don't use our own that is way too old +# * instead, thanks to Ciro we pull it from +# https://github.com/gggeek/phpxmlrpc.git +# Requires: php-xmlrpc + +# PostgreSQL and SOAPpy are necessary to run the API server, but not +# plcsh. Since the only supported method of running the server is via +# MyPLC anyway, don't be so stringent about binary requirements, in +# case people want to install this package just for plcsh. +# Requires: postgresql-server, SOAPpy +AutoReqProv: no + +%description +The PLCAPI package provides an XML-RPC and SOAP API for accessing the +PlanetLab Central (PLC) database. The API may be accessed directly via +the Python shell program plcsh, through a toy standalone server, or +through Apache mod_python. + +%prep +%setup -q + +%build +# python-pycurl and python-psycopg2 avail. from fedora 5 +# we used to ship our own version of psycopg2 and pycurl, for fedora4 +# starting with 4.3, support for these two modules is taken out +# +# Build __init__.py metafiles and PHP API. 
+%{__make} %{?_smp_mflags} +%{__make} -C wsdl + +%install +rm -rf $RPM_BUILD_ROOT +%{__make} %{?_smp_mflags} install DESTDIR="$RPM_BUILD_ROOT" datadir="%{_datadir}" bindir="%{_bindir}" + +# Install shell symlink +mkdir -p $RPM_BUILD_ROOT/%{_bindir} +ln -s %{_datadir}/plc_api/plcsh $RPM_BUILD_ROOT/%{_bindir}/plcsh + +### mkdir -p $RPM_BUILD_ROOT/%{_sysconfdir}/php.d +### cat > $RPM_BUILD_ROOT/%{_sysconfdir}/php.d/xmlrpc.ini < - plcapi-5.3-11 +- mostly issued for the R2lab deployment +- *** major +- * dots allowed in login_base and slice name +- * new lease filter 'day' +- *** minor +- * more explicit message in case of overlapping resas +- * bugfix: escaping unicode in xml +- * GetLeases allowed to anonymous callers +- *** miscell +- * use plain json library + +* Sun Jul 10 2016 Thierry Parmentelat - plcapi-5.3-10 +- GetBootMedium with systemd-debug option : add kernel arg systemd.log_target=console + +* Fri Jun 26 2015 Thierry Parmentelat - plcapi-5.3-9 +- new bootstate 'upgrade' is like reinstall but leaves slices intact + +* Fri Apr 24 2015 Thierry Parmentelat - plcapi-5.3-8 +- GetBootMedium now keeps logs of created bootCD's in /var/tmp/bootmedium + +* Fri Apr 03 2015 Thierry Parmentelat - plcapi-5.3-7 +- reviewed logging strategy, no more direct print but use log instead + +* Wed Feb 18 2015 Thierry Parmentelat - plcapi-5.3-6 +- extensions for the ipv6 feature +- DeleteSliceTag can be run with the 'node' auth +- xmlrpc-epi-php.c has has a tweak for f21/php-5.6 +- also SOAPpy is not present in f21 anymore, so drop that dep. 
with f>=21 + +* Tue Aug 19 2014 Thierry Parmentelat - plcapi-5.3-5 +- allow GetSlices to filter on tags as well + +* Tue Aug 19 2014 Thierry Parmentelat - plcapi-5.3-4 +- enable filtering on tags (like hrn) with GetPersons and GetSites + +* Mon Jun 02 2014 Thierry Parmentelat - plcapi-5.3-3 +- provide more context in messages from AddPersonToSlice and DeletePersonFromSlice + +* Fri Mar 21 2014 Thierry Parmentelat - plcapi-5.3-2 +- don't use PyXML that is deprecated in f20, use lxml instead +- higher max size for login_base (32 vs 20) and slice name (64 vs 32) + +* Tue Dec 10 2013 Thierry Parmentelat - plcapi-5.3-1 +- create accessor 'hrn' for site as well +- create accessors 'sfa_created' for site/slice/person +- AddSite() and AddSlice() set respectively Site HRN and Slice HRN. +- UpdatePerson() updates Person HRN according to updated email. +- UpdateSite() updates Site HRN according to updated login_base. +- Fix AddPersonToSite(). +- GetPeerData() ignores Sites/Slices/Persons that have tag sfa_created=='True' +- RefreshPeer() manages Site*Person and Person*Role relationships. 
+ +* Thu Oct 10 2013 Thierry Parmentelat - plcapi-5.2-10 +- provide a slicename_to_hrn function + +* Fri Sep 20 2013 Thierry Parmentelat - plcapi-5.2-9 +- add an hrn accessor for slice so the SFA code can keep track of the federation-wide name of the slice + +* Wed Aug 28 2013 Thierry Parmentelat - plcapi-5.2-8 +- fix for wsgi-based deployments, each thread has its own api() + +* Fri Jun 28 2013 Thierry Parmentelat - plcapi-5.2-7 +- also cleanup omf-slicemgr initscript + +* Fri Jun 28 2013 Thierry Parmentelat - plcapi-5.2-6 +- tear down omf-related aspects as this is no longer needed with omfv6 + +* Thu Jun 27 2013 Thierry Parmentelat - plcapi-5.2-5 +- also expose 'expires' in ResolveSlices + +* Wed Jun 26 2013 Thierry Parmentelat - plcapi-5.2-4 +- drop GetSliceSshKeys, new RetrieveSlicePersonKeys and RetrieveSliceSliverKeys + +* Wed May 29 2013 Thierry Parmentelat - plcapi-5.2-3 +- enable netconfig aspects if PLC_NETCONFIG_ENABLED + +* Wed Apr 24 2013 Thierry Parmentelat - plcapi-5.2-2 +- use SFA code for computing hrn's when available + +* Fri Mar 08 2013 Thierry Parmentelat - plcapi-5.2-1 +- new slice tag 'interface' for configuring a virtual interface +- new builtin 030-interface_tags +- new node accessor and tag 'virt' for mixing lxc & vs nodes +- also exposed in GetNodeFlavour based on fcdistro and PLC_FLAVOUR_VIRT_MAP +- moved ModPypthon and plc.wsgi in the apache/ subdir +- renamed PLCAPI.spec into plcapi.spec +- removed old and unused tag 'type' on slices(!) 
- original intention seemed like virt +- support for php-5.4 + +* Wed Dec 19 2012 Thierry Parmentelat - plcapi-5.1-6 +- implement PLC_VSYS_DEFAULTS in AddSlice + +* Wed Dec 12 2012 Thierry Parmentelat - plcapi-5.1-5 +- add hrn tag to persons, managed by AddPerson and AddPersonToSite +- AddPerson and UpdatePerson are now tag-aware +- as a side-effect AddPerson is more picky and rejects invalid fields +- which results in a requirement to use sfa-2.1-22 with this tag +- marginal improvement on the xml doc on tags + +* Fri Nov 23 2012 Thierry Parmentelat - plcapi-5.1-4 +- tweak omf_slicemgr for smaller logs, split per month for easier cleaning +- reset_xmpp_pubsub_nodes now hos options and usage +- new Accessors for vicci + +* Fri Aug 31 2012 Thierry Parmentelat - plcapi-5.1-3 +- fixed imports for tags management with sites and persons +- add predefined 'cpu_freezable' tag + +* Mon Jul 09 2012 Thierry Parmentelat - plcapi-5.1-2 +- tweaks in interface initialization +- has 'vsys_vnet' as a predefined tagtype +- bugfix: prevent DeleteSliceFromNodes from messing with foreign slices +- bugfix: GetSlivers & nodegroups +- bugfix: in jabber groups management + +* Mon Apr 16 2012 Thierry Parmentelat - plcapi-5.1-1 +- fix gpg-authentication for Persons (thanks Jordan) +- PostgreSQL.quote reviewed for f16/postgresql9 (used deprecated internal helper) +- ip address/network check: v4 or v6 +- customized DB Message survive upgrade +- make sync works in lxc-hosted tests +- no svn keywords anymore + +* Fri Feb 24 2012 Thierry Parmentelat - plcapi-5.0-37 +- fix sorting for methods list in docs +- untested but needed tweak for postgres startup in f16 + +* Mon Nov 28 2011 Thierry Parmentelat - plcapi-5.0-36 +- tweaks in postgresql setup - in line with sfa + +* Mon Sep 26 2011 Thierry Parmentelat - plcapi-5.0-35 +- slight tweaks in Persons.py + +* Wed Aug 31 2011 Thierry Parmentelat - plcapi-5.0-34 +- GetSession has support for more than one day sessions +- reset_xmpp_pubsub_nodes is 
much more efficient +- reset_xmpp_pubsub_nodes uses the config instead of localhost:5053 +- bugfix - deleting a person in the middle of the signup process + +* Tue Jun 07 2011 Thierry Parmentelat - plcapi-5.0-33 +- ratelimit aspects +- cache getslivers per node if PLC_GET_SIVERS_CACHE is enabled +- requires Django for cache_utils +- attempt to expose 'pldistro' to sfa +- last_time_spent_online, last_time_spent_offline: new fields in Node +- new slice tags 'isolate_loopback' and 'cpu_cores' +- refresh-peer federation logs dump exceptions +- modpython logs have a timestamp +- more verbose/accurate php error reporting +- postgresql listens on PLC_DB_HOST+localhost instead of 0.0.0.0 +- AddNode, UpdateNode: manage tags directly rather than through another method +- BootUpdateNode: only update once +- GetPersons: techs can access the list of persons on their site +- GetSlices and GetSliceTags: techs can see slices on their nodes +- GetSlivers: isrootonsite tag; cacheable + +* Tue Mar 22 2011 Thierry Parmentelat - plcapi-5.0-32 +- rename initscript_body into initscript_code + +* Mon Mar 21 2011 Thierry Parmentelat - plcapi-5.0-31 +- new initscript_body tag + +* Wed Mar 09 2011 Thierry Parmentelat - plcapi-5.0-30 +- working draft for GetSliceSshKeys + +* Thu Feb 17 2011 Thierry Parmentelat - plcapi-5.0-29 +- trash getbootmedium tmp file if already exists but is longer than 5 minutes old +- (this is for people who cancel their download) + +* Fri Feb 04 2011 Thierry Parmentelat - plcapi-5.0-28 +- fix db-config sequence : accessors step now merged in, and occurs at the right time +- db-config also more robust +- no more explicit 'accessors' step in plc.d + +* Thu Feb 03 2011 Thierry Parmentelat - plcapi-5.0-27 +- session auth: do *not* delete session when node runs a method that does not have 'node' role +- session auth: remove support for bootonce in old boot CDs +- give a reason when caller_may_write_slice_tag fails +- remove ugly hack that was setting 'vref' to 'omf' - 
need to set both tags now + +* Tue Feb 01 2011 Thierry Parmentelat - plcapi-5.0-26 +- SetSliceVref needed the node role +- protect GetSliceFamily +- Fix bugs in tag authorizations + +* Sun Jan 23 2011 Thierry Parmentelat - plcapi-5.0-25 +- altered checking of optional fields in Interfaces +- UpdateTagType more picky on inputs - msg when trying to set roles, which is not supported +- has pyxml and python-simplejson as new deps + +* Wed Dec 08 2010 Thierry Parmentelat - plcapi-5.0-24 +- tweak doc extraction for fedora14 + +* Tue Dec 07 2010 Thierry Parmentelat - plcapi-5.0-23 +- builtin accessors for the myslice page +- Get{Node,Interface}Tags allowed to techs +- tweak in ratelimitaspect.py + +* Mon Dec 06 2010 Thierry Parmentelat - plcapi-5.0-22 +- add admin role to accessor-related tags (arch, {fc,pl}distro) + +* Mon Dec 06 2010 Thierry Parmentelat - plcapi-5.0-21 +- bugfix in {Update,Delete}PersonTag +- updated xml doc for filters, accessors and tagtypes +- more explicit msg in case of missing roles +- improvements in ratelimitaspects.py + +* Fri Dec 03 2010 Thierry Parmentelat - plcapi-5.0-20 +- fix the roles for ssh_key and hmac tags + +* Wed Dec 01 2010 Thierry Parmentelat - plcapi-5.0-19 +- tag permissions system based on roles and not min_role_ids +- accessors simplified accordingly (no more min_role_id) +- new methods AddRoleToTagType and DeleteRoleFromTagType +- accessor-related tagtypes are created sooner, and enforced +- cleaned up redundancy between db-config.d and accessors + +* Thu Sep 16 2010 Thierry Parmentelat - plcapi-5.0-18 +- fix RefreshPeer that was not working in 5.0-17 + +* Thu Sep 16 2010 Thierry Parmentelat - plcapi-5.0-17 +- RefreshPeer is able to cope with 2 peers running different releases of the api +- DeletePerson can be used on duplicates +- first appearance of ModPythonJson.py + +* Wed Sep 01 2010 Thierry Parmentelat - plcapi-5.0-16 +- set accessors return the new value +- tweaks in the pubsub groups management + +* Wed Jul 28 2010 
S.Çağlar Onur - plcapi-5.0-15 +- convert hostnames to lower case and use ILIKE instead of LIKE + +* Fri Jul 16 2010 Baris Metin - plcapi-5.0-14 +- use hrn in pubsub groups + +* Tue Jul 13 2010 Baris Metin - plcapi-5.0-13 +- Add timestamps to Nodes, PCUs and Interfaces to make concrete statements about a node's configuration state. +- OMF fixes + +* Mon Jun 28 2010 Baris Metin - PLCAPI-5.0-12 +- automatically set vsys tag for omf controlled slices + +* Sat Jun 26 2010 Thierry Parmentelat - PLCAPI-5.0-11 +- addition of the 'ssh_key' slice tag +- first draft of the LDAP interface + +* Tue Jun 22 2010 Thierry Parmentelat - PLCAPI-5.0-10 +- reservation granularity defined in plc-config-tty (requires myplc 5.0.5) +- and readable through GetLeaseGranularity +- GetSlivers to expose reservation_policy and lease_granularity +- GetBootMedium fixed for reservable nodes +- tweaks in pcucontrol (requires pcucontrol-1.0-6) +- new Apache mod_wsgi python interface + +* Fri May 14 2010 Thierry Parmentelat - PLCAPI-5.0-9 +- the leases system + +* Wed Apr 14 2010 Thierry Parmentelat - PLCAPI-5.0-8 +- previous tag had gone wrong + +* Wed Apr 14 2010 Talip Baris Metin - PLCAPI-5.0-6 +- fix pubsub hostname + +* Fri Apr 02 2010 Thierry Parmentelat - PLCAPI-5.0-5 +- tweaks for the omf support (xmpp groups and RC-controlled slices) +- BootNodeUpdate supports also ssh_rsa_key (and logs only changes) +- GetNodeFlavour exposes fcdistro + +* Sun Mar 14 2010 Thierry Parmentelat - PLCAPI-5.0-4 +- do not use UpdateNode for handling the 'hrn' tag - should fix refresh peer & foreign nodes more generally + +* Fri Mar 12 2010 Thierry Parmentelat - PLCAPI-5.0-3 +- slice tag 'omf_control' supported for getting OMF's resource controller shipped to slivers +- pyaspect hooks allow to maintain the namespace xmpp groups +- new omf_slicemgr is a proxy to xmpp, used by these hooks +- nodes have their hrn exposed in the 'hrn' tag +- node hrn exposed in GetSlivers, as well as the overall xmpp config +- system 
slice 'drl' gets created by db-config +- daniel's changes to Filter for supporting wildcards in lists +- AddSliceTag consistency check tweaked + +* Thu Feb 11 2010 Thierry Parmentelat - PLCAPI-5.0-2 +- major cleanup +- get rid of all 4.2-related legacy code +- reset the migrations code, planetlab5.sql somes with (5,100) +- uses hashlib module when available + +* Fri Jan 29 2010 Thierry Parmentelat - PLCAPI-5.0-1 +- first working version of 5.0: +- pld.c/, db-config.d/ and nodeconfig/ scripts should now sit in the module they belong to +- nodefamily is 3-fold with pldistro-fcdistro-arch +- site and person tags +- new methods GetSliceFamily and GetNodeFlavour +- deprecated the dummynet stuff that were for the external dummyboxes +- tags definition : more consistency between db-config scripts and accessors +- (get accessor to create the tag type too if absent) +- logging an event for AddSliceToNodes + +* Sat Jan 09 2010 Thierry Parmentelat - PLCAPI-4.3-32 +- support for fedora 12 +- fix subtle bug in filtering with ] and quotes + +* Fri Dec 18 2009 Baris Metin - PLCAPI-4.3-31 +- * patch for php-5.3 (the one in f12) +- * validate email addresses with regex +- * add PersonTags and SiteTags +- * add additional accessors for node tags (kvariant, serial, ..) + +* Tue Nov 03 2009 Marc Fiuczynski - PLCAPI-4.3-30 +- Redacting password, session, and authstring values from the event log. + +* Mon Oct 19 2009 Thierry Parmentelat - PLCAPI-4.3-29 +- let AddSite set ext_consortium_id - required for the poorman registration pages +- drop version constraint on Requires: postgresql-python +- don't log system calls nor ReportRunLevel + +* Thu Oct 15 2009 Daniel Hokka Zakrisson - PLCAPI-4.3-28 +- Fix requires for CentOS. 
+ +* Fri Oct 09 2009 Baris Metin - PLCAPI-4.3-27 +- Require postgresql 8.2 (for array operators && and @>) + +* Thu Oct 08 2009 Thierry Parmentelat - PLCAPI-4.3-26 +- Filter now supports the | and & features to match in sequence values +- bugfix in the postgresql wrapper for sequence filter values +- reviewed GetSlivers to export admin keys more efficiently +- fix checking roles in UpdateSliceTag + +* Sat Sep 26 2009 Marc Fiuczynski - PLCAPI-4.3-25 +- - Some typos in the documentation were fixed. +- - UpdateSliceTag check if a node's min_role_id is >= (rather than >) +- to the tag's min_role_id. + +* Fri Sep 18 2009 anil vengalil - PLCAPI-4.3-24 + +* Mon Sep 07 2009 Thierry Parmentelat - PLCAPI-4.3-23 +- Ongoing work to add upcalls, using new SFA class +- new methods BindObjectToPeer, UnBindObjectFromPeer, still for SFA +- reviewed type-checking for the 3 taggable classes node-interface-slice +- cleanup ald dummynet stuff +- expose the 'extensions' accessors to the API +- tweaked checks in AddSliceTag +- GetPersons exposes roles by default +- bugfix in ReportRunLevel for non-string levels +- tweaks in GetSlivers ( seems that it now exposes the keys for the root context ) + +* Fri Jul 10 2009 Thierry Parmentelat - PLCAPI-4.3-22 +- new BindObjectToPeer method for sfa +- AddSliceTag and UpdateSliceTag open to the 'node' auth method with restrictions + +* Wed Jul 01 2009 Thierry Parmentelat - PLCAPI-4.3-21 +- getbootmedium supports options as tags (serial, cramfs, kvariant, kargs, no-hangcheck ) +- reportrunlevel logs its calls only when run_level changes +- pycurl more robust wrt to xmlrpclib.Transport + +* Tue Jun 16 2009 Thierry Parmentelat - PLCAPI-4.3-20 +- produce a wsdl interface +- bugfix in getbootmedium for nodes with interface tags + +* Sun Jun 07 2009 Thierry Parmentelat - PLCAPI-4.3-19 +- bugfix for some rare pattern-based filters + +* Wed Jun 03 2009 Thierry Parmentelat - PLCAPI-4.3-18 +- improvements in the 4.2 legacy layer + +* Sat May 30 2009 Thierry 
Parmentelat - PLCAPI-4.3-17 +- bugfix required for slice tags set on nodegroups + +* Thu May 28 2009 Thierry Parmentelat - PLCAPI-4.3-16 +- more complete compatibility layer - second iteration, with legacy code isolated in Legacy/ + +* Tue May 26 2009 Thierry Parmentelat - PLCAPI-4.3-15 +- more powerful legacy layer with 4.2 + +* Fri May 15 2009 Thierry Parmentelat - PLCAPI-4.3-14 +- RefreshPeer sets lock per-peer to avoid multiple concurent instances +- migration script has an option for running interactively + +* Wed May 06 2009 Thierry Parmentelat - PLCAPI-4.3-13 +- skips already added entries + +* Tue Apr 28 2009 Thierry Parmentelat - PLCAPI-4.3-12 +- yet another set of fixes for external dummynet boxes + +* Wed Apr 22 2009 Thierry Parmentelat - PLCAPI-4.3-11 +- GetDummyBoxMedium returns a base64-encoded boot image, doc is updated +- and tmp file is cleaned up + +* Wed Apr 22 2009 Thierry Parmentelat - PLCAPI-4.3-10 +- restore missing ResolveSlices + +* Mon Apr 20 2009 Thierry Parmentelat - PLCAPI-4.3-9 +- new method GetDummyBoxMedium + +* Fri Apr 17 2009 Thierry Parmentelat - PLCAPI-4.3-8 +- remove duplicate in Methods/__init__ that was breaking build of myplc-docs + +* Fri Apr 17 2009 Thierry Parmentelat - PLCAPI-4.3-7 +- support for external dummynet boxes back in 4.3 - first draft + +* Thu Apr 09 2009 Thierry Parmentelat - PLCAPI-4.3-6 +- fixes for smooth federation between 4.2 and 4.3 +- peername is not UNIQUE in schema anymore, was preventing delete/recreate + +* Tue Apr 07 2009 Thierry Parmentelat - PLCAPI-4.3-5 +- support for BootCD variants (GetBootMedium ['variant:centos5']) +- fix corner case with filters like {'~slice_id':[]} +- fix transaction leak that caused the db connections pool to exhaust +- properly expose all methods, including Legacy/, and not only Methods/ + +* Tue Mar 24 2009 Thierry Parmentelat - PLCAPI-4.3-4 +- renumbered as 4.3 +- nodes have new fields run_level (in addition to boot_state) and verified +- tweaked migration from 4.2 +- 
tuned rpm dependencies +- doc generation more explicit about errors like missing python modules +- removed obsolete method GetSlicesMD5 + +* Wed Jan 28 2009 Thierry Parmentelat - PLCAPI-4.3-3 +- unified all tags +- renamed interface settings into interface tags and slice attributes into slice tags +- nodes have a node_type +- various changes on the way to 4.3 + +* Thu Nov 27 2008 Thierry Parmentelat - PLCAPI-4.3-2 +- Checkpointing : this version still has interface settings and slice attributes + +* Wed Sep 10 2008 Thierry Parmentelat - PLCAPI-4.3-1 +- first iteration with taggable nodes/interfaces/slices +- embryo for ilinks +- cleaned up boot states +- migration script moslty complete + +* Wed May 14 2008 Thierry Parmentelat - PLCAPI-4.2-8 +- fixed doc build by locating locally installed DTDs at build-time + +* Fri May 09 2008 Thierry Parmentelat - PLCAPI-4.2-7 +- no more doc packaged outside of myplc-docs - doc/ cleaned up +- enhancements in doc on filters +- bootcd-aware GetBootMedium merged from onelab + +* Thu May 08 2008 Thierry Parmentelat - PLCAPI-4.2-6 +- checkpoint while the new myplc-docs package is underway +- bugfix: GetSlivers & conf files +- doc: removed target files + +* Wed Apr 23 2008 Stephen Soltesz - PLCAPI-4.2-5 +- Removed conditions on the persons, site, and nodes indexes. previsouly only +- the non-deleted fields were index, resulting in massivly slow queries. +- + +* Wed Mar 26 2008 Thierry Parmentelat - PLCAPI-4.2-3 PLCAPI-4.2-4 +- plcsh: better handling of options when running as a shell script +- getbootmedium exports compute_key +- tweaks for accepted args in GetPCUTypes and BootNotifyOwners + +* Thu Feb 14 2008 Thierry Parmentelat - PLCAPI-4.2-2 PLCAPI-4.2-3 +- GetBootMedium support for build.sh full options, incl. 
serial & console_spec +- GetBootMedium simpler, cleaner and safer use of tmpdirs in (dated from bootcustom.sh) + +* Fri Feb 01 2008 Thierry Parmentelat - PLCAPI-4.2-1 PLCAPI-4.2-2 +- refresh peer script to use a month-dependent logfile +- tracking the starting point for UniPi integration of the dummynet boxes + +* Thu Jan 31 2008 Thierry Parmentelat - PLCAPI-4.2-0 PLCAPI-4.2-1 +- plcsh adds its own path to sys.path +- fix so GetNodes can be called from a Node + +* Fri Oct 27 2006 Mark Huang - +- Initial build. + +%define module_current_branch 4.3 diff --git a/plcsh b/plcsh new file mode 100755 index 0000000..7bc3abe --- /dev/null +++ b/plcsh @@ -0,0 +1,173 @@ +#!/usr/bin/python +# +# Interactive shell for testing PLCAPI +# +# Mark Huang +# Copyright (C) 2005 The Trustees of Princeton University +# + +import os +import sys +from socket import gethostname +from optparse import OptionParser +from getpass import getpass +from traceback import print_exc + +sys.path.append(os.path.dirname(os.path.realpath(sys.argv[0]))) +from PLC.Shell import Shell + +usage="""Usage: %prog [options] + runs an interactive shell +Usage: %prog [options] script script-arguments +Usage: %prog script [plcsh-options --] script arguments + run a script""" + +parser = OptionParser(usage=usage,add_help_option = False) +parser.add_option("-f", "--config", help = "PLC configuration file") +parser.add_option("-h", "--url", help = "API URL") +parser.add_option("-c", "--cacert", help = "API SSL certificate") +parser.add_option("-k", "--insecure", help = "Do not check SSL certificate") +parser.add_option("-m", "--method", help = "API authentication method") +parser.add_option("-s", "--session", help = "API session key") +parser.add_option("-u", "--user", help = "API user name") +parser.add_option("-p", "--password", help = "API password") +parser.add_option("-r", "--role", help = "API role") +parser.add_option("-x", "--xmlrpc", action = "store_true", default = False, help = "Use XML-RPC interface") +# 
pass this to the invoked shell if any +parser.add_option("--help", action = "store_true", dest="help", default=False, + help = "show this help message and exit") +(options, args) = parser.parse_args() + +if not args and options.help: + parser.print_help() + sys.exit(1) + +# If user is specified but password is not +if options.user is not None and options.password is None: + try: + options.password = getpass() + except (EOFError, KeyboardInterrupt): + print + sys.exit(0) + +# Initialize a single global instance (scripts may re-initialize +# this instance and/or create additional instances). +try: + shell = Shell(globals = globals(), + config = options.config, + url = options.url, xmlrpc = options.xmlrpc, cacert = options.cacert, + method = options.method, role = options.role, + user = options.user, password = options.password, + session = options.session) + # Register a few more globals for backward compatibility + auth = shell.auth + api = shell.api + config = shell.config +except Exception, err: + print "Error:", err + print + parser.print_help() + sys.exit(1) + +# If called by a script +if args: + if not os.path.exists(args[0]): + print 'File %s not found'%args[0] + parser.print_help() + sys.exit(1) + else: + # re-append --help if provided + if options.help: + args.append('--help') + # use args as sys.argv for the next shell, so our own options get removed for the next script + sys.argv = args + script = sys.argv[0] + # Add of script to sys.path + path = os.path.dirname(os.path.abspath(script)) + sys.path.append(path) + execfile(script) + +# Otherwise, run an interactive shell environment +else: + if shell.server is None: + print "PlanetLab Central Direct API Access" + prompt = "" + elif shell.auth['AuthMethod'] == "anonymous": + prompt = "[anonymous]" + print "Connected anonymously" + elif shell.auth['AuthMethod'] == "session": + # XXX No way to tell node and user sessions apart from the + # client point of view. 
+ prompt = "[%s]" % gethostname() + print "%s connected using session authentication" % gethostname() + else: + prompt = "[%s]" % shell.auth['Username'] + print "%s connected using %s authentication" % \ + (shell.auth['Username'], shell.auth['AuthMethod']) + + # Readline and tab completion support + import atexit + import readline + import rlcompleter + + print 'Type "system.listMethods()" or "help(method)" for more information.' + # Load command history + history_path = os.path.join(os.environ["HOME"], ".plcapi_history") + try: + file(history_path, 'a').close() + readline.read_history_file(history_path) + atexit.register(readline.write_history_file, history_path) + except IOError: + pass + + # Enable tab completion + readline.parse_and_bind("tab: complete") + + try: + while True: + command = "" + while True: + # Get line + try: + if command == "": + sep = ">>> " + else: + sep = "... " + line = raw_input(prompt + sep) + # Ctrl-C + except KeyboardInterrupt: + command = "" + print + break + + # Build up multi-line command + command += line + + # Blank line or first line does not end in : + if line == "" or (command == line and line[-1] != ':'): + break + + command += os.linesep + + # Blank line + if command == "": + continue + # Quit + elif command in ["q", "quit", "exit"]: + break + + try: + try: + # Try evaluating as an expression and printing the result + result = eval(command) + if result is not None: + print result + except SyntaxError: + # Fall back to executing as a statement + exec command + except Exception, err: + print_exc() + + except EOFError: + print + pass diff --git a/setup.py b/setup.py new file mode 100755 index 0000000..4ead837 --- /dev/null +++ b/setup.py @@ -0,0 +1,24 @@ +#!/usr/bin/python +# +# Setup script for PLCAPI +# +# Mark Huang +# Copyright (C) 2006 The Trustees of Princeton University +# + +from distutils.core import setup +from glob import glob + +setup(packages = ['PLC', 'PLC/Methods', 'PLC/Methods/system', 'PLC/Accessors', 'aspects'], 
+ scripts = ['plcsh', 'Server.py'], + data_files = [ + ('', ['planetlab5.sql']), + # package for mod_python and mod_wsgi, defer choice to myplc + ('apache', ['apache/ModPython.py', 'apache/__init__.py', 'apache/plc.wsgi']), + ('php', ['php/plc_api.php']), + ('migrations', + ['migrations/README.txt', + 'migrations/extract-views.py'] + + glob('migrations/[0-9][0-9][0-9]*')), + ('extensions', ['extensions/README.txt']), + ]) diff --git a/tools/dzombie.py b/tools/dzombie.py new file mode 100755 index 0000000..2095c96 --- /dev/null +++ b/tools/dzombie.py @@ -0,0 +1,122 @@ +#!/usr/bin/python +# +# Tool that removes zombie records from database tables# +import sys +import os +import getopt +import pgdb +from pprint import pprint + +schema_file = None +config_file = "/etc/planetlab/plc_config" +config = {} +execfile(config_file, config) + +def usage(): + print "Usage: %s SCHEMA_FILE " % sys.argv[0] + sys.exit(1) + +try: + schema_file = sys.argv[1] +except IndexError: + print "Error: too few arguments" + usage() + +# all foreing keys exist as primary kyes in another table +# will represent all foreign keys as +# { 'table.foreign_key': 'table.primary_key'} +foreign_keys = {} +foreign_keys_ordered = [] +zombie_keys = {} +# parse the schema for foreign keys +try: + file = open(schema_file, 'r') + index = 0 + lines = file.readlines() + while index < len(lines): + line = lines[index].strip() + # find all created objects + if line.startswith("CREATE"): + line_parts = line.split(" ") + if line_parts[1:3] == ['OR', 'REPLACE']: + line_parts = line_parts[2:] + item_type = line_parts[1].strip() + item_name = line_parts[2].strip() + if item_type.upper() in ['TABLE']: + while index < len(lines): + index = index + 1 + nextline =lines[index].strip() + if nextline.find("--") > -1: + nextline = nextline[0:nextline.index("--")].replace(',', '') + if nextline.upper().find("REFERENCES") > -1: + nextline_parts = nextline.split(" ") + foreign_key_name = nextline_parts[0].strip() + 
foreign_key_table = nextline_parts[nextline_parts.index("REFERENCES")+1].strip() + foreign_key = item_name + "."+ foreign_key_name + primary_key = foreign_key_table +"."+ foreign_key_name + foreign_keys[foreign_key] = primary_key + foreign_keys_ordered.append(foreign_key) + elif nextline.find(";") >= 0: + break + index = index + 1 +except: + raise + +db = pgdb.connect(user = config['PLC_DB_USER'], + database = config['PLC_DB_NAME']) +cursor = db.cursor() +try: + for foreign_key in foreign_keys_ordered: + primary_key = foreign_keys[foreign_key] + sql = "SELECT distinct %s from %s" + + # get all foreign keys in this table + foreign_key_parts = foreign_key.split(".") + + # do not delete from primary tables + if foreign_key_parts[0] in ['addresses', 'boot_states', 'conf_files', \ + 'keys', 'messages', 'nodegroups', 'interfaces', 'nodes', 'pcus', 'peers' \ + 'persons', 'roles', 'sessions', 'sites', 'slices']: + #print "skipping table %s" % foreign_key_parts[0] + continue + + cursor.execute(sql % (foreign_key_parts[1], foreign_key_parts[0])) + foreign_rows = cursor.fetchall() + + # get all the primary keys from this foreign key's primary table + primary_key_parts = primary_key.split(".") + # foreign key name may not match primary key name. 
must rename these + if primary_key_parts[1] == 'creator_person_id': + primary_key_parts[1] = 'person_id' + elif primary_key_parts[1] == 'min_role_id': + primary_key_parts[1] = 'role_id' + sql = sql % (primary_key_parts[1], primary_key_parts[0]) + + # determin which primary records are deleted + desc = os.popen('psql planetlab4 postgres -c "\d %s;"' % primary_key_parts[0]) + result = desc.readlines() + if primary_key_parts[0] in ['slices']: + sql = sql + " where name not like '%_deleted'" + elif filter(lambda line: line.find("deleted") > -1, result): + sql = sql + " where deleted = false" + + cursor.execute(sql) + primary_key_rows = cursor.fetchall() + + # if foreign key isnt present in primay_key query, it either doesnt exist or marked as deleted + # also, ignore null foreign keys, not considered zombied + zombie_keys_func = lambda key: key not in primary_key_rows and not key == [None] + zombie_keys_list = [zombie_key[0] for zombie_key in filter(zombie_keys_func, foreign_rows)] + print zombie_keys_list + # delete these zombie records + if zombie_keys_list: + print " -> Deleting %d zombie record(s) from %s after checking %s" % \ + (len(zombie_keys_list), foreign_key_parts[0], primary_key_parts[0]) + sql_delete = 'DELETE FROM %s WHERE %s IN %s' % \ + (foreign_key_parts[0], foreign_key_parts[1], tuple(zombie_keys_list)) + cursor.execute(sql_delete) + db.commit() + #zombie_keys[foreign_key] = zombie_keys_list + print "done" +except pgdb.DatabaseError: + raise diff --git a/tools/planetlab3_dump.sh b/tools/planetlab3_dump.sh new file mode 100755 index 0000000..e6a86e3 --- /dev/null +++ b/tools/planetlab3_dump.sh @@ -0,0 +1,119 @@ +#!/bin/bash +# +# Dumps the planetlab3 database on zulu, fixing a few things on the way +# +# Mark Huang +# Copyright (C) 2007 The Trustees of Princeton University +# +# $Id$ +# + +tables=( +node_bootstates +nodes +nodenetworks +node_nodenetworks +nodegroups +nodegroup_nodes +override_bootscripts +pod_hash +conf_file +conf_assoc +address_types 
+addresses +organizations +sites +roles +capabilities +persons +person_roles +person_capabilities +person_address +key_types +keys +person_keys +person_site +node_root_access +authorized_subnets +site_authorized_subnets +event_classes +dslice03_states +dslice03_attributetypes +dslice03_slices +dslice03_attributes +dslice03_sliceattribute +dslice03_slicenode +dslice03_sliceuser +dslice03_siteinfo +pcu +pcu_ports +join_request +whatsnew +node_hostnames +blacklist +dslice03_initscripts +dslice03_defaultattribute +peered_mas +sessions +) + +# Dump tables +for table in "${tables[@]}" ; do + pg_dump -U postgres -t $table planetlab3 +done | + +# Do some manual cleanup +sed -f <(cat < +# Copyright (C) 2006 The Trustees of Princeton University +# + +import re +import base64 + +# Convert nm_net_{exempt_,}{min,max}_rate (bps) to +# net_{i2_,}{min,max}_rate and net_{i2_,}{min,max}_rate (kbps) +rename = {'nm_net_min_rate': 'net_min_rate', + 'nm_net_max_rate': 'net_max_rate', + 'nm_net_exempt_min_rate': 'net_i2_min_rate', + 'nm_net_exempt_max_rate': 'net_i2_max_rate'} +for slice_attribute in GetSliceTags({'name': rename.keys()}): + id = slice_attribute['slice_attribute_id'] + name = slice_attribute['name'] + slice_id = slice_attribute['slice_id'] + + # Convert bps to kbps + bps = int(slice_attribute['value']) + kbps = bps / 1000 + + # Add the new attribute + if GetSlices([slice_id]): + AddSliceTag(slice_id, rename[name], str(kbps)) + + # Delete the old attribute + DeleteSliceTag(id) + +# Convert nm_net_{exempt_,}avg_rate to +# net_{i2_,}max_kbyte and net_{i2_,}thresh_kbyte +rename = {'nm_net_avg_rate': {'max': 'net_max_kbyte', + 'thresh': 'net_thresh_kbyte'}, + 'nm_net_exempt_avg_rate': {'max': 'net_i2_max_kbyte', + 'thresh': 'net_i2_thresh_kbyte'}} +for slice_attribute in GetSliceTags({'name': rename.keys()}): + id = slice_attribute['slice_attribute_id'] + name = slice_attribute['name'] + slice_id = slice_attribute['slice_id'] + + # Convert bps to 80% and 100% of max bytes per 
day + bps = int(slice_attribute['value']) + max_kbyte = bps * 24 * 60 * 60 / 8 / 1000 + thresh_kbyte = int(0.8 * max_kbyte) + + # Add the new attribute + if GetSlices([slice_id]): + AddSliceTag(slice_id, rename[name]['max'], str(max_kbyte)) + AddSliceTag(slice_id, rename[name]['thresh'], str(thresh_kbyte)) + + # Delete the old attribute + DeleteSliceTag(id) + +# Convert plc_slice_state +for slice_attribute in GetSliceTags({'name': 'plc_slice_state'}): + id = slice_attribute['slice_attribute_id'] + name = slice_attribute['name'] + slice_id = slice_attribute['slice_id'] + + # Add the new attribute + if GetSlices([slice_id]): + if slice_attribute['value'] == "suspended": + AddSliceTag(slice_id, 'enabled', "0") + else: + AddSliceTag(slice_id, 'enabled', "1") + + # Delete the old attribute + DeleteSliceTag(id) + +# Straight renames +rename = {'nm_cpu_share': 'cpu_share', + 'nm_disk_quota': 'disk_max', + 'nm_net_share': 'net_share', + 'nm_net_exempt_share': 'net_i2_share', + 'nm_net_max_byte': 'net_max_kbyte', + 'nm_net_max_thresh_byte': 'net_thresh_kbyte', + 'nm_net_max_exempt_byte': 'net_i2_max_kbyte', + 'nm_net_max_thresh_exempt_byte': 'net_i2_thresh_kbyte'} +for slice_attribute in GetSliceTags({'name': rename.keys()}): + id = slice_attribute['slice_attribute_id'] + name = slice_attribute['name'] + slice_id = slice_attribute['slice_id'] + + # Pass straight through + value = slice_attribute['value'] + + # Add the new attribute + if GetSlices([slice_id]): + AddSliceTag(slice_id, rename[name], value) + + # Delete the old attribute + DeleteSliceTag(id) + +# Update plc_ticket_pubkey attribute +for slice_attribute in GetSliceTags({'name': "plc_ticket_pubkey"}): + id = slice_attribute['slice_attribute_id'] + + UpdateSliceTag(id, """ +MIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQDKXa72MEKDAnVyzEpKOB1ot2eW +xG/TG2aa7q/2oy1xf5XMmU9H9uKwO+GoUeinp1BSxgkVRF0VhEGGaqKR9kYQzX0k +ht4+P2hAr+UyU4cp0NxV4xfmyAbrNKuHVjawMUCu5BH0IkBUC/89ckxk71oROnak +FbI7ojUezSGr4aVabQIDAQAB +""".lstrip()) + +# 
Delete _deleted and deprecated slice attributes and types +for attribute_type in GetSliceTagTypes(): + id = attribute_type['attribute_type_id'] + name = attribute_type['name'] + + if name == 'general_prop_share' or \ + re.match('nm_', name) or \ + re.search('_deleted$', name): + DeleteSliceTagType(id) + # N.B. Automatically deletes all slice attributes of this type + +# Add Proper ops +proper_ops = [ + # give Stork permission to mount and unmount client dirs + ('arizona_stork', 'mount_dir'), + ('arizona_stork', 'set_file_flags pass, "1"'), + ('arizona_stork', 'set_file_flags_list "1"'), + ('arizona_stork', 'bind_socket sockname=64?:*'), + ('arizona_stork2', 'mount_dir'), + ('arizona_stork2', 'set_file_flags pass, "1"'), + ('arizona_stork2', 'set_file_flags_list "1"'), + ('arizona_stork2', 'bind_socket sockname=64?:*'), + + # give CoMon the necessary permissions to run slicestat + ('princeton_slicestat', 'exec "root", pass, "/usr/local/planetlab/bin/pl-ps", none'), + ('princeton_slicestat', 'exec "root", pass, "/usr/sbin/vtop", "bn1", none'), + ('princeton_slicestat', 'open_file file=/proc/virtual/*/cacct'), + ('princeton_slicestat', 'open_file file=/proc/virtual/*/limit'), + ('princeton_comon', 'open_file file=/var/log/secure'), + ('princeton_comon', 'exec "root", pass, "/bin/df", "/vservers", none'), + + # give pl_slicedir access to /etc/passwd + ('pl_slicedir', 'open_file pass, "/etc/passwd"'), + + # nyu_d are building a DNS demux so give them access to port 53 + ('nyu_d', 'bind_socket'), + ('nyu_oasis', 'bind_socket'), + + # QA slices need to be able to create and delete bind-mounts + ('pl_qa_0', 'mount_dir'), + ('pl_qa_1', 'mount_dir'), + + # irb_snort needs packet sockets for tcpdump + ('irb_snort', 'create_socket'), + + # uw_ankur is using netlink sockets to do the same thing as netflow + ('uw_ankur', 'create_socket'), + + # cornell_codons gets access to port 53 for now + ('cornell_codons', 'create_socket'), + + # give Mic Bowman's conf-monitor service 
read-only access to root fs + # and the ability to run df + ('idsl_monitor', 'mount_dir "root:/", pass, "ro"'), + ('idsl_monitor', 'unmount'), + ('idsl_monitor', 'exec "root", pass, "/bin/df", "-P", "/", "/vservers", none'), + + # give Shark access to port 111 to run portmap + # and port 955 to run mount + ('nyu_shkr', 'bind_socket'), + ('nyu_shkr', 'mount_dir "nfs:**:**"'), + ('nyu_shkr', 'exec "root", pass, "/bin/umount", "-l", "/vservers/nyu_shkr/**", none'), + + # give tsinghua_lgh access to restricted ports + ('tsinghua_lgh', 'bind_socket'), + + # CoDeeN needs port 53 too + ('princeton_codeen', 'bind_socket sockname=53:*'), + + # give ucin_load access to /var/log/wtmp + ('ucin_load', 'open_file file=/var/log/wtmp*'), + + # give google_highground permission to bind port 81 (and raw sockets) + ('google_highground', 'bind_socket'), + + # pl_conf needs access to port 814 + ('pl_conf', 'bind_socket sockname=814:*'), + ('pl_conf', 'open file=/home/*/.ssh/authorized_keys'), + + # give princeton_visp permission to read all packets sent through the + # tap0 device + ('princeton_visp', 'open file=/dev/net/tun, flags=rw'), + + # The PLB group needs the BGP port + ('princeton_iias', 'bind_socket sockname=179:*'), + ('princeton_visp', 'bind_socket sockname=179:*'), + ('mit_rcp', 'bind_socket sockname=179:*'), + ('princeton_bgpmux', 'bind_socket sockname=179:*'), + ('princeton_bgpmux2', 'bind_socket sockname=179:*'), + + # PL-VINI group + ('mit_rcp', 'exec "root", pass, "/usr/bin/chrt"'), + ('princeton_iias', 'exec "root", pass, "/usr/bin/chrt"'), + + # Tycoon needs access to /etc/passwd to determine Slicename->XID mappings + ('hplabs_tycoon_aucd', 'open_file file=/etc/passwd'), +] + +for slice, op in proper_ops: + try: + AddSliceTag(slice, 'proper_op', op) + except Exception, err: + print "Warning: %s:" % slice, err + +initscripts = dict([(initscript['initscript_id'], initscript) for initscript in [{'initscript_id': 8, 'script': '#! /bin/sh

# <Program Name>
#    bindscript
#
# <Author>
#    Jeffry Johnston and Jeremy Plichta
#
# <Purpose>
#    Downloads and installs stork on a node.

# save original PWD
OLDPWD=$PWD

# error reporting function
error()
{
   echo
   echo "Please E-mail stork-support@cs.arizona.edu if you believe you have" 
   echo "received this message in error."

   # get rid of CERT file
   if [ -f $CERT ]
   then
      rm -f $CERT > /dev/null
   fi

   # restore original PWD
   cd $OLDPWD
   exit 1
}

CERT=`pwd`/tempcrtfile

#functions

###
### createCertificate()
###    prints out the equifax certificate to use and stores
###    the file name in $CERT
###
function createCertificate(){
cat > $CERT <<EQUIFAX
-----BEGIN CERTIFICATE-----
MIICkDCCAfmgAwIBAgIBATANBgkqhkiG9w0BAQQFADBaMQswCQYDVQQGEwJV
UzEcMBoGA1UEChMTRXF1aWZheCBTZWN1cmUgSW5jLjEtMCsGA1UEAxMkRXF1
aWZheCBTZWN1cmUgR2xvYmFsIGVCdXNpbmVzcyBDQS0xMB4XDTk5MDYyMTA0
MDAwMFoXDTIwMDYyMTA0MDAwMFowWjELMAkGA1UEBhMCVVMxHDAaBgNVBAoT
E0VxdWlmYXggU2VjdXJlIEluYy4xLTArBgNVBAMTJEVxdWlmYXggU2VjdXJl
IEdsb2JhbCBlQnVzaW5lc3MgQ0EtMTCBnzANBgkqhkiG9w0BAQEFAAOBjQAw
gYkCgYEAuucXkAJlsTRVPEnCUdXfp9E3j9HngXNBUmCbnaEXJnitx7HoJpQy
td4zjTov2/KaelpzmKNc6fuKcxtc58O/gGzNqfTWK8D3+ZmqY6KxRwIP1ORR
OhI8bIpaVIRw28HFkM9yRcuoWcDNM50/o5brhTMhHD4ePmBudpxnhcXIw2EC
AwEAAaNmMGQwEQYJYIZIAYb4QgEBBAQDAgAHMA8GA1UdEwEB/wQFMAMBAf8w
HwYDVR0jBBgwFoAUvqigdHJQa0S3ySPY+6j/s1draGwwHQYDVR0OBBYEFL6o
oHRyUGtEt8kj2Puo/7NXa2hsMA0GCSqGSIb3DQEBBAUAA4GBADDiAVGqx+pf
2rnQZQ8w1j7aDRRJbpGTJxQx78T3LUX47Me/okENI7SS+RkAZ70Br83gcfxa
z2TE4JaY0KNA4gGK7ycH8WUBikQtBmV1UsCGECAhX2xrD2yuCRyv8qIYNMR1
pHMc8Y3c7635s3a0kr/clRAevsvIO1qEYBlWlKlV
-----END CERTIFICATE----- 
EQUIFAX
}

###
### overWriteConf()
###	overwrite the default stork.conf file
###     that was installed by the rpm package.
###     this is a temporary hack because I need
###     to change the nestport and I dont know
###     enough to repackage the rpm with the
###     correct settings
function overWriteConf(){
cat > /usr/local/stork/etc/stork.conf <<ENDOFFILE
pacman=/usr/local/stork/bin/pacman
dtd-packages=/usr/local/stork/bin/packages.dtd
dtd-groups=/usr/local/stork/bin/groups.dtd
storknestupdatelistenerport=649

#bittorrenttrackerhost=quadrus.cs.arizona.edu
bittorrenttrackerhost=nr06.cs.arizona.edu

bittorrenttrackerport=6880
bittorrentuploadrate=0
bittorrentseedlookuptimeout=30

#packagerepository = quadrus.cs.arizona.edu/PlanetLab/V3|dist, stable
packagerepository = nr06.cs.arizona.edu/PlanetLab/V3|dist, stable
#packageinforepository = quadrus.cs.arizona.edu/PlanetLab/V3/stork.info
packageinforepository = nr06.cs.arizona.edu/PlanetLab/V3/stork.info

username = PlanetLab
publickeyfile = /usr/local/stork/var/keys/PlanetLab.publickey
packagemanagers = nestrpm, rpm, targz
transfermethod= nest,bittorrent,coblitz,coral,http,ftp
nestport=6000
tarpackinfopath=/usr/local/stork/var/tarinfo
ENDOFFILE
} 


###
### downloadNR06()
###    download a file from nr06 using curl
###
### args: 
###       - the path of the file you wish to download
###         relative from https://nr06.cs.arizona.edu
###       - the file to save it to
###       - returned value as specified in verifyDownload
function downloadNR06(){
    curl --cacert $CERT https://nr06.cs.arizona.edu/$1 -o $2 2>/dev/null
    verifyDownload $2 $3
}

###
### verifyDownload()
###     verify that a file that was just download with downloadNR06
###     was download correctly. Since we are getting stuff from a
###     http server we are assuming that if we get a 404 response
###     that the page we want does not exist. Also, if the output file
###     does not exist that means that only headers were returned
###     without any content. this too is a invalid file download
###
### args:
###       - the file to verify
###       - return variable, will have 1 if fail 0 if good
###
function verifyDownload(){
    eval "$2=0"
    if [ ! -f $1 ];
    then
        eval "$2=1"
    elif grep '404 Not Found' $1 > /dev/null
    then
	rm -f $1
        eval "$2=1"
    else
        eval "$2=0"
    fi
}


# check for root user
# NOTE: `error` is a helper defined earlier in this script; it prints a
# support-contact message, restores $OLDPWD and exits non-zero.
if [ $UID -ne "0" ]
then
   echo "You must run this program with root permissions..."
   error
fi   
 
# clean up in case this script was run before and failed
rm -rf /tmp/stork &> /dev/null

# create /tmp/stork directory
mkdir /tmp/stork 
if [ $? -ne "0" ]
then
   echo
   echo "Could not create the /tmp/stork directory..."
   error
fi

# export our root directory to Stork
# writing the serving slice's name into /.exportdir asks the node to
# export this slice's filesystem to the arizona_stork2 slice
echo "arizona_stork2" > /.exportdir
if [ $? -ne "0" ]
then
   echo
   echo "Could not create the /.exportdir file..."
   error
fi
 
# tell stork that we want to be served
# prefer the PlanetLab slice name; fall back to $USER off-PlanetLab
if [ -f /etc/slicename ]
then
   SLICENAME=`cat /etc/slicename`
else 
   SLICENAME=$USER
fi
# the backslash keeps "$bindscript" literal in the URL (it is not a
# shell variable); stork serves the bind script at this endpoint
wget -O /tmp/stork/$SLICENAME "http://localhost:648/$SLICENAME\$bindscript"

# verify that the download was successful
# NOTE(review): $? here is wget's exit status only because no command
# runs between the wget above and this test -- keep it that way
if [ ! -f /tmp/stork/$SLICENAME -o $? -ne 0 ]
then
   echo
   echo "Stork doesn't seem to be running on this node..."
   error
fi

# wait for stork slice 
# stork signals that the binding was accepted by creating this flag file
echo "Waiting for Stork to accept our binding..."
while [ ! -f /tmp/stork/stork_says_go ]
do
   sleep 1
done

# change PWD to the /tmp/stork directory 
cd /tmp/stork
if [ $? -ne "0" ]
then
   echo
   echo "Could not access the /tmp/stork directory..."
   error
fi

# confirm that packages to be installed actually exist
# if no file matches *.rpm the glob stays literal, so its output still
# contains a '*' character -- that means nothing was downloaded
if echo *.rpm | grep '*' > /dev/null
then
   echo
   echo "Error: Stork package download failed..."
   error
fi

# remove Stork packages and files
echo
echo "Removing Stork files..."

# build a list of packages to remove
# query each downloaded rpm for its package NAME so any previously
# installed version of the same package can be erased first
packages=""
for filename in *.rpm
do
  # convert filename to a package name
  pack=`rpm -qp --qf "%{NAME}\n" $filename`
  if [ $? -eq "0" ]
  then
    packages="$packages $pack"
  fi
done   

# remove old Stork packages
# best-effort: errors (e.g. package not installed) are discarded
rpm -e $packages &> /dev/null

# remove anything left in /usr/local/stork/bin
rm -rf /usr/local/stork/bin/* &> /dev/null 

# install Stork packages
echo
echo "Installing packages..." 

# build a list of packages to install
packages=""
for filename in *.rpm
do
  packages="$packages $filename"
done   

# install the new stork packages
rpm -i $packages

# report package installation errors
# non-fatal: warn and continue rather than abort the whole bind
if [ $? -ne "0" ]
then
  echo "Warning: Possible error installing Stork packages..."
fi

# restore original PWD
cd $OLDPWD

# clean up temporary files
rm -rf /tmp/stork &> /dev/null

# SEE TO-DO 1
#create the equifax certificate to use for curl
#createCertificate

# TO-DO 1
# implement the below in the beginning of stork.py
#attempt to download the users public key from the repository
#downloadNR06 "user-upload/pubkeys/$SLICENAME.publickey" "/usr/local/stork/var/$SLICENAME.publickey" RET

#if [ $RET -ne 0 ];
#then
#   echo
#   echo "Could not fetch your public key from the repository."
#   echo "If you want to upload one for the next time you run"
#   echo "the initscript please visit"
#   echo "http://nr06.cs.arizona.edu/testphp/upload.php"
#   echo
#fi

#attempt to download the users stork.conf file from the repository
#downloadNR06 "user-upload/conf/$SLICENAME.stork.conf" "/usr/local/stork/etc/stork.conf.users" RET

#if [ $RET -ne 0 ];
#then
#   echo
#   echo "Could not fetch your stork.conf file from the repository."
#   echo "If you want to upload one for the next time you run"
#   echo "the initscript please visit"
#   echo "http://nr06.cs.arizona.edu/testphp/upload.php"
#   echo "Stork will work without a configuration file but to make one"
#   echo "please place a file named stork.conf in /usr/local/stork/etc"
#   echo "refer to the manual for more directions or email:"
#   echo "stork-support@cs.arizona.edu for additional assistance."
#   echo
#fi

#dont need to overwrite the default conf file
#because it should be fixed in the new rpms
#overWriteConf

# run stork to update keyfiles and download package lists
# `stork` was installed by the rpms above; its exit status decides
# whether the bind is reported as successful
echo
echo "Attempting to communicate with stork..."
if stork 
then
   echo
   echo "Congratulations, you have successfully bound to stork!"
   echo
   echo "For help, you may type stork --help"
   echo
   #echo "There is also a storkquery command that will provide information"
   #echo "about packages in the repository."
   echo
   echo "For more help, visit the stork project online at"
   echo "http://www.cs.arizona.edu/stork/.  Please contact"
   echo "stork-support@cs.arizona.edu for additional assistance." 
   #rm -f $CERT > /dev/null
else
   echo
   echo "An error occurred during install finalization...  Please contact"
   echo "stork-support@cs.arizona.edu for assistance."
   #rm -f $CERT > /dev/null
   exit 1
fi

# done
exit 0
', 'name': 'arizona_stork_2', 'encoding': 'base64'}, {'initscript_id': 9, 'script': 'IyEvYmluL2Jhc2gNCmNkIC8NCnJtIC1mIHN0YXJ0X3B1cnBsZQ0Kd2dldCBodHRwOi8vd3d3LmNzLnByaW5jZXRvbi5lZHUvfmRlaXNlbnN0L3B1cnBsZS9zdGFydF9wdXJwbGUNCmNobW9kIDc1NSBzdGFydF9wdXJwbGUNCnN1IHByaW5jZXRvbl9wdXJwbGUgLWMgJy4vc3RhcnRfcHVycGxlJw0K', 'name': 'princeton_purple', 'encoding': 'base64'}, {'initscript_id': 6, 'script': 'IyEgL2Jpbi9zaA0KDQojIHNhdmUgb3JpZ2luYWwgUFdEDQpPTERQV0Q9JFBXRA0KDQojIGVycm9yIHJlcG9ydGluZyBmdW5jdGlvbg0KZXJyb3IoKQ0Kew0KICAgZWNobw0KICAgZWNobyAiUGxlYXNlIEUtbWFpbCBzdG9yay1zdXBwb3J0QGNzLmFyaXpvbmEuZWR1IGlmIHlvdSBiZWxpZXZlIHlvdSBoYXZlIiANCiAgIGVjaG8gInJlY2VpdmVkIHRoaXMgbWVzc2FnZSBpbiBlcnJvci4iDQoNCiAgICMgcmVzdG9yZSBvcmlnaW5hbCBQV0QNCiAgIGNkICRPTERQV0QNCiAgIGV4aXQgMQ0KfQ0KDQojIGNoZWNrIGZvciByb290IHVzZXINCmlmIFsgJFVJRCAtbmUgIjAiIF0NCnRoZW4NCiAgIGVjaG8gJ1lvdSBtdXN0IGJlIHJvb3QgdG8gcnVuIHRoaXMgcHJvZ3JhbS4uLicNCiAgIGVycm9yDQpmaSAgIA0KIA0KIyBDbGVhbiB1cCBpbiBjYXNlIEkgcmFuIHRoaXMgYmVmb3JlDQpybSAtZiAvdG1wL3N0b3JrKiA+IC9kZXYvbnVsbCAyPiYxDQoNCiMgRmlyc3Qgb2YgYWxsIGV4cG9ydCBvdXIgcm9vdCBkaXJlY3RvcnkgdG8gU3RvcmsNCmVjaG8gImFyaXpvbmFfc3RvcmsiID4gLy5leHBvcnRkaXINCiANCiMgTm93IHRlbGwgc3RvcmsgdGhhdCB3ZSB3YW50IHRvIGJlIHNlcnZlZA0KaWYgWyAtZiAvZXRjL3NsaWNlbmFtZSBdDQp0aGVuDQogICBTTElDRU5BTUU9YGNhdCAvZXRjL3NsaWNlbmFtZWANCmVsc2UgDQogICBTTElDRU5BTUU9JFVTRVINCmZpDQoNCndnZXQgaHR0cDovL2xvY2FsaG9zdDo2NDAvJFNMSUNFTkFNRQ0KDQojIGNoZWNrIHRvIG1ha2Ugc3VyZSB0aGUgZG93bmxvYWQgd2FzIHN1Y2Nlc3NmdWwNCmlmIFsgISAtZiAkU0xJQ0VOQU1FIC1vICQ/IC1uZSAwIF0NCnRoZW4NCiAgIGVjaG8NCiAgIGVjaG8gIlN0b3JrIGRvZXNuJ3Qgc2VlbSB0byBiZSBydW5uaW5nIG9uIHRoaXMgbm9kZS4uLiINCiAgIGVycm9yDQpmaQ0KDQojIHdhaXQgZm9yIHN0b3JrIHNsaWNlIA0KZWNobyAiV2FpdGluZyBmb3IgU3RvcmsgdG8gYWNjZXB0IG91ciBiaW5kaW5nLi4uIg0Kd2hpbGUgWyAhIC1mIC90bXAvc3Rvcmtfc2F5c19nbyBdDQpkbw0KICAgc2xlZXAgMQ0KZG9uZQ0KDQojIGNoYW5nZSBQV0QgdG8gdGhlIC90bXAgZGlyZWN0b3J5IA0KY2QgL3RtcA0KaWYgWyAkPyAtbmUgIjAiIF0NCnRoZW4NCiAgIGVjaG8NCiAgIGVjaG8gIkNvdWxkIG5vdCBhY2Nlc3MgdGhlIC90bXAgZGlyZWN0b3J5Li4uIg0KICAgZXJyb3INC
mZpDQoNCiMgY29uZmlybSB0aGF0IHBhY2thZ2VzIHRvIGJlIGluc3RhbGxlZCBhY3R1YWxseSBleGlzdA0KaWYgZWNobyAqLnJwbSB8IGdyZXAgJyonID4gL2Rldi9udWxsDQp0aGVuDQogICBlY2hvDQogICBlY2hvICJFcnJvcjogU3RvcmsgcGFja2FnZSBkb3dubG9hZCBmYWlsZWQuLi4iDQogICBlcnJvcg0KZmkNCg0KIyBpbnN0YWxsIFN0b3JrIHBhY2thZ2VzDQplY2hvICJJbnN0YWxsaW5nIHBhY2thZ2VzLi4uIiANCmZvciBwYWNrIGluICoucnBtDQpkbw0KICAgIyByZW1vdmUgdGhlIG9sZCBzdG9yayBwYWNrYWdlLCBpZiBhbnkNCiAgIHJwbSAtZSBgcnBtIC1xcCAtLXFmICIle05BTUV9XG4iICRwYWNrYCA+IC9kZXYvbnVsbCAyPiYxDQoNCiAgICMgcmVtb3ZlIGFueXRoaW5nIGxlZnQgaW4gL3Vzci9sb2NhbC9zdG9yay9iaW4NCiAgIHJtIC1yZiAvdXNyL2xvY2FsL3N0b3JrL2Jpbi8qID4gL2Rldi9udWxsIDI+JjENCg0KICAgIyBpbnN0YWxsIHRoZSBuZXcgc3RvcmsgcGFja2FnZQ0KICAgcnBtIC1pICRwYWNrDQoNCiAgICMgcmVwb3J0IHBhY2thZ2UgaW5zdGFsbGF0aW9uIGVycm9ycw0KICAgaWYgWyAkPyAtbmUgIjAiIF0NCiAgIHRoZW4NCiAgICAgZWNobyAiV2FybmluZzogUG9zc2libGUgZXJyb3IgaW5zdGFsbGluZyBTdG9yayBwYWNrYWdlOiAkcGFjay4uLiINCiAgIGZpDQpkb25lDQoNCiMgcmVzdG9yZSBvcmlnaW5hbCBQV0QNCmNkICRPTERQV0QNCg0KIyBjbGVhbiB1cCB0ZW1wb3JhcnkgZmlsZXMNCnJtIC1mIC90bXAvc3RvcmsqID4gL2Rldi9udWxsIDI+JjENCnJtICRTTElDRU5BTUUqIA0KDQojIHJ1biBzdG9yayB0byB1cGRhdGUga2V5ZmlsZXMgYW5kIGRvd25sb2FkIHBhY2thZ2UgbGlzdHMNCmVjaG8gIkF0dGVtcHRpbmcgdG8gY29tbXVuaWNhdGUgd2l0aCBzdG9yay4uLiINCmlmIHN0b3JrIA0KdGhlbg0KICAgZWNobw0KICAgZWNobyAiQ29uZ3JhdHVsYXRpb25zLCB5b3UgaGF2ZSBzdWNjZXNzZnVsbHkgYm91bmQgdG8gc3RvcmshIg0KICAgZWNobw0KICAgZWNobyAiRm9yIGhlbHAsIHlvdSBtYXkgdHlwZSBzdG9yayAtLWhlbHAgIg0KICAgZWNobw0KICAgZWNobyAiVGhlcmUgaXMgYWxzbyBhIHN0b3JrcXVlcnkgY29tbWFuZCB0aGF0IHdpbGwgcHJvdmlkZSBpbmZvcm1hdGlvbiINCiAgIGVjaG8gImFib3V0IHBhY2thZ2VzIGluIHRoZSByZXBvc2l0b3J5LiINCiAgIGVjaG8NCiAgIGVjaG8gIkZvciBtb3JlIGhlbHAsIHZpc2l0IHRoZSBzdG9yayBwcm9qZWN0IG9ubGluZSBhdCINCiAgIGVjaG8gImh0dHA6Ly93d3cuY3MuYXJpem9uYS5lZHUvc3RvcmsvLiAgUGxlYXNlIGNvbnRhY3QiDQogICBlY2hvICJzdG9yay1zdXBwb3J0QGNzLmFyaXpvbmEuZWR1IGZvciBhZGRpdGlvbmFsIGFzc2lzdGFuY2UuIiANCmVsc2UNCiAgIGVjaG8NCiAgIGVjaG8gIkFuIGVycm9yIG9jY3VycmVkIGR1cmluZyBpbnN0YWxsIGZpbmFsaXphdGlvbi4uLiAgUGxlYXNlIGNvbnRhY3QiDQogICBlY2hvICJzdG9yay1zd
XBwb3J0QGNzLmFyaXpvbmEuZWR1IGZvciBhc3Npc3RhbmNlLiINCiAgIGV4aXQgMQ0KZmkNCg0KIw0KIyBIZWxsbyBXb3JsZCBkZW1vIGNvZGUNCiMNCg0KIyBQdWJsaWMga2V5IGZvciB0aGlzIGRlbW8NCmNhdCA+L3Vzci9sb2NhbC9zdG9yay92YXIva2V5cy9oZWxsby5wdWJsaWNrZXkgPDwiRU9GIg0KLS0tLS1CRUdJTiBQVUJMSUMgS0VZLS0tLS0NCk1Gd3dEUVlKS29aSWh2Y05BUUVCQlFBRFN3QXdTQUpCQU1XcVE3K2VxQVljNlRPSUJPbkJyRnZqYjlnRVViaWgNCkkxd0Nyeld4a09aa01BcXFmY1RuMW9tcCtLMGd0cUtBK3VaNEIzRGlQRXI0Q0V0Myt5MmJlMGtDQXdFQUFRPT0NCi0tLS0tRU5EIFBVQkxJQyBLRVktLS0tLQ0KRU9GDQpzZWQgLWkgLWUgJ3MvXnVzZXJuYW1lLiovdXNlcm5hbWUgPSBoZWxsby8nIC91c3IvbG9jYWwvc3RvcmsvZXRjL3N0b3JrLmNvbmYNCg0KIyBJbnN0YWxsIFJQTQ0Kc3RvcmsgdXBncmFkZSBoZWxsbw0KDQojIGVuZA0KZXhpdCAwDQo=', 'name': 'princeton_hello_stork', 'encoding': 'base64'}, {'initscript_id': 10, 'script': 'IyEvYmluL2Jhc2gNCg0KIyBJbml0IHNjcmlwdCBmb3IgdGhlIFBsYW5ldExhYiAiSGVsbG8gV29ybGQiIGRlbW8gdXNpbmcgR29vZ2xlIEVhcnRoLg0KIyBJbnN0YWxscyBhIGNyb250YWIgZW50cnkgb24gdGhlIG5vZGUgdGhhdCBwaG9uZXMgaG9tZSB0byB0aGUgc2VydmVyDQojIGV2ZXJ5IHRocmVlIG1pbnV0ZXMuDQoNClNFUlZFUj0xMjguMTEyLjEzOS43Mzo4MDQyCQkjIHBsYW5ldGxhYi0zLmNzLnByaW5jZXRvbi5lZHUNCg0KL3Vzci9iaW4vY3VybCAtcyBodHRwOi8vJFNFUlZFUi8NCmVjaG8gIiovNSAqICogKiAqIC91c3IvYmluL2N1cmwgLXMgaHR0cDovLyRTRVJWRVIvIiB8IGNyb250YWIgLQ0KL3NiaW4vY2hrY29uZmlnIGNyb25kIG9uDQo=', 'name': 'princeton_hello', 'encoding': 'base64'}]]) + +# Convert plc_initscript.initscript_id to raw initscript attribute +for slice_attribute in GetSliceTags({'name': 'plc_initscript'}): + id = slice_attribute['slice_attribute_id'] + slice_id = slice_attribute['slice_id'] + initscript_id = int(slice_attribute['value']) + + # Delete old attribute + DeleteSliceTag(id) + + if initscript_id not in initscripts: + print "Warning: Missing initscript %d" % initscript_id + continue + + initscript = base64.b64decode(initscripts[initscript_id]['script']) + + # Add as initscript attribute + AddSliceTag(slice_id, 'initscript', initscript) + +# Add our custom yum.conf entries +conf_file_id = AddConfFile({ + 'enabled': True, + 'source': 
'PlanetLabConf/yum.conf.php?gpgcheck=1&alpha', + 'dest': '/etc/yum.conf', + 'file_permissions': '644', + 'file_owner': 'root', + 'file_group': 'root', + 'preinstall_cmd': '', + 'postinstall_cmd': '', + 'error_cmd': '', + 'ignore_cmd_errors': False, + 'always_update': False}) +AddConfFileToNodeGroup(conf_file_id, 'Alpha') + +conf_file_id = AddConfFile({ + 'enabled': True, + 'source': 'PlanetLabConf/yum.conf.php?gpgcheck=1&beta', + 'dest': '/etc/yum.conf', + 'file_permissions': '644', + 'file_owner': 'root', + 'file_group': 'root', + 'preinstall_cmd': '', + 'postinstall_cmd': '', + 'error_cmd': '', + 'ignore_cmd_errors': False, + 'always_update': False}) +AddConfFileToNodeGroup(conf_file_id, 'Beta') + +conf_file_id = AddConfFile({ + 'enabled': True, + 'source': 'PlanetLabConf/yum.conf.php?gpgcheck=1&rollout', + 'dest': '/etc/yum.conf', + 'file_permissions': '644', + 'file_owner': 'root', + 'file_group': 'root', + 'preinstall_cmd': '', + 'postinstall_cmd': '', + 'error_cmd': '', + 'ignore_cmd_errors': False, + 'always_update': False}) +AddConfFileToNodeGroup(conf_file_id, 'Rollout') + +# Add OneLab as a peer +onelab = {'peername': u'OneLab', 'peer_url': u'https://onelab-plc.inria.fr/PLCAPI/', 'key': u'-----BEGIN PGP PUBLIC KEY BLOCK-----\nVersion: GnuPG v1.4.5 
(GNU/Linux)\n\nmQGiBEW0kJMRBACaTlrW0eYlQwkzRuMFfEYMwyqBT9Bm6R4g68SJ5GdjCRu3XCnd\nGTGCFF4ewOu6IcUmZDv39eqxShBWyx+JqBogYPGNvPrj07jXXKaSBCM7TPk+9kMW\nPziIxSClvO15XaPKv89c6kFaEBe0z1xsoMB/TNoLmhFUxmc24O7JnEqmYwCgjzIS\nHP7u9KIOYk1ZlTdOtwyRxVkD/1uYbPzD0Qigf8uF9ADzx7I4F1ATd2ezYq0EfzhD\nTDa15FPWwA7jm+Mye//ovT01Ju6JQtCU4N9wRsV2Yy2tWcWFZiYt+BISPVS0lJDx\nQ2Cd2+kEWyl9ByL9/ACHmCUz0OOaz9j1x+GpJLArjUdZSJOs68kPw90F62mrLHfg\nYCHpA/0ZcdJQG9QYNZ67KMFqNPho+uRww5/7kxQ4wkSyP7EK3QUVgXG5OWZ/1mPZ\njon9N04nnjrL9qoQv7m04ih3rmqyGy1MsicNCoys0RNh1eavPdAsXD1ZEXnWPA7z\naC37hxUaRPP3hH+1ifjPpAWQX1E89MK2y2zQpZipvEOAO2Lw8LRCT25lTGFiIENl\nbnRyYWwgKGh0dHA6Ly9vbmVsYWItcGxjLmlucmlhLmZyLykgPHN1cHBvcnRAb25l\nLWxhYi5vcmc+iGAEExECACAFAkW0kJMCGyMGCwkIBwMCBBUCCAMEFgIDAQIeAQIX\ngAAKCRBuu7E0vzFd9fvbAJ9QB2neTSbAN5HuoigIbuKzTUCTjQCeM/3h7/OmjD+z\n6yXtWD4Fzyfr7fSIYAQTEQIAIAUCRbibbAIbIwYLCQgHAwIEFQIIAwQWAgMBAh4B\nAheAAAoJEG67sTS/MV31w3AAn2t6qb94HIPmqCoD/ptK34Dv+VW0AJ4782ffPPnk\nbVXHU/Sx31QCoFmj34hgBBMRAgAgBQJFtJJBAhsjBgsJCAcDAgQVAggDBBYCAwEC\nHgECF4AACgkQbruxNL8xXfU5UQCeKqXWeNzTqdMqj/qHPkp1JCb+isEAn2AzDnde\nITF0aYd02RAKsU4sKePEtEJPbmVMYWIgQ2VudHJhbCAoaHR0cDovL29uZWxhYi1w\nbGMuaW5yaWEuZnIvKSA8c3VwcG9ydEBvbmUtbGFiLm9yZz6IYAQTEQIAIAUCRbi2\npgIbIwYLCQgHAwIEFQIIAwQWAgMBAh4BAheAAAoJEG67sTS/MV31W4AAn0rW5yjR\n2a8jPP/V44gw1JhqnE8jAKCMAEh0nPjvle5oLEGectC3Es9Pm7kBDQRFtJCUEAQA\nhp38fNVy/aJiPg2lUKKnA6KjrRm3LxD66N8MSWfxGCIYzQRJHhmZWnS+m1DDOjdu\nFG9FM6QrsCRRcEQuvhKI2ORFfK75D24lj4QaXzw7vfBbAibTaDsYa0b5LxfR5pGj\nYPCQ5LrRex+Ws3DrB3acJE5/XnYJZ+rUO1ZJlm00FTMAAwUD/Ai4ZUunVB8F0VqS\nhJgDYQF08/OlAnDAcbL//P5dtXdztUNSgXZM4wW/XFnDvAsBuRnbfkT/3BeptM9L\neEbdrMi4eThLstSl13ITOsZbSL3i/2OO9sPAxupWzRWOXcQILpqR2YMRK1EapO+M\nNhjrgxU9JpMXz24FESocczSyywDXiEkEGBECAAkFAkW0kJQCGwwACgkQbruxNL8x\nXfXGxQCfZqzSqinohParWaHv+4XNoIz2B7IAn2Ge0O5wjYZeV/joulkTXfPKm7Iu\n=SsZg\n-----END PGP PUBLIC KEY BLOCK-----\n', 'cacert': u'Certificate:\r\n Data:\r\n Version: 3 (0x2)\r\n Serial Number: 67109883 (0x40003fb)\r\n Signature Algorithm: sha1WithRSAEncryption\r\n Issuer: 
C=US, O=GTE Corporation, OU=GTE CyberTrust Solutions, Inc., CN=G\r\n Validity\r\n Not Before: Mar 14 20:30:00 2006 GMT\r\n Not After : Mar 14 23:59:00 2013 GMT\r\n Subject: C=BE, O=Cybertrust, OU=Educational CA, CN=Cybertrust Educationa\r\n Subject Public Key Info:\r\n Public Key Algorithm: rsaEncryption\r\n RSA Public Key: (2048 bit)\r\n Modulus (2048 bit):\r\n 00:95:22:a1:10:1d:4a:46:60:6e:05:91:9b:df:83:\r\n c2:ed:12:b2:5a:7c:f8:ab:e1:f8:50:5c:28:2c:7e:\r\n 7e:00:38:93:b0:8b:4a:f1:c2:4c:3c:10:2c:3c:ef:\r\n b0:ec:a1:69:2f:b9:fc:cc:08:14:6b:8d:4f:18:f3:\r\n 83:d2:fa:a9:37:08:20:aa:5c:aa:80:60:a2:d5:a5:\r\n 22:00:cf:5a:e5:b4:97:df:ba:1e:be:5c:8e:17:19:\r\n 66:fd:af:9f:7c:7b:89:b2:0e:24:d8:c7:ab:63:c4:\r\n 95:32:8d:48:e6:63:59:7d:04:b8:33:a8:bd:d7:5d:\r\n 64:bc:63:b5:f7:4d:28:fd:f9:06:72:31:5c:ba:45:\r\n 94:65:a3:d2:b4:58:ec:3b:61:58:44:a3:2f:62:b3:\r\n 9b:80:b4:82:fd:d5:c7:cc:51:25:e5:95:3f:47:2f:\r\n 30:7b:ac:c8:78:6e:e2:e1:6d:27:eb:3d:cc:01:82:\r\n e8:35:77:8d:ab:58:bb:55:d1:d5:a4:81:56:8d:1c:\r\n d0:14:b1:b0:06:de:a0:91:22:f3:f0:a8:34:17:47:\r\n c6:e0:3e:f6:0c:5a:ac:7e:50:4b:cd:e1:69:6e:06:\r\n fc:06:7e:6a:4d:b4:95:99:a0:59:5c:35:66:ec:d9:\r\n 49:d4:17:e0:60:b0:5d:a5:d7:1a:e2:2a:6e:66:f2:\r\n af:1d\r\n Exponent: 65537 (0x10001)\r\n X509v3 extensions:\r\n X509v3 CRL Distribution Points: \r\n URI:http://www.public-trust.com/cgi-bin/CRL/2018/cdp.crl\r\n\r\n X509v3 Subject Key Identifier: \r\n 65:65:A3:3D:D7:3B:11:A3:0A:07:25:37:C9:42:4A:5B:76:77:50:E1\r\n X509v3 Certificate Policies: \r\n Policy: 1.3.6.1.4.1.6334.1.0\r\n CPS: http://www.public-trust.com/CPS/OmniRoot.html\r\n\r\n X509v3 Authority Key Identifier: \r\n DirName:/C=US/O=GTE Corporation/OU=GTE CyberTrust Solutions, Inc\r\n serial:01:A5\r\n\r\n X509v3 Key Usage: critical\r\n Certificate Sign, CRL Sign\r\n X509v3 Basic Constraints: critical\r\n CA:TRUE, pathlen:0\r\n Signature Algorithm: sha1WithRSAEncryption\r\n 43:b3:45:83:54:71:c4:1f:dc:b2:3c:6b:4e:bf:26:f2:4e:f2:\r\n 
ad:9a:5b:fa:86:37:88:e8:14:6c:41:18:42:5f:ef:65:3e:eb:\r\n 03:77:a0:b7:9e:75:7a:51:7c:bb:15:5b:b8:af:91:a0:34:92:\r\n 53:ed:7f:2a:49:84:ac:b9:80:4b:b5:c7:b2:23:22:fb:eb:d8:\r\n fb:6e:c9:3c:f3:d2:d1:bb:be:c9:1c:ff:6d:01:db:69:80:0e:\r\n 99:a5:ea:9e:7b:97:98:8f:b7:cf:22:9c:b3:b8:5d:e5:a9:33:\r\n 17:74:c6:97:37:0f:b4:e9:26:82:5f:61:0b:3f:1e:3d:64:e9:\r\n 2b:9b\r\n-----BEGIN CERTIFICATE-----\r\nMIIEQjCCA6ugAwIBAgIEBAAD+zANBgkqhkiG9w0BAQUFADB1MQswCQYDVQQGEwJV\r\nUzEYMBYGA1UEChMPR1RFIENvcnBvcmF0aW9uMScwJQYDVQQLEx5HVEUgQ3liZXJU\r\ncnVzdCBTb2x1dGlvbnMsIEluYy4xIzAhBgNVBAMTGkdURSBDeWJlclRydXN0IEds\r\nb2JhbCBSb290MB4XDTA2MDMxNDIwMzAwMFoXDTEzMDMxNDIzNTkwMFowXzELMAkG\r\nA1UEBhMCQkUxEzARBgNVBAoTCkN5YmVydHJ1c3QxFzAVBgNVBAsTDkVkdWNhdGlv\r\nbmFsIENBMSIwIAYDVQQDExlDeWJlcnRydXN0IEVkdWNhdGlvbmFsIENBMIIBIjAN\r\nBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAlSKhEB1KRmBuBZGb34PC7RKyWnz4\r\nq+H4UFwoLH5+ADiTsItK8cJMPBAsPO+w7KFpL7n8zAgUa41PGPOD0vqpNwggqlyq\r\ngGCi1aUiAM9a5bSX37oevlyOFxlm/a+ffHuJsg4k2MerY8SVMo1I5mNZfQS4M6i9\r\n111kvGO1900o/fkGcjFcukWUZaPStFjsO2FYRKMvYrObgLSC/dXHzFEl5ZU/Ry8w\r\ne6zIeG7i4W0n6z3MAYLoNXeNq1i7VdHVpIFWjRzQFLGwBt6gkSLz8Kg0F0fG4D72\r\nDFqsflBLzeFpbgb8Bn5qTbSVmaBZXDVm7NlJ1BfgYLBdpdca4ipuZvKvHQIDAQAB\r\no4IBbzCCAWswRQYDVR0fBD4wPDA6oDigNoY0aHR0cDovL3d3dy5wdWJsaWMtdHJ1\r\nc3QuY29tL2NnaS1iaW4vQ1JMLzIwMTgvY2RwLmNybDAdBgNVHQ4EFgQUZWWjPdc7\r\nEaMKByU3yUJKW3Z3UOEwUwYDVR0gBEwwSjBIBgkrBgEEAbE+AQAwOzA5BggrBgEF\r\nBQcCARYtaHR0cDovL3d3dy5wdWJsaWMtdHJ1c3QuY29tL0NQUy9PbW5pUm9vdC5o\r\ndG1sMIGJBgNVHSMEgYEwf6F5pHcwdTELMAkGA1UEBhMCVVMxGDAWBgNVBAoTD0dU\r\nRSBDb3Jwb3JhdGlvbjEnMCUGA1UECxMeR1RFIEN5YmVyVHJ1c3QgU29sdXRpb25z\r\nLCBJbmMuMSMwIQYDVQQDExpHVEUgQ3liZXJUcnVzdCBHbG9iYWwgUm9vdIICAaUw\r\nDgYDVR0PAQH/BAQDAgEGMBIGA1UdEwEB/wQIMAYBAf8CAQAwDQYJKoZIhvcNAQEF\r\nBQADgYEAQ7NFg1RxxB/csjxrTr8m8k7yrZpb+oY3iOgUbEEYQl/vZT7rA3egt551\r\nelF8uxVbuK+RoDSSU+1/KkmErLmAS7XHsiMi++vY+27JPPPS0bu+yRz/bQHbaYAO\r\nmaXqnnuXmI+3zyKcs7hd5akzF3TGlzcPtOkmgl9hCz8ePWTpK5s=\r\n-----END CERTIFICATE-----\r\nCertificate:\r\n 
Data:\r\n Version: 1 (0x0)\r\n Serial Number: 421 (0x1a5)\r\n Signature Algorithm: md5WithRSAEncryption\r\n Issuer: C=US, O=GTE Corporation, OU=GTE CyberTrust Solutions, Inc., CN=GTE CyberTrust Global Root\r\n Validity\r\n Not Before: Aug 13 00:29:00 1998 GMT\r\n Not After : Aug 13 23:59:00 2018 GMT\r\n Subject: C=US, O=GTE Corporation, OU=GTE CyberTrust Solutions, Inc., CN=GTE CyberTrust Global Root\r\n Subject Public Key Info:\r\n Public Key Algorithm: rsaEncryption\r\n RSA Public Key: (1024 bit)\r\n Modulus (1024 bit):\r\n 00:95:0f:a0:b6:f0:50:9c:e8:7a:c7:88:cd:dd:17:\r\n 0e:2e:b0:94:d0:1b:3d:0e:f6:94:c0:8a:94:c7:06:\r\n c8:90:97:c8:b8:64:1a:7a:7e:6c:3c:53:e1:37:28:\r\n 73:60:7f:b2:97:53:07:9f:53:f9:6d:58:94:d2:af:\r\n 8d:6d:88:67:80:e6:ed:b2:95:cf:72:31:ca:a5:1c:\r\n 72:ba:5c:02:e7:64:42:e7:f9:a9:2c:d6:3a:0d:ac:\r\n 8d:42:aa:24:01:39:e6:9c:3f:01:85:57:0d:58:87:\r\n 45:f8:d3:85:aa:93:69:26:85:70:48:80:3f:12:15:\r\n c7:79:b4:1f:05:2f:3b:62:99\r\n Exponent: 65537 (0x10001)\r\n Signature Algorithm: md5WithRSAEncryption\r\n 6d:eb:1b:09:e9:5e:d9:51:db:67:22:61:a4:2a:3c:48:77:e3:\r\n a0:7c:a6:de:73:a2:14:03:85:3d:fb:ab:0e:30:c5:83:16:33:\r\n 81:13:08:9e:7b:34:4e:df:40:c8:74:d7:b9:7d:dc:f4:76:55:\r\n 7d:9b:63:54:18:e9:f0:ea:f3:5c:b1:d9:8b:42:1e:b9:c0:95:\r\n 4e:ba:fa:d5:e2:7c:f5:68:61:bf:8e:ec:05:97:5f:5b:b0:d7:\r\n a3:85:34:c4:24:a7:0d:0f:95:93:ef:cb:94:d8:9e:1f:9d:5c:\r\n 85:6d:c7:aa:ae:4f:1f:22:b5:cd:95:ad:ba:a7:cc:f9:ab:0b:\r\n 7a:7f\r\n-----BEGIN 
CERTIFICATE-----\r\nMIICWjCCAcMCAgGlMA0GCSqGSIb3DQEBBAUAMHUxCzAJBgNVBAYTAlVTMRgwFgYD\r\nVQQKEw9HVEUgQ29ycG9yYXRpb24xJzAlBgNVBAsTHkdURSBDeWJlclRydXN0IFNv\r\nbHV0aW9ucywgSW5jLjEjMCEGA1UEAxMaR1RFIEN5YmVyVHJ1c3QgR2xvYmFsIFJv\r\nb3QwHhcNOTgwODEzMDAyOTAwWhcNMTgwODEzMjM1OTAwWjB1MQswCQYDVQQGEwJV\r\nUzEYMBYGA1UEChMPR1RFIENvcnBvcmF0aW9uMScwJQYDVQQLEx5HVEUgQ3liZXJU\r\ncnVzdCBTb2x1dGlvbnMsIEluYy4xIzAhBgNVBAMTGkdURSBDeWJlclRydXN0IEds\r\nb2JhbCBSb290MIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQCVD6C28FCc6HrH\r\niM3dFw4usJTQGz0O9pTAipTHBsiQl8i4ZBp6fmw8U+E3KHNgf7KXUwefU/ltWJTS\r\nr41tiGeA5u2ylc9yMcqlHHK6XALnZELn+aks1joNrI1CqiQBOeacPwGFVw1Yh0X4\r\n04Wqk2kmhXBIgD8SFcd5tB8FLztimQIDAQABMA0GCSqGSIb3DQEBBAUAA4GBAG3r\r\nGwnpXtlR22ciYaQqPEh346B8pt5zohQDhT37qw4wxYMWM4ETCJ57NE7fQMh017l9\r\n3PR2VX2bY1QY6fDq81yx2YtCHrnAlU66+tXifPVoYb+O7AWXX1uw16OFNMQkpw0P\r\nlZPvy5TYnh+dXIVtx6quTx8itc2VrbqnzPmrC3p/\r\n-----END CERTIFICATE-----\r\n'} + +AddPeer(onelab) diff --git a/tools/upgrade-db.py b/tools/upgrade-db.py new file mode 100755 index 0000000..4c9d1d5 --- /dev/null +++ b/tools/upgrade-db.py @@ -0,0 +1,457 @@ +#!/usr/bin/python +# +# Tool for upgrading/converting a db +# Requirements: +# 1) Databse Schema - schema for the new database you what to upgrade to +# 2) Config File - the config file that describes how to convert the db +# +# Notes: +# 1) Will attempt to convert the db defined in /etc/planetlab/plc_config +# 2) Does not automatically drop archived database. 
They must be removed +# manually + +import sys +import os +import getopt +import pgdb + +config = {} +config_file = "/etc/planetlab/plc_config" +execfile(config_file, config) +upgrade_config_file = "plcdb.3-4.conf" +schema_file = "planetlab4.sql" +temp_dir = "/tmp" + + +def usage(): + print "Usage: %s [OPTION] UPGRADE_CONFIG_FILE " % sys.argv[0] + print "Options:" + print " -s, --schema=FILE Upgraded Database Schema" + print " -t, --temp-dir=DIR Temp Directory" + print " --help This message" + sys.exit(1) + +try: + (opts, argv) = getopt.getopt(sys.argv[1:], + "s:d:", + ["schema=", + "temp-dir=", + "help"]) +except getopt.GetoptError, err: + print "Error: ", err.msg + usage() + +for (opt, optval) in opts: + if opt == "-s" or opt == "--schema": + schema_file = optval + elif opt == "-d" or opt == "--temp-dir": + temp_dir = optval + elif opt == "--help": + usage() +try: + upgrade_config_file = argv[0] +except IndexError: + print "Error: too few arguments" + usage() + +schema = {} +inserts = [] +schema_items_ordered = [] +sequences = {} +temp_tables = {} + + +# load conf file for this upgrade +try: + upgrade_config = {} + execfile(upgrade_config_file, upgrade_config) + upgrade_config.pop('__builtins__') + db_version_previous = upgrade_config['DB_VERSION_PREVIOUS'] + db_version_new = upgrade_config['DB_VERSION_NEW'] + +except IOError, fault: + print "Error: upgrade config file (%s) not found. Exiting" % \ + (fault) + sys.exit(1) +except KeyError, fault: + print "Error: %s not set in upgrade confing (%s). 
Exiting" % \ + (fault, upgrade_config_file) + sys.exit(1) + + + + +def connect(): + db = pgdb.connect(user = config['PLC_DB_USER'], + database = config['PLC_DB_NAME']) + return db + +def archive_db(database, archived_database): + + archive_db = " dropdb -U postgres %s > /dev/null 2>&1;" \ + " psql template1 postgres -qc " \ + " 'ALTER DATABASE %s RENAME TO %s;';" % \ + (archived_database, database, archived_database) + exit_status = os.system(archive_db) + if exit_status: + print "Error: unable to archive database. Upgrade failed" + sys.exit(1) + #print "Status: %s has been archived. now named %s" % (database, archived_database) + + +def encode_utf8(inputfile_name, outputfile_name): + # rewrite a iso-8859-1 encoded file in utf8 + try: + inputfile = open(inputfile_name, 'r') + outputfile = open(outputfile_name, 'w') + for line in inputfile: + if line.upper().find('SET CLIENT_ENCODING') > -1: + continue + outputfile.write(unicode(line, 'iso-8859-1').encode('utf8')) + inputfile.close() + outputfile.close() + except: + print 'error encoding file' + raise + +def create_item_from_schema(item_name): + + try: + (type, body_list) = schema[item_name] + exit_status = os.system('psql %s %s -qc "%s" > /dev/null 2>&1' % \ + (config['PLC_DB_NAME'], config['PLC_DB_USER'],"".join(body_list) ) ) + if exit_status: + raise Exception + except Exception, fault: + print 'Error: create %s failed. Check schema.' % item_name + sys.exit(1) + raise fault + + except KeyError: + print "Error: cannot create %s. 
definition not found in %s" % \ + (key, schema_file) + return False + +def fix_row(row, table_name, table_fields): + + if table_name in ['interfaces']: + # convert str bwlimit to bps int + bwlimit_index = table_fields.index('bwlimit') + if isinstance(row[bwlimit_index], int): + pass + elif row[bwlimit_index].find('mbit') > -1: + row[bwlimit_index] = int(row[bwlimit_index].split('mbit')[0]) \ + * 1000000 + elif row[bwlimit_index].find('kbit') > -1: + row[bwlimit_index] = int(row[bwlimit_index].split('kbit')[0]) \ + * 1000 + elif table_name in ['slice_attribute']: + # modify some invalid foreign keys + attribute_type_index = table_fields.index('attribute_type_id') + if row[attribute_type_index] == 10004: + row[attribute_type_index] = 10016 + elif row[attribute_type_index] == 10006: + row[attribute_type_index] = 10017 + elif row[attribute_type_index] in [10031, 10033]: + row[attribute_type_index] = 10037 + elif row[attribute_type_index] in [10034, 10035]: + row[attribute_type_index] = 10036 + elif table_name in ['slice_attribute_types']: + type_id_index = table_fields.index('attribute_type_id') + if row[type_id_index] in [10004, 10006, 10031, 10033, 10034, 10035]: + return None + return row + +def fix_table(table, table_name, table_fields): + if table_name in ['slice_attribute_types']: + # remove duplicate/redundant primary keys + type_id_index = table_fields.index('attribute_type_id') + for row in table: + if row[type_id_index] in [10004, 10006, 10031, 10033, 10034, 10035]: + table.remove(row) + return table + +def remove_temp_tables(): + # remove temp_tables + try: + for temp_table in temp_tables: + os.remove(temp_tables[temp_table]) + except: + raise + +def generate_temp_table(table_name, db): + cursor = db.cursor() + try: + # get upgrade directions + table_def = upgrade_config[table_name].replace('(', '').replace(')', '').split(',') + table_fields, old_fields, joins, wheres = [], [], set(), set() + for field in table_def: + field_parts = field.strip().split(':') + 
table_fields.append(field_parts[0]) + old_fields.append(field_parts[1]) + if field_parts[2:]: + joins.update(set(filter(lambda x: not x.find('=') > -1, field_parts[2:]))) + wheres.update(set(filter(lambda x: x.find('=') > -1, field_parts[2:]))) + + # get indices of fields that cannot be null + (type, body_list) = schema[table_name] + not_null_indices = [] + for field in table_fields: + for body_line in body_list: + if body_line.find(field) > -1 and \ + body_line.upper().find("NOT NULL") > -1: + not_null_indices.append(table_fields.index(field)) + # get index of primary key + primary_key_indices = [] + for body_line in body_list: + if body_line.find("PRIMARY KEY") > -1: + primary_key = body_line + for field in table_fields: + if primary_key.find(" "+field+" ") > -1: + primary_key_indices.append(table_fields.index(field)) + #break + + # get old data + get_old_data = "SELECT DISTINCT %s FROM %s" % \ + (", ".join(old_fields), old_fields[0].split(".")[0]) + for join in joins: + get_old_data = get_old_data + " INNER JOIN %s USING (%s) " % \ + (join.split('.')[0], join.split('.')[1]) + if wheres: + get_old_data = get_old_data + " WHERE " + for where in wheres: + get_old_data = get_old_data + " %s" % where + cursor.execute(get_old_data) + rows = cursor.fetchall() + + # write data to a temp file + temp_file_name = '%s/%s.tmp' % (temp_dir, table_name) + temp_file = open(temp_file_name, 'w') + for row in rows: + # attempt to make any necessary fixes to data + row = fix_row(row, table_name, table_fields) + # do not attempt to write null rows + if row == None: + continue + # do not attempt to write rows with null primary keys + if filter(lambda x: row[x] == None, primary_key_indices): + continue + for i in range(len(row)): + # convert nulls into something pg can understand + if row[i] == None: + if i in not_null_indices: + # XX doesnt work if column is int type + row[i] = "" + else: + row[i] = "\N" + if isinstance(row[i], int) or isinstance(row[i], float): + row[i] = 
str(row[i]) + # escape whatever can mess up the data format + if isinstance(row[i], str): + row[i] = row[i].replace('\t', '\\t') + row[i] = row[i].replace('\n', '\\n') + row[i] = row[i].replace('\r', '\\r') + data_row = "\t".join(row) + temp_file.write(data_row + "\n") + temp_file.write("\.\n") + temp_file.close() + temp_tables[table_name] = temp_file_name + + except KeyError: + #print "WARNING: cannot upgrade %s. upgrade def not found. skipping" % \ + # (table_name) + return False + except IndexError, fault: + print "Error: error found in upgrade config file. " \ + "check %s configuration. Aborting " % \ + (table_name) + sys.exit(1) + except: + print "Error: configuration for %s doesnt match db schema. " \ + " Aborting" % (table_name) + try: + db.rollback() + except: + pass + raise + + +# Connect to current db +db = connect() +cursor = db.cursor() + +# determin current db version +try: + cursor.execute("SELECT relname from pg_class where relname = 'plc_db_version'") + rows = cursor.fetchall() + if not rows: + print "Warning: current db has no version. Unable to validate config file." + else: + cursor.execute("SELECT version FROM plc_db_version") + rows = cursor.fetchall() + if not rows or not rows[0]: + print "Warning: current db has no version. Unable to validate config file." + elif rows[0][0] == db_version_new: + print "Status: Versions are the same. No upgrade necessary." 
+ sys.exit() + elif not rows[0][0] == db_version_previous: + print "Stauts: DB_VERSION_PREVIOUS in config file (%s) does not" \ + " match current db version %d" % (upgrade_config_file, rows[0][0]) + sys.exit() + else: + print "STATUS: attempting upgrade from %d to %d" % \ + (db_version_previous, db_version_new) + + # check db encoding + sql = " SELECT pg_catalog.pg_encoding_to_char(d.encoding)" \ + " FROM pg_catalog.pg_database d " \ + " WHERE d.datname = '%s' " % config['PLC_DB_NAME'] + cursor.execute(sql) + rows = cursor.fetchall() + if rows[0][0] not in ['UTF8', 'UNICODE']: + print "WARNING: db encoding is not utf8. Attempting to encode" + db.close() + # generate db dump + dump_file = '%s/dump.sql' % (temp_dir) + dump_file_encoded = dump_file + ".utf8" + dump_cmd = 'pg_dump -i %s -U postgres -f %s > /dev/null 2>&1' % \ + (config['PLC_DB_NAME'], dump_file) + if os.system(dump_cmd): + print "ERROR: during db dump. Exiting." + sys.exit(1) + # encode dump to utf8 + print "Status: encoding database dump" + encode_utf8(dump_file, dump_file_encoded) + # archive original db + archive_db(config['PLC_DB_NAME'], config['PLC_DB_NAME']+'_sqlascii_archived') + # create a utf8 database and upload encoded data + recreate_cmd = 'createdb -U postgres -E UTF8 %s > /dev/null; ' \ + 'psql -a -U %s %s < %s > /dev/null 2>&1;' % \ + (config['PLC_DB_NAME'], config['PLC_DB_USER'], \ + config['PLC_DB_NAME'], dump_file_encoded) + print "Status: recreating database as utf8" + if os.system(recreate_cmd): + print "Error: database encoding failed. 
Aborting"
+    sys.exit(1)
+
+    os.remove(dump_file_encoded)
+    os.remove(dump_file)
+except:
+    # NOTE(review): bare except that only re-raises — effectively a no-op handler
+    raise
+
+
+db = connect()
+cursor = db.cursor()
+
+# parse the schema the user wishes to upgrade to, recording every CREATE'd object
+# NOTE(review): 'file' shadows the builtin and is never closed — confirm intent
+try:
+    file = open(schema_file, 'r')
+    index = 0
+    lines = file.readlines()
+    while index < len(lines):
+        line = lines[index]
+        # strip SQL "--" comments before inspecting the line
+        if line.find("--") > -1:
+            line_parts = line.split("--")
+            line = line_parts[0]
+        # find all created objects
+        if line.startswith("CREATE"):
+            line_parts = line.split(" ")
+            if line_parts[1:3] == ['OR', 'REPLACE']:
+                line_parts = line_parts[2:]
+            item_type = line_parts[1]
+            item_name = line_parts[2]
+            schema_items_ordered.append(item_name)
+            # indexes fit on one line, so store the line itself
+            if item_type in ['INDEX']:
+                schema[item_name] = (item_type, line)
+
+            # aggregates, tables and views span multiple lines (up to the ";"),
+            # so they are handled differently than indexes
+            elif item_type in ['AGGREGATE', 'TABLE', 'VIEW']:
+                fields = [line]
+                while index < len(lines):
+                    index = index + 1
+                    nextline =lines[index]
+                    if nextline.find("--") > -1:
+                        new_line_parts = nextline.split("--")
+                        nextline = new_line_parts[0]
+                    # look for any sequences ('serial' columns) to fix up later
+                    if item_type in ['TABLE'] and nextline.find('serial') > -1:
+                        sequences[item_name] = nextline.strip().split()[0]
+                    fields.append(nextline)
+                    if nextline.find(";") >= 0:
+                        break
+                schema[item_name] = (item_type, fields)
+            else:
+                print "Error: unknown type %s" % item_type
+        elif line.startswith("INSERT"):
+            inserts.append(line)
+        index = index + 1
+
+except:
+    raise
+
+print "Status: generating temp tables"
+# generate all temp tables (one per TABLE object found above)
+# NOTE(review): 'type' shadows the builtin here and below
+for key in schema_items_ordered:
+    (type, body_list) = schema[key]
+    if type == 'TABLE':
+        generate_temp_table(key, db)
+
+# disconnect from the current database and archive it
+cursor.close()
+db.close()
+
+print "Status: archiving database"
+archive_db(config['PLC_DB_NAME'], config['PLC_DB_NAME']+'_archived')
+os.system('createdb -U postgres -E UTF8 %s > /dev/null; ' % config['PLC_DB_NAME'])
+
+print "Status: upgrading database"
+# attempt to create every schema item in the freshly created database, then
+# repopulate each table from its temp-table dump (or schema INSERTs as fallback)
+try:
+    for key in schema_items_ordered:
+        (type, body_list) = schema[key]
+        create_item_from_schema(key)
+        if type == 'TABLE':
+            if upgrade_config.has_key(key):
+                # attempt to populate with temp table data via COPY FROM stdin
+                table_def = upgrade_config[key].replace('(', '').replace(')', '').split(',')
+                table_fields = [field.strip().split(':')[0] for field in table_def]
+                insert_cmd = "psql %s %s -c " \
+                    " 'COPY %s (%s) FROM stdin;' < %s " % \
+                    (config['PLC_DB_NAME'], config['PLC_DB_USER'], key,
+                     ", ".join(table_fields), temp_tables[key] )
+                exit_status = os.system(insert_cmd)
+                if exit_status:
+                    print "Error: upgrade %s failed" % key
+                    sys.exit(1)
+                # update the primary key sequence so new rows don't collide
+                if sequences.has_key(key):
+                    sequence = key +"_"+ sequences[key] +"_seq"
+                    update_seq = "psql %s %s -c " \
+                        " \"select setval('%s', max(%s)) FROM %s;\" > /dev/null" % \
+                        (config['PLC_DB_NAME'], config['PLC_DB_USER'], sequence,
+                         sequences[key], key)
+                    exit_status = os.system(update_seq)
+                    if exit_status:
+                        print "Error: sequence %s update failed" % sequence
+                        sys.exit(1)
+            else:
+                # no temp data file: fall back to any INSERT stmts in the schema
+                # that mention this table (substring match — TODO confirm this
+                # cannot match unrelated tables with similar names)
+                print "Warning: %s has no temp data file. Unable to populate with old data" % key
+                for insert_stmt in inserts:
+                    if insert_stmt.find(key) > -1:
+                        insert_cmd = 'psql %s postgres -qc "%s;" > /dev/null 2>&1' % \
+                            (config['PLC_DB_NAME'], insert_stmt)
+                        os.system(insert_cmd)
+except:
+    # on any failure, restore the archived database under its original name
+    print "Error: failed to populate db. Unarchiving original database and aborting"
+    undo_command = "dropdb -U postgres %s > /dev/null; psql template1 postgres -qc" \
+        " 'ALTER DATABASE %s RENAME TO %s;'; > /dev/null" % \
+        (config['PLC_DB_NAME'], config['PLC_DB_NAME']+'_archived', config['PLC_DB_NAME'])
+    os.system(undo_command)
+    #remove_temp_tables()
+    raise
+
+#remove_temp_tables()
+
+print "upgrade complete"
diff --git a/wsdl/Makefile b/wsdl/Makefile
new file mode 100644
index 0000000..0a3ec4c
--- /dev/null
+++ b/wsdl/Makefile
@@ -0,0 +1,11 @@
+# Build a WSDL spec of the API
+
+all: plcapi.wsdl
+
+plcapi.wsdl:
+	PYTHONPATH=../ python api2wsdl.py > $@
+
+clean:
+	rm -f plcapi.wsdl
+
+.PHONY: all clean
diff --git a/wsdl/api2wsdl.py b/wsdl/api2wsdl.py
new file mode 100755
index 0000000..296048e
--- /dev/null
+++ b/wsdl/api2wsdl.py
@@ -0,0 +1,149 @@
+#!/usr/bin/python
+#
+# Sapan Bhatia
+#
+# Generates a WSDL for plcapi and writes it to stdout
+# Current limitations:
+# - Invalid for the following reasons
+#    - The types are python types, not WSDL types
+#    - I'm not sure of what to do with the auth structure
+
+import os, sys
+import time
+import pdb
+import xml.dom.minidom
+import inspect
+import globals
+
+from PLC.API import PLCAPI
+from PLC.Method import *
+from PLC.Auth import Auth
+from PLC.Parameter import Parameter, Mixed, python_type, xmlrpc_type
+
+
+api = PLCAPI(None)
+
+# Helper functions (module-level, not class methods)
+
+def param_type(param):
+    # Render a Parameter/Mixed/sequence as a human-readable type string,
+    # recursing into alternatives ("a or b") and sequences ("array of ...")
+    if isinstance(param, Mixed) and len(param):
+        subtypes = [param_type(subparam) for subparam in param]
+        return " or ".join(subtypes)
+    elif isinstance(param, (list, tuple, set)) and len(param):
+        return "array of " + " or ".join([param_type(subparam) for subparam in param])
+    else:
+        return xmlrpc_type(python_type(param))
+
+
+def add_wsdl_ports_and_bindings (wsdl):
+    # For each non-system API method, append wsdl:message (in/out),
+    # wsdl:portType and wsdl:binding elements to the wsdl document root.
+    # NOTE(review): sorts api.all_methods in place as a side effect.
+    api.all_methods.sort()
+    for method in api.all_methods:
+        # Skip system. methods
+        if "system." in method:
+            continue
+
+        function = api.callable(method)
+
+        # Commented documentation
+        #lines = ["// " + line.strip() for line in function.__doc__.strip().split("\n")]
+        #print "\n".join(lines)
+        #print
+
+
+        in_el = wsdl.firstChild.appendChild(wsdl.createElement("wsdl:message"))
+        in_el.setAttribute("name", function.name + "_in")
+
+        # Arguments: one wsdl:part per accepted parameter
+
+        if (function.accepts):
+            (min_args, max_args, defaults) = function.args()
+            for (argname,argtype) in zip(max_args,function.accepts):
+                arg_part = in_el.appendChild(wsdl.createElement("wsdl:part"))
+                arg_part.setAttribute("name", argname)
+                arg_part.setAttribute("type", param_type(argtype))
+
+        # Return type: single "returnvalue" part
+        return_type = function.returns
+        out_el = wsdl.firstChild.appendChild(wsdl.createElement("wsdl:message"))
+        out_el.setAttribute("name", function.name + "_out")
+        ret_part = out_el.appendChild(wsdl.createElement("wsdl:part"))
+        ret_part.setAttribute("name", "returnvalue")
+        ret_part.setAttribute("type", param_type(return_type))
+
+        # Port connecting arguments with return type
+
+        port_el = wsdl.firstChild.appendChild(wsdl.createElement("wsdl:portType"))
+        port_el.setAttribute("name", function.name + "_port")
+
+        op_el = port_el.appendChild(wsdl.createElement("wsdl:operation"))
+        op_el.setAttribute("name", function.name)
+        op_el.appendChild(wsdl.createElement("wsdl:input")).setAttribute("message","tns:" + function.name + "_in")
+        op_el.appendChild(wsdl.createElement("wsdl:output")).setAttribute("message","tns:" + function.name + "_out")
+
+        # Bindings: SOAP-RPC over HTTP
+
+        bind_el = wsdl.firstChild.appendChild(wsdl.createElement("wsdl:binding"))
+        bind_el.setAttribute("name", function.name + "_binding")
+        bind_el.setAttribute("type", "tns:" + function.name + "_port")
+
+        soap_bind = bind_el.appendChild(wsdl.createElement("soap:binding"))
+        soap_bind.setAttribute("style", "rpc")
+        soap_bind.setAttribute("transport","http://schemas.xmlsoap.org/soap/http")
+
+
+        wsdl_op = bind_el.appendChild(wsdl.createElement("wsdl:operation"))
+        wsdl_op.setAttribute("name", function.name)
+        wsdl_op.appendChild(wsdl.createElement("soap:operation")).setAttribute("soapAction",
+                "urn:" + function.name)
+
+
+        wsdl_input = wsdl_op.appendChild(wsdl.createElement("wsdl:input"))
+        input_soap_body = wsdl_input.appendChild(wsdl.createElement("soap:body"))
+        input_soap_body.setAttribute("use", "encoded")
+        input_soap_body.setAttribute("namespace", "urn:" + function.name)
+        input_soap_body.setAttribute("encodingStyle","http://schemas.xmlsoap.org/soap/encoding/")
+
+
+        wsdl_output = wsdl_op.appendChild(wsdl.createElement("wsdl:output"))
+        output_soap_body = wsdl_output.appendChild(wsdl.createElement("soap:body"))
+        output_soap_body.setAttribute("use", "encoded")
+        output_soap_body.setAttribute("namespace", "urn:" + function.name)
+        output_soap_body.setAttribute("encodingStyle","http://schemas.xmlsoap.org/soap/encoding/")
+
+
+def add_wsdl_service(wsdl):
+    # Append the wsdl:service element with one wsdl:port per API method.
+    # NOTE(review): unlike add_wsdl_ports_and_bindings, this does NOT skip
+    # system.* methods, so those ports reference bindings that were never
+    # emitted — confirm whether that is intentional.
+    service_el = wsdl.firstChild.appendChild(wsdl.createElement("wsdl:service"))
+    service_el.setAttribute("name", "plc_api_service")
+
+    for method in api.all_methods:
+        name=api.callable(method).name
+        servport_el = service_el.appendChild(wsdl.createElement("wsdl:port"))
+        servport_el.setAttribute("name", name + "_port")
+        servport_el.setAttribute("binding", "tns:" + name + "_binding")
+
+        soapaddress = servport_el.appendChild(wsdl.createElement("soap:address"))
+        soapaddress.setAttribute("location", "%s" % globals.plc_ns)
+
+
+def get_wsdl_definitions():
+    # Parse the skeleton wsdl:definitions document that the other helpers
+    # append into.
+    # NOTE(review): the XML template below appears truncated in this patch —
+    # the string carries two % substitutions but contains no %s placeholders,
+    # which would raise TypeError at runtime; verify against the original
+    # api2wsdl.py before relying on this hunk.
+    wsdl_text_header = """
+    """ % (globals.plc_ns,globals.plc_ns)
+
+    wsdl = xml.dom.minidom.parseString(wsdl_text_header)
+
+    return wsdl
+
+
+# Script entry point: build the document and print it to stdout
+wsdl = get_wsdl_definitions()
+add_wsdl_ports_and_bindings(wsdl)
+add_wsdl_service(wsdl)
+
+
+print wsdl.toprettyxml()
+
diff --git a/wsdl/globals.py b/wsdl/globals.py
new file mode 100644
index 0000000..9b4f5e2
--- /dev/null
+++ b/wsdl/globals.py
@@ -0,0 +1,3 @@
+#!/usr/bin/python
+
+plc_ns="http://www.planet-lab.org/plcapi.wsdl"