'planetlab-4_0-branch'.
--- /dev/null
+#
+# Thierry Parmentelat - INRIA
+#
+# $Revision: 90 $
+#
+from PLC.Faults import *
+from PLC.Method import Method
+from PLC.Parameter import Parameter, Mixed
+from PLC.Auth import Auth
+
+from PLC.NodeNetworkSettingTypes import NodeNetworkSettingType, NodeNetworkSettingTypes
+from PLC.NodeNetworkSettings import NodeNetworkSetting, NodeNetworkSettings
+from PLC.NodeNetworks import NodeNetwork, NodeNetworks
+
+from PLC.Nodes import Nodes
+from PLC.Sites import Sites
+
+class AddNodeNetworkSetting(Method):
+ """
+ Sets the specified setting for the specified nodenetwork
+ to the specified value.
+
+ In general only tech(s), PI(s) and of course admin(s) are allowed to
+ do the change, but this is defined in the nodenetwork setting type object.
+
+ Returns the new nodenetwork_setting_id (> 0) if successful, faults
+ otherwise.
+ """
+
+ roles = ['admin', 'pi', 'tech', 'user']
+
+ accepts = [
+ Auth(),
+ # no other way to refer to a nodenetwork
+ NodeNetworkSetting.fields['nodenetwork_id'],
+ Mixed(NodeNetworkSettingType.fields['nodenetwork_setting_type_id'],
+ NodeNetworkSettingType.fields['name']),
+ NodeNetworkSetting.fields['value'],
+ ]
+
+ returns = Parameter(int, 'New nodenetwork_setting_id (> 0) if successful')
+
+ object_type = 'NodeNetwork'
+
+
+ def call(self, auth, nodenetwork_id, nodenetwork_setting_type_id_or_name, value):
+ nodenetworks = NodeNetworks(self.api, [nodenetwork_id])
+ if not nodenetworks:
+ raise PLCInvalidArgument, "No such nodenetwork %r"%nodenetwork_id
+ nodenetwork = nodenetworks[0]
+
+ nodenetwork_setting_types = NodeNetworkSettingTypes(self.api, [nodenetwork_setting_type_id_or_name])
+ if not nodenetwork_setting_types:
+ raise PLCInvalidArgument, "No such nodenetwork setting type %r"%nodenetwork_setting_type_id_or_name
+ nodenetwork_setting_type = nodenetwork_setting_types[0]
+
+ # checks for existence - does not allow several different settings
+ conflicts = NodeNetworkSettings(self.api,
+ {'nodenetwork_id':nodenetwork['nodenetwork_id'],
+ 'nodenetwork_setting_type_id':nodenetwork_setting_type['nodenetwork_setting_type_id']})
+
+ if len(conflicts) :
+ raise PLCInvalidArgument, "Nodenetwork %d already has setting %d"%(nodenetwork['nodenetwork_id'],
+ nodenetwork_setting_type['nodenetwork_setting_type_id'])
+
+ # check permission : it not admin, is the user affiliated with the right site
+ if 'admin' not in self.caller['roles']:
+ # locate node
+ node = Nodes (self.api,[nodenetwork['node_id']])[0]
+ # locate site
+ site = Sites (self.api, [node['site_id']])[0]
+ # check caller is affiliated with this site
+ if self.caller['person_id'] not in site['person_ids']:
+ raise PLCPermissionDenied, "Not a member of the hosting site %s"%site['abbreviated_site']
+
+ required_min_role = nodenetwork_setting_type ['min_role_id']
+ if required_min_role is not None and \
+ min(self.caller['role_ids']) > required_min_role:
+ raise PLCPermissionDenied, "Not allowed to modify the specified nodenetwork setting, requires role %d",required_min_role
+
+ nodenetwork_setting = NodeNetworkSetting(self.api)
+ nodenetwork_setting['nodenetwork_id'] = nodenetwork['nodenetwork_id']
+ nodenetwork_setting['nodenetwork_setting_type_id'] = nodenetwork_setting_type['nodenetwork_setting_type_id']
+ nodenetwork_setting['value'] = value
+
+ nodenetwork_setting.sync()
+ self.object_ids = [nodenetwork_setting['nodenetwork_setting_id']]
+
+ return nodenetwork_setting['nodenetwork_setting_id']
--- /dev/null
+#
+# Thierry Parmentelat - INRIA
+#
+# $Revision: 88 $
+#
+
+
+from PLC.Faults import *
+from PLC.Method import Method
+from PLC.Parameter import Parameter, Mixed
+from PLC.NodeNetworkSettingTypes import NodeNetworkSettingType, NodeNetworkSettingTypes
+from PLC.Auth import Auth
+
+# predicate over (field, value) items: True for the fields a caller is
+# allowed to set on a nodenetwork setting type
+# NOTE: tuple-unpacking lambda parameters are Python-2-only syntax
+can_update = lambda (field, value): field in \
+             ['name', 'description', 'category', 'min_role_id']
+
+class AddNodeNetworkSettingType(Method):
+ """
+ Adds a new type of nodenetwork setting.
+ Any fields specified are used, otherwise defaults are used.
+
+ Returns the new nodenetwork_setting_id (> 0) if successful,
+ faults otherwise.
+ """
+
+ roles = ['admin']
+
+ nodenetwork_setting_type_fields = dict(filter(can_update, NodeNetworkSettingType.fields.items()))
+
+ accepts = [
+ Auth(),
+ nodenetwork_setting_type_fields
+ ]
+
+ returns = Parameter(int, 'New nodenetwork_setting_id (> 0) if successful')
+
+
+ def call(self, auth, nodenetwork_setting_type_fields):
+ nodenetwork_setting_type_fields = dict(filter(can_update, nodenetwork_setting_type_fields.items()))
+ nodenetwork_setting_type = NodeNetworkSettingType(self.api, nodenetwork_setting_type_fields)
+ nodenetwork_setting_type.sync()
+
+ self.object_ids = [nodenetwork_setting_type['nodenetwork_setting_type_id']]
+
+ return nodenetwork_setting_type['nodenetwork_setting_type_id']
--- /dev/null
+#
+# Thierry Parmentelat - INRIA
+#
+# $Revision: 90 $
+#
+
+from PLC.Faults import *
+from PLC.Method import Method
+from PLC.Parameter import Parameter, Mixed
+from PLC.Auth import Auth
+
+from PLC.NodeNetworkSettings import NodeNetworkSetting, NodeNetworkSettings
+from PLC.NodeNetworks import NodeNetwork, NodeNetworks
+
+from PLC.Nodes import Node, Nodes
+from PLC.Sites import Site, Sites
+
+class DeleteNodeNetworkSetting(Method):
+ """
+ Deletes the specified nodenetwork setting
+
+ Attributes may require the caller to have a particular role in order
+ to be deleted, depending on the related nodenetwork setting type.
+ Admins may delete attributes of any slice or sliver.
+
+ Returns 1 if successful, faults otherwise.
+ """
+
+ roles = ['admin', 'pi', 'user']
+
+ accepts = [
+ Auth(),
+ NodeNetworkSetting.fields['nodenetwork_setting_id']
+ ]
+
+ returns = Parameter(int, '1 if successful')
+
+ object_type = 'NodeNetwork'
+
+
+ def call(self, auth, nodenetwork_setting_id):
+ nodenetwork_settings = NodeNetworkSettings(self.api, [nodenetwork_setting_id])
+ if not nodenetwork_settings:
+ raise PLCInvalidArgument, "No such nodenetwork setting %r"%nodenetwork_setting_id
+ nodenetwork_setting = nodenetwork_settings[0]
+
+ ### reproducing a check from UpdateSliceAttribute, looks dumb though
+ nodenetworks = NodeNetworks(self.api, [nodenetwork_setting['nodenetwork_id']])
+ if not nodenetworks:
+ raise PLCInvalidArgument, "No such nodenetwork %r"%nodenetwork_setting['nodenetwork_id']
+ nodenetwork = nodenetworks[0]
+
+ assert nodenetwork_setting['nodenetwork_setting_id'] in nodenetwork['nodenetwork_setting_ids']
+
+ # check permission : it not admin, is the user affiliated with the right site
+ if 'admin' not in self.caller['roles']:
+ # locate node
+ node = Nodes (self.api,[nodenetwork['node_id']])[0]
+ # locate site
+ site = Sites (self.api, [node['site_id']])[0]
+ # check caller is affiliated with this site
+ if self.caller['person_id'] not in site['person_ids']:
+ raise PLCPermissionDenied, "Not a member of the hosting site %s"%site['abbreviated_site']
+
+ required_min_role = nodenetwork_setting_type ['min_role_id']
+ if required_min_role is not None and \
+ min(self.caller['role_ids']) > required_min_role:
+ raise PLCPermissionDenied, "Not allowed to modify the specified nodenetwork setting, requires role %d",required_min_role
+
+ nodenetwork_setting.delete()
+ self.object_ids = [nodenetwork_setting['nodenetwork_setting_id']]
+
+ return 1
--- /dev/null
+#
+# Thierry Parmentelat - INRIA
+#
+# $Revision: 88 $
+#
+from PLC.Faults import *
+from PLC.Method import Method
+from PLC.Parameter import Parameter, Mixed
+from PLC.NodeNetworkSettingTypes import NodeNetworkSettingType, NodeNetworkSettingTypes
+from PLC.Auth import Auth
+
+class DeleteNodeNetworkSettingType(Method):
+ """
+ Deletes the specified nodenetwork setting type.
+
+ Returns 1 if successful, faults otherwise.
+ """
+
+ roles = ['admin']
+
+ accepts = [
+ Auth(),
+ Mixed(NodeNetworkSettingType.fields['nodenetwork_setting_type_id'],
+ NodeNetworkSettingType.fields['name']),
+ ]
+
+ returns = Parameter(int, '1 if successful')
+
+
+ def call(self, auth, nodenetwork_setting_type_id_or_name):
+ nodenetwork_setting_types = NodeNetworkSettingTypes(self.api, [nodenetwork_setting_type_id_or_name])
+ if not nodenetwork_setting_types:
+ raise PLCInvalidArgument, "No such nodenetwork settnig type"
+ nodenetwork_setting_type = nodenetwork_setting_types[0]
+
+ nodenetwork_setting_type.delete()
+ self.object_ids = [nodenetwork_setting_type['nodenetwork_setting_type_id']]
+
+ return 1
--- /dev/null
+import random
+import base64
+import os
+import os.path
+
+from PLC.Faults import *
+from PLC.Method import Method
+from PLC.Parameter import Parameter, Mixed
+from PLC.Auth import Auth
+
+from PLC.Nodes import Node, Nodes
+from PLC.NodeNetworks import NodeNetwork, NodeNetworks
+from PLC.NodeNetworkSettings import NodeNetworkSetting, NodeNetworkSettings
+
+#
+# xxx todo
+# Thierry on june 5 2007
+#
+# it turns out that having either apache (when invoked through xmlrpc)
+# or root (when running plcsh directly) run this piece of code is
+# problematic. In fact although we try to create intermediate dirs
+# with mode 777, what happens is that root's umask in the plc chroot
+# jail is set to 0022.
+#
+# the bottom line is, depending on who (apache or root) runs this for
+# the first time, we can access denied issued (when root comes first)
+# so probably we'd better implement a scheme where files are stored
+# directly under /var/tmp or something
+#
+# in addition the sequels of a former run (e.g. with a non-empty
+# filename) can prevent subsequent runs if the file is not properly
+# cleaned up after use, which is generally the case if someone invokes
+# this through plcsh and does not clean up
+# so maybe a dedicated cleanup method could be useful just in case
+#
+
+# could not define this in the class..
+# legal values for the 'action' argument of GetBootMedium.call
+boot_medium_actions = [ 'node-preview',
+                        'node-floppy',
+                        'node-iso',
+                        'node-usb',
+                        'generic-iso',
+                        'generic-usb',
+                        ]
+
+class GetBootMedium(Method):
+ """
+ This method is a redesign based on former, supposedly dedicated,
+ AdmGenerateNodeConfFile
+
+ As compared with its ancestor, this method provides a much more detailed
+ detailed interface, that allows to
+ (*) either just preview the node config file (in which case
+ the node key is NOT recomputed, and NOT provided in the output
+ (*) or regenerate the node config file for storage on a floppy
+ that is, exactly what the ancestor method used todo,
+ including renewing the node's key
+ (*) or regenerate the config file and bundle it inside an ISO or USB image
+ (*) or just provide the generic ISO or USB boot images
+ in which case of course the node_id_or_hostname parameter is not used
+
+ action is expected among the following string constants
+ (*) node-preview
+ (*) node-floppy
+ (*) node-iso
+ (*) node-usb
+ (*) generic-iso
+ (*) generic-usb
+
+ Apart for the preview mode, this method generates a new node key for the
+ specified node, effectively invalidating any old boot medium.
+
+ Non-admins can only generate files for nodes at their sites.
+
+ In addition, two return mechanisms are supported.
+ (*) The default behaviour is that the file's content is returned as a
+ base64-encoded string. This is how the ancestor method used to work.
+ To use this method, pass an empty string as the file parameter.
+
+ (*) Or, for efficiency -- this makes sense only when the API is used
+ by the web pages that run on the same host -- the caller may provide
+ a filename, in which case the resulting file is stored in that location instead.
+ The filename argument can use the following markers, that are expanded
+ within the method
+ - %d : default root dir (some builtin dedicated area under /var/tmp/)
+ Using this is recommended, and enforced for non-admin users
+ - %n : the node's name when this makes sense, or a mktemp-like name when
+ generic media is requested
+ - %s : a file suffix appropriate in the context (.txt, .iso or the like)
+ - %v : the bootcd version string (e.g. 4.0)
+ - %p : the PLC name
+ With the file-based return mechanism, the method returns the full pathname
+ of the result file; it is the caller's responsability to remove
+ this file after use.
+
+ Security:
+ When the user's role is not admin, the provided directory *must* be under
+ the %d area
+
+ Housekeeping:
+ Whenever needed, the method stores intermediate files in a
+ private area, typically not located under the web server's
+ accessible area, and are cleaned up by the method.
+
+ """
+
+ roles = ['admin', 'pi', 'tech']
+
+ accepts = [
+ Auth(),
+ Mixed(Node.fields['node_id'],
+ Node.fields['hostname']),
+ Parameter (str, "Action mode, expected in " + "|".join(boot_medium_actions)),
+ Parameter (str, "Empty string for verbatim result, resulting file full path otherwise"),
+ ]
+
+ returns = Parameter(str, "Node boot medium, either inlined, or filename, depending to the filename parameter")
+
+ BOOTCDDIR = "/usr/share/bootcd/"
+ BOOTCUSTOM = "/usr/share/bootcd/bootcustom.sh"
+ GENERICDIR = "/var/www/html/download/"
+ NODEDIR = "/var/tmp/bootmedium/results"
+ WORKDIR = "/var/tmp/bootmedium/work"
+ DEBUG = False
+ # uncomment this to preserve temporary area and bootcustom logs
+ #DEBUG = True
+
+ ### returns (host, domain) :
+ # 'host' : host part of the hostname
+ # 'domain' : domain part of the hostname
+ def split_hostname (self, node):
+ # Split hostname into host and domain parts
+ parts = node['hostname'].split(".", 1)
+ if len(parts) < 2:
+ raise PLCInvalidArgument, "Node hostname %s is invalid"%node['hostname']
+ return parts
+
+ # plnode.txt content
+ def floppy_contents (self, node, renew_key):
+
+ if node['peer_id'] is not None:
+ raise PLCInvalidArgument, "Not a local node"
+
+ # If we are not an admin, make sure that the caller is a
+ # member of the site at which the node is located.
+ if 'admin' not in self.caller['roles']:
+ if node['site_id'] not in self.caller['site_ids']:
+ raise PLCPermissionDenied, "Not allowed to generate a configuration file for %s"%node['hostname']
+
+ # Get node networks for this node
+ primary = None
+ nodenetworks = NodeNetworks(self.api, node['nodenetwork_ids'])
+ for nodenetwork in nodenetworks:
+ if nodenetwork['is_primary']:
+ primary = nodenetwork
+ break
+ if primary is None:
+ raise PLCInvalidArgument, "No primary network configured on %s"%node['hostname']
+
+ ( host, domain ) = self.split_hostname (node)
+
+ if renew_key:
+ # Generate 32 random bytes
+ bytes = random.sample(xrange(0, 256), 32)
+ # Base64 encode their string representation
+ node['key'] = base64.b64encode("".join(map(chr, bytes)))
+ # XXX Boot Manager cannot handle = in the key
+ node['key'] = node['key'].replace("=", "")
+ # Save it
+ node.sync()
+
+ # Generate node configuration file suitable for BootCD
+ file = ""
+
+ if renew_key:
+ file += 'NODE_ID="%d"\n' % node['node_id']
+ file += 'NODE_KEY="%s"\n' % node['key']
+
+ if primary['mac']:
+ file += 'NET_DEVICE="%s"\n' % primary['mac'].lower()
+
+ file += 'IP_METHOD="%s"\n' % primary['method']
+
+ if primary['method'] == 'static':
+ file += 'IP_ADDRESS="%s"\n' % primary['ip']
+ file += 'IP_GATEWAY="%s"\n' % primary['gateway']
+ file += 'IP_NETMASK="%s"\n' % primary['netmask']
+ file += 'IP_NETADDR="%s"\n' % primary['network']
+ file += 'IP_BROADCASTADDR="%s"\n' % primary['broadcast']
+ file += 'IP_DNS1="%s"\n' % primary['dns1']
+ file += 'IP_DNS2="%s"\n' % (primary['dns2'] or "")
+
+ file += 'HOST_NAME="%s"\n' % host
+ file += 'DOMAIN_NAME="%s"\n' % domain
+
+ # define various nodenetwork settings attached to the primary nodenetwork
+ settings = NodeNetworkSettings (self.api, {'nodenetwork_id':nodenetwork['nodenetwork_id']})
+
+ categories = set()
+ for setting in settings:
+ if setting['category'] is not None:
+ categories.add(setting['category'])
+
+ for category in categories:
+ category_settings = NodeNetworkSettings(self.api,{'nodenetwork_id':nodenetwork['nodenetwork_id'],
+ 'category':category})
+ if category_settings:
+ file += '### Category : %s\n'%category
+ for setting in category_settings:
+ file += '%s_%s="%s"\n'%(category.upper(),setting['name'].upper(),setting['value'])
+
+ for nodenetwork in nodenetworks:
+ if nodenetwork['method'] == 'ipmi':
+ file += 'IPMI_ADDRESS="%s"\n' % nodenetwork['ip']
+ if nodenetwork['mac']:
+ file += 'IPMI_MAC="%s"\n' % nodenetwork['mac'].lower()
+ break
+
+ return file
+
+ def bootcd_version (self):
+ try:
+ f = open (self.BOOTCDDIR + "/build/version.txt")
+ version=f.readline().strip()
+ finally:
+ f.close()
+ return version
+
+ def cleandir (self,tempdir):
+ if not self.DEBUG:
+ os.system("rm -rf %s"%tempdir)
+
+ def call(self, auth, node_id_or_hostname, action, filename):
+
+ ### check action
+ if action not in boot_medium_actions:
+ raise PLCInvalidArgument, "Unknown action %s"%action
+
+ ### compute file suffix
+ if action.find("-iso") >= 0 :
+ suffix=".iso"
+ elif action.find("-usb") >= 0:
+ suffix=".usb"
+ else:
+ suffix=".txt"
+
+ ### compute a 8 bytes random number
+ tempbytes = random.sample (xrange(0,256), 8);
+ def hexa2 (c):
+ return chr((c>>4)+65) + chr ((c&16)+65)
+ temp = "".join(map(hexa2,tempbytes))
+
+ ### check node if needed
+ if action.find("node-") == 0:
+ nodes = Nodes(self.api, [node_id_or_hostname])
+ if not nodes:
+ raise PLCInvalidArgument, "No such node %r"%node_id_or_hostname
+ node = nodes[0]
+ nodename = node['hostname']
+
+ else:
+ node = None
+ nodename = temp
+
+ ### handle filename
+ filename = filename.replace ("%d",self.NODEDIR)
+ filename = filename.replace ("%n",nodename)
+ filename = filename.replace ("%s",suffix)
+ filename = filename.replace ("%p",self.api.config.PLC_NAME)
+ # only if filename contains "%v", bootcd is maybe not avail ?
+ if filename.find("%v") >=0:
+ filename = filename.replace ("%v",self.bootcd_version())
+
+ ### Check filename location
+ if filename != '':
+ if 'admin' not in self.caller['roles']:
+ if ( filename.index(self.NODEDIR) != 0):
+ raise PLCInvalidArgument, "File %s not under %s"%(filename,self.NODEDIR)
+
+ ### output should not exist (concurrent runs ..)
+ if os.path.exists(filename):
+ raise PLCInvalidArgument, "Resulting file %s already exists"%filename
+
+ ### we can now safely create the file,
+ ### either we are admin or under a controlled location
+ if not os.path.exists(os.path.dirname(filename)):
+ try:
+ os.makedirs (os.path.dirname(filename),0777)
+ except:
+ raise PLCPermissionDenied, "Could not create dir %s"%os.path.dirname(filename)
+
+
+ ### generic media
+ if action == 'generic-iso' or action == 'generic-usb':
+ # this raises an exception if bootcd is missing
+ version = self.bootcd_version()
+ generic_name = "%s-BootCD-%s%s"%(self.api.config.PLC_NAME,
+ version,
+ suffix)
+ generic_path = "%s/%s" % (self.GENERICDIR,generic_name)
+
+ if filename:
+ ret=os.system ("cp %s %s"%(generic_path,filename))
+ if ret==0:
+ return filename
+ else:
+ raise PLCPermissionDenied, "Could not copy %s into"%(generic_path,filename)
+ else:
+ ### return the generic medium content as-is, just base64 encoded
+ return base64.b64encode(file(generic_path).read())
+
+ ### floppy preview
+ if action == 'node-preview':
+ floppy = self.floppy_contents (node,False)
+ if filename:
+ try:
+ file(filename,'w').write(floppy)
+ except:
+ raise PLCPermissionDenied, "Could not write into %s"%filename
+ return filename
+ else:
+ return floppy
+
+ if action == 'node-floppy':
+ floppy = self.floppy_contents (node,True)
+ if filename:
+ try:
+ file(filename,'w').write(floppy)
+ except:
+ raise PLCPermissionDenied, "Could not write into %s"%filename
+ return filename
+ else:
+ return floppy
+
+ ### we're left with node-iso and node-usb
+ if action == 'node-iso' or action == 'node-usb':
+
+ ### check we've got required material
+ version = self.bootcd_version()
+ generic_name = "%s-BootCD-%s%s"%(self.api.config.PLC_NAME,
+ version,
+ suffix)
+ generic_path = "%s/%s" % (self.GENERICDIR,generic_name)
+ if not os.path.isfile(generic_path):
+ raise PLCAPIError, "Cannot locate generic medium %s"%generic_path
+
+ if not os.path.isfile(self.BOOTCUSTOM):
+ raise PLCAPIError, "Cannot locate bootcustom script %s"%self.BOOTCUSTOM
+
+ # need a temporary area
+ tempdir = "%s/%s"%(self.WORKDIR,nodename)
+ if not os.path.isdir(tempdir):
+ try:
+ os.makedirs(tempdir,0777)
+ except:
+ raise PLCPermissionDenied, "Could not create dir %s"%tempdir
+
+ try:
+ # generate floppy config
+ floppy = self.floppy_contents(node,True)
+ # store it
+ node_floppy = "%s/%s"%(tempdir,nodename)
+ try:
+ file(node_floppy,"w").write(floppy)
+ except:
+ raise PLCPermissionDenied, "Could not write into %s"%node_floppy
+
+ # invoke bootcustom
+ bootcustom_command = 'sudo %s -C "%s" "%s" "%s"'%(self.BOOTCUSTOM,
+ tempdir,
+ generic_path,
+ node_floppy)
+ if self.DEBUG:
+ print 'bootcustom command:',bootcustom_command
+ ret=os.system(bootcustom_command)
+ if ret != 0:
+ raise PLCPermissionDenied,"bootcustom.sh failed to create node-specific medium"
+
+ node_image = "%s/%s%s"%(tempdir,nodename,suffix)
+ if not os.path.isfile (node_image):
+ raise PLCAPIError,"Unexpected location of bootcustom output - %s"%node_image
+
+ # cache result
+ if filename:
+ ret=os.system("mv %s %s"%(node_image,filename))
+ if ret != 0:
+ raise PLCAPIError, "Could not move node image %s into %s"%(node_image,filename)
+ self.cleandir(tempdir)
+ return filename
+ else:
+ result = file(node_image).read()
+ self.cleandir(tempdir)
+ return base64.b64encode(result)
+ except:
+ self.cleandir(tempdir)
+ raise
+
+ # we're done here, or we missed something
+ raise PLCAPIError,'Unhandled action %s'%action
+
--- /dev/null
+#
+# Thierry Parmentelat - INRIA
+#
+# $Revision: 88 $
+#
+from PLC.Method import Method
+from PLC.Parameter import Parameter, Mixed
+from PLC.Filter import Filter
+from PLC.Auth import Auth
+from PLC.NodeNetworkSettingTypes import NodeNetworkSettingType, NodeNetworkSettingTypes
+
+class GetNodeNetworkSettingTypes(Method):
+ """
+ Returns an array of structs containing details about
+ nodenetwork setting types.
+
+ The usual filtering scheme applies on this method.
+ """
+
+ roles = ['admin', 'pi', 'user', 'tech', 'node']
+
+ accepts = [
+ Auth(),
+ Mixed([Mixed(NodeNetworkSettingType.fields['nodenetwork_setting_type_id'],
+ NodeNetworkSettingType.fields['name'])],
+ Filter(NodeNetworkSettingType.fields)),
+ Parameter([str], "List of fields to return", nullok = True)
+ ]
+
+ returns = [NodeNetworkSettingType.fields]
+
+ def call(self, auth, nodenetwork_setting_type_filter = None, return_fields = None):
+ return NodeNetworkSettingTypes(self.api, nodenetwork_setting_type_filter, return_fields)
--- /dev/null
+#
+# Thierry Parmentelat - INRIA
+#
+# $Revision: 88 $
+#
+from PLC.Faults import *
+from PLC.Method import Method
+from PLC.Parameter import Parameter, Mixed
+from PLC.Filter import Filter
+from PLC.Persons import Person, Persons
+from PLC.Auth import Auth
+
+from PLC.NodeNetworkSettings import NodeNetworkSetting, NodeNetworkSettings
+from PLC.Sites import Site, Sites
+from PLC.NodeNetworks import NodeNetwork, NodeNetworks
+
+class GetNodeNetworkSettings(Method):
+ """
+ Returns an array of structs containing details about
+ nodenetworks and related settings.
+
+ If nodenetwork_setting_filter is specified and is an array of
+ nodenetwork setting identifiers, only nodenetwork settings matching
+ the filter will be returned. If return_fields is specified, only
+ the specified details will be returned.
+ """
+
+ roles = ['admin', 'pi', 'user', 'node']
+
+ accepts = [
+ Auth(),
+ Mixed([NodeNetworkSetting.fields['nodenetwork_setting_id']],
+ Parameter(int,"Nodenetwork setting id"),
+ Filter(NodeNetworkSetting.fields)),
+ Parameter([str], "List of fields to return", nullok = True)
+ ]
+
+ returns = [NodeNetworkSetting.fields]
+
+
+ def call(self, auth, nodenetwork_setting_filter = None, return_fields = None):
+
+ nodenetwork_settings = NodeNetworkSettings(self.api, nodenetwork_setting_filter, return_fields)
+
+ return nodenetwork_settings
--- /dev/null
+from PLC.Method import Method
+from PLC.Auth import Auth
+from PLC.Faults import *
+
+import re
+
+# matches lines to be skipped in /etc/myplc-release :
+# comment lines, blank lines, and 'xxxxx' placeholder lines
+comment_regexp = '\A\s*#.|\A\s*\Z|\Axxxxx'
+
+# named-group patterns for the two recognized line shapes :
+# 'build' lines ("build KEY : VALUE") and 'tags' lines ("KEY := VALUE")
+regexps = { 'build' : '\A[bB]uild\s+(?P<key>[^:]+)\s*:\s*(?P<value>.*)\Z',
+            'tags' : '\A(?P<key>[^:]+)\s*:=\s*(?P<value>.*)\Z'}
+
+class GetPlcRelease(Method):
+ """
+ Returns various information about the current myplc installation.
+ """
+
+ roles = ['admin', 'pi', 'user', 'tech', 'node', 'anonymous']
+
+ accepts = [
+ Auth(),
+ ]
+
+ # for now only return /etc/myplc-release verbatim
+ returns = { 'build' : 'information about the build',
+ 'tags' : 'describes the codebase location and tags used for building',
+ 'rpms' : 'details the rpm installed in the myplc chroot jail' }
+
+ def call(self, auth):
+
+ comment_matcher = re.compile(comment_regexp)
+
+ matchers = {}
+ result = {}
+ for field in regexps.keys():
+ matchers[field] = re.compile(regexps[field])
+ result[field]={}
+ result['rpms']="Not implemented yet"
+
+ try:
+ release = open('/etc/myplc-release')
+ for line in release.readlines():
+ line=line.strip()
+ if comment_matcher.match(line):
+ continue
+ for field in regexps.keys():
+ m=matchers[field].match(line)
+ if m:
+ (key,value)=m.groups(['key','value'])
+ result[field][key]=value
+ break
+ else:
+ if not result.has_key('unexpected'):
+ result['unexpected']=""
+ result['unexpected'] += (line+"\n")
+ except:
+ raise PLCNotImplemented, 'Cannot open /etc/myplc-release'
+ return result
--- /dev/null
+from PLC.Method import Method
+from PLC.Parameter import Parameter, Mixed
+from PLC.Filter import Filter
+from PLC.Auth import Auth
+from PLC.Persons import Person, Persons
+from PLC.Sites import Site, Sites
+from PLC.Slices import Slice, Slices
+from PLC.Keys import Key, Keys
+
+class GetSliceKeys(Method):
+    """
+    Returns an array of structs containing public key info for users in
+    the specified slices. If slice_filter is specified and is an array
+    of slice identifiers or slice names, or a struct of slice
+    attributes, only slices matching the filter will be returned. If
+    return_fields is specified, only the specified details will be
+    returned.
+
+    Users may only query slices of which they are members. PIs may
+    query any of the slices at their sites. Admins and nodes may query
+    any slice. If a slice that cannot be queried is specified in
+    slice_filter, details about that slice will not be returned.
+    """
+
+    roles = ['admin', 'pi', 'user', 'node']
+
+    accepts = [
+        Auth(),
+        Mixed([Mixed(Slice.fields['slice_id'],
+                     Slice.fields['name'])],
+              Filter(Slice.fields)),
+        Parameter([str], "List of fields to return", nullok = True)
+        ]
+
+    returns = [
+        {
+        'slice_id': Slice.fields['slice_id'],
+        'name': Slice.fields['name'],
+        'person_id': Person.fields['person_id'],
+        'email': Person.fields['email'],
+        'key': Key.fields['key']
+        }]
+
+    def call(self, auth, slice_filter = None, return_fields = None):
+        # the subset of fields each of the three tables may contribute
+        # to an output row
+        slice_fields = ['slice_id', 'name']
+        person_fields = ['person_id', 'email']
+        key_fields = ['key']
+
+        # If we are not admin, make sure to return only viewable
+        # slices.
+        if isinstance(self.caller, Person) and \
+           'admin' not in self.caller['roles']:
+            # Get slices that we are able to view
+            valid_slice_ids = self.caller['slice_ids']
+            # PIs may additionally view every slice at their sites
+            if 'pi' in self.caller['roles'] and self.caller['site_ids']:
+                sites = Sites(self.api, self.caller['site_ids'])
+                for site in sites:
+                    valid_slice_ids += site['slice_ids']
+
+            if not valid_slice_ids:
+                return []
+
+            # with no explicit filter, default to everything we may view
+            if slice_filter is None:
+                slice_filter = valid_slice_ids
+
+        # restrict the caller's requested fields to the supported ones,
+        # split per table
+        if return_fields:
+            slice_return_fields = filter(lambda field: field in slice_fields, return_fields)
+            person_return_fields = filter(lambda field: field in person_fields, return_fields)
+            key_return_fields = filter(lambda field: field in key_fields, return_fields)
+        else:
+            slice_return_fields = slice_fields
+            person_return_fields = person_fields
+            key_return_fields = key_fields
+
+        # Must query at least Slice.slice_id, Slice.person_ids,
+        # and Person.person_id and Person.key_ids so we can join data correctly
+        # (the *_added_fields sets remember what was added so it can be
+        # stripped again before building the result rows)
+        slice_added_fields = set(['slice_id', 'person_ids']).difference(slice_return_fields)
+        slice_return_fields += slice_added_fields
+        person_added_fields = set(['person_id', 'key_ids']).difference(person_return_fields)
+        person_return_fields += person_added_fields
+        key_added_fields = set(['key_id']).difference(key_return_fields)
+        key_return_fields += key_added_fields
+
+        # Get the slices, keyed on slice_id
+        all_slices = Slices(self.api, slice_filter, slice_return_fields).dict('slice_id')
+        slice_ids = all_slices.keys()
+        slices = all_slices.values()
+
+        # Filter out slices that are not viewable
+        if isinstance(self.caller, Person) and \
+           'admin' not in self.caller['roles']:
+            slices = filter(lambda slice: slice['slice_id'] in valid_slice_ids, slices)
+
+        # Get the persons belonging to the remaining slices
+        person_ids = set()
+        for slice in slices:
+            person_ids.update(slice['person_ids'])
+
+        all_persons = Persons(self.api, list(person_ids), person_return_fields).dict('person_id')
+        person_ids = all_persons.keys()
+        persons = all_persons.values()
+
+        # Get the keys of those persons
+        key_ids = set()
+        for person in persons:
+            key_ids.update(person['key_ids'])
+
+        all_keys = Keys(self.api, list(key_ids), key_return_fields).dict('key_id')
+        key_ids = all_keys.keys()
+        keys = all_keys.values()
+
+        # Create slice_keys list
+        slice_keys = []
+        # strip the join-only fields that were added above
+        slice_fields = list(set(slice_return_fields).difference(slice_added_fields))
+        person_fields = list(set(person_return_fields).difference(person_added_fields))
+        key_fields = list(set(key_return_fields).difference(key_added_fields))
+
+        # emit one output row per (slice, person, key) triple
+        for slice in slices:
+            slice_key = dict.fromkeys(slice_fields + person_fields + key_fields)
+            if not slice['person_ids']:
+                continue
+            for person_id in slice['person_ids']:
+                person = all_persons[person_id]
+                if not person['key_ids']:
+                    continue
+                for key_id in person['key_ids']:
+                    key = all_keys[key_id]
+                    slice_key.update(dict(filter(lambda (k, v): k in slice_fields, slice.items())))
+                    slice_key.update(dict(filter(lambda (k, v): k in person_fields, person.items())))
+                    slice_key.update(dict(filter(lambda (k, v): k in key_fields, key.items())))
+                    # copy : slice_key is reused across iterations
+                    slice_keys.append(slice_key.copy())
+
+        return slice_keys
+
--- /dev/null
+#
+# Thierry Parmentelat - INRIA
+#
+# $Revision: 90 $
+#
+
+from PLC.Faults import *
+from PLC.Method import Method
+from PLC.Parameter import Parameter, Mixed
+from PLC.Auth import Auth
+
+from PLC.NodeNetworkSettings import NodeNetworkSetting, NodeNetworkSettings
+from PLC.NodeNetworks import NodeNetwork, NodeNetworks
+
+from PLC.Nodes import Nodes
+from PLC.Sites import Sites
+
+class UpdateNodeNetworkSetting(Method):
+ """
+ Updates the value of an existing nodenetwork setting
+
+ Access rights depend on the nodenetwork setting type.
+
+ Returns 1 if successful, faults otherwise.
+ """
+
+ roles = ['admin', 'pi', 'tech', 'user']
+
+ accepts = [
+ Auth(),
+ NodeNetworkSetting.fields['nodenetwork_setting_id'],
+ NodeNetworkSetting.fields['value']
+ ]
+
+ returns = Parameter(int, '1 if successful')
+
+ object_type = 'NodeNetwork'
+
+ def call(self, auth, nodenetwork_setting_id, value):
+ nodenetwork_settings = NodeNetworkSettings(self.api, [nodenetwork_setting_id])
+ if not nodenetwork_settings:
+ raise PLCInvalidArgument, "No such nodenetwork setting %r"%nodenetwork_setting_id
+ nodenetwork_setting = nodenetwork_settings[0]
+
+ ### reproducing a check from UpdateSliceAttribute, looks dumb though
+ nodenetworks = NodeNetworks(self.api, [nodenetwork_setting['nodenetwork_id']])
+ if not nodenetworks:
+ raise PLCInvalidArgument, "No such nodenetwork %r"%nodenetwork_setting['nodenetwork_id']
+ nodenetwork = nodenetworks[0]
+
+ assert nodenetwork_setting['nodenetwork_setting_id'] in nodenetwork['nodenetwork_setting_ids']
+
+ # check permission : it not admin, is the user affiliated with the right site
+ if 'admin' not in self.caller['roles']:
+ # locate node
+ node = Nodes (self.api,[nodenetwork['node_id']])[0]
+ # locate site
+ site = Sites (self.api, [node['site_id']])[0]
+ # check caller is affiliated with this site
+ if self.caller['person_id'] not in site['person_ids']:
+ raise PLCPermissionDenied, "Not a member of the hosting site %s"%site['abbreviated_site']
+
+ required_min_role = nodenetwork_setting_type ['min_role_id']
+ if required_min_role is not None and \
+ min(self.caller['role_ids']) > required_min_role:
+ raise PLCPermissionDenied, "Not allowed to modify the specified nodenetwork setting, requires role %d",required_min_role
+
+ nodenetwork_setting['value'] = value
+ nodenetwork_setting.sync()
+
+ self.object_ids = [nodenetwork_setting['nodenetwork_setting_id']]
+ return 1
--- /dev/null
+#
+# Thierry Parmentelat - INRIA
+#
+# $Revision: 88 $
+#
+from PLC.Faults import *
+from PLC.Method import Method
+from PLC.Parameter import Parameter, Mixed
+from PLC.NodeNetworkSettingTypes import NodeNetworkSettingType, NodeNetworkSettingTypes
+from PLC.Auth import Auth
+
+# predicate selecting the setting-type fields that may be updated;
+# identifiers (nodenetwork_setting_type_id) are deliberately excluded
+can_update = lambda (field, value): field in \
+             ['name', 'description', 'category', 'min_role_id']
+
+class UpdateNodeNetworkSettingType(Method):
+    """
+    Updates the parameters of an existing setting type
+    with the values in nodenetwork_setting_type_fields.
+
+    Returns 1 if successful, faults otherwise.
+    """
+
+    # only admins may redefine setting types
+    roles = ['admin']
+
+    # advertised subset of NodeNetworkSettingType.fields that callers may pass
+    nodenetwork_setting_type_fields = dict(filter(can_update, NodeNetworkSettingType.fields.items()))
+
+    accepts = [
+        Auth(),
+        # setting type can be designated by numeric id or by name
+        Mixed(NodeNetworkSettingType.fields['nodenetwork_setting_type_id'],
+              NodeNetworkSettingType.fields['name']),
+        nodenetwork_setting_type_fields
+        ]
+
+    returns = Parameter(int, '1 if successful')
+
+    def call(self, auth, nodenetwork_setting_type_id_or_name, nodenetwork_setting_type_fields):
+        # silently drop any field the caller is not allowed to update
+        nodenetwork_setting_type_fields = dict(filter(can_update, nodenetwork_setting_type_fields.items()))
+
+        nodenetwork_setting_types = NodeNetworkSettingTypes(self.api, [nodenetwork_setting_type_id_or_name])
+        if not nodenetwork_setting_types:
+            raise PLCInvalidArgument, "No such setting type"
+        nodenetwork_setting_type = nodenetwork_setting_types[0]
+
+        nodenetwork_setting_type.update(nodenetwork_setting_type_fields)
+        nodenetwork_setting_type.sync()
+        # record the touched object for event logging
+        self.object_ids = [nodenetwork_setting_type['nodenetwork_setting_type_id']]
+
+        return 1
--- /dev/null
+-- IMPORTANT NOTICE
+--
+-- this down script is provided for convenience only
+-- DO NOT USE on an operational site
+-- the change in migration 003 involved creating
+-- the new view view_nodenetworks for fetching instances
+-- of NodeNetworks, AND altering NodeNetworks.py accordingly,
+-- so this change cannot be easily undone
+-- unless you also revert the API itself
+
+-- views must be dropped before the tables they depend on
+DROP VIEW view_nodenetworks;
+
+DROP VIEW view_nodenetwork_settings;
+
+DROP VIEW nodenetwork_settings;
+
+DROP TABLE nodenetwork_setting;
+
+DROP TABLE nodenetwork_setting_types;
+
+-- deflate subversion
+UPDATE plc_db_version SET subversion = 2;
+SELECT subversion from plc_db_version;
--- /dev/null
+#!/usr/bin/env plcsh
+
+nnst = GetNodeNetworkSettingTypes(['interface_name'])
+if nnst:
+ print 'NodeNetworkSettingType interface_name already defined'
+else:
+ AddNodeNetworkSettingType({
+ 'category':'general',
+ 'min_role_id':30,
+ 'name':'interface_name',
+ 'description':'allows to specify a custom interface name'
+ })
+nnst_ifname_id = GetNodeNetworkSettingTypes(['interface_name'])[0]['nodenetwork_setting_type_id']
+
+
+nnst = GetNodeNetworkSettingTypes(['ssid'])
+if nnst:
+ print 'NodeNetworkSettingType ssid already defined'
+else:
+ AddNodeNetworkSettingType({
+ 'category':'wifi',
+ 'min_role_id':30,
+ 'name':'ssid',
+ 'description':'allows to set ESSID'
+ })
+nnst_ssid_id = GetNodeNetworkSettingTypes(['ssid'])[0]['nodenetwork_setting_type_id']
+
+nodename = 'onelab2.inria.fr'
+
+nodenetwork_id=GetNodes(nodename)[0]['nodenetwork_ids'][0]
+
+#######
+nns_ifname = GetNodeNetworkSettings ({'nodenetwork_id':nodenetwork_id,
+ 'nodenetwork_setting_type_id':nnst_ifname_id})
+if nns_ifname:
+ print "interface name for %s already set (got %s - cat=%s)" %\
+ (nodename,nns_ifname[0]['value'],nns_ifname[0]['category'])
+else:
+ AddNodeNetworkSetting(nodenetwork_id, 'interface_name', 'custom-eth0')
+
+nns_ifname_id = GetNodeNetworkSettings ({'nodenetwork_id':nodenetwork_id,
+ 'nodenetwork_setting_type_id':nnst_ifname_id})[0]['nodenetwork_setting_id']
+#######
+nns_ssid = GetNodeNetworkSettings ({'nodenetwork_id':nodenetwork_id,
+ 'nodenetwork_setting_type_id':nnst_ssid_id})
+if nns_ssid:
+ print "ssid for %s already set (got %s - cat=%s)" %\
+ (nodename,nns_ifname[0]['value'],nns_ifname[0]['category'])
+else:
+ AddNodeNetworkSetting(nodenetwork_id, 'ssid', 'init-onelab-g')
+
+nns_ssid_id = GetNodeNetworkSettings ({'nodenetwork_id':nodenetwork_id,
+ 'nodenetwork_setting_type_id':nnst_ssid_id})[0]['nodenetwork_setting_id']
+
+#######
+
+UpdateNodeNetworkSetting (nns_ssid_id,'onelab-g')
+
+DeleteNodeNetworkSetting (nns_ifname_id)
+
--- /dev/null
+--
+-- Thierry Parmentelat -- INRIA
+--
+-- migration 003
+--
+-- purpose : provide a generic mechanism for assigning
+-- nodenetworks (read, network interfaces) with
+-- custom settings
+--
+-- design
+-- mimics the way slice attributes are being handled,
+-- not that this design is particularly attractive
+-- but let's not add confusion here
+-- i.e:
+-- (*) nodenetwork_setting_types (see slice_attribute_types)
+-- allows one to define a new setting
+-- e.g, define one such object for storing wifi SSID
+--
+-- (*) nodenetwork_setting (see slice_attribute)
+-- allows one to associate a nodenetwork, a nodenetwork_setting_type, and a value
+--
+-- NOTE. with slice_attributes there is a trick that allows one to define
+-- the attribute either on the slice globally or on a particular node only.
+-- of course we do not need such a trick
+
+CREATE TABLE nodenetwork_setting_types (
+    nodenetwork_setting_type_id serial PRIMARY KEY,
+                                         -- Setting Type Identifier
+    name text UNIQUE NOT NULL,           -- Setting Name
+    description text,                    -- Optional Description
+    category text NOT NULL,              -- Category, e.g. Wifi, or whatever
+    min_role_id integer references roles -- If set, minimal role required
+) WITH OIDS;
+
+CREATE TABLE nodenetwork_setting (
+    nodenetwork_setting_id serial PRIMARY KEY, -- Nodenetwork Setting Identifier
+    nodenetwork_id integer REFERENCES nodenetworks NOT NULL,
+                                         -- the nodenetwork this applies to
+    nodenetwork_setting_type_id integer REFERENCES nodenetwork_setting_types NOT NULL,
+                                         -- the setting type
+    value text                           -- the actual setting value
+) WITH OIDS;
+
+
+-- per-nodenetwork aggregation of setting ids
+CREATE OR REPLACE VIEW nodenetwork_settings AS
+SELECT nodenetwork_id,
+array_accum(nodenetwork_setting_id) AS nodenetwork_setting_ids
+FROM nodenetwork_setting
+GROUP BY nodenetwork_id;
+
+-- settings expanded with their type's columns (name, category, min_role_id...)
+CREATE OR REPLACE VIEW view_nodenetwork_settings AS
+SELECT
+nodenetwork_setting.nodenetwork_setting_id,
+nodenetwork_setting.nodenetwork_id,
+nodenetwork_setting_types.nodenetwork_setting_type_id,
+nodenetwork_setting_types.name,
+nodenetwork_setting_types.description,
+nodenetwork_setting_types.category,
+nodenetwork_setting_types.min_role_id,
+nodenetwork_setting.value
+FROM nodenetwork_setting
+INNER JOIN nodenetwork_setting_types USING (nodenetwork_setting_type_id);
+
+-- nodenetworks together with their attached setting ids
+CREATE VIEW view_nodenetworks AS
+SELECT
+nodenetworks.nodenetwork_id,
+nodenetworks.node_id,
+nodenetworks.is_primary,
+nodenetworks.type,
+nodenetworks.method,
+nodenetworks.ip,
+nodenetworks.mac,
+nodenetworks.gateway,
+nodenetworks.network,
+nodenetworks.broadcast,
+nodenetworks.netmask,
+nodenetworks.dns1,
+nodenetworks.dns2,
+nodenetworks.bwlimit,
+nodenetworks.hostname,
+COALESCE((SELECT nodenetwork_setting_ids FROM nodenetwork_settings WHERE nodenetwork_settings.nodenetwork_id = nodenetworks.nodenetwork_id), '{}') AS nodenetwork_setting_ids
+FROM nodenetworks;
+
+-- Bump subversion
+UPDATE plc_db_version SET subversion = 3;
+SELECT subversion from plc_db_version;
--- /dev/null
+--
+-- migration 004
+-- bugfix
+-- the site_nodes view should restrict to nodes where deleted is false
+--
+
+CREATE OR REPLACE VIEW site_nodes AS
+SELECT site_id,
+array_accum(node_id) AS node_ids
+FROM nodes
+WHERE deleted is false
+GROUP BY site_id;
+
+-- Bump subversion
+UPDATE plc_db_version SET subversion = 4;
+SELECT subversion from plc_db_version;
+
--- /dev/null
+-- revert migration 005
+--
+-- this is a rather complex example, so for next times, make sure that you
+-- * first restore old columns or tables
+-- * then create or replace views
+-- * and only finally drop new columns and tables
+-- otherwise the columns may refuse to get dropped if they are still used by views
+--
+
+---------- creations
+
+-- restore the pre-005 events.object_type column
+ALTER TABLE events ADD object_type text NOT NULL Default 'Unknown';
+
+---------- view changes
+
+-- for some reason these views require to be dropped first
+DROP VIEW view_events;
+DROP VIEW event_objects;
+DROP VIEW view_nodes;
+DROP VIEW view_sites;
+
+CREATE OR REPLACE VIEW event_objects AS
+SELECT event_id,
+array_accum(object_id) AS object_ids
+FROM event_object
+GROUP BY event_id;
+
+-- pre-005 view_events: object_type on events, no auth_type
+CREATE OR REPLACE VIEW view_events AS
+SELECT
+events.event_id,
+events.person_id,
+events.node_id,
+events.fault_code,
+events.call_name,
+events.call,
+events.object_type,
+events.message,
+events.runtime,
+CAST(date_part('epoch', events.time) AS bigint) AS time,
+COALESCE((SELECT object_ids FROM event_objects WHERE event_objects.event_id = events.event_id), '{}') AS object_ids
+FROM events;
+
+-- pre-005 view_nodes: no last_contact column
+CREATE OR REPLACE VIEW view_nodes AS
+SELECT
+nodes.node_id,
+nodes.hostname,
+nodes.site_id,
+nodes.boot_state,
+nodes.deleted,
+nodes.model,
+nodes.boot_nonce,
+nodes.version,
+nodes.ssh_rsa_key,
+nodes.key,
+CAST(date_part('epoch', nodes.date_created) AS bigint) AS date_created,
+CAST(date_part('epoch', nodes.last_updated) AS bigint) AS last_updated,
+peer_node.peer_id,
+peer_node.peer_node_id,
+COALESCE((SELECT nodenetwork_ids FROM node_nodenetworks WHERE node_nodenetworks.node_id = nodes.node_id), '{}') AS nodenetwork_ids,
+COALESCE((SELECT nodegroup_ids FROM node_nodegroups WHERE node_nodegroups.node_id = nodes.node_id), '{}') AS nodegroup_ids,
+COALESCE((SELECT slice_ids FROM node_slices WHERE node_slices.node_id = nodes.node_id), '{}') AS slice_ids,
+COALESCE((SELECT pcu_ids FROM node_pcus WHERE node_pcus.node_id = nodes.node_id), '{}') AS pcu_ids,
+COALESCE((SELECT ports FROM node_pcus WHERE node_pcus.node_id = nodes.node_id), '{}') AS ports,
+COALESCE((SELECT conf_file_ids FROM node_conf_files WHERE node_conf_files.node_id = nodes.node_id), '{}') AS conf_file_ids,
+node_session.session_id AS session
+FROM nodes
+LEFT JOIN peer_node USING (node_id)
+LEFT JOIN node_session USING (node_id);
+
+-- pre-005 view_sites: no ext_consortium_id column
+CREATE OR REPLACE VIEW view_sites AS
+SELECT
+sites.site_id,
+sites.login_base,
+sites.name,
+sites.abbreviated_name,
+sites.deleted,
+sites.enabled,
+sites.is_public,
+sites.max_slices,
+sites.max_slivers,
+sites.latitude,
+sites.longitude,
+sites.url,
+CAST(date_part('epoch', sites.date_created) AS bigint) AS date_created,
+CAST(date_part('epoch', sites.last_updated) AS bigint) AS last_updated,
+peer_site.peer_id,
+peer_site.peer_site_id,
+COALESCE((SELECT person_ids FROM site_persons WHERE site_persons.site_id = sites.site_id), '{}') AS person_ids,
+COALESCE((SELECT node_ids FROM site_nodes WHERE site_nodes.site_id = sites.site_id), '{}') AS node_ids,
+COALESCE((SELECT address_ids FROM site_addresses WHERE site_addresses.site_id = sites.site_id), '{}') AS address_ids,
+COALESCE((SELECT slice_ids FROM site_slices WHERE site_slices.site_id = sites.site_id), '{}') AS slice_ids,
+COALESCE((SELECT pcu_ids FROM site_pcus WHERE site_pcus.site_id = sites.site_id), '{}') AS pcu_ids
+FROM sites
+LEFT JOIN peer_site USING (site_id);
+
+---------- deletions
+
+ALTER TABLE sites DROP COLUMN ext_consortium_id;
+
+ALTER TABLE nodes DROP COLUMN last_contact;
+
+DROP INDEX initscripts_name_idx;
+DROP TABLE initscripts;
+
+ALTER TABLE events DROP COLUMN auth_type;
+
+ALTER TABLE event_object DROP COLUMN object_type;
+
+---------- revert subversion
+
+UPDATE plc_db_version SET subversion = 4;
+SELECT subversion from plc_db_version;
--- /dev/null
+--
+-- migration 005
+-- to apply changes from import done in april 2007 from the
+-- planetlab-4_0-branch tag
+--
+-- this is a rather complex example, so for next times, make sure that you
+-- * first add new columns and new tables
+-- * then create or replace views
+-- * and only finally drop columns
+-- otherwise the columns may refuse to get dropped if they are still used by views
+--
+
+---------- creations
+
+ALTER TABLE sites ADD ext_consortium_id integer;
+
+ALTER TABLE nodes ADD last_contact timestamp without time zone;
+
+-- Initscripts
+CREATE TABLE initscripts (
+    initscript_id serial PRIMARY KEY, -- Initscript identifier
+    name text NOT NULL,               -- Initscript name
+    enabled bool NOT NULL DEFAULT true, -- Initscript is active
+    script text NOT NULL,             -- Initscript
+    UNIQUE (name)
+) WITH OIDS;
+CREATE INDEX initscripts_name_idx ON initscripts (name);
+
+-- rather drop the tables altogether,
+-- ALTER TABLE events ADD auth_type text;
+-- ALTER TABLE event_object ADD COLUMN object_type text NOT NULL Default 'Unknown';
+-- CREATE INDEX event_object_object_type_idx ON event_object (object_type);
+
+-- for some reason these views require to be dropped first
+DROP VIEW view_events;
+DROP VIEW event_objects;
+DROP VIEW view_nodes;
+DROP VIEW view_sites;
+
+----dropping tables must be preceded by dropping views using those tables
+----otherwise dependency problems
+DROP TABLE event_object;
+DROP TABLE events;
+
+-- recreate events with auth_type, object_type moved to event_object
+CREATE TABLE events (
+    event_id serial PRIMARY KEY,          -- Event identifier
+    person_id integer REFERENCES persons, -- Person responsible for event, if any
+    node_id integer REFERENCES nodes,     -- Node responsible for event, if any
+    auth_type text,                       -- Type of auth used. i.e. AuthMethod
+    fault_code integer NOT NULL DEFAULT 0, -- Did this event result in error
+    call_name text NOT NULL,              -- Call responsible for this event
+    call text NOT NULL,                   -- Call responsible for this event, including parameters
+    message text,                         -- High level description of this event
+    runtime float DEFAULT 0,              -- Event run time
+    time timestamp without time zone NOT NULL DEFAULT CURRENT_TIMESTAMP -- Event timestamp
+) WITH OIDS;
+
+-- Database object(s) that may have been affected by a particular event
+CREATE TABLE event_object (
+    event_id integer REFERENCES events NOT NULL, -- Event identifier
+    object_id integer NOT NULL,                  -- Object identifier
+    object_type text NOT NULL Default 'Unknown'  -- What type of object is this event affecting
+) WITH OIDS;
+CREATE INDEX event_object_event_id_idx ON event_object (event_id);
+CREATE INDEX event_object_object_id_idx ON event_object (object_id);
+CREATE INDEX event_object_object_type_idx ON event_object (object_type);
+
+---------- view changes
+
+CREATE OR REPLACE VIEW event_objects AS
+SELECT event_id,
+array_accum(object_id) AS object_ids,
+array_accum(object_type) AS object_types
+FROM event_object
+GROUP BY event_id;
+
+CREATE OR REPLACE VIEW view_events AS
+SELECT
+events.event_id,
+events.person_id,
+events.node_id,
+events.auth_type,
+events.fault_code,
+events.call_name,
+events.call,
+events.message,
+events.runtime,
+CAST(date_part('epoch', events.time) AS bigint) AS time,
+COALESCE((SELECT object_ids FROM event_objects WHERE event_objects.event_id = events.event_id), '{}') AS object_ids,
+COALESCE((SELECT object_types FROM event_objects WHERE event_objects.event_id = events.event_id), '{}') AS object_types
+FROM events;
+
+-- view_nodes now exposes the new last_contact column
+CREATE OR REPLACE VIEW view_nodes AS
+SELECT
+nodes.node_id,
+nodes.hostname,
+nodes.site_id,
+nodes.boot_state,
+nodes.deleted,
+nodes.model,
+nodes.boot_nonce,
+nodes.version,
+nodes.ssh_rsa_key,
+nodes.key,
+CAST(date_part('epoch', nodes.date_created) AS bigint) AS date_created,
+CAST(date_part('epoch', nodes.last_updated) AS bigint) AS last_updated,
+CAST(date_part('epoch', nodes.last_contact) AS bigint) AS last_contact,
+peer_node.peer_id,
+peer_node.peer_node_id,
+COALESCE((SELECT nodenetwork_ids FROM node_nodenetworks WHERE node_nodenetworks.node_id = nodes.node_id), '{}') AS nodenetwork_ids,
+COALESCE((SELECT nodegroup_ids FROM node_nodegroups WHERE node_nodegroups.node_id = nodes.node_id), '{}') AS nodegroup_ids,
+COALESCE((SELECT slice_ids FROM node_slices WHERE node_slices.node_id = nodes.node_id), '{}') AS slice_ids,
+COALESCE((SELECT pcu_ids FROM node_pcus WHERE node_pcus.node_id = nodes.node_id), '{}') AS pcu_ids,
+COALESCE((SELECT ports FROM node_pcus WHERE node_pcus.node_id = nodes.node_id), '{}') AS ports,
+COALESCE((SELECT conf_file_ids FROM node_conf_files WHERE node_conf_files.node_id = nodes.node_id), '{}') AS conf_file_ids,
+node_session.session_id AS session
+FROM nodes
+LEFT JOIN peer_node USING (node_id)
+LEFT JOIN node_session USING (node_id);
+
+-- view_sites now exposes the new ext_consortium_id column
+CREATE OR REPLACE VIEW view_sites AS
+SELECT
+sites.site_id,
+sites.login_base,
+sites.name,
+sites.abbreviated_name,
+sites.deleted,
+sites.enabled,
+sites.is_public,
+sites.max_slices,
+sites.max_slivers,
+sites.latitude,
+sites.longitude,
+sites.url,
+sites.ext_consortium_id,
+CAST(date_part('epoch', sites.date_created) AS bigint) AS date_created,
+CAST(date_part('epoch', sites.last_updated) AS bigint) AS last_updated,
+peer_site.peer_id,
+peer_site.peer_site_id,
+COALESCE((SELECT person_ids FROM site_persons WHERE site_persons.site_id = sites.site_id), '{}') AS person_ids,
+COALESCE((SELECT node_ids FROM site_nodes WHERE site_nodes.site_id = sites.site_id), '{}') AS node_ids,
+COALESCE((SELECT address_ids FROM site_addresses WHERE site_addresses.site_id = sites.site_id), '{}') AS address_ids,
+COALESCE((SELECT slice_ids FROM site_slices WHERE site_slices.site_id = sites.site_id), '{}') AS slice_ids,
+COALESCE((SELECT pcu_ids FROM site_pcus WHERE site_pcus.site_id = sites.site_id), '{}') AS pcu_ids
+FROM sites
+LEFT JOIN peer_site USING (site_id);
+
+---------- deletions
+-- no need to drop this column, it does not exist anymore
+-- (the events table was recreated above without it)
+-----ALTER TABLE events DROP COLUMN object_type;
+
+---------- bump subversion
+
+UPDATE plc_db_version SET subversion = 5;
+SELECT subversion from plc_db_version;
--- /dev/null
+-- revert migration 006 (slice_attribute.nodegroup_id)
+
+---------- view changes
+
+DROP VIEW view_slice_attributes;
+
+-- pre-006 view_slice_attributes: no nodegroup_id column
+CREATE OR REPLACE VIEW view_slice_attributes AS
+SELECT
+slice_attribute.slice_attribute_id,
+slice_attribute.slice_id,
+slice_attribute.node_id,
+slice_attribute_types.attribute_type_id,
+slice_attribute_types.name,
+slice_attribute_types.description,
+slice_attribute_types.min_role_id,
+slice_attribute.value
+FROM slice_attribute
+INNER JOIN slice_attribute_types USING (attribute_type_id);
+
+---------- deletions
+DROP INDEX slice_attribute_nodegroup_id_idx;
+ALTER TABLE slice_attribute DROP COLUMN nodegroup_id;
+
+---------- revert subversion
+
+UPDATE plc_db_version SET subversion = 5;
+SELECT subversion from plc_db_version;
--- /dev/null
+-- migration 006
+-- allow slice attributes to be scoped on a nodegroup
+
+---------- creations
+
+ALTER TABLE slice_attribute ADD nodegroup_id integer REFERENCES nodegroups;
+
+CREATE INDEX slice_attribute_nodegroup_id_idx ON slice_attribute (nodegroup_id);
+
+---------- view changes
+
+DROP VIEW view_slice_attributes;
+
+-- view now exposes the new nodegroup_id column
+CREATE OR REPLACE VIEW view_slice_attributes AS
+SELECT
+slice_attribute.slice_attribute_id,
+slice_attribute.slice_id,
+slice_attribute.node_id,
+slice_attribute.nodegroup_id,
+slice_attribute_types.attribute_type_id,
+slice_attribute_types.name,
+slice_attribute_types.description,
+slice_attribute_types.min_role_id,
+slice_attribute.value
+FROM slice_attribute
+INNER JOIN slice_attribute_types USING (attribute_type_id);
+
+
+---------- bump subversion
+
+UPDATE plc_db_version SET subversion = 6;
+SELECT subversion from plc_db_version;
--- /dev/null
+--
+-- migration 007 - revert
+--
+
+DROP VIEW view_event_objects;
+
+---------- revert subversion
+
+UPDATE plc_db_version SET subversion = 6;
+SELECT subversion from plc_db_version;
+
--- /dev/null
+--
+-- migration 007
+-- change the way event objects are fetched, use a view for that purpose
+--
+
+-- one row per (event, affected object) pair; events without
+-- objects still appear once thanks to the LEFT JOIN
+CREATE OR REPLACE VIEW view_event_objects AS
+SELECT
+events.event_id,
+events.person_id,
+events.node_id,
+events.fault_code,
+events.call_name,
+events.call,
+events.message,
+events.runtime,
+CAST(date_part('epoch', events.time) AS bigint) AS time,
+event_object.object_id,
+event_object.object_type
+FROM events LEFT JOIN event_object USING (event_id);
+
+
+---------- bump subversion
+
+UPDATE plc_db_version SET subversion = 7;
+SELECT subversion from plc_db_version;
--- /dev/null
+-- revert migration 008 (nm-controller instantiation, slice whitelists)
+
+DELETE from slice_instantiations WHERE instantiation='nm-controller';
+
+
+-- views must be dropped before the table they depend on
+DROP VIEW view_nodes;
+DROP VIEW node_slices_whitelist;
+DROP TABLE node_slice_whitelist;
+
+-- pre-008 view_nodes: no slice_ids_whitelist column
+CREATE OR REPLACE VIEW view_nodes AS
+SELECT
+nodes.node_id,
+nodes.hostname,
+nodes.site_id,
+nodes.boot_state,
+nodes.deleted,
+nodes.model,
+nodes.boot_nonce,
+nodes.version,
+nodes.ssh_rsa_key,
+nodes.key,
+CAST(date_part('epoch', nodes.date_created) AS bigint) AS date_created,
+CAST(date_part('epoch', nodes.last_updated) AS bigint) AS last_updated,
+CAST(date_part('epoch', nodes.last_contact) AS bigint) AS last_contact,
+peer_node.peer_id,
+peer_node.peer_node_id,
+COALESCE((SELECT nodenetwork_ids FROM node_nodenetworks WHERE node_nodenetworks.node_id = nodes.node_id), '{}') AS nodenetwork_ids,
+COALESCE((SELECT nodegroup_ids FROM node_nodegroups WHERE node_nodegroups.node_id = nodes.node_id), '{}') AS nodegroup_ids,
+COALESCE((SELECT slice_ids FROM node_slices WHERE node_slices.node_id = nodes.node_id), '{}') AS slice_ids,
+COALESCE((SELECT pcu_ids FROM node_pcus WHERE node_pcus.node_id = nodes.node_id), '{}') AS pcu_ids,
+COALESCE((SELECT ports FROM node_pcus WHERE node_pcus.node_id = nodes.node_id), '{}') AS ports,
+COALESCE((SELECT conf_file_ids FROM node_conf_files WHERE node_conf_files.node_id = nodes.node_id), '{}') AS conf_file_ids,
+node_session.session_id AS session
+FROM nodes
+LEFT JOIN peer_node USING (node_id)
+LEFT JOIN node_session USING (node_id);
+
+
+---------- revert subversion
+
+UPDATE plc_db_version SET subversion = 7;
+SELECT subversion from plc_db_version;
--- /dev/null
+--
+-- migration 008
+-- import from Princeton codebase on august 2007 28
+-- slice instantiation 'nm-controller'
+-- * white lists
+--
+
+INSERT INTO slice_instantiations (instantiation) VALUES ('nm-controller'); -- NM Controller
+
+--------------------------------------------------------------------------------
+-- Slice whitelist
+--------------------------------------------------------------------------------
+-- slice whitelist on nodes
+CREATE TABLE node_slice_whitelist (
+    node_id integer REFERENCES nodes NOT NULL,   -- Node id of whitelist
+    slice_id integer REFERENCES slices NOT NULL, -- Slice id that is allowed on this node
+    PRIMARY KEY (node_id, slice_id)
+) WITH OIDS;
+CREATE INDEX node_slice_whitelist_node_id_idx ON node_slice_whitelist (node_id);
+CREATE INDEX node_slice_whitelist_slice_id_idx ON node_slice_whitelist (slice_id);
+
+-- Slices on each node
+CREATE VIEW node_slices_whitelist AS
+SELECT node_id,
+array_accum(slice_id) AS slice_ids_whitelist
+FROM node_slice_whitelist
+GROUP BY node_id;
+
+DROP VIEW view_nodes;
+
+-- view_nodes now exposes the new slice_ids_whitelist column
+CREATE OR REPLACE VIEW view_nodes AS
+SELECT
+nodes.node_id,
+nodes.hostname,
+nodes.site_id,
+nodes.boot_state,
+nodes.deleted,
+nodes.model,
+nodes.boot_nonce,
+nodes.version,
+nodes.ssh_rsa_key,
+nodes.key,
+CAST(date_part('epoch', nodes.date_created) AS bigint) AS date_created,
+CAST(date_part('epoch', nodes.last_updated) AS bigint) AS last_updated,
+CAST(date_part('epoch', nodes.last_contact) AS bigint) AS last_contact,
+peer_node.peer_id,
+peer_node.peer_node_id,
+COALESCE((SELECT nodenetwork_ids FROM node_nodenetworks WHERE node_nodenetworks.node_id = nodes.node_id), '{}') AS nodenetwork_ids,
+COALESCE((SELECT nodegroup_ids FROM node_nodegroups WHERE node_nodegroups.node_id = nodes.node_id), '{}') AS nodegroup_ids,
+COALESCE((SELECT slice_ids FROM node_slices WHERE node_slices.node_id = nodes.node_id), '{}') AS slice_ids,
+COALESCE((SELECT slice_ids_whitelist FROM node_slices_whitelist WHERE node_slices_whitelist.node_id = nodes.node_id), '{}') AS slice_ids_whitelist,
+COALESCE((SELECT pcu_ids FROM node_pcus WHERE node_pcus.node_id = nodes.node_id), '{}') AS pcu_ids,
+COALESCE((SELECT ports FROM node_pcus WHERE node_pcus.node_id = nodes.node_id), '{}') AS ports,
+COALESCE((SELECT conf_file_ids FROM node_conf_files WHERE node_conf_files.node_id = nodes.node_id), '{}') AS conf_file_ids,
+node_session.session_id AS session
+FROM nodes
+LEFT JOIN peer_node USING (node_id)
+LEFT JOIN node_session USING (node_id);
+
+---------- bump subversion
+
+UPDATE plc_db_version SET subversion = 8;
+SELECT subversion from plc_db_version;