From: Tony Mack Date: Wed, 12 Sep 2012 19:52:45 +0000 (-0400) Subject: cleanup X-Git-Url: http://git.onelab.eu/?a=commitdiff_plain;h=169dca3c68daf0d063d457f62a3f7e8525209174;p=plcapi.git cleanup --- diff --git a/ModPython.py b/ModPython.py deleted file mode 100644 index 07a0fda..0000000 --- a/ModPython.py +++ /dev/null @@ -1,63 +0,0 @@ -# -# Apache mod_python interface -# -# Aaron Klingaman -# Mark Huang -# -# Copyright (C) 2004-2006 The Trustees of Princeton University -# - -import sys -import time -import traceback -import xmlrpclib -from mod_python import apache - -from PLC.Debug import log - -from PLC.API import PLCAPI -api = PLCAPI() - -def handler(req): - try: - if req.method != "POST": - req.content_type = "text/html" - req.send_http_header() - req.write(""" - -PLCAPI XML-RPC/SOAP Interface - -

PLCAPI XML-RPC/SOAP Interface

-

Please use XML-RPC or SOAP to access the PLCAPI.

- -""") - return apache.OK - - # Read request - request = req.read(int(req.headers_in['content-length'])) - - # mod_python < 3.2: The IP address portion of remote_addr is - # incorrect (always 0.0.0.0) when IPv6 is enabled. - # http://issues.apache.org/jira/browse/MODPYTHON-64?page=all - (remote_ip, remote_port) = req.connection.remote_addr - remote_addr = (req.connection.remote_ip, remote_port) - - # Handle request - response = api.handle(remote_addr, request) - - # Shut down database connection, otherwise up to MaxClients DB - # connections will remain open. - api.db.close() - - # Write response - req.content_type = "text/xml; charset=" + api.encoding - req.send_http_header() - req.write(response) - - return apache.OK - - except Exception, err: - # Log error in /var/log/httpd/(ssl_)?error_log - t = "[" + time.ctime() + "] [error]" - print >> log, t, err, traceback.format_exc() - return apache.HTTP_INTERNAL_SERVER_ERROR diff --git a/ModPythonJson.py b/ModPythonJson.py deleted file mode 100644 index 22dbdc7..0000000 --- a/ModPythonJson.py +++ /dev/null @@ -1,61 +0,0 @@ -# -# Apache mod_python interface for JSON requests -# -# Aaron Klingaman -# Mark Huang -# -# Copyright (C) 2004-2006 The Trustees of Princeton University -# - -import sys -import traceback -import xmlrpclib -from mod_python import apache - -from PLC.Debug import log - -from PLC.API import PLCAPI -api = PLCAPI() - -def handler(req): - try: - if req.method != "POST": - req.content_type = "text/html" - req.send_http_header() - req.write(""" - -PLCAPI JSON Interface - -

PLCAPI JSON Interface

-

Please POST JSON to access the PLCAPI.

- -""") - return apache.OK - - # Read request - request = req.read(int(req.headers_in['content-length'])) - - # mod_python < 3.2: The IP address portion of remote_addr is - # incorrect (always 0.0.0.0) when IPv6 is enabled. - # http://issues.apache.org/jira/browse/MODPYTHON-64?page=all - (remote_ip, remote_port) = req.connection.remote_addr - remote_addr = (req.connection.remote_ip, remote_port) - - # Handle request - response = api.handle_json(remote_addr, request) - - # Shut down database connection, otherwise up to MaxClients DB - # connections will remain open. - api.db.close() - - # Write response - req.content_type = "text/json; charset=" + api.encoding - req.send_http_header() - req.write(response) - - return apache.OK - - except Exception, err: - # Log error in /var/log/httpd/(ssl_)?error_log - print >> log, err, traceback.format_exc() - return apache.HTTP_INTERNAL_SERVER_ERROR diff --git a/ModWSGI.wsgi b/ModWSGI.wsgi deleted file mode 100644 index b2d4955..0000000 --- a/ModWSGI.wsgi +++ /dev/null @@ -1,51 +0,0 @@ -# -# Apache mod_wsgi python interface -# -# Copyright (C) 2004-2006 The Trustees of Princeton University -# $Id: ModWSGI.py 14587 2009-07-19 13:18:50Z tmack $ -# $URL: svn+ssh://svn.planet-lab.org/svn/PLCAPI/trunk/ModWSGI.py $ -# - -import sys -sys.path.append('/usr/share/plc_api') -sys.stdout = sys.stderr -import traceback -from PLC.Debug import log -from PLC.API import PLCAPI - -api = PLCAPI() - -def application(environ, start_response): - try: - status = '200 OK' - if environ.get('REQUEST_METHOD') != 'POST': - content_type = 'text/html' - output = """ - -PLCAPI WSGI XML-RPC/SOAP Interface - -

PLCAPI WSGI XML-RPC/SOAP Interface

-

Please use XML-RPC or SOAP to access the PLCAPI.

- -""" - else: - api.environ = environ - content_type = 'text/xml' - ip = environ.get('REMOTE_ADDR') - port = environ.get('REMOTE_PORT') - output = api.handle((ip,port), environ.get('wsgi.input').read()) - # Shut down database connection, otherwise up to MaxClients DB - # connections will remain open. - api.db.close() - except Exception, err: - status = '500 Internal Server Error' - content_type = 'text/html' - output = 'Internal Server Error' - print >> log, err, traceback.format_exc() - - # Write response - response_headers = [('Content-type', '%s' % content_type), - ('Content-Length', str(len(output)))] - start_response(status, response_headers) - return [output] - diff --git a/db-config.d/000-functions b/db-config.d/000-functions deleted file mode 100644 index 63c3ed7..0000000 --- a/db-config.d/000-functions +++ /dev/null @@ -1,175 +0,0 @@ -# -*-python-*- -#################### -import sys, os - -g_url = "" -def GetMyPLCURL(): return g_url -def SetMyPLCURL(url): - global g_url - g_url = url - -# Get all currently registered roles -g_role_names = [ role['name'] for role in GetRoles()] -g_role_names.sort() - -def SetRole(level, role): - global g_role_names - if role not in g_role_names: - AddRole(level, role) - g_role_names.append(role) - g_role_names.sort() - -# Get list of existing tag types -g_known_tag_types = [tag_type['tagname'] for tag_type in GetTagTypes()] -g_known_tag_types.sort() - -def SetTagType(tag_type): - try: - tagname=tag_type['tagname'] - global g_known_tag_types - # handle 'roles' field differently - if 'roles' in tag_type: - roles=tag_type['roles'] - del tag_type['roles'] - else: - roles=['admin'] - # just in case - if 'min_role_id' in tag_type: - print "WARNING: ignoring deprecated field min_role_id for tagtype %s"%tagname - del tag_type['min_role_id'] - # Create/update default slice tag types - if tagname not in g_known_tag_types: - AddTagType(tag_type) - g_known_tag_types.append(tagname) - g_known_tag_types.sort() - else: - 
UpdateTagType(tagname, tag_type) - # enforce provided roles if present - old_roles=GetTagTypes(tagname)[0]['roles'] - for minus_role in set(old_roles).difference(set(roles)): - DeleteRoleFromTagType(minus_role,tagname) - for plus_role in set(roles).difference(set(old_roles)): - AddRoleToTagType(plus_role,tagname) - except: - # something went wrong for that tagname, - # but don't want to break the whole startup sequence - print "Could not enforce tagtype %s --- beg"%tagname - import traceback - traceback.print_exc() - print "Could not enforce tagtype %s --- end"%tagname - -# Get list of existing (enabled, global) files -g_conf_files = GetConfFiles() -g_conf_files = filter(lambda conf_file: conf_file['enabled'] and \ - not conf_file['node_ids'] and \ - not conf_file['nodegroup_ids'], - g_conf_files) -g_dests = [conf_file['dest'] for conf_file in g_conf_files] -g_conf_files = dict(zip(g_dests, g_conf_files)) - -# Get list of existing initscripts -g_oldinitscripts = GetInitScripts() -g_oldinitscript_names = [script['name'] for script in g_oldinitscripts] -g_oldinitscripts = dict(zip(g_oldinitscript_names, g_oldinitscripts)) - -def SetInitScript(initscript): - global g_oldinitscripts, g_oldinitscript_names - if initscript['name'] not in g_oldinitscript_names: - initscript_id = AddInitScript(initscript) - g_oldinitscript_names.append(initscript['name']) - initscript['initscript_id']=initscript_id - g_oldinitscripts[initscript['name']]=initscript - else: - orig_initscript = g_oldinitscripts[initscript['name']] - initscript_id = orig_initscript['initscript_id'] - UpdateInitScript(initscript_id, initscript) - -def SetConfFile(conf_file): - global g_conf_files, g_dests - if conf_file['dest'] not in g_dests: - AddConfFile(conf_file) - else: - orig_conf_file = g_conf_files[conf_file['dest']] - conf_file_id = orig_conf_file['conf_file_id'] - UpdateConfFile(conf_file_id, conf_file) - -def SetSlice(slice, tags): - try: - # Create or Update slice - slice_name = slice['name'] - 
slices = GetSlices([slice_name]) - if len(slices)==1: - slice_id = slices[0]['slice_id'] - if slice.has_key('name'): - del slice['name'] - UpdateSlice(slice_id, slice) - slice['name']=slice_name - else: - expires = None - if slice.has_key('expires'): - expires = slice['expires'] - del slice['expires'] - slice_id = AddSlice(slice) - if expires <> None: - UpdateSlice(slice_id, {'expires':expires}) - - # Get slice structure with all fields - slice = GetSlices([slice_name])[0] - - # Create/delete all tags - # NOTE: update is not needed, since unspecified tags are deleted, - # and new tags are added - slice_tags = [] - if slice['slice_tag_ids']: - # Delete unknown attributes - for slice_tag in GetSliceTags(slice['slice_tag_ids']): - # ignore sliver tags, as those are custom/run-time values - if slice_tag['node_id'] <> None: continue - if (slice_tag['tagname'], slice_tag['value']) not in tags: - DeleteSliceTag(slice_tag['slice_tag_id']) - else: - slice_tags.append((slice_tag['tagname'],slice_tag['value'])) - - # only add slice tags that are new - for (name, value) in tags: - if (name,value) not in slice_tags: - AddSliceTag(slice_name, name, value) - else: - # NOTE: this confirms that the user-specified tag is - # returned by GetSliceTags - pass - except: - # something went wrong for that tagname, - print "Could not create init slice %s --- beg"%slice['name'] - import traceback - traceback.print_exc() - print "Could not create init slice %s --- end"%slice['name'] - -def SetMessage(message): - messages = GetMessages([message['message_id']]) - if len(messages)==0: - AddMessage(message) - ### Thierry 2012-03 - # let people customize their messages if they want to - #else: - # UpdateMessage(message['message_id'],message) - -# Get all model names -g_pcu_models = [type['model'] for type in GetPCUTypes()] - -def SetPCUType(pcu_type): - global g_pcu_models - if 'pcu_protocol_types' in pcu_type: - protocol_types = pcu_type['pcu_protocol_types'] - # Take this value out of the 
struct. - del pcu_type['pcu_protocol_types'] - else: - protocol_types = [] - - if pcu_type['model'] not in g_pcu_models: - # Add the name/model info into DB - id = AddPCUType(pcu_type) - # for each protocol, also add this. - for ptype in protocol_types: - AddPCUProtocolType(id, ptype) - diff --git a/db-config.d/001-admin_user b/db-config.d/001-admin_user deleted file mode 100644 index 6532530..0000000 --- a/db-config.d/001-admin_user +++ /dev/null @@ -1,22 +0,0 @@ -# -*-python-*- -#################### -# Create/update the default administrator account (should be person_id 2). - -admin = { 'person_id': 2, - 'first_name': "Default", - 'last_name': "Administrator", - 'email': plc['root_user'], - 'password': plc['root_password'] } -persons = GetPersons([admin['person_id']]) -if not persons: - person_id = AddPerson(admin) - if person_id != admin['person_id']: - # Huh? Someone deleted the account manually from the database. - DeletePerson(person_id) - raise Exception, "Someone deleted the \"%s %s\" account from the database!" 
% \ - (admin['first_name'], admin['last_name']) - UpdatePerson(person_id, { 'enabled': True }) -else: - person_id = persons[0]['person_id'] - UpdatePerson(person_id, admin) - diff --git a/db-config.d/002-system_site b/db-config.d/002-system_site deleted file mode 100644 index 12e00a7..0000000 --- a/db-config.d/002-system_site +++ /dev/null @@ -1,67 +0,0 @@ -# -*-python-*- -#################### -# Create/update and populate the default site (should be site_id 1) - -### plc_www holds the contents of the PLC_WWW configuration category -if plc_www['port'] == '80': - url = "http://" + plc_www['host'] + "/" -elif plc_www['port'] == '443': - url = "https://" + plc_www['host'] + "/" -else: - url = "http://" + plc_www['host'] + ":" + plc_www['port'] + "/" - -SetMyPLCURL(url) - -site = { 'site_id': 1, - 'name': plc['name'] + " Central", - 'abbreviated_name': plc['name'], - 'login_base': plc['slice_prefix'], - 'is_public': False, - 'url': url, - 'max_slices': 100 } - -sites = GetSites([site['site_id']]) -if not sites: - site_id = AddSite(site['name'], site['abbreviated_name'], site['login_base'], site) - if site_id != site['site_id']: - DeleteSite(site_id) - raise Exception, "Someone deleted the \"%s\" site from the database!" % \ - site['name'] - sites = [site] - -# Must call UpdateSite() even after AddSite() to update max_slices -site_id = sites[0]['site_id'] -UpdateSite(site_id, site) - -# The default administrator account must be associated with a site -# in order to login. 
-AddPersonToSite(admin['person_id'], site['site_id']) -SetPersonPrimarySite(admin['person_id'], site['site_id']) - -# Grant admin and PI roles to the default administrator account -AddRoleToPerson(10, admin['person_id']) -AddRoleToPerson(20, admin['person_id']) - -# Associate root ssh key with the default administrator -keyfile=plc['root_ssh_key_pub'] -person = GetPersons(admin['person_id'])[0] -keys = GetKeys(person['key_ids']) -if os.path.exists(keyfile): - sshkeyfp = file(keyfile,"r") - sshkey = sshkeyfp.read() - sshkeyfp.close() - - found=False - for key in keys: - if key['key_type']=='ssh': - if key['key'] == sshkey: - found=True - else: - # should we delete other keys? - pass - if not found: - key_id = AddPersonKey(admin['person_id'],{'key_type':'ssh','key':sshkey}) -else: - if len(keys)==0: - print "WARNING: default administrator does not have an ssh key" - print "and the default ssh root pub key (%s) file does not exist." % keyfile diff --git a/db-config.d/003-accessors b/db-config.d/003-accessors deleted file mode 100644 index 6b43079..0000000 --- a/db-config.d/003-accessors +++ /dev/null @@ -1,4 +0,0 @@ -# -*-python-*- -from PLC.Accessor import AccessorSingleton - -AccessorSingleton(api).run_all_tag_locators() diff --git a/db-config.d/010-slice_tags b/db-config.d/010-slice_tags deleted file mode 100644 index 4aabfd2..0000000 --- a/db-config.d/010-slice_tags +++ /dev/null @@ -1,149 +0,0 @@ -# -*-python-*- -#################### slice tag types -# xxx this should move to PLC/Accessors - -# vref is now defined in an accessor -# initscript is now defined in an accessor - -# Setup default slice tag types -slicetag_types = \ -[ - # Slice type (only vserver is supported) - {'tagname': "type", - 'description': "Type of slice (e.g. 
vserver)", - 'category' : 'slice/general', - 'roles': ['admin','pi']}, - - # System slice - {'tagname': "system", - 'description': "Is a default system slice (1) or not (0 or unset)", - 'category' : 'slice/general'}, - - # Slice enabled (1) or suspended (0) - {'tagname': "enabled", - 'description': "Slice enabled (1 or unset) or suspended (0)", - 'category' : 'slice/general'}, - - # IP Addresses for a Slice - {'tagname': "ip_addresses", - 'description': "Add an ip address to a slice/sliver.", - 'category' : 'slice/rspec'}, - {'tagname': "isolate_loopback", - 'description': "Create an isolated loopback interface within the vserver rather than sharing with all vservers.", - 'category' : 'slice/rspec'}, - - # CPU share - {'tagname': "cpu_pct", - 'description': "Reserved CPU percent", - 'category' : 'slice/rspec'}, - {'tagname': "cpu_share", - 'description': "Number of CPU shares", - 'category' : 'slice/rspec'}, - {'tagname': "cpu_cores", - 'description': "Number of CPU cores", - 'category': 'slice/rspec'}, - {'tagname': "cpu_freezable", - 'description': "Slice processes should be frozen if cpu_cores is 0", - 'category': 'slice/rspec'}, - - # Bandwidth limits - {'tagname': "net_min_rate", - 'description': "Minimum bandwidth (kbps)", - 'category' : 'slice/rspec'}, - {'tagname': "net_max_rate", - 'description': "Maximum bandwidth (kbps)", - 'category' : 'slice/rspec'}, - {'tagname': "net_i2_min_rate", - 'description': "Minimum bandwidth over I2 routes (kbps)", - 'category' : 'slice/rspec'}, - {'tagname': "net_i2_max_rate", - 'description': "Maximum bandwidth over I2 routes (kbps)", - 'category' : 'slice/rspec'}, - {'tagname': "net_max_kbyte", - 'description': "Maximum daily network Tx KByte limit.", - 'category' : 'slice/rspec'}, - {'tagname': "net_thresh_kbyte", - 'description': "KByte limit before warning and throttling.", - 'category' : 'slice/rspec'}, - {'tagname': "net_i2_max_kbyte", - 'description': "Maximum daily network Tx KByte limit to I2 hosts.", - 'category' 
: 'slice/rspec'}, - {'tagname': "net_i2_thresh_kbyte", - 'description': "KByte limit to I2 hosts before warning and throttling.", - 'category' : 'slice/rspec'}, - {'tagname': "net_share", - 'description': "Number of bandwidth shares", - 'category' : 'slice/rspec'}, - {'tagname': "net_i2_share", - 'description': "Number of bandwidth shares over I2 routes", - 'category' : 'slice/rspec'}, - - # Disk quota - {'tagname': "disk_max", - 'description': "Disk quota (1k disk blocks)", - 'category' : 'slice/rspec'}, - - # deprecated in nov. 2010 - # Proper operations - #{'tagname': "proper_op", - # 'description': "Proper operation (e.g. bind_socket)", - # 'category' : 'slice/rspec'}, - - # VServer capabilities - {'tagname': "capabilities", - 'description': "VServer bcapabilities (separate by commas)", - 'category' : 'slice/rspec'}, - - # Vsys - {'tagname': "vsys", - 'description': "Bind vsys script fd's to a slice's /vsys directory.", - 'category' : 'slice/rspec'}, - {'tagname': "vsys_vnet", - 'description': """Specify the IP range that can be used in a given slice -for virtual devices involved in topologies, e.g. 192.168.100.0/24""", - 'category': 'slice/rspec'}, - - # CoDemux - {'tagname': "codemux", - 'description': "Demux HTTP between slices using localhost ports. 
Value in the form 'host, localhost port'.", - 'category' : 'slice/rspec'}, - - # Delegation - {'tagname': "delegations", - 'description': "Coma seperated list of slices to give delegation authority to.", - 'category' : 'slice/rspec', - 'roles' : ['admin','pi','user']}, - - # Capability to give a sliver access to unused raw disk - {'tagname': "rawdisk", - 'description': "map unused raw disk devices into the slice", - 'category' : 'slice/access', # we should get rid of this category thing - 'roles': ['admin','pi']}, - - { 'tagname' : 'exempt_slice_until', - 'description' : 'Exclude this slice from MyOps until given date (YYYYMMDD)', - 'category' : 'slice/myops'}, - - # DistributedRateLimiting slice - {'tagname': "drl", - 'description': "Is a default Distributed Rate Limiting slice (1) or not (0 or unset)", - 'category' : 'slice/general'}, - -] - -import resource -# add in the platform supported rlimits to the default_attribute_types -for entry in resource.__dict__.keys() + ["VLIMIT_OPENFD"]: - if entry.find("LIMIT_")==1: - rlim = entry[len("RLIMIT_"):] - rlim = rlim.lower() - for ty in ("min","soft","hard"): - attribute = { - 'tagname': "%s_%s"%(rlim,ty), - 'description': "Per sliver RLIMIT %s_%s."%(rlim,ty), - 'category': 'slice/limit', - } - slicetag_types.append(attribute) - -for slicetag_type in slicetag_types: - SetTagType(slicetag_type) diff --git a/db-config.d/020-boot_states b/db-config.d/020-boot_states deleted file mode 100644 index f94a8c5..0000000 --- a/db-config.d/020-boot_states +++ /dev/null @@ -1,21 +0,0 @@ -# -*-python-*- -#################### slice tag types -default_boot_states = [ - 'boot', - 'failboot', - 'safeboot', - 'install', - 'reinstall', - 'disabled', -] -current_boot_states = GetBootStates() -for state in default_boot_states: - if state not in current_boot_states: - AddBootState(state) - -# TODO: Delete old boot states. -if False:# NOTE: Only set to true if all federating peers have the new default boot states above. 
- for state in current_boot_states: - if state not in default_boot_states: - DeleteBootState(state) - diff --git a/db-config.d/050-pcu_types b/db-config.d/050-pcu_types deleted file mode 100644 index 8db5bb3..0000000 --- a/db-config.d/050-pcu_types +++ /dev/null @@ -1,62 +0,0 @@ -# -*-python-*- -#################### PCUs -### Setup Initial PCU information - -pcu_types = [ - {'model': 'HPiLO', - 'name': 'HP iLO v1 or v2 (Integrated Lights-Out)', }, - - {'model': 'IntelAMT', - 'name': 'Intel AMT v2.5 or v3.0 (Active Management Technology)', }, - - {'model': 'DRAC', - 'name': 'DRAC - Dell Remote Access Control (not Modular Chassis (MC))', }, - - {'model': 'OpenIPMI', - 'name': 'OpenIPMI - Intelligent Platform Management Interface', }, - - {'model': 'APCControl12p3', - 'name': 'APC AP79xx or Masterswitch (sequence 1-2-port-3)', }, - {'model': 'APCControl1p4', - 'name': 'APC AP79xx or Masterswitch (sequence 1-port-4)', }, - {'model': 'APCControl121p3', - 'name': 'APC AP79xx or Masterswitch (sequence 1-2-1-port-3)', }, - {'model': 'APCControl121p1', - 'name': 'APC AP79xx or Masterswitch (sequence 1-2-1-port-1)', }, - {'model': 'APCControl13p13', - 'name': 'APC AP79xx or Masterswitch (sequence 1-3-port-1-3)', }, - - {'model': 'BayTechRPC3NC', - 'name': 'BayTech with prompt RPC3-NC>', }, - {'model': 'BayTechRPC16', - 'name': 'BayTech with prompt RPC-16>', }, - {'model': 'BayTech', - 'name': 'BayTech with prompt DS-RPC>', }, - {'model': 'BayTechCtrlC', - 'name': 'BayTech Ctrl-C, 5, then with prompt DS-RPC>', }, - {'model': 'BayTechCtrlCUnibe', - 'name': 'BayTech Ctrl-C, 3, then with prompt DS-RPC>', }, - - {'model': 'BlackBoxPSMaverick', - 'name': 'BlackBoxPSMaverick Web based controller'}, - - {'model': 'IPAL', - 'name': 'IPAL - Dataprobe IP-41x & IP-81x', }, - {'model': 'ePowerSwitchNew', - 'name': 'ePowerSwitch Newer Models 1/4/8x', }, - {'model': 'ePowerSwitchOld', - 'name': 'ePowerSwitch Older Models 1/4/8x', }, - - {'model': 'PM211MIP', - 'name': 'Infratec PM221-MIP', 
}, - - {'model': 'WTIIPS4', - 'name': 'Western Telematic (WTI IPS-4)', }, - - {'model': 'ManualPCU', - 'name': 'Manual Administrator Operation (choose if model unknown)', }, - ] - -for pcu_type in pcu_types: - SetPCUType(pcu_type) - diff --git a/db-config.d/060-messages b/db-config.d/060-messages deleted file mode 100644 index bee865e..0000000 --- a/db-config.d/060-messages +++ /dev/null @@ -1,291 +0,0 @@ -# -*-python-*- -#################### body for messages - -installfailed = """Once the node meets these requirements, please reinitiate the install -by visiting: - -https://%(PLC_WWW_HOST)s:%(PLC_WWW_SSL_PORT)d/db/nodes/?id=%(node_id)d - -Update the BootState to 'Reinstall', then reboot the node. - -If you have already performed this step and are still receiving this -message, please reply so that we may investigate the problem. -""" - -# Load default message templates -message_templates = [ - {'message_id': 'Verify account', - 'subject': "Verify account registration", - 'template': """ -Please verify that you registered for a %(PLC_NAME)s account with the -username %(email)s by visiting: - -https://%(PLC_WWW_HOST)s:%(PLC_WWW_SSL_PORT)d/db/persons/register.php?id=%(person_id)d&key=%(verification_key)s - -You must wait for this account to be approved before you can begin using it, please be patient. - -If you did not register for a %(PLC_NAME)s account, please ignore this -message, or contact %(PLC_NAME)s Support <%(PLC_MAIL_SUPPORT_ADDRESS)s>. -""" - }, - - {'message_id': 'New PI account', - 'subject': "New PI account registration from %(first_name)s %(last_name)s <%(email)s> at %(site_name)s", - 'template': """ -%(first_name)s %(last_name)s <%(email)s> has signed up for a new -%(PLC_NAME)s account at %(site_name)s and has requested a PI role. PIs -are responsible for enabling user accounts, creating slices, and -ensuring that all users abide by the %(PLC_NAME)s Acceptable Use -Policy. - -Only %(PLC_NAME)s administrators may enable new PI accounts. 
If you -are a PI at %(site_name)s, please respond and indicate whether this -registration is acceptable. - -To view the request, visit: - -https://%(PLC_WWW_HOST)s:%(PLC_WWW_SSL_PORT)d/db/persons/index.php?id=%(person_id)d -""" - }, - - {'message_id': 'New account', - 'subject': "New account registration from %(first_name)s %(last_name)s <%(email)s> at %(site_name)s", - 'template': """ -%(first_name)s %(last_name)s <%(email)s> has signed up for a new -%(PLC_NAME)s account at %(site_name)s and has requested the following -roles: %(roles)s. - -To deny the request or enable the account, visit: - -https://%(PLC_WWW_HOST)s:%(PLC_WWW_SSL_PORT)d/db/persons/index.php?id=%(person_id)d -""" - }, - - {'message_id': 'Password reset requested', - 'subject': "Password reset requested", - 'template': """ -Someone has requested that the password of your %(PLC_NAME)s account -%(email)s be reset. If this person was you, you may continue with the -reset by visiting: - -https://%(PLC_WWW_HOST)s:%(PLC_WWW_SSL_PORT)d/db/persons/reset_password.php?id=%(person_id)d&key=%(verification_key)s - -If you did not request that your password be reset, please contact -%(PLC_NAME)s Support <%(PLC_MAIL_SUPPORT_ADDRESS)s>. Do not quote or -otherwise include any of this text in any correspondence. -""" - }, - - {'message_id': 'Password reset', - 'subject': "Password reset", - 'template': """ -The password of your %(PLC_NAME)s account %(email)s has been -temporarily reset to: - -%(password)s - -Please change it at as soon as possible by visiting: - -https://%(PLC_WWW_HOST)s:%(PLC_WWW_SSL_PORT)d/db/persons/index.php?id=%(person_id)d - -If you did not request that your password be reset, please contact -%(PLC_NAME)s Support <%(PLC_MAIL_SUPPORT_ADDRESS)s>. Do not quote or -otherwise include any of this text in any correspondence. 
-""" - }, - - # Boot Manager messages - {'message_id': "installfinished", - 'subject': "%(hostname)s completed installation", - 'template': """ -%(hostname)s just completed installation. - -The node should be usable in a couple of minutes if installation was -successful. -""" - }, - - {'message_id': "insufficientdisk", - 'subject': "%(hostname)s does not have sufficient disk space", - 'template': """ -%(hostname)s failed to boot because it does not have sufficent disk -space, or because its disk controller was not recognized. - -Please replace the current disk or disk controller or install -additional disks to meet the current hardware requirements. -""" + installfailed - }, - - {'message_id': "insufficientmemory", - 'subject': "%(hostname)s does not have sufficient memory", - 'template': """ -%(hostname)s failed to boot because it does not have sufficent -memory. - -Please install additional memory to meet the current hardware -requirements. -""" + installfailed - }, - - {'message_id': "authfail", - 'subject': "%(hostname)s failed to authenticate", - 'template': -""" -%(hostname)s failed to authenticate for the following reason: - -%(fault)s - -The most common reason for authentication failure is that the -authentication key stored in the node configuration file, does not -match the key on record. - -There are two possible steps to resolve the problem. - -1. If you have used an All-in-one BootCD that includes the plnode.txt file, - then please check your machine for any old boot media, either in the - floppy drive, or on a USB stick. It is likely that an old configuration - is being used instead of the new configuration stored on the BootCD. -Or, -2. 
If you are using Generic BootCD image, then regenerate the node - configuration file by visiting: - - https://%(PLC_WWW_HOST)s:%(PLC_WWW_SSL_PORT)d/db/nodes/?id=%(node_id)d - - Under 'Download', follow the 'Download plnode.txt file for %(hostname)s' - option, and save the downloaded file as plnode.txt on either a floppy - disk or a USB flash drive. Be sure the 'Boot State' is set to 'Boot', - and, then reboot the node. - -If you have already performed this step and are still receiving this -message, please reply so that we can help investigate the problem. -""" - }, - - {'message_id': "notinstalled", - 'subject': "%(hostname)s is not installed", - 'template': -""" -%(hostname)s failed to boot because it has either never been -installed, or the installation is corrupt. - -Please check if the hard drive has failed, and replace it if so. After -doing so, visit: - -https://%(PLC_WWW_HOST)s:%(PLC_WWW_SSL_PORT)d/db/nodes/?id=%(node_id)d - -Change the 'Boot State' to 'Reinstall', and then reboot the node. - -If you have already performed this step and are still receiving this -message, please reply so that we may investigate the problem. -""" - }, - - {'message_id': "missingkernel", - 'subject': "%(hostname)s is missing its production kernel", - 'template': -""" -%(hostname)s failed to boot because the filesystem is missing its production -kernel. - -No action is needed from you at this time; this message is merely -informational. - -https://%(PLC_WWW_HOST)s:%(PLC_WWW_SSL_PORT)d/db/nodes/?id=%(node_id)d - -We will investigate the problem shortly. -""" - }, - - {'message_id': "filesystemcorrupted", - 'subject': "%(hostname)s may have corrupt filesystem", - 'template': -""" -%(hostname)s failed to boot because the filesystem appears to be corrupted. - -No action is needed from you at this time; this message is merely -informational. - -https://%(PLC_WWW_HOST)s:%(PLC_WWW_SSL_PORT)d/db/nodes/?id=%(node_id)d - -We will investigate the problem shortly. 
-""" - }, - - {'message_id': "mountfailed", - 'subject': "%(hostname)s could not mount filesystem", - 'template': -""" -%(hostname)s failed to boot because the boot scripts could not mount the -filesystem. - -This could be for a number of reasons. No action is needed from you at this -time; this message is merely informational. - -https://%(PLC_WWW_HOST)s:%(PLC_WWW_SSL_PORT)d/db/nodes/?id=%(node_id)d - -We will investigate the problem shortly. -""" - }, - - {'message_id': "hostnamenotresolve", - 'subject': "%(hostname)s does not resolve", - 'template': -""" -%(hostname)s failed to boot because its hostname does not resolve, or -does resolve but does not match its configured IP address. - -Please check the network settings for the node, especially its -hostname, IP address, and DNS servers, by visiting: - -https://%(PLC_WWW_HOST)s:%(PLC_WWW_SSL_PORT)d/db/nodes/?id=%(node_id)d - -Correct any errors, and change the 'Boot State' to 'Reinstall', and then -reboot the node. - -If you have already performed this step and are still receiving this -message, please reply so that we may investigate the problem. -""" - }, - - # XXX N.B. I don't think these are necessary, since there's no - # way that the Boot Manager would even be able to contact the - # API to send these messages. - - {'message_id': "noconfig", - 'subject': "%(hostname)s does not have a configuration file", - 'template': """ -%(hostname)s failed to boot because it could not find a PlanetLab -configuration file. To create this file, visit: - -https://%(PLC_WWW_HOST)s:%(PLC_WWW_SSL_PORT)d/db/nodes/?id=%(node_id)d - -Click the Configuration File link, and save the downloaded file as -plnode.txt on either a floppy disk or a USB flash drive. Change the -'Boot State' to 'Reinstall', and then reboot the node. - -If you have already performed this step and are still receiving this -message, please reply so that we may investigate the problem. 
-""" - }, - - {'message_id': "nodetectednetwork", - 'subject': "%(hostname)s has unsupported network hardware", - 'template': -""" - -%(hostname)s failed to boot because it has network hardware that is -unsupported by the current production kernel. If it has booted -successfully in the past, please try re-installing it by visiting: - -https://%(PLC_WWW_HOST)s:%(PLC_WWW_SSL_PORT)d/db/nodes/?id=%(node_id)d - -Change the 'Boot State' to 'Reinstall', and then reboot the node. - -If you have already performed this step and are still receiving this -message, please reply so that we may investigate the problem. -""" - }, -] - -for message in message_templates: - SetMessage(message) diff --git a/db-config.d/099-hrns b/db-config.d/099-hrns deleted file mode 100644 index 879c9d0..0000000 --- a/db-config.d/099-hrns +++ /dev/null @@ -1,6 +0,0 @@ -# -*-python-*- -#################### -# quick and dirty, make sure all hrns are set on local nodes -# could/should get trashed somedy - -for node in GetNodes({'peer_id':None}): UpdateNode(node['node_id'],{'hostname':node['hostname']}) diff --git a/migrations/100-up-major-to-5.sql b/migrations/100-up-major-to-5.sql deleted file mode 100644 index 6c24f25..0000000 --- a/migrations/100-up-major-to-5.sql +++ /dev/null @@ -1,9 +0,0 @@ --- myplc v5.0 starts with (5,100) --- the expected former values would be (4,11) --- --- if you somehow start from a 4.3 not entirely up-dated to rc17, --- then manually run --- http://git.onelab.eu/?p=plcapi.git;a=blob;f=migrations/011-up-site-and-person-tags.sql;hb=refs/heads/4.3 --- -UPDATE plc_db_version SET version = 5; -UPDATE plc_db_version SET subversion = 100; diff --git a/migrations/101-down-leases.sql b/migrations/101-down-leases.sql deleted file mode 100644 index f72bbb0..0000000 --- a/migrations/101-down-leases.sql +++ /dev/null @@ -1,15 +0,0 @@ --- revert cleanup on node_types -INSERT INTO node_types VALUES ('dummynet'); - -UPDATE nodes SET node_type='regular' WHERE node_type='reservable'; 
-DELETE FROM node_types WHERE node_type='reservable'; - --- drop new tables -DROP VIEW view_leases; -DROP VIEW view_all_leases; -DROP TABLE leases; - -DROP FUNCTION IF EXISTS overlapping_trigger(); - --------------------------------------------------- -UPDATE plc_db_version SET subversion = 100; diff --git a/migrations/101-up-leases.sql b/migrations/101-up-leases.sql deleted file mode 100644 index 0c9b538..0000000 --- a/migrations/101-up-leases.sql +++ /dev/null @@ -1,80 +0,0 @@ --- we're using the 'lease' nodetype to model reservable nodes -INSERT INTO node_types VALUES ('reservable'); --- also the dummynet node_type is obsolete -DELETE FROM node_types WHERE node_type='dummynet'; - -SET TIMEZONE TO 'UTC'; - -CREATE TABLE leases ( - lease_id serial PRIMARY KEY, -- id - t_from timestamp with time zone NOT NULL, -- from - t_until timestamp with time zone NOT NULL, -- until - node_id integer REFERENCES nodes NOT NULL, -- subject node - slice_id integer REFERENCES slices, -- slice owning the node --- xxx for testing --- CONSTRAINT future CHECK (t_from > CURRENT_TIMESTAMP), - CONSTRAINT start_before_end CHECK (t_until > t_from) -) WITH OIDS; - --- --- hook to check for overlapping time slots on a given node_id --- xxx might use the builtin OVERLAPS feature --- http://www.postgresql.org/docs/8.3/interactive/functions-datetime.html --- -CREATE language plpgsql; -CREATE FUNCTION overlapping_trigger() RETURNS trigger AS $overlapping_trigger$ -BEGIN - PERFORM lease_id FROM leases WHERE - -- consider only leases on the same node - NEW.node_id = node_id - -- consider only non expired leases - AND t_until > CURRENT_TIMESTAMP - -- useful for updates - AND NEW.lease_id <> lease_id - -- new start date is in range - AND ( (NEW.t_from >= t_from AND NEW.t_from < t_until) - -- new end date is in range - OR (NEW.t_until > t_from AND NEW.t_until <= t_until) - -- complete overlap: new from before from, new until after until - OR (NEW.t_from <= t_from AND NEW.t_until >= t_until)); - IF 
FOUND THEN - RAISE EXCEPTION 'overlapping error: node % - slice %, % -> %', NEW.node_id, NEW.slice_id, NEW.t_from, NEW.t_until; - END IF; - RETURN NEW; -END; -$overlapping_trigger$ LANGUAGE plpgsql; - -CREATE - TRIGGER overlapping_trigger BEFORE INSERT OR UPDATE - ON leases FOR EACH ROW EXECUTE PROCEDURE overlapping_trigger(); - - --- this is to let the API a chance to check for leases attached --- to a node that is not 'reservable' -CREATE OR REPLACE VIEW view_all_leases AS -SELECT -leases.lease_id, -CAST(date_part('epoch', leases.t_from) AS bigint) AS t_from, -CAST(date_part('epoch', leases.t_until) AS bigint) AS t_until, --- dbg -leases.t_from as s_from, -leases.t_until as s_until, -leases.node_id, -leases.slice_id, -nodes.hostname, -nodes.node_type, -slices.name, -slices.site_id, -CAST( date_part ('epoch',leases.t_until-leases.t_from) AS bigint) AS duration, -leases.t_until < CURRENT_TIMESTAMP as expired -FROM slices INNER JOIN leases USING (slice_id) -JOIN nodes USING (node_id); - --- only the relevant leases -CREATE OR REPLACE VIEW view_leases AS -SELECT * FROM view_all_leases -WHERE node_type = 'reservable'; - - --------------------------------------------------- -UPDATE plc_db_version SET subversion = 101; diff --git a/migrations/102-down-isvalid.sql b/migrations/102-down-isvalid.sql deleted file mode 100644 index e147fd1..0000000 --- a/migrations/102-down-isvalid.sql +++ /dev/null @@ -1,88 +0,0 @@ -ALTER TABLE nodes DROP COLUMN last_download; -ALTER TABLE nodes DROP COLUMN last_pcu_reboot; -ALTER TABLE nodes DROP COLUMN last_pcu_confirmation; - -ALTER TABLE pcus DROP COLUMN last_updated timestamp without time zone; - -ALTER TABLE interfaces DROP COLUMN last_updated timestamp without time zone; - -DROP VIEW view_nodes; -CREATE OR REPLACE VIEW view_nodes AS -SELECT -nodes.node_id, -nodes.node_type, -nodes.hostname, -nodes.site_id, -nodes.boot_state, -nodes.run_level, -nodes.deleted, -nodes.model, -nodes.boot_nonce, -nodes.version, -nodes.verified, 
-nodes.ssh_rsa_key, -nodes.key, -CAST(date_part('epoch', nodes.date_created) AS bigint) AS date_created, -CAST(date_part('epoch', nodes.last_updated) AS bigint) AS last_updated, -CAST(date_part('epoch', nodes.last_contact) AS bigint) AS last_contact, -peer_node.peer_id, -peer_node.peer_node_id, -COALESCE((SELECT interface_ids FROM node_interfaces - WHERE node_interfaces.node_id = nodes.node_id), '{}') -AS interface_ids, -COALESCE((SELECT nodegroup_ids FROM node_nodegroups - WHERE node_nodegroups.node_id = nodes.node_id), '{}') -AS nodegroup_ids, -COALESCE((SELECT slice_ids FROM node_slices - WHERE node_slices.node_id = nodes.node_id), '{}') -AS slice_ids, -COALESCE((SELECT slice_ids_whitelist FROM node_slices_whitelist - WHERE node_slices_whitelist.node_id = nodes.node_id), '{}') -AS slice_ids_whitelist, -COALESCE((SELECT pcu_ids FROM node_pcus - WHERE node_pcus.node_id = nodes.node_id), '{}') -AS pcu_ids, -COALESCE((SELECT ports FROM node_pcus - WHERE node_pcus.node_id = nodes.node_id), '{}') -AS ports, -COALESCE((SELECT conf_file_ids FROM node_conf_files - WHERE node_conf_files.node_id = nodes.node_id), '{}') -AS conf_file_ids, -COALESCE((SELECT node_tag_ids FROM node_tags - WHERE node_tags.node_id = nodes.node_id), '{}') -AS node_tag_ids, -node_session.session_id AS session -FROM nodes -LEFT JOIN peer_node USING (node_id) -LEFT JOIN node_session USING (node_id); - -DROP VIEW view_pcus; -CREATE OR REPLACE VIEW view_pcus AS -SELECT -pcus.*, -COALESCE((SELECT node_ids FROM pcu_nodes WHERE pcu_nodes.pcu_id = pcus.pcu_id), '{}') AS node_ids, -COALESCE((SELECT ports FROM pcu_nodes WHERE pcu_nodes.pcu_id = pcus.pcu_id), '{}') AS ports -FROM pcus; - - -DROP VIEW view_interfaces; -CREATE OR REPLACE VIEW view_interfaces AS -SELECT -interfaces.interface_id, -interfaces.node_id, -interfaces.is_primary, -interfaces.type, -interfaces.method, -interfaces.ip, -interfaces.mac, -interfaces.gateway, -interfaces.network, -interfaces.broadcast, -interfaces.netmask, -interfaces.dns1, 
-interfaces.dns2, -interfaces.bwlimit, -interfaces.hostname, -COALESCE((SELECT interface_tag_ids FROM interface_tags WHERE interface_tags.interface_id = interfaces.interface_id), '{}') AS interface_tag_ids -FROM interfaces; - diff --git a/migrations/102-up-isvalid.sql b/migrations/102-up-isvalid.sql deleted file mode 100644 index c1bd5c2..0000000 --- a/migrations/102-up-isvalid.sql +++ /dev/null @@ -1,106 +0,0 @@ -ALTER TABLE nodes ADD COLUMN last_boot timestamp without time zone; -ALTER TABLE nodes ADD COLUMN last_download timestamp without time zone; -ALTER TABLE nodes ADD COLUMN last_pcu_reboot timestamp without time zone; -ALTER TABLE nodes ADD COLUMN last_pcu_confirmation timestamp without time zone; - -ALTER TABLE pcus ADD COLUMN last_updated timestamp without time zone; - -ALTER TABLE interfaces ADD COLUMN last_updated timestamp without time zone; - -DROP VIEW view_nodes; -CREATE OR REPLACE VIEW view_nodes AS -SELECT -nodes.node_id, -nodes.node_type, -nodes.hostname, -nodes.site_id, -nodes.boot_state, -nodes.run_level, -nodes.deleted, -nodes.model, -nodes.boot_nonce, -nodes.version, -nodes.verified, -nodes.ssh_rsa_key, -nodes.key, -CAST(date_part('epoch', nodes.date_created) AS bigint) AS date_created, -CAST(date_part('epoch', nodes.last_updated) AS bigint) AS last_updated, -CAST(date_part('epoch', nodes.last_contact) AS bigint) AS last_contact, -CAST(date_part('epoch', nodes.last_boot) AS bigint) AS last_boot, -CAST(date_part('epoch', nodes.last_download) AS bigint) AS last_download, -CAST(date_part('epoch', nodes.last_pcu_reboot) AS bigint) AS last_pcu_reboot, -CAST(date_part('epoch', nodes.last_pcu_confirmation) AS bigint) AS last_pcu_confirmation, -peer_node.peer_id, -peer_node.peer_node_id, -COALESCE((SELECT interface_ids FROM node_interfaces - WHERE node_interfaces.node_id = nodes.node_id), '{}') -AS interface_ids, -COALESCE((SELECT nodegroup_ids FROM node_nodegroups - WHERE node_nodegroups.node_id = nodes.node_id), '{}') -AS nodegroup_ids, 
-COALESCE((SELECT slice_ids FROM node_slices - WHERE node_slices.node_id = nodes.node_id), '{}') -AS slice_ids, -COALESCE((SELECT slice_ids_whitelist FROM node_slices_whitelist - WHERE node_slices_whitelist.node_id = nodes.node_id), '{}') -AS slice_ids_whitelist, -COALESCE((SELECT pcu_ids FROM node_pcus - WHERE node_pcus.node_id = nodes.node_id), '{}') -AS pcu_ids, -COALESCE((SELECT ports FROM node_pcus - WHERE node_pcus.node_id = nodes.node_id), '{}') -AS ports, -COALESCE((SELECT conf_file_ids FROM node_conf_files - WHERE node_conf_files.node_id = nodes.node_id), '{}') -AS conf_file_ids, -COALESCE((SELECT node_tag_ids FROM node_tags - WHERE node_tags.node_id = nodes.node_id), '{}') -AS node_tag_ids, -node_session.session_id AS session -FROM nodes -LEFT JOIN peer_node USING (node_id) -LEFT JOIN node_session USING (node_id); - --------------------------------------------------------------------------------- -DROP VIEW view_pcus; -CREATE OR REPLACE VIEW view_pcus AS -SELECT -pcus.pcu_id, -pcus.site_id, -pcus.hostname, -pcus.ip, -pcus.protocol, -pcus.username, -pcus.password, -pcus.model, -pcus.notes, -CAST(date_part('epoch', pcus.last_updated) AS bigint) AS last_updated, -COALESCE((SELECT node_ids FROM pcu_nodes WHERE pcu_nodes.pcu_id = pcus.pcu_id), '{}') AS node_ids, -COALESCE((SELECT ports FROM pcu_nodes WHERE pcu_nodes.pcu_id = pcus.pcu_id), '{}') AS ports -FROM pcus; - - -DROP VIEW view_interfaces; -CREATE OR REPLACE VIEW view_interfaces AS -SELECT -interfaces.interface_id, -interfaces.node_id, -interfaces.is_primary, -interfaces.type, -interfaces.method, -interfaces.ip, -interfaces.mac, -interfaces.gateway, -interfaces.network, -interfaces.broadcast, -interfaces.netmask, -interfaces.dns1, -interfaces.dns2, -interfaces.bwlimit, -interfaces.hostname, -CAST(date_part('epoch', interfaces.last_updated) AS bigint) AS last_updated, -COALESCE((SELECT interface_tag_ids FROM interface_tags WHERE interface_tags.interface_id = interfaces.interface_id), '{}') AS 
interface_tag_ids -FROM interfaces; - - -UPDATE plc_db_version SET subversion = 102; diff --git a/migrations/103-down-extensions.sql b/migrations/103-down-extensions.sql deleted file mode 100644 index a0797df..0000000 --- a/migrations/103-down-extensions.sql +++ /dev/null @@ -1 +0,0 @@ -DROP TABLE plc_db_extensions; diff --git a/migrations/103-up-extensions.sql b/migrations/103-up-extensions.sql deleted file mode 100644 index c411049..0000000 --- a/migrations/103-up-extensions.sql +++ /dev/null @@ -1,6 +0,0 @@ -CREATE TABLE plc_db_extensions ( - name text NOT NULL PRIMARY KEY, - version integer NOT NULL -) WITH OIDS; - -UPDATE plc_db_version SET subversion = 103; diff --git a/migrations/104-down-noderole.sql b/migrations/104-down-noderole.sql deleted file mode 100644 index b8b4adb..0000000 --- a/migrations/104-down-noderole.sql +++ /dev/null @@ -1,28 +0,0 @@ --- recreate the min_role_id column -ALTER TABLE tag_types ADD COLUMN min_role_id integer REFERENCES roles; - --- compute the highest role available for each tag_type and store it as min_role_id -CREATE OR REPLACE VIEW tag_type_max_role_id AS -SELECT tag_type_id, max(role_id) from tag_type_role GROUP BY tag_type_id; - --- tag_types that have at least one role in the new model get the max -UPDATE tag_types -SET min_role_id = tag_type_max_role_id.max -FROM tag_type_max_role_id WHERE tag_type_max_role_id.tag_type_id = tag_types.tag_type_id; - --- the ones with no roles end up with min_role_id=10 -UPDATE tag_types -SET min_role_id=10 -WHERE min_role_id IS NULL; - -DELETE VIEW tag_type_max_role_id; - -DROP TABLE tag_type_role CASCADE; --- done by cascade ---DROP VIEW view_tag_types; ---DROP VIEW tag_type_roles; - -DELETE from roles WHERE name='node'; - --------------------- -UPDATE plc_db_version SET subversion = 103; diff --git a/migrations/104-up-noderole.sql b/migrations/104-up-noderole.sql deleted file mode 100644 index 95c0e93..0000000 --- a/migrations/104-up-noderole.sql +++ /dev/null @@ -1,124 +0,0 @@ --- 
changing the permission model on tags --- we replace the single 'min_role_id' field attached to tag_types --- with a set of roles - - --- create a separate table to keep the tag-type x role relationship -CREATE TABLE tag_type_role ( - tag_type_id integer REFERENCES tag_types NOT NULL, -- tag_type ID - role_id integer REFERENCES roles NOT NULL, -- role ID - PRIMARY KEY (tag_type_id, role_id) -); -CREATE INDEX tag_type_role_tag_type_id_idx ON tag_type_role (tag_type_id); -CREATE INDEX tag_type_role_role_id_idx ON tag_type_role (role_id); - --- fill this from the former min_role_id field in the tag_types table --- add all roles lower or equal to the min_role_id -INSERT INTO tag_type_role ("tag_type_id","role_id") SELECT tag_type_id,role_id FROM tag_types,roles where role_id<=min_role_id; - --- we can now drop the min_role_id column -ALTER TABLE tag_types DROP COLUMN min_role_id CASCADE; - --- create views to expose roles -CREATE OR REPLACE VIEW tag_type_roles AS -SELECT tag_type_id, -array_accum(role_id) AS role_ids, -array_accum(roles.name) AS roles -FROM tag_type_role -LEFT JOIN roles USING (role_id) -GROUP BY tag_type_id; - -CREATE OR REPLACE VIEW view_tag_types AS -SELECT -tag_types.tag_type_id, -tag_types.tagname, -tag_types.description, -tag_types.category, -COALESCE((SELECT role_ids FROM tag_type_roles WHERE tag_type_roles.tag_type_id = tag_types.tag_type_id), '{}') AS role_ids, -COALESCE((SELECT roles FROM tag_type_roles WHERE tag_type_roles.tag_type_id = tag_types.tag_type_id), '{}') AS roles -FROM tag_types; - - --- remove min_role_id from the object views -CREATE OR REPLACE VIEW view_person_tags AS -SELECT -person_tag.person_tag_id, -person_tag.person_id, -persons.email, -tag_types.tag_type_id, -tag_types.tagname, -tag_types.description, -tag_types.category, -person_tag.value -FROM person_tag -INNER JOIN tag_types USING (tag_type_id) -INNER JOIN persons USING (person_id); - -CREATE OR REPLACE VIEW view_site_tags AS -SELECT -site_tag.site_tag_id, 
-site_tag.site_id, -sites.login_base, -tag_types.tag_type_id, -tag_types.tagname, -tag_types.description, -tag_types.category, -site_tag.value -FROM site_tag -INNER JOIN tag_types USING (tag_type_id) -INNER JOIN sites USING (site_id); - -CREATE OR REPLACE VIEW view_interface_tags AS -SELECT -interface_tag.interface_tag_id, -interface_tag.interface_id, -interfaces.ip, -tag_types.tag_type_id, -tag_types.tagname, -tag_types.description, -tag_types.category, -interface_tag.value -FROM interface_tag -INNER JOIN tag_types USING (tag_type_id) -INNER JOIN interfaces USING (interface_id); - -CREATE OR REPLACE VIEW view_node_tags AS -SELECT -node_tag.node_tag_id, -node_tag.node_id, -nodes.hostname, -tag_types.tag_type_id, -tag_types.tagname, -tag_types.description, -tag_types.category, -node_tag.value -FROM node_tag -INNER JOIN tag_types USING (tag_type_id) -INNER JOIN nodes USING (node_id); - -CREATE OR REPLACE VIEW view_slice_tags AS -SELECT -slice_tag.slice_tag_id, -slice_tag.slice_id, -slice_tag.node_id, -slice_tag.nodegroup_id, -tag_types.tag_type_id, -tag_types.tagname, -tag_types.description, -tag_types.category, -slice_tag.value, -slices.name -FROM slice_tag -INNER JOIN tag_types USING (tag_type_id) -INNER JOIN slices USING (slice_id); - --- same for ilinks -CREATE OR REPLACE VIEW view_ilinks AS -SELECT * FROM tag_types -INNER JOIN ilink USING (tag_type_id); - --- use this to allow nodes to set slice tags -INSERT INTO roles (role_id, name) VALUES (50, 'node'); - --------------------- -UPDATE plc_db_version SET subversion = 104; diff --git a/migrations/105-down-timespent.sql b/migrations/105-down-timespent.sql deleted file mode 100644 index d2d1f2c..0000000 --- a/migrations/105-down-timespent.sql +++ /dev/null @@ -1,60 +0,0 @@ -ALTER TABLE nodes DROP COLUMN last_time_spent_online CASCADE; -ALTER TABLE nodes DROP COLUMN last_time_spent_offline CASCADE; - -DROP VIEW view_nodes; -CREATE OR REPLACE VIEW view_nodes AS -SELECT -nodes.node_id, -nodes.node_type, 
-nodes.hostname, -nodes.site_id, -nodes.boot_state, -nodes.run_level, -nodes.deleted, -nodes.model, -nodes.boot_nonce, -nodes.version, -nodes.verified, -nodes.ssh_rsa_key, -nodes.key, -CAST(date_part('epoch', nodes.date_created) AS bigint) AS date_created, -CAST(date_part('epoch', nodes.last_updated) AS bigint) AS last_updated, -CAST(date_part('epoch', nodes.last_contact) AS bigint) AS last_contact, -CAST(date_part('epoch', nodes.last_boot) AS bigint) AS last_boot, -CAST(date_part('epoch', nodes.last_download) AS bigint) AS last_download, -CAST(date_part('epoch', nodes.last_pcu_reboot) AS bigint) AS last_pcu_reboot, -CAST(date_part('epoch', nodes.last_pcu_confirmation) AS bigint) AS last_pcu_confirmation, -peer_node.peer_id, -peer_node.peer_node_id, -COALESCE((SELECT interface_ids FROM node_interfaces - WHERE node_interfaces.node_id = nodes.node_id), '{}') -AS interface_ids, -COALESCE((SELECT nodegroup_ids FROM node_nodegroups - WHERE node_nodegroups.node_id = nodes.node_id), '{}') -AS nodegroup_ids, -COALESCE((SELECT slice_ids FROM node_slices - WHERE node_slices.node_id = nodes.node_id), '{}') -AS slice_ids, -COALESCE((SELECT slice_ids_whitelist FROM node_slices_whitelist - WHERE node_slices_whitelist.node_id = nodes.node_id), '{}') -AS slice_ids_whitelist, -COALESCE((SELECT pcu_ids FROM node_pcus - WHERE node_pcus.node_id = nodes.node_id), '{}') -AS pcu_ids, -COALESCE((SELECT ports FROM node_pcus - WHERE node_pcus.node_id = nodes.node_id), '{}') -AS ports, -COALESCE((SELECT conf_file_ids FROM node_conf_files - WHERE node_conf_files.node_id = nodes.node_id), '{}') -AS conf_file_ids, -COALESCE((SELECT node_tag_ids FROM node_tags - WHERE node_tags.node_id = nodes.node_id), '{}') -AS node_tag_ids, -node_session.session_id AS session -FROM nodes -LEFT JOIN peer_node USING (node_id) -LEFT JOIN node_session USING (node_id); - --------------------------------------------------------------------------------- - -UPDATE plc_db_version SET subversion = 104; diff --git 
a/migrations/105-up-timespent.sql b/migrations/105-up-timespent.sql deleted file mode 100644 index 2dbdfb9..0000000 --- a/migrations/105-up-timespent.sql +++ /dev/null @@ -1,62 +0,0 @@ -ALTER TABLE nodes ADD COLUMN last_time_spent_online integer; -ALTER TABLE nodes ADD COLUMN last_time_spent_offline integer; - -DROP VIEW view_nodes; -CREATE OR REPLACE VIEW view_nodes AS -SELECT -nodes.node_id, -nodes.node_type, -nodes.hostname, -nodes.site_id, -nodes.boot_state, -nodes.run_level, -nodes.deleted, -nodes.model, -nodes.boot_nonce, -nodes.version, -nodes.verified, -nodes.ssh_rsa_key, -nodes.key, -CAST(date_part('epoch', nodes.date_created) AS bigint) AS date_created, -CAST(date_part('epoch', nodes.last_updated) AS bigint) AS last_updated, -CAST(date_part('epoch', nodes.last_contact) AS bigint) AS last_contact, -CAST(date_part('epoch', nodes.last_boot) AS bigint) AS last_boot, -CAST(date_part('epoch', nodes.last_download) AS bigint) AS last_download, -CAST(date_part('epoch', nodes.last_pcu_reboot) AS bigint) AS last_pcu_reboot, -CAST(date_part('epoch', nodes.last_pcu_confirmation) AS bigint) AS last_pcu_confirmation, -nodes.last_time_spent_online, -nodes.last_time_spent_offline, -peer_node.peer_id, -peer_node.peer_node_id, -COALESCE((SELECT interface_ids FROM node_interfaces - WHERE node_interfaces.node_id = nodes.node_id), '{}') -AS interface_ids, -COALESCE((SELECT nodegroup_ids FROM node_nodegroups - WHERE node_nodegroups.node_id = nodes.node_id), '{}') -AS nodegroup_ids, -COALESCE((SELECT slice_ids FROM node_slices - WHERE node_slices.node_id = nodes.node_id), '{}') -AS slice_ids, -COALESCE((SELECT slice_ids_whitelist FROM node_slices_whitelist - WHERE node_slices_whitelist.node_id = nodes.node_id), '{}') -AS slice_ids_whitelist, -COALESCE((SELECT pcu_ids FROM node_pcus - WHERE node_pcus.node_id = nodes.node_id), '{}') -AS pcu_ids, -COALESCE((SELECT ports FROM node_pcus - WHERE node_pcus.node_id = nodes.node_id), '{}') -AS ports, -COALESCE((SELECT conf_file_ids FROM 
node_conf_files - WHERE node_conf_files.node_id = nodes.node_id), '{}') -AS conf_file_ids, -COALESCE((SELECT node_tag_ids FROM node_tags - WHERE node_tags.node_id = nodes.node_id), '{}') -AS node_tag_ids, -node_session.session_id AS session -FROM nodes -LEFT JOIN peer_node USING (node_id) -LEFT JOIN node_session USING (node_id); - --------------------------------------------------------------------------------- - -UPDATE plc_db_version SET subversion = 105; diff --git a/migrations/README.txt b/migrations/README.txt deleted file mode 100644 index 0d14ca7..0000000 --- a/migrations/README.txt +++ /dev/null @@ -1,13 +0,0 @@ -Store here migration scripts, named --up-.sql - handled as a sql script to be run against planetlab5, or --up-.sh - which is assumed to be a shell script and is run as is - -Another assumption is that - * nnn-up- script will set subversion number to - * nnn-down script will set subversion number to -1 - -=== -See the migration script in plc.d/db for how this is used -=== diff --git a/migrations/extract-views.py b/migrations/extract-views.py deleted file mode 100755 index 0daed51..0000000 --- a/migrations/extract-views.py +++ /dev/null @@ -1,57 +0,0 @@ -#!/usr/bin/env python - -import sys -import re - -class Schema: - - def __init__ (self,input,output=None): - self.input=input - self.output=output - - # left part is non-greedy - comment=re.compile("(.*?)--.*") - spaces=re.compile("^\s+(\S.*)") - view=re.compile("(?i)\s*create\s+(or\s+replace)?\s+view.*") - - def parse (self): - if self.output: - outfile = open(self.output,"w") - else: - outfile = sys.stdout - contents = file(self.input).read() - parts=contents.split(";") - for part in parts: - # normalize: remove comments, linebreaks, trailing spaces.. 
- normalized='' - lines=part.split('\n'); - out_lines=[] - for line in lines: - # remove comment - match=Schema.comment.match(line) - if match: - line=match.group(1) - out_lines.append(line) - # get them together - out_line = " ".join(out_lines) - # remove trailing spaces - match=Schema.spaces.match(out_line) - if match: - out_line=match.group(1) - match=Schema.view.match(out_line) - if match: - print >>outfile, out_line,';' - if outfile != sys.stdout: - outfile.close() - -if __name__ == '__main__': - if len(sys.argv) not in [2,3]: - print 'Usage:',sys.argv[0],'input [output]' - sys.exit(1) - input=sys.argv[1] - try: - output=sys.argv[2] - except: - output=None - Schema(input,output).parse() - diff --git a/planetlab5.sql b/planetlab5.sql deleted file mode 100644 index 95eb02b..0000000 --- a/planetlab5.sql +++ /dev/null @@ -1,1369 +0,0 @@ --- --- PlanetLab Central database schema --- Version 5, PostgreSQL --- --- Aaron Klingaman --- Reid Moran --- Mark Huang --- Tony Mack --- Thierry Parmentelat --- --- Copyright (C) 2006 The Trustees of Princeton University --- --- NOTE: this file was first created for version 4.3, the filename might be confusing --- - -SET client_encoding = 'UNICODE'; - --------------------------------------------------------------------------------- --- Version --------------------------------------------------------------------------------- - --- Database version -CREATE TABLE plc_db_version ( - version integer NOT NULL, - subversion integer NOT NULL DEFAULT 0 -) WITH OIDS; - --- the migration scripts do not use the major 'version' number --- so 5.0 sets subversion at 100 --- in case your database misses the site and persons tags feature, --- you might wish to first upgrade to 4.3-rc16 before moving to some 5.0 --- or run the up script here --- http://svn.planet-lab.org/svn/PLCAPI/branches/4.3/migrations/ - -INSERT INTO plc_db_version (version, subversion) VALUES (5, 100); - 
--------------------------------------------------------------------------------- --- Aggregates and store procedures --------------------------------------------------------------------------------- - --- Like MySQL GROUP_CONCAT(), this function aggregates values into a --- PostgreSQL array. -CREATE AGGREGATE array_accum ( - sfunc = array_append, - basetype = anyelement, - stype = anyarray, - initcond = '{}' -); - --------------------------------------------------------------------------------- --- Roles --------------------------------------------------------------------------------- - --- Valid account roles -CREATE TABLE roles ( - role_id integer PRIMARY KEY, -- Role identifier - name text UNIQUE NOT NULL -- Role symbolic name -) WITH OIDS; -INSERT INTO roles (role_id, name) VALUES (10, 'admin'); -INSERT INTO roles (role_id, name) VALUES (20, 'pi'); -INSERT INTO roles (role_id, name) VALUES (30, 'user'); -INSERT INTO roles (role_id, name) VALUES (40, 'tech'); - --------------------------------------------------------------------------------- --- The building block for attaching tags --------------------------------------------------------------------------------- -CREATE TABLE tag_types ( - - tag_type_id serial PRIMARY KEY, -- ID - tagname text UNIQUE NOT NULL, -- Tag Name - description text, -- Optional Description --- this is deprecated -- see migrations/104* --- starting with subversion 104, a tag type has a SET OF roles attached to it - min_role_id integer REFERENCES roles DEFAULT 10, -- set minimal role required - category text NOT NULL DEFAULT 'general' -- Free text for grouping tags together -) WITH OIDS; - --------------------------------------------------------------------------------- --- Accounts --------------------------------------------------------------------------------- - --- Accounts -CREATE TABLE persons ( - -- Mandatory - person_id serial PRIMARY KEY, -- Account identifier - email text NOT NULL, -- E-mail address - first_name text NOT NULL, 
-- First name - last_name text NOT NULL, -- Last name - deleted boolean NOT NULL DEFAULT false, -- Has been deleted - enabled boolean NOT NULL DEFAULT false, -- Has been disabled - - password text NOT NULL DEFAULT 'nopass', -- Password (md5crypted) - verification_key text, -- Reset password key - verification_expires timestamp without time zone, - - -- Optional - title text, -- Honorific - phone text, -- Telephone number - url text, -- Home page - bio text, -- Biography - - -- Timestamps - date_created timestamp without time zone NOT NULL DEFAULT CURRENT_TIMESTAMP, - last_updated timestamp without time zone NOT NULL DEFAULT CURRENT_TIMESTAMP -) WITH OIDS; -CREATE INDEX persons_email_idx ON persons (email); - --------------------------------------------------------------------------------- --- person tags --------------------------------------------------------------------------------- -CREATE TABLE person_tag ( - person_tag_id serial PRIMARY KEY, -- ID - person_id integer REFERENCES persons NOT NULL, -- person id - tag_type_id integer REFERENCES tag_types, -- tag type id - value text -- value attached -) WITH OIDS; - -CREATE OR REPLACE VIEW person_tags AS -SELECT person_id, -array_accum(person_tag_id) AS person_tag_ids -FROM person_tag -GROUP BY person_id; - -CREATE OR REPLACE VIEW view_person_tags AS -SELECT -person_tag.person_tag_id, -person_tag.person_id, -persons.email, -tag_types.tag_type_id, -tag_types.tagname, -tag_types.description, -tag_types.category, -tag_types.min_role_id, -person_tag.value -FROM person_tag -INNER JOIN tag_types USING (tag_type_id) -INNER JOIN persons USING (person_id); - --------------------------------------------------------------------------------- --- Sites --------------------------------------------------------------------------------- - --- Sites -CREATE TABLE sites ( - -- Mandatory - site_id serial PRIMARY KEY, -- Site identifier - login_base text NOT NULL, -- Site slice prefix - name text NOT NULL, -- Site name - 
abbreviated_name text NOT NULL, -- Site abbreviated name - enabled boolean NOT NULL Default true, -- Is this site enabled - deleted boolean NOT NULL DEFAULT false, -- Has been deleted - is_public boolean NOT NULL DEFAULT true, -- Shows up in public lists - max_slices integer NOT NULL DEFAULT 0, -- Maximum number of slices - max_slivers integer NOT NULL DEFAULT 1000, -- Maximum number of instantiated slivers - - -- Optional - latitude real, - longitude real, - url text, - ext_consortium_id integer, -- external consortium id - - -- Timestamps - date_created timestamp without time zone NOT NULL DEFAULT CURRENT_TIMESTAMP, - last_updated timestamp without time zone NOT NULL DEFAULT CURRENT_TIMESTAMP -) WITH OIDS; -CREATE INDEX sites_login_base_idx ON sites (login_base); - --- Account site membership -CREATE TABLE person_site ( - person_id integer REFERENCES persons NOT NULL, -- Account identifier - site_id integer REFERENCES sites NOT NULL, -- Site identifier - is_primary boolean NOT NULL DEFAULT false, -- Is the primary site for this account - PRIMARY KEY (person_id, site_id) -); -CREATE INDEX person_site_person_id_idx ON person_site (person_id); -CREATE INDEX person_site_site_id_idx ON person_site (site_id); - --- Ordered by primary site first -CREATE OR REPLACE VIEW person_site_ordered AS -SELECT person_id, site_id -FROM person_site -ORDER BY is_primary DESC; - --- Sites that each person is a member of -CREATE OR REPLACE VIEW person_sites AS -SELECT person_id, -array_accum(site_id) AS site_ids -FROM person_site_ordered -GROUP BY person_id; - --- Accounts at each site -CREATE OR REPLACE VIEW site_persons AS -SELECT site_id, -array_accum(person_id) AS person_ids -FROM person_site -GROUP BY site_id; - --------------------------------------------------------------------------------- --- site tags --------------------------------------------------------------------------------- - -CREATE TABLE site_tag ( - site_tag_id serial PRIMARY KEY, -- ID - site_id integer REFERENCES 
sites NOT NULL, -- site id - tag_type_id integer REFERENCES tag_types, -- tag type id - value text -- value attached -) WITH OIDS; - -CREATE OR REPLACE VIEW site_tags AS -SELECT site_id, -array_accum(site_tag_id) AS site_tag_ids -FROM site_tag -GROUP BY site_id; - -CREATE OR REPLACE VIEW view_site_tags AS -SELECT -site_tag.site_tag_id, -site_tag.site_id, -sites.login_base, -tag_types.tag_type_id, -tag_types.tagname, -tag_types.description, -tag_types.category, -tag_types.min_role_id, -site_tag.value -FROM site_tag -INNER JOIN tag_types USING (tag_type_id) -INNER JOIN sites USING (site_id); - --------------------------------------------------------------------------------- --- Mailing Addresses --------------------------------------------------------------------------------- - -CREATE TABLE address_types ( - address_type_id serial PRIMARY KEY, -- Address type identifier - name text UNIQUE NOT NULL, -- Address type - description text -- Address type description -) WITH OIDS; - --- Multi-rows insertion "insert .. 
values (row1), (row2)" is not supported by pgsql-8.1 --- 'Billing' Used to be 'Site' -INSERT INTO address_types (name) VALUES ('Personal'); -INSERT INTO address_types (name) VALUES ('Shipping'); -INSERT INTO address_types (name) VALUES ('Billing'); - --- Mailing addresses -CREATE TABLE addresses ( - address_id serial PRIMARY KEY, -- Address identifier - line1 text NOT NULL, -- Address line 1 - line2 text, -- Address line 2 - line3 text, -- Address line 3 - city text NOT NULL, -- City - state text NOT NULL, -- State or province - postalcode text NOT NULL, -- Postal code - country text NOT NULL -- Country -) WITH OIDS; - --- Each mailing address can be one of several types -CREATE TABLE address_address_type ( - address_id integer REFERENCES addresses NOT NULL, -- Address identifier - address_type_id integer REFERENCES address_types NOT NULL, -- Address type - PRIMARY KEY (address_id, address_type_id) -) WITH OIDS; -CREATE INDEX address_address_type_address_id_idx ON address_address_type (address_id); -CREATE INDEX address_address_type_address_type_id_idx ON address_address_type (address_type_id); - -CREATE OR REPLACE VIEW address_address_types AS -SELECT address_id, -array_accum(address_type_id) AS address_type_ids, -array_accum(address_types.name) AS address_types -FROM address_address_type -LEFT JOIN address_types USING (address_type_id) -GROUP BY address_id; - -CREATE TABLE site_address ( - site_id integer REFERENCES sites NOT NULL, -- Site identifier - address_id integer REFERENCES addresses NOT NULL, -- Address identifier - PRIMARY KEY (site_id, address_id) -) WITH OIDS; -CREATE INDEX site_address_site_id_idx ON site_address (site_id); -CREATE INDEX site_address_address_id_idx ON site_address (address_id); - -CREATE OR REPLACE VIEW site_addresses AS -SELECT site_id, -array_accum(address_id) AS address_ids -FROM site_address -GROUP BY site_id; - --------------------------------------------------------------------------------- --- Authentication Keys 
--------------------------------------------------------------------------------

-- Valid key types
CREATE TABLE key_types (
    key_type text PRIMARY KEY                           -- Key type
) WITH OIDS;
INSERT INTO key_types (key_type) VALUES ('ssh');

-- Authentication keys
CREATE TABLE keys (
    key_id serial PRIMARY KEY,                          -- Key identifier
    key_type text REFERENCES key_types NOT NULL,        -- Key type
    key text NOT NULL,                                  -- Key material
    is_blacklisted boolean NOT NULL DEFAULT false       -- Has been blacklisted
) WITH OIDS;

-- Account authentication key(s)
CREATE TABLE person_key (
    key_id integer REFERENCES keys PRIMARY KEY,         -- Key identifier
    person_id integer REFERENCES persons NOT NULL       -- Account identifier
) WITH OIDS;
CREATE INDEX person_key_person_id_idx ON person_key (person_id);

-- All keys attached to each account
CREATE OR REPLACE VIEW person_keys AS
SELECT person_id,
array_accum(key_id) AS key_ids
FROM person_key
GROUP BY person_id;

--------------------------------------------------------------------------------
-- Account roles
--------------------------------------------------------------------------------

CREATE TABLE person_role (
    person_id integer REFERENCES persons NOT NULL,      -- Account identifier
    role_id integer REFERENCES roles NOT NULL,          -- Role identifier
    PRIMARY KEY (person_id, role_id)
) WITH OIDS;
CREATE INDEX person_role_person_id_idx ON person_role (person_id);

-- Account roles
CREATE OR REPLACE VIEW person_roles AS
SELECT person_id,
array_accum(role_id) AS role_ids,
array_accum(roles.name) AS roles
FROM person_role
LEFT JOIN roles USING (role_id)
GROUP BY person_id;

--------------------------------------------------------------------------------
-- Nodes
--------------------------------------------------------------------------------

-- Valid node boot states (Nodes.py expects max length to be 20)
CREATE TABLE boot_states (
    boot_state text PRIMARY KEY
) WITH OIDS;
INSERT INTO boot_states (boot_state) VALUES ('boot');
INSERT INTO boot_states (boot_state) VALUES ('safeboot');
INSERT INTO boot_states (boot_state) VALUES ('reinstall');
INSERT INTO boot_states (boot_state) VALUES ('disabled');

CREATE TABLE run_levels (
    run_level text PRIMARY KEY
) WITH OIDS;
INSERT INTO run_levels (run_level) VALUES ('boot');
INSERT INTO run_levels (run_level) VALUES ('safeboot');
INSERT INTO run_levels (run_level) VALUES ('failboot');
INSERT INTO run_levels (run_level) VALUES ('reinstall');

-- Known node types (Nodes.py expects max length to be 20)
CREATE TABLE node_types (
    node_type text PRIMARY KEY
) WITH OIDS;
INSERT INTO node_types (node_type) VALUES ('regular');
-- old dummynet stuff, to be removed
INSERT INTO node_types (node_type) VALUES ('dummynet');

-- Nodes
CREATE TABLE nodes (
    -- Mandatory
    node_id serial PRIMARY KEY,                         -- Node identifier
    node_type text REFERENCES node_types                -- Node type
        DEFAULT 'regular',
    hostname text NOT NULL,                             -- Node hostname
    site_id integer REFERENCES sites NOT NULL,          -- At which site
    boot_state text REFERENCES boot_states NOT NULL     -- Node boot state
        DEFAULT 'reinstall',
    run_level text REFERENCES run_levels DEFAULT NULL,  -- Node run level
    deleted boolean NOT NULL DEFAULT false,             -- Is deleted

    -- Optional
    model text,                     -- Hardware make and model
    boot_nonce text,                -- Random nonce updated by Boot Manager
    version text,                   -- Boot CD version string updated by Boot Manager
    ssh_rsa_key text,               -- SSH host key updated by Boot Manager
    key text,                       -- Node key generated when boot file is downloaded
    verified boolean NOT NULL DEFAULT false, -- Whether or not the node & PCU are verified

    -- Timestamps
    date_created timestamp without time zone NOT NULL DEFAULT CURRENT_TIMESTAMP,
    last_updated timestamp without time zone NOT NULL DEFAULT CURRENT_TIMESTAMP,
    last_download timestamp without time zone,
    last_pcu_reboot timestamp without time zone,
    last_pcu_confirmation timestamp without time zone,
    last_contact timestamp without time zone
) WITH OIDS;
CREATE INDEX nodes_hostname_idx ON nodes (hostname);
CREATE INDEX nodes_site_id_idx ON nodes (site_id);

-- Nodes at each site
CREATE OR REPLACE VIEW site_nodes AS
SELECT site_id,
array_accum(node_id) AS node_ids
FROM nodes
WHERE deleted IS false
GROUP BY site_id;

--------------------------------------------------------------------------------
-- Node tags
--------------------------------------------------------------------------------

CREATE TABLE node_tag (
    node_tag_id serial PRIMARY KEY,                     -- ID
    node_id integer REFERENCES nodes NOT NULL,          -- Node id
    tag_type_id integer REFERENCES tag_types,           -- Tag type id
    value text                                          -- Value attached
) WITH OIDS;

--------------------------------------------------------------------------------
-- (Network) interfaces
--------------------------------------------------------------------------------

-- Valid network addressing schemes
CREATE TABLE network_types (
    type text PRIMARY KEY                               -- Addressing scheme
) WITH OIDS;
INSERT INTO network_types (type) VALUES ('ipv4');

-- Valid network configuration methods
CREATE TABLE network_methods (
    method text PRIMARY KEY                             -- Configuration method
) WITH OIDS;

INSERT INTO network_methods (method) VALUES ('static');
INSERT INTO network_methods (method) VALUES ('dhcp');
INSERT INTO network_methods (method) VALUES ('proxy');
INSERT INTO network_methods (method) VALUES ('tap');
INSERT INTO network_methods (method) VALUES ('ipmi');
INSERT INTO network_methods (method) VALUES ('unknown');

-- Network interfaces
CREATE TABLE interfaces (
    -- Mandatory
    interface_id serial PRIMARY KEY,                    -- Network interface identifier
    node_id integer REFERENCES nodes NOT NULL,          -- Which node
    is_primary boolean NOT NULL DEFAULT false,          -- Is the primary interface for this node
    type text REFERENCES network_types NOT NULL,        -- Addressing scheme
    method text REFERENCES network_methods NOT NULL,    -- Configuration method

    -- Optional, depending on type and method
    ip text,                        -- IP address
    mac text,                       -- MAC address
    gateway text,                   -- Default gateway address
    network text,                   -- Network address
    broadcast text,                 -- Network broadcast address
    netmask text,                   -- Network mask
    dns1 text,                      -- Primary DNS server
    dns2 text,                      -- Secondary DNS server
    bwlimit integer,                -- Bandwidth limit in bps
    hostname text,                  -- Hostname of this interface
    last_updated timestamp without time zone            -- When the interface was last updated
) WITH OIDS;
CREATE INDEX interfaces_node_id_idx ON interfaces (node_id);

-- Ordered by primary interface first
CREATE OR REPLACE VIEW interfaces_ordered AS
SELECT node_id, interface_id
FROM interfaces
ORDER BY is_primary DESC;

-- Network interfaces on each node
CREATE OR REPLACE VIEW node_interfaces AS
SELECT node_id,
array_accum(interface_id) AS interface_ids
FROM interfaces_ordered
GROUP BY node_id;

--------------------------------------------------------------------------------
-- Interface tags (formerly known as interface settings)
--------------------------------------------------------------------------------

CREATE TABLE interface_tag (
    interface_tag_id serial PRIMARY KEY,                -- Interface setting identifier
    interface_id integer REFERENCES interfaces NOT NULL,-- The interface this applies to
    tag_type_id integer REFERENCES tag_types NOT NULL,  -- The setting type
    value text                                          -- Value attached
) WITH OIDS;

CREATE OR REPLACE VIEW interface_tags AS
SELECT interface_id,
array_accum(interface_tag_id) AS interface_tag_ids
FROM interface_tag
GROUP BY interface_id;

CREATE OR REPLACE VIEW view_interface_tags AS
SELECT
interface_tag.interface_tag_id,
interface_tag.interface_id,
interfaces.ip,
tag_types.tag_type_id,
tag_types.tagname,
tag_types.description,
tag_types.category,
tag_types.min_role_id,
interface_tag.value
FROM interface_tag
INNER JOIN tag_types USING (tag_type_id)
INNER JOIN interfaces USING (interface_id);

CREATE OR REPLACE VIEW view_interfaces AS
SELECT
interfaces.interface_id,
interfaces.node_id,
interfaces.is_primary,
interfaces.type,
interfaces.method,
interfaces.ip,
interfaces.mac,
interfaces.gateway,
interfaces.network,
interfaces.broadcast,
interfaces.netmask,
interfaces.dns1,
interfaces.dns2,
interfaces.bwlimit,
interfaces.hostname,
CAST(date_part('epoch', interfaces.last_updated) AS bigint) AS last_updated,
COALESCE((SELECT interface_tag_ids FROM interface_tags WHERE interface_tags.interface_id = interfaces.interface_id), '{}') AS interface_tag_ids
FROM interfaces;

--------------------------------------------------------------------------------
-- ilinks : links between interfaces
--------------------------------------------------------------------------------
CREATE TABLE ilink (
    ilink_id serial PRIMARY KEY,                        -- Id
    tag_type_id integer REFERENCES tag_types,           -- Id of the tag type
    src_interface_id integer REFERENCES interfaces NOT NULL, -- Id of src interface
    dst_interface_id integer REFERENCES interfaces NOT NULL, -- Id of dst interface
    value text                                          -- Optional value on the link
) WITH OIDS;

CREATE OR REPLACE VIEW view_ilinks AS
SELECT * FROM tag_types
INNER JOIN ilink USING (tag_type_id);

-- xxx TODO : expose to view_interfaces the set of ilinks a given interface is part of
-- this is needed for properly deleting these ilinks when an interface gets deleted
-- as this is not done yet, it prevents DeleteInterface, thus DeleteNode, thus DeleteSite
-- from working correctly when an iLink is set

--------------------------------------------------------------------------------
-- Node groups
--------------------------------------------------------------------------------

-- Node groups
CREATE TABLE nodegroups (
    nodegroup_id serial PRIMARY KEY,                    -- Group identifier
    groupname text UNIQUE NOT NULL,                     -- Group name
    tag_type_id integer REFERENCES tag_types,           -- Node is in nodegroup if it has this tag defined
                                                        -- can be null, make management faster & easier
    value text                                          -- with this value attached
) WITH OIDS;

-- xxx - first rough implem. similar to former semantics but might be slow
CREATE OR REPLACE VIEW nodegroup_node AS
SELECT nodegroup_id, node_id
FROM tag_types
JOIN node_tag
USING (tag_type_id)
JOIN nodegroups
USING (tag_type_id, value);

CREATE OR REPLACE VIEW nodegroup_nodes AS
SELECT nodegroup_id,
array_accum(node_id) AS node_ids
FROM nodegroup_node
GROUP BY nodegroup_id;

-- Node groups that each node is a member of
CREATE OR REPLACE VIEW node_nodegroups AS
SELECT node_id,
array_accum(nodegroup_id) AS nodegroup_ids
FROM nodegroup_node
GROUP BY node_id;

--------------------------------------------------------------------------------
-- Node configuration files
--------------------------------------------------------------------------------

CREATE TABLE conf_files (
    conf_file_id serial PRIMARY KEY,                    -- Configuration file identifier
    enabled bool NOT NULL DEFAULT true,                 -- Configuration file is active
    source text NOT NULL,                               -- Relative path on the boot server
                                                        -- where file can be downloaded
    dest text NOT NULL,                                 -- Absolute path where file should be installed
    file_permissions text NOT NULL DEFAULT '0644',      -- chmod(1) permissions
    file_owner text NOT NULL DEFAULT 'root',            -- chown(1) owner
    file_group text NOT NULL DEFAULT 'root',            -- chgrp(1) owner
    preinstall_cmd text,                                -- Shell command to execute prior to installing
    postinstall_cmd text,                               -- Shell command to execute after installing
    error_cmd text,                                     -- Shell command to execute if any error occurs
    ignore_cmd_errors bool NOT NULL DEFAULT false,      -- Install file anyway even if an error occurs
    always_update bool NOT NULL DEFAULT false           -- Always attempt to install file even if unchanged
) WITH OIDS;

CREATE TABLE conf_file_node (
    conf_file_id integer REFERENCES conf_files NOT NULL,-- Configuration file identifier
    node_id integer REFERENCES nodes NOT NULL,          -- Node identifier
    PRIMARY KEY (conf_file_id, node_id)
);
CREATE INDEX conf_file_node_conf_file_id_idx ON conf_file_node (conf_file_id);
CREATE INDEX conf_file_node_node_id_idx ON conf_file_node (node_id);

-- Nodes linked to each configuration file
CREATE OR REPLACE VIEW conf_file_nodes AS
SELECT conf_file_id,
array_accum(node_id) AS node_ids
FROM conf_file_node
GROUP BY conf_file_id;

-- Configuration files linked to each node
CREATE OR REPLACE VIEW node_conf_files AS
SELECT node_id,
array_accum(conf_file_id) AS conf_file_ids
FROM conf_file_node
GROUP BY node_id;

CREATE TABLE conf_file_nodegroup (
    conf_file_id integer REFERENCES conf_files NOT NULL,-- Configuration file identifier
    nodegroup_id integer REFERENCES nodegroups NOT NULL,-- Node group identifier
    PRIMARY KEY (conf_file_id, nodegroup_id)
);
CREATE INDEX conf_file_nodegroup_conf_file_id_idx ON conf_file_nodegroup (conf_file_id);
CREATE INDEX conf_file_nodegroup_nodegroup_id_idx ON conf_file_nodegroup (nodegroup_id);

-- Node groups linked to each configuration file
CREATE OR REPLACE VIEW conf_file_nodegroups AS
SELECT conf_file_id,
array_accum(nodegroup_id) AS nodegroup_ids
FROM conf_file_nodegroup
GROUP BY conf_file_id;

-- Configuration files linked to each node group
CREATE OR REPLACE VIEW nodegroup_conf_files AS
SELECT nodegroup_id,
array_accum(conf_file_id) AS conf_file_ids
FROM conf_file_nodegroup
GROUP BY nodegroup_id;

--------------------------------------------------------------------------------
-- Power control units (PCUs)
--------------------------------------------------------------------------------

CREATE TABLE pcus (
    -- Mandatory
    pcu_id serial PRIMARY KEY,                          -- PCU identifier
    site_id integer REFERENCES sites NOT NULL,          -- Site identifier
    hostname text,                                      -- Hostname, not necessarily unique
                                                        -- (multiple logical sites could use the same PCU)
    ip text NOT NULL,                                   -- IP, not necessarily unique

    -- Optional
    protocol text,                  -- Protocol, e.g. ssh or https or telnet
    username text,                  -- Username, if applicable
    "password" text,                -- Password, if applicable
    model text,                     -- Model, e.g. BayTech or iPal
    last_updated timestamp without time zone,
    notes text                      -- Random notes
) WITH OIDS;
CREATE INDEX pcus_site_id_idx ON pcus (site_id);

CREATE OR REPLACE VIEW site_pcus AS
SELECT site_id,
array_accum(pcu_id) AS pcu_ids
FROM pcus
GROUP BY site_id;

CREATE TABLE pcu_node (
    pcu_id integer REFERENCES pcus NOT NULL,            -- PCU identifier
    node_id integer REFERENCES nodes NOT NULL,          -- Node identifier
    port integer NOT NULL,                              -- Port number
    PRIMARY KEY (pcu_id, node_id),  -- The same node cannot be controlled by different ports
    UNIQUE (pcu_id, port)           -- The same port cannot control multiple nodes
);
CREATE INDEX pcu_node_pcu_id_idx ON pcu_node (pcu_id);
CREATE INDEX pcu_node_node_id_idx ON pcu_node (node_id);

CREATE OR REPLACE VIEW node_pcus AS
SELECT node_id,
array_accum(pcu_id) AS pcu_ids,
array_accum(port) AS ports
FROM pcu_node
GROUP BY node_id;

CREATE OR REPLACE VIEW pcu_nodes AS
SELECT pcu_id,
array_accum(node_id) AS node_ids,
array_accum(port) AS ports
FROM pcu_node
GROUP BY pcu_id;

--------------------------------------------------------------------------------
-- Slices
--------------------------------------------------------------------------------

CREATE TABLE slice_instantiations (
    instantiation text PRIMARY KEY
) WITH OIDS;
INSERT INTO slice_instantiations (instantiation) VALUES ('not-instantiated'); -- Placeholder slice
INSERT INTO slice_instantiations (instantiation) VALUES ('plc-instantiated'); -- Instantiated by Node Manager
INSERT INTO slice_instantiations (instantiation) VALUES ('delegated');        -- Manually instantiated
INSERT INTO slice_instantiations (instantiation) VALUES ('nm-controller');    -- NM Controller

-- Slices
CREATE TABLE slices (
    slice_id serial PRIMARY KEY,                        -- Slice identifier
    site_id integer REFERENCES sites NOT NULL,          -- Site identifier

    name text NOT NULL,                                 -- Slice name
    instantiation text REFERENCES slice_instantiations  -- Slice state, e.g. plc-instantiated
        NOT NULL DEFAULT 'plc-instantiated',
    url text,                                           -- Project URL
    description text,                                   -- Project description

    max_nodes integer NOT NULL DEFAULT 100,             -- Maximum number of nodes that can be assigned to this slice

    creator_person_id integer REFERENCES persons,       -- Creator
    created timestamp without time zone NOT NULL        -- Creation date
        DEFAULT CURRENT_TIMESTAMP,
    expires timestamp without time zone NOT NULL        -- Expiration date
        DEFAULT CURRENT_TIMESTAMP + '2 weeks',

    is_deleted boolean NOT NULL DEFAULT false
) WITH OIDS;
CREATE INDEX slices_site_id_idx ON slices (site_id);
CREATE INDEX slices_name_idx ON slices (name);

-- Slivers
CREATE TABLE slice_node (
    slice_id integer REFERENCES slices NOT NULL,        -- Slice identifier
    node_id integer REFERENCES nodes NOT NULL,          -- Node identifier
    PRIMARY KEY (slice_id, node_id)
) WITH OIDS;
CREATE INDEX slice_node_slice_id_idx ON slice_node (slice_id);
CREATE INDEX slice_node_node_id_idx ON slice_node (node_id);

-- Synonym for slice_node
CREATE OR REPLACE VIEW slivers AS
SELECT * FROM slice_node;

-- Nodes in each slice
CREATE OR REPLACE VIEW slice_nodes AS
SELECT slice_id,
array_accum(node_id) AS node_ids
FROM slice_node
GROUP BY slice_id;

-- Slices on each node
CREATE OR REPLACE VIEW node_slices AS
SELECT node_id,
array_accum(slice_id) AS slice_ids
FROM slice_node
GROUP BY node_id;

-- Slices at each site
CREATE OR REPLACE VIEW site_slices AS
SELECT site_id,
array_accum(slice_id) AS slice_ids
FROM slices
WHERE is_deleted IS false
GROUP BY site_id;

-- Slice membership
CREATE TABLE slice_person (
    slice_id integer REFERENCES slices NOT NULL,        -- Slice identifier
    person_id integer REFERENCES persons NOT NULL,      -- Account identifier
    PRIMARY KEY (slice_id, person_id)
) WITH OIDS;
CREATE INDEX slice_person_slice_id_idx ON slice_person (slice_id);
CREATE INDEX slice_person_person_id_idx ON slice_person (person_id);

-- Members of the slice
CREATE OR REPLACE VIEW slice_persons AS
SELECT slice_id,
array_accum(person_id) AS person_ids
FROM slice_person
GROUP BY slice_id;

-- Slices of which each person is a member
CREATE OR REPLACE VIEW person_slices AS
SELECT person_id,
array_accum(slice_id) AS slice_ids
FROM slice_person
GROUP BY person_id;

--------------------------------------------------------------------------------
-- Slice whitelist
--------------------------------------------------------------------------------
-- Slice whitelist on nodes
CREATE TABLE node_slice_whitelist (
    node_id integer REFERENCES nodes NOT NULL,          -- Node id of whitelist
    slice_id integer REFERENCES slices NOT NULL,        -- Slice id that's allowed on this node
    PRIMARY KEY (node_id, slice_id)
) WITH OIDS;
CREATE INDEX node_slice_whitelist_node_id_idx ON node_slice_whitelist (node_id);
CREATE INDEX node_slice_whitelist_slice_id_idx ON node_slice_whitelist (slice_id);

-- Slices on each node
CREATE OR REPLACE VIEW node_slices_whitelist AS
SELECT node_id,
array_accum(slice_id) AS slice_ids_whitelist
FROM node_slice_whitelist
GROUP BY node_id;

--------------------------------------------------------------------------------
-- Slice tags (formerly known as slice attributes)
--------------------------------------------------------------------------------

-- Slice/sliver attributes
CREATE TABLE slice_tag (
    slice_tag_id serial PRIMARY KEY,                    -- Slice attribute identifier
    slice_id integer REFERENCES slices NOT NULL,        -- Slice identifier
    node_id integer REFERENCES nodes,                   -- Sliver attribute if set
    nodegroup_id integer REFERENCES nodegroups,         -- Node group attribute if set
    tag_type_id integer REFERENCES tag_types NOT NULL,  -- Attribute type identifier
    value text
) WITH OIDS;
CREATE INDEX slice_tag_slice_id_idx ON slice_tag (slice_id);
CREATE INDEX slice_tag_node_id_idx ON slice_tag (node_id);
CREATE INDEX slice_tag_nodegroup_id_idx ON slice_tag (nodegroup_id);

--------------------------------------------------------------------------------
-- Initscripts
--------------------------------------------------------------------------------

-- Initscripts
CREATE TABLE initscripts (
    initscript_id serial PRIMARY KEY,                   -- Initscript identifier
    name text NOT NULL,                                 -- Initscript name
    enabled bool NOT NULL DEFAULT true,                 -- Initscript is active
    script text NOT NULL,                               -- Initscript code
    UNIQUE (name)
) WITH OIDS;
CREATE INDEX initscripts_name_idx ON initscripts (name);


--------------------------------------------------------------------------------
-- Peers
--------------------------------------------------------------------------------

-- Peers
CREATE TABLE peers (
    peer_id serial PRIMARY KEY,                         -- Peer identifier
    peername text NOT NULL,                             -- Peer name
    peer_url text NOT NULL,                             -- (HTTPS) URL of the peer PLCAPI interface
    cacert text,                                        -- (SSL) Public certificate of peer API server
    key text,                                           -- (GPG) Public key used for authentication
    shortname text,                                     -- Abbreviated name for displaying foreign objects
    hrn_root text,                                      -- Root for this peer domain
    deleted boolean NOT NULL DEFAULT false
) WITH OIDS;
CREATE INDEX peers_peername_idx ON peers (peername) WHERE deleted IS false;
CREATE INDEX peers_shortname_idx ON peers (shortname) WHERE deleted IS false;

-- Objects at each peer
CREATE TABLE peer_site (
    site_id integer REFERENCES sites PRIMARY KEY,       -- Local site identifier
    peer_id integer REFERENCES peers NOT NULL,          -- Peer identifier
    peer_site_id integer NOT NULL,                      -- Foreign site identifier at peer
    UNIQUE (peer_id, peer_site_id)  -- The same foreign site should not be cached twice
) WITH OIDS;
-- Fixed: this index was previously created ON peers (peer_id), which merely
-- duplicated the peers primary key; it is meant to speed up per-peer lookups
-- of cached sites, so it belongs on peer_site like its siblings below.
CREATE INDEX peer_site_peer_id_idx ON peer_site (peer_id);

CREATE OR REPLACE VIEW peer_sites AS
SELECT peer_id,
array_accum(site_id) AS site_ids,
array_accum(peer_site_id) AS peer_site_ids
FROM peer_site
GROUP BY peer_id;

CREATE TABLE peer_person (
    person_id integer REFERENCES persons PRIMARY KEY,   -- Local user identifier
    peer_id integer REFERENCES peers NOT NULL,          -- Peer identifier
    peer_person_id integer NOT NULL,                    -- Foreign user identifier at peer
    UNIQUE (peer_id, peer_person_id) -- The same foreign user should not be cached twice
) WITH OIDS;
CREATE INDEX peer_person_peer_id_idx ON peer_person (peer_id);

CREATE OR REPLACE VIEW peer_persons AS
SELECT peer_id,
array_accum(person_id) AS person_ids,
array_accum(peer_person_id) AS peer_person_ids
FROM peer_person
GROUP BY peer_id;

CREATE TABLE peer_key (
    key_id integer REFERENCES keys PRIMARY KEY,         -- Local key identifier
    peer_id integer REFERENCES peers NOT NULL,          -- Peer identifier
    peer_key_id integer NOT NULL,                       -- Foreign key identifier at peer
    UNIQUE (peer_id, peer_key_id)   -- The same foreign key should not be cached twice
) WITH OIDS;
CREATE INDEX peer_key_peer_id_idx ON peer_key (peer_id);

CREATE OR REPLACE VIEW peer_keys AS
SELECT peer_id,
array_accum(key_id) AS key_ids,
array_accum(peer_key_id) AS peer_key_ids
FROM peer_key
GROUP BY peer_id;

CREATE TABLE peer_node (
    node_id integer REFERENCES nodes PRIMARY KEY,       -- Local node identifier
    peer_id integer REFERENCES peers NOT NULL,          -- Peer identifier
    peer_node_id integer NOT NULL,                      -- Foreign node identifier
    UNIQUE (peer_id, peer_node_id)  -- The same foreign node should not be cached twice
) WITH OIDS;
CREATE INDEX peer_node_peer_id_idx ON peer_node (peer_id);

CREATE OR REPLACE VIEW peer_nodes AS
SELECT peer_id,
array_accum(node_id) AS node_ids,
array_accum(peer_node_id) AS peer_node_ids
FROM peer_node
GROUP BY peer_id;

CREATE TABLE peer_slice (
    slice_id integer REFERENCES slices PRIMARY KEY,     -- Local slice identifier
    peer_id integer REFERENCES peers NOT NULL,          -- Peer identifier
    peer_slice_id integer NOT NULL,                     -- Slice identifier at peer
    UNIQUE (peer_id, peer_slice_id) -- The same foreign slice should not be cached twice
) WITH OIDS;
CREATE INDEX peer_slice_peer_id_idx ON peer_slice (peer_id);

CREATE OR REPLACE VIEW peer_slices AS
SELECT peer_id,
array_accum(slice_id) AS slice_ids,
array_accum(peer_slice_id) AS peer_slice_ids
FROM peer_slice
GROUP BY peer_id;

--------------------------------------------------------------------------------
-- Authenticated sessions
--------------------------------------------------------------------------------

-- Authenticated sessions
CREATE TABLE sessions (
    session_id text PRIMARY KEY,                        -- Session identifier
    expires timestamp without time zone
) WITH OIDS;

-- People can have multiple sessions
CREATE TABLE person_session (
    person_id integer REFERENCES persons NOT NULL,      -- Account identifier
    session_id text REFERENCES sessions NOT NULL,       -- Session identifier
    PRIMARY KEY (person_id, session_id),
    UNIQUE (session_id)                                 -- Sessions are unique
) WITH OIDS;
CREATE INDEX person_session_person_id_idx ON person_session (person_id);

-- Nodes can have only one session
CREATE TABLE node_session (
    node_id integer REFERENCES nodes NOT NULL,          -- Node identifier
    session_id text REFERENCES sessions NOT NULL,       -- Session identifier
    UNIQUE (node_id),                                   -- Nodes can have only one session
    UNIQUE (session_id)                                 -- Sessions are unique
) WITH OIDS;

-------------------------------------------------------------------------------
-- PCU Types
------------------------------------------------------------------------------
CREATE TABLE pcu_types (
    pcu_type_id serial PRIMARY KEY,
    model text NOT NULL,                                -- PCU model name
    name text                                           -- Full PCU model name
) WITH OIDS;
CREATE INDEX pcu_types_model_idx ON pcu_types (model);

CREATE TABLE pcu_protocol_type (
    pcu_protocol_type_id serial PRIMARY KEY,
    pcu_type_id integer REFERENCES pcu_types NOT NULL,  -- PCU type identifier
    port integer NOT NULL,                              -- PCU port
    protocol text NOT NULL,                             -- Protocol
    supported boolean NOT NULL DEFAULT True             -- Whether PLC supports this protocol
) WITH OIDS;
CREATE INDEX pcu_protocol_type_pcu_type_id ON pcu_protocol_type (pcu_type_id);


CREATE OR REPLACE VIEW pcu_protocol_types AS
SELECT pcu_type_id,
array_accum(pcu_protocol_type_id) AS pcu_protocol_type_ids
FROM pcu_protocol_type
GROUP BY pcu_type_id;

--------------------------------------------------------------------------------
-- Message templates
--------------------------------------------------------------------------------

CREATE TABLE messages (
    message_id text PRIMARY KEY,                        -- Message name
    subject text,                                       -- Message summary
    template text,                                      -- Message template
    enabled bool NOT NULL DEFAULT true                  -- Whether message is enabled
) WITH OIDS;

--------------------------------------------------------------------------------
-- Events
--------------------------------------------------------------------------------

-- Events
CREATE TABLE events (
    event_id serial PRIMARY KEY,                        -- Event identifier
    person_id integer REFERENCES persons,               -- Person responsible for event, if any
    node_id integer REFERENCES nodes,                   -- Node responsible for event, if any
    auth_type text,                                     -- Type of auth used, i.e. AuthMethod
    fault_code integer NOT NULL DEFAULT 0,              -- Did this event result in error
    call_name text NOT NULL,                            -- Call responsible for this event
    call text NOT NULL,                                 -- Call responsible for this event, including parameters
    message text,                                       -- High level description of this event
    runtime float DEFAULT 0,                            -- Event run time
    time timestamp without time zone NOT NULL           -- Event timestamp
        DEFAULT CURRENT_TIMESTAMP
) WITH OIDS;

-- Database object(s) that may have been affected by a particular event
CREATE TABLE event_object (
    event_id integer REFERENCES events NOT NULL,        -- Event identifier
    object_id integer NOT NULL,                         -- Object identifier
    object_type text NOT NULL DEFAULT 'Unknown'         -- What type of object is this event affecting
) WITH OIDS;
CREATE INDEX event_object_event_id_idx ON event_object (event_id);
CREATE INDEX event_object_object_id_idx ON event_object (object_id);
CREATE INDEX event_object_object_type_idx ON event_object (object_type);

CREATE OR REPLACE VIEW event_objects AS
SELECT event_id,
array_accum(object_id) AS object_ids,
array_accum(object_type) AS object_types
FROM event_object
GROUP BY event_id;

--------------------------------------------------------------------------------
-- Useful views
--------------------------------------------------------------------------------
CREATE OR REPLACE VIEW view_pcu_types AS
SELECT
pcu_types.pcu_type_id,
pcu_types.model,
pcu_types.name,
COALESCE((SELECT pcu_protocol_type_ids FROM pcu_protocol_types
          WHERE pcu_protocol_types.pcu_type_id = pcu_types.pcu_type_id), '{}')
AS pcu_protocol_type_ids
FROM pcu_types;

--------------------------------------------------------------------------------
CREATE OR REPLACE VIEW view_events AS
SELECT
events.event_id,
events.person_id,
events.node_id,
events.auth_type,
events.fault_code,
events.call_name,
events.call,
events.message,
events.runtime,
CAST(date_part('epoch', events.time) AS bigint) AS time,
COALESCE((SELECT object_ids FROM event_objects WHERE event_objects.event_id = events.event_id), '{}') AS object_ids,
COALESCE((SELECT object_types FROM event_objects WHERE event_objects.event_id = events.event_id), '{}') AS object_types
FROM events;

CREATE OR REPLACE VIEW view_event_objects AS
SELECT
events.event_id,
events.person_id,
events.node_id,
events.fault_code,
events.call_name,
events.call,
events.message,
events.runtime,
CAST(date_part('epoch', events.time) AS bigint) AS time,
event_object.object_id,
event_object.object_type
FROM events LEFT JOIN event_object USING (event_id);

--------------------------------------------------------------------------------
CREATE OR REPLACE VIEW view_persons AS
SELECT
persons.person_id,
persons.email,
persons.first_name,
persons.last_name,
persons.deleted,
persons.enabled,
persons.password,
persons.verification_key,
CAST(date_part('epoch', persons.verification_expires) AS bigint) AS verification_expires,
persons.title,
persons.phone,
persons.url,
persons.bio,
CAST(date_part('epoch', persons.date_created) AS bigint) AS date_created,
CAST(date_part('epoch', persons.last_updated) AS bigint) AS last_updated,
peer_person.peer_id,
peer_person.peer_person_id,
COALESCE((SELECT role_ids FROM person_roles WHERE person_roles.person_id = persons.person_id), '{}') AS role_ids,
COALESCE((SELECT roles FROM person_roles WHERE person_roles.person_id = persons.person_id), '{}') AS roles,
COALESCE((SELECT site_ids FROM person_sites WHERE person_sites.person_id = persons.person_id), '{}') AS site_ids,
COALESCE((SELECT key_ids FROM person_keys WHERE person_keys.person_id = persons.person_id), '{}') AS key_ids,
COALESCE((SELECT slice_ids FROM person_slices WHERE person_slices.person_id = persons.person_id), '{}') AS slice_ids,
COALESCE((SELECT person_tag_ids FROM person_tags WHERE person_tags.person_id = persons.person_id), '{}') AS person_tag_ids
FROM persons
LEFT JOIN peer_person USING (person_id);
--------------------------------------------------------------------------------- -CREATE OR REPLACE VIEW view_peers AS -SELECT -peers.*, -COALESCE((SELECT site_ids FROM peer_sites WHERE peer_sites.peer_id = peers.peer_id), '{}') AS site_ids, -COALESCE((SELECT peer_site_ids FROM peer_sites WHERE peer_sites.peer_id = peers.peer_id), '{}') AS peer_site_ids, -COALESCE((SELECT person_ids FROM peer_persons WHERE peer_persons.peer_id = peers.peer_id), '{}') AS person_ids, -COALESCE((SELECT peer_person_ids FROM peer_persons WHERE peer_persons.peer_id = peers.peer_id), '{}') AS peer_person_ids, -COALESCE((SELECT key_ids FROM peer_keys WHERE peer_keys.peer_id = peers.peer_id), '{}') AS key_ids, -COALESCE((SELECT peer_key_ids FROM peer_keys WHERE peer_keys.peer_id = peers.peer_id), '{}') AS peer_key_ids, -COALESCE((SELECT node_ids FROM peer_nodes WHERE peer_nodes.peer_id = peers.peer_id), '{}') AS node_ids, -COALESCE((SELECT peer_node_ids FROM peer_nodes WHERE peer_nodes.peer_id = peers.peer_id), '{}') AS peer_node_ids, -COALESCE((SELECT slice_ids FROM peer_slices WHERE peer_slices.peer_id = peers.peer_id), '{}') AS slice_ids, -COALESCE((SELECT peer_slice_ids FROM peer_slices WHERE peer_slices.peer_id = peers.peer_id), '{}') AS peer_slice_ids -FROM peers; - --------------------------------------------------------------------------------- -CREATE OR REPLACE VIEW node_tags AS -SELECT node_id, -array_accum(node_tag_id) AS node_tag_ids -FROM node_tag -GROUP BY node_id; - -CREATE OR REPLACE VIEW view_node_tags AS -SELECT -node_tag.node_tag_id, -node_tag.node_id, -nodes.hostname, -tag_types.tag_type_id, -tag_types.tagname, -tag_types.description, -tag_types.category, -tag_types.min_role_id, -node_tag.value -FROM node_tag -INNER JOIN tag_types USING (tag_type_id) -INNER JOIN nodes USING (node_id); - -CREATE OR REPLACE VIEW view_nodes AS -SELECT -nodes.node_id, -nodes.node_type, -nodes.hostname, -nodes.site_id, -nodes.boot_state, -nodes.run_level, -nodes.deleted, -nodes.model, 
-nodes.boot_nonce, -nodes.version, -nodes.verified, -nodes.ssh_rsa_key, -nodes.key, -CAST(date_part('epoch', nodes.date_created) AS bigint) AS date_created, -CAST(date_part('epoch', nodes.last_updated) AS bigint) AS last_updated, -CAST(date_part('epoch', nodes.last_contact) AS bigint) AS last_contact, -CAST(date_part('epoch', nodes.last_boot) AS bigint) AS last_boot, -CAST(date_part('epoch', nodes.last_download) AS bigint) AS last_download, -CAST(date_part('epoch', nodes.last_pcu_reboot) AS bigint) AS last_pcu_reboot, -CAST(date_part('epoch', nodes.last_pcu_confirmation) AS bigint) AS last_pcu_confirmation, -peer_node.peer_id, -peer_node.peer_node_id, -COALESCE((SELECT interface_ids FROM node_interfaces - WHERE node_interfaces.node_id = nodes.node_id), '{}') -AS interface_ids, -COALESCE((SELECT nodegroup_ids FROM node_nodegroups - WHERE node_nodegroups.node_id = nodes.node_id), '{}') -AS nodegroup_ids, -COALESCE((SELECT slice_ids FROM node_slices - WHERE node_slices.node_id = nodes.node_id), '{}') -AS slice_ids, -COALESCE((SELECT slice_ids_whitelist FROM node_slices_whitelist - WHERE node_slices_whitelist.node_id = nodes.node_id), '{}') -AS slice_ids_whitelist, -COALESCE((SELECT pcu_ids FROM node_pcus - WHERE node_pcus.node_id = nodes.node_id), '{}') -AS pcu_ids, -COALESCE((SELECT ports FROM node_pcus - WHERE node_pcus.node_id = nodes.node_id), '{}') -AS ports, -COALESCE((SELECT conf_file_ids FROM node_conf_files - WHERE node_conf_files.node_id = nodes.node_id), '{}') -AS conf_file_ids, -COALESCE((SELECT node_tag_ids FROM node_tags - WHERE node_tags.node_id = nodes.node_id), '{}') -AS node_tag_ids, -node_session.session_id AS session -FROM nodes -LEFT JOIN peer_node USING (node_id) -LEFT JOIN node_session USING (node_id); - --------------------------------------------------------------------------------- -CREATE OR REPLACE VIEW view_nodegroups AS -SELECT -nodegroups.*, -tag_types.tagname, -COALESCE((SELECT conf_file_ids FROM nodegroup_conf_files - WHERE 
nodegroup_conf_files.nodegroup_id = nodegroups.nodegroup_id), '{}') -AS conf_file_ids, -COALESCE((SELECT node_ids FROM nodegroup_nodes - WHERE nodegroup_nodes.nodegroup_id = nodegroups.nodegroup_id), '{}') -AS node_ids -FROM nodegroups INNER JOIN tag_types USING (tag_type_id); - --------------------------------------------------------------------------------- -CREATE OR REPLACE VIEW view_conf_files AS -SELECT -conf_files.*, -COALESCE((SELECT node_ids FROM conf_file_nodes - WHERE conf_file_nodes.conf_file_id = conf_files.conf_file_id), '{}') -AS node_ids, -COALESCE((SELECT nodegroup_ids FROM conf_file_nodegroups - WHERE conf_file_nodegroups.conf_file_id = conf_files.conf_file_id), '{}') -AS nodegroup_ids -FROM conf_files; - --------------------------------------------------------------------------------- -DROP VIEW view_pcus; -CREATE OR REPLACE VIEW view_pcus AS -SELECT -pcus.pcu_id, -pcus.site_id, -pcus.hostname, -pcus.ip, -pcus.protocol, -pcus.username, -pcus.password, -pcus.model, -pcus.notes, -CAST(date_part('epoch', pcus.last_updated) AS bigint) AS last_updated, -COALESCE((SELECT node_ids FROM pcu_nodes WHERE pcu_nodes.pcu_id = pcus.pcu_id), '{}') AS node_ids, -COALESCE((SELECT ports FROM pcu_nodes WHERE pcu_nodes.pcu_id = pcus.pcu_id), '{}') AS ports -FROM pcus; - - --------------------------------------------------------------------------------- -CREATE OR REPLACE VIEW view_sites AS -SELECT -sites.site_id, -sites.login_base, -sites.name, -sites.abbreviated_name, -sites.deleted, -sites.enabled, -sites.is_public, -sites.max_slices, -sites.max_slivers, -sites.latitude, -sites.longitude, -sites.url, -sites.ext_consortium_id, -CAST(date_part('epoch', sites.date_created) AS bigint) AS date_created, -CAST(date_part('epoch', sites.last_updated) AS bigint) AS last_updated, -peer_site.peer_id, -peer_site.peer_site_id, -COALESCE((SELECT person_ids FROM site_persons WHERE site_persons.site_id = sites.site_id), '{}') AS person_ids, -COALESCE((SELECT node_ids FROM 
site_nodes WHERE site_nodes.site_id = sites.site_id), '{}') AS node_ids, -COALESCE((SELECT address_ids FROM site_addresses WHERE site_addresses.site_id = sites.site_id), '{}') AS address_ids, -COALESCE((SELECT slice_ids FROM site_slices WHERE site_slices.site_id = sites.site_id), '{}') AS slice_ids, -COALESCE((SELECT pcu_ids FROM site_pcus WHERE site_pcus.site_id = sites.site_id), '{}') AS pcu_ids, -COALESCE((SELECT site_tag_ids FROM site_tags WHERE site_tags.site_id = sites.site_id), '{}') AS site_tag_ids -FROM sites -LEFT JOIN peer_site USING (site_id); - --------------------------------------------------------------------------------- -CREATE OR REPLACE VIEW view_addresses AS -SELECT -addresses.*, -COALESCE((SELECT address_type_ids FROM address_address_types WHERE address_address_types.address_id = addresses.address_id), '{}') AS address_type_ids, -COALESCE((SELECT address_types FROM address_address_types WHERE address_address_types.address_id = addresses.address_id), '{}') AS address_types -FROM addresses; - --------------------------------------------------------------------------------- -CREATE OR REPLACE VIEW view_keys AS -SELECT -keys.*, -person_key.person_id, -peer_key.peer_id, -peer_key.peer_key_id -FROM keys -LEFT JOIN person_key USING (key_id) -LEFT JOIN peer_key USING (key_id); - --------------------------------------------------------------------------------- -CREATE OR REPLACE VIEW slice_tags AS -SELECT slice_id, -array_accum(slice_tag_id) AS slice_tag_ids -FROM slice_tag -GROUP BY slice_id; - -CREATE OR REPLACE VIEW view_slices AS -SELECT -slices.slice_id, -slices.site_id, -slices.name, -slices.instantiation, -slices.url, -slices.description, -slices.max_nodes, -slices.creator_person_id, -slices.is_deleted, -CAST(date_part('epoch', slices.created) AS bigint) AS created, -CAST(date_part('epoch', slices.expires) AS bigint) AS expires, -peer_slice.peer_id, -peer_slice.peer_slice_id, -COALESCE((SELECT node_ids FROM slice_nodes WHERE slice_nodes.slice_id 
= slices.slice_id), '{}') AS node_ids, -COALESCE((SELECT person_ids FROM slice_persons WHERE slice_persons.slice_id = slices.slice_id), '{}') AS person_ids, -COALESCE((SELECT slice_tag_ids FROM slice_tags WHERE slice_tags.slice_id = slices.slice_id), '{}') AS slice_tag_ids -FROM slices -LEFT JOIN peer_slice USING (slice_id); - -CREATE OR REPLACE VIEW view_slice_tags AS -SELECT -slice_tag.slice_tag_id, -slice_tag.slice_id, -slice_tag.node_id, -slice_tag.nodegroup_id, -tag_types.tag_type_id, -tag_types.tagname, -tag_types.description, -tag_types.category, -tag_types.min_role_id, -slice_tag.value, -slices.name -FROM slice_tag -INNER JOIN tag_types USING (tag_type_id) -INNER JOIN slices USING (slice_id); - --------------------------------------------------------------------------------- -CREATE OR REPLACE VIEW view_sessions AS -SELECT -sessions.session_id, -CAST(date_part('epoch', sessions.expires) AS bigint) AS expires, -person_session.person_id, -node_session.node_id -FROM sessions -LEFT JOIN person_session USING (session_id) -LEFT JOIN node_session USING (session_id); - --------------------------------------------------------------------------------- --- Built-in maintenance account and default site --------------------------------------------------------------------------------- - -INSERT INTO persons (first_name, last_name, email, password, enabled) -VALUES ('Maintenance', 'Account', 'maint@localhost.localdomain', 'nopass', true); - -INSERT INTO person_role (person_id, role_id) VALUES (1, 10); -INSERT INTO person_role (person_id, role_id) VALUES (1, 20); -INSERT INTO person_role (person_id, role_id) VALUES (1, 30); -INSERT INTO person_role (person_id, role_id) VALUES (1, 40); - -INSERT INTO sites (login_base, name, abbreviated_name, max_slices) -VALUES ('pl', 'PlanetLab Central', 'PLC', 100); diff --git a/plc.d/db b/plc.d/db deleted file mode 100755 index 3b84b59..0000000 --- a/plc.d/db +++ /dev/null @@ -1,239 +0,0 @@ -#!/bin/bash -# -# priority: 900 -# -# 
Bootstrap the database -# -# Mark Huang -# Copyright (C) 2006 The Trustees of Princeton University -# - -# Source function library and configuration -. /etc/plc.d/functions -. /etc/planetlab/plc_config - -# Be verbose -set -x - -# Export so that we do not have to specify -p to psql invocations -export PGPORT=$PLC_DB_PORT - -# Install extensions -function extend_db() -{ - shopt -s nullglob - for file in /usr/share/plc_api/extensions/*-up*; do - script=${file##*/} - name=${script%-up*} - extension=${script##*.} - version=$(psql -U $PLC_DB_USER --quiet --tuples-only --no-align -c \ - "SELECT version FROM plc_db_extensions WHERE name='$name' LIMIT 1" \ - $PLC_DB_NAME 2>/dev/null | awk 'BEGIN { ver=0 } /^[0-9]+$/ { ver=$1 } END { print ver }') - if [ $version -eq 0 ]; then - if [ "$extension" = "sql" ] ; then - dialog " - $script (dbdumped)" - dump_planetlab_db "before-$script" - psql -U $PLC_DB_USER -f $file $PLC_DB_NAME - elif [ -x $file ] ; then - dialog " - $script (dbdumped)" - dump_planetlab_db "before-$script" - $file - else - dialog "\nWarning: extension $file not executable" - fi - check - fi - for file in /usr/share/plc_api/extensions/$name/migrations/[0-9]*-up-*; do - script=${file##*/} - index=${script%-up-*} - extension=${script##*.} - if [ $index -gt $version ] ; then - if [ "$extension" = "sql" ] ; then - dialog " - $script (dbdumped)" - dump_planetlab_db "before-$script" - psql -U $PLC_DB_USER -f $file $PLC_DB_NAME - elif [ -x $file ] ; then - dialog " - $script (dbdumped)" - dump_planetlab_db "before-$script" - $file - else - dialog "\nWarning: migration $file not executable" - fi - check - fi - done - done -} - -# Updates the database by applying all migration scripts in -# /usr/share/plc_api/migrations/N-up-*, where N is greater than the -# current subversion. At least one of the migration scripts with the -# same N must update plc_db_version.subversion. 
-function migrate_db() -{ - subversion=$(psql -U $PLC_DB_USER --quiet --tuples-only --no-align -c \ - "SELECT subversion FROM plc_db_version LIMIT 1" \ - $PLC_DB_NAME 2>/dev/null || echo 0) - shopt -s nullglob - for file in /usr/share/plc_api/migrations/[0-9]*-up-* ; do - script=$(basename $file) - index=${script%-up*} - extension=${script##*.} - if [ $index -gt $subversion ] ; then - if [ "$extension" = "sql" ] ; then - dialog " - $script (dbdumped)" - dump_planetlab_db "before-$script" - psql -U $PLC_DB_USER -f $file $PLC_DB_NAME - elif [ -x $file ] ; then - dialog " - $script (dbdumped)" - dump_planetlab_db "before-$script" - $file - else - dialog "\nWarning: migration $file not executable" - fi - check - fi - done -} - -function checkpoint_planetlab_db() -{ - dumpfile=$1 - pg_dump -U $PLC_DB_USER $PLC_DB_NAME > $dumpfile - check -} - -function restore_planetlab_db() -{ - dumpfile=$1 - if [ -n "$dumpfile" ] ; then - [ -f "$dumpfile" ] && psql -a -U $PLC_DB_USER $PLC_DB_NAME < $dumpfile - check - fi -} - -# use a single date of this script invocation for the dump_*_db functions. 
-DATE=$(date +"%Y-%m-%d-%H-%M-%S") - -# Dumps the database - optional argument to specify filename suffix -function dump_planetlab_db() -{ - if [ -n "$1" ] ; then suffix="-$1" ; else suffix="" ; fi - dumpfile=/var/lib/pgsql/backups/$(date +"${PLC_DB_NAME}.${DATE}${suffix}.sql") - checkpoint_planetlab_db $dumpfile -} - -function restore_drupal_db() -{ - dumpfile=$1 - if [ -n "$dumpfile" ] ; then - [ -f "$dumpfile" ] && psql -a -U $PLC_DB_USER drupal < $1 - check - fi -} - -function checkpoint_drupal_db() -{ - dumpfile=$1 - pg_dump -U $PLC_DB_USER drupal > $dumpfile - check -} - -function dump_drupal_db() -{ - dumpfile=/var/lib/pgsql/backups/$(date +"drupal.${DATE}.sql") - checkpoint_drupal_db $dumpfile - check -} - -# Clean up old backups -function clean_dumps() -{ - find /var/lib/pgsql/backups '(' -name "$PLC_DB_NAME.*.sql" -o -name "drupal.*.sql" ')' -a -atime +15 | xargs rm -f - check -} - -[ $PLC_DB_ENABLED -ne 1 ] && exit 0 -case "$1" in - start) - MESSAGE=$"Bootstrapping the database" - dialog "$MESSAGE" - - # Apply schema updates - migrate_db - extend_db - - # Update the maintenance account username. This can't be - # done through the api-config script since it uses the - # maintenance account to access the API. The maintenance - # account should be person_id 1 since it is created by the - # DB schema itself. - psql -U $PLC_DB_USER -c "UPDATE persons SET email='$PLC_API_MAINTENANCE_USER' WHERE person_id=1" $PLC_DB_NAME - - # Update the Drupal site_name variable - # also turn off drupal native user registration - psql -U $PLC_DB_USER drupal < -# Copyright (C) 2006 The Trustees of Princeton University -# - -# Source function library and configuration -. /etc/plc.d/functions -. 
/etc/planetlab/plc_config -local_config=/etc/planetlab/configs/site.xml - -# Be verbose -set -x - -# Default locations -PGDATA=/var/lib/pgsql/data -postgresql_conf=$PGDATA/postgresql.conf -pghba_conf=$PGDATA/pg_hba.conf -postgresql_sysconfig=/etc/sysconfig/pgsql/postgresql - -# Export so that we do not have to specify -p to psql invocations -export PGPORT=$PLC_DB_PORT - -# can't trust the return of service postgresql start / nor status -function postgresql_check () { - - # wait until postmaster is up and running - or 10s max - if status postmaster && [ -f /var/lock/subsys/postgresql ] ; then - # The only way we can be sure is if we can access it - for i in $(seq 1 10) ; do - # Must do this as the postgres user initially (before we - # fix pg_hba.conf to passwordless localhost access). - su -c 'psql -U postgres -c "" template1' postgres && return 0 - sleep 1 - done - fi - - return 1 -} - -case "$1" in - start) - if [ "$PLC_DB_ENABLED" != "1" ] ; then - exit 0 - fi - - MESSAGE=$"Starting PostgreSQL server" - dialog "$MESSAGE" - - ######## sysconfig -# xxx on f16, the systemd init script won't read /etc/sysconfig/pgsql/postgresql any more -# need to find out how to perform this configuration, if still needed - # Set data directory and redirect startup output to /var/log/pgsql - mkdir -p $(dirname $postgresql_sysconfig) - touch $postgresql_sysconfig - tmp=${postgresql_sysconfig}.new - # remove any previous definitions and write ours - ( egrep -v '^(PGDATA=|PGLOG=|PGPORT=)' $postgresql_sysconfig - echo "PGDATA=$PGDATA" - echo "PGLOG=/var/log/pgsql" - echo "PGPORT=$PLC_DB_PORT" - ) > $tmp ; mv -f $tmp $postgresql_sysconfig - - ######## /var/lib/pgsql/data - # Fix ownership of /var/lib/pgsql (rpm installation may have changed it) - chown -R -H postgres:postgres $(dirname $PGDATA) - - # PostgreSQL must be started at least once to bootstrap - # /var/lib/pgsql/data - if [ ! 
-f $postgresql_conf ] ; then -# fedora 16 uses systemd -# http://docs.fedoraproject.org/en-US/Fedora/16/html/Release_Notes/sect-Release_Notes-Changes_for_Sysadmin.html - if type postgresql-setup >& /dev/null ; then - postgresql-setup initdb || : - check - else - service postgresql initdb &> /dev/null || postgresql : - check - fi - fi - - ######## /var/lib/pgsql/data/postgresql.conf - # Enable DB server. drop Postgresql<=7.x - # PostgreSQL >=8.0 defines listen_addresses - # listen on a specific IP + localhost, more robust when run within a vserver - sed -i -e '/^listen_addresses/d' $postgresql_conf - echo "listen_addresses = '${PLC_DB_HOST},localhost'" >> $postgresql_conf - # tweak timezone to be 'UTC' - sed -i -e '/^timezone=/d' $postgresql_conf - echo "timezone='UTC'" >> $postgresql_conf - - ######## /var/lib/pgsql/data/pg_hba.conf - # Disable access to MyPLC and drupal DBs from all hosts - sed -i -e '/^\(host\|local\)/d' $pghba_conf - - # Enable passwordless localhost access - echo "local all all trust" >>$pghba_conf - - # Enable access from the API, boot, and web servers - PLC_API_IP=$(gethostbyname $PLC_API_HOST) - PLC_BOOT_IP=$(gethostbyname $PLC_BOOT_HOST) - PLC_WWW_IP=$(gethostbyname $PLC_WWW_HOST) - ip_failure=0 - if [ -z "$PLC_API_IP" ] ; then - MESSAGE=$"PLC_API_IP is not set" - dialog "$MESSAGE" - ip_failure=1 - fi - if [ -z "$PLC_BOOT_IP" ] ; then - MESSAGE=$"PLC_BOOT_IP is not set" - dialog "$MESSAGE" - ip_failure=1 - fi - if [ -z "$PLC_WWW_IP" ] ; then - MESSAGE=$"PLC_WWW_IP is not set" - dialog "$MESSAGE" - ip_failure=1 - fi - if [ $ip_failure -eq 1 ] ; then - /bin/false - check - fi - - ( - echo "host $PLC_DB_NAME $PLC_DB_USER 127.0.0.1/32 password" - echo "host $PLC_DB_NAME $PLC_DB_USER $PLC_API_IP/32 password" - echo "host $PLC_DB_NAME $PLC_DB_USER $PLC_BOOT_IP/32 password" - echo "host $PLC_DB_NAME $PLC_DB_USER $PLC_WWW_IP/32 password" - # Drupal also uses PostgreSQL - echo "host drupal $PLC_DB_USER 127.0.0.1/32 password" - echo "host drupal 
$PLC_DB_USER $PLC_WWW_IP/32 password" - ) >>$pghba_conf - - # Append site-specific access rules - for file in $pghba_conf.d/*.conf ; do - cat "$file" >>$pghba_conf - done - - # Fix ownership (sed -i changes it) - chown postgres:postgres $postgresql_conf $pghba_conf - - ######## Start up the server - ignore retcod and check this our way - (exec 3>&- 4>&- ; service postgresql start) - postgresql_check - check - - ######## Create/update the unprivileged database user and password - if [ -z "$PLC_DB_PASSWORD" ] ; then - PLC_DB_PASSWORD=$(uuidgen) - plc-config --category=plc_db --variable=password --value="$PLC_DB_PASSWORD" --save=$local_config $local_config - #service plc reload - plc_reload force - fi - if ! psql -U $PLC_DB_USER -c "" template1 >/dev/null 2>&1 ; then - psql -U postgres -c "CREATE USER $PLC_DB_USER PASSWORD '$PLC_DB_PASSWORD'" template1 - else - psql -U postgres -c "ALTER USER $PLC_DB_USER WITH PASSWORD '$PLC_DB_PASSWORD'" template1 - fi - check - - ######## Create the databases if necessary - if ! psql -U $PLC_DB_USER -c "" $PLC_DB_NAME >/dev/null 2>&1 ; then - createdb -U postgres --template=template0 --encoding=UNICODE --owner=$PLC_DB_USER $PLC_DB_NAME - psql -U $PLC_DB_USER -f /usr/share/plc_api/$PLC_DB_NAME.sql $PLC_DB_NAME - fi - check - if ! psql -U $PLC_DB_USER -c "" drupal >/dev/null 2>&1 ; then - createdb -U postgres --template=template0 --encoding=UNICODE --owner=$PLC_DB_USER drupal - psql -U $PLC_DB_USER -f /var/www/html/database/database.pgsql drupal - fi - check - - result "$MESSAGE" - ;; - - stop) - MESSAGE=$"Stopping PostgreSQL server" - dialog "$MESSAGE" - - # Drop the current user in case the username changes - psql -U postgres -c "DROP USER $PLC_DB_USER" template1 - - # WARNING: If the DB name changes, the old DB will be left - # intact and a new one will be created. If it changes - # back, the old DB will not be re-created. 
- - # Shut down the server - service postgresql stop - - # /etc/init.d/postgresql fails if it is not running - [ "$PLC_DB_ENABLED" = 1 ] && check - - result "$MESSAGE" - ;; -esac - -exit $ERRORS diff --git a/tools/dzombie.py b/tools/dzombie.py deleted file mode 100755 index 2095c96..0000000 --- a/tools/dzombie.py +++ /dev/null @@ -1,122 +0,0 @@ -#!/usr/bin/python -# -# Tool that removes zombie records from database tables# -import sys -import os -import getopt -import pgdb -from pprint import pprint - -schema_file = None -config_file = "/etc/planetlab/plc_config" -config = {} -execfile(config_file, config) - -def usage(): - print "Usage: %s SCHEMA_FILE " % sys.argv[0] - sys.exit(1) - -try: - schema_file = sys.argv[1] -except IndexError: - print "Error: too few arguments" - usage() - -# all foreing keys exist as primary kyes in another table -# will represent all foreign keys as -# { 'table.foreign_key': 'table.primary_key'} -foreign_keys = {} -foreign_keys_ordered = [] -zombie_keys = {} -# parse the schema for foreign keys -try: - file = open(schema_file, 'r') - index = 0 - lines = file.readlines() - while index < len(lines): - line = lines[index].strip() - # find all created objects - if line.startswith("CREATE"): - line_parts = line.split(" ") - if line_parts[1:3] == ['OR', 'REPLACE']: - line_parts = line_parts[2:] - item_type = line_parts[1].strip() - item_name = line_parts[2].strip() - if item_type.upper() in ['TABLE']: - while index < len(lines): - index = index + 1 - nextline =lines[index].strip() - if nextline.find("--") > -1: - nextline = nextline[0:nextline.index("--")].replace(',', '') - if nextline.upper().find("REFERENCES") > -1: - nextline_parts = nextline.split(" ") - foreign_key_name = nextline_parts[0].strip() - foreign_key_table = nextline_parts[nextline_parts.index("REFERENCES")+1].strip() - foreign_key = item_name + "."+ foreign_key_name - primary_key = foreign_key_table +"."+ foreign_key_name - foreign_keys[foreign_key] = primary_key - 
foreign_keys_ordered.append(foreign_key) - elif nextline.find(";") >= 0: - break - index = index + 1 -except: - raise - -db = pgdb.connect(user = config['PLC_DB_USER'], - database = config['PLC_DB_NAME']) -cursor = db.cursor() -try: - for foreign_key in foreign_keys_ordered: - primary_key = foreign_keys[foreign_key] - sql = "SELECT distinct %s from %s" - - # get all foreign keys in this table - foreign_key_parts = foreign_key.split(".") - - # do not delete from primary tables - if foreign_key_parts[0] in ['addresses', 'boot_states', 'conf_files', \ - 'keys', 'messages', 'nodegroups', 'interfaces', 'nodes', 'pcus', 'peers' \ - 'persons', 'roles', 'sessions', 'sites', 'slices']: - #print "skipping table %s" % foreign_key_parts[0] - continue - - cursor.execute(sql % (foreign_key_parts[1], foreign_key_parts[0])) - foreign_rows = cursor.fetchall() - - # get all the primary keys from this foreign key's primary table - primary_key_parts = primary_key.split(".") - # foreign key name may not match primary key name. 
must rename these - if primary_key_parts[1] == 'creator_person_id': - primary_key_parts[1] = 'person_id' - elif primary_key_parts[1] == 'min_role_id': - primary_key_parts[1] = 'role_id' - sql = sql % (primary_key_parts[1], primary_key_parts[0]) - - # determin which primary records are deleted - desc = os.popen('psql planetlab4 postgres -c "\d %s;"' % primary_key_parts[0]) - result = desc.readlines() - if primary_key_parts[0] in ['slices']: - sql = sql + " where name not like '%_deleted'" - elif filter(lambda line: line.find("deleted") > -1, result): - sql = sql + " where deleted = false" - - cursor.execute(sql) - primary_key_rows = cursor.fetchall() - - # if foreign key isnt present in primay_key query, it either doesnt exist or marked as deleted - # also, ignore null foreign keys, not considered zombied - zombie_keys_func = lambda key: key not in primary_key_rows and not key == [None] - zombie_keys_list = [zombie_key[0] for zombie_key in filter(zombie_keys_func, foreign_rows)] - print zombie_keys_list - # delete these zombie records - if zombie_keys_list: - print " -> Deleting %d zombie record(s) from %s after checking %s" % \ - (len(zombie_keys_list), foreign_key_parts[0], primary_key_parts[0]) - sql_delete = 'DELETE FROM %s WHERE %s IN %s' % \ - (foreign_key_parts[0], foreign_key_parts[1], tuple(zombie_keys_list)) - cursor.execute(sql_delete) - db.commit() - #zombie_keys[foreign_key] = zombie_keys_list - print "done" -except pgdb.DatabaseError: - raise diff --git a/tools/planetlab3_dump.sh b/tools/planetlab3_dump.sh deleted file mode 100755 index e6a86e3..0000000 --- a/tools/planetlab3_dump.sh +++ /dev/null @@ -1,119 +0,0 @@ -#!/bin/bash -# -# Dumps the planetlab3 database on zulu, fixing a few things on the way -# -# Mark Huang -# Copyright (C) 2007 The Trustees of Princeton University -# -# $Id$ -# - -tables=( -node_bootstates -nodes -nodenetworks -node_nodenetworks -nodegroups -nodegroup_nodes -override_bootscripts -pod_hash -conf_file -conf_assoc 
-address_types -addresses -organizations -sites -roles -capabilities -persons -person_roles -person_capabilities -person_address -key_types -keys -person_keys -person_site -node_root_access -authorized_subnets -site_authorized_subnets -event_classes -dslice03_states -dslice03_attributetypes -dslice03_slices -dslice03_attributes -dslice03_sliceattribute -dslice03_slicenode -dslice03_sliceuser -dslice03_siteinfo -pcu -pcu_ports -join_request -whatsnew -node_hostnames -blacklist -dslice03_initscripts -dslice03_defaultattribute -peered_mas -sessions -) - -# Dump tables -for table in "${tables[@]}" ; do - pg_dump -U postgres -t $table planetlab3 -done | - -# Do some manual cleanup -sed -f <(cat < -# Copyright (C) 2006 The Trustees of Princeton University -# - -import re -import base64 - -# Convert nm_net_{exempt_,}{min,max}_rate (bps) to -# net_{i2_,}{min,max}_rate and net_{i2_,}{min,max}_rate (kbps) -rename = {'nm_net_min_rate': 'net_min_rate', - 'nm_net_max_rate': 'net_max_rate', - 'nm_net_exempt_min_rate': 'net_i2_min_rate', - 'nm_net_exempt_max_rate': 'net_i2_max_rate'} -for slice_attribute in GetSliceTags({'name': rename.keys()}): - id = slice_attribute['slice_attribute_id'] - name = slice_attribute['name'] - slice_id = slice_attribute['slice_id'] - - # Convert bps to kbps - bps = int(slice_attribute['value']) - kbps = bps / 1000 - - # Add the new attribute - if GetSlices([slice_id]): - AddSliceTag(slice_id, rename[name], str(kbps)) - - # Delete the old attribute - DeleteSliceTag(id) - -# Convert nm_net_{exempt_,}avg_rate to -# net_{i2_,}max_kbyte and net_{i2_,}thresh_kbyte -rename = {'nm_net_avg_rate': {'max': 'net_max_kbyte', - 'thresh': 'net_thresh_kbyte'}, - 'nm_net_exempt_avg_rate': {'max': 'net_i2_max_kbyte', - 'thresh': 'net_i2_thresh_kbyte'}} -for slice_attribute in GetSliceTags({'name': rename.keys()}): - id = slice_attribute['slice_attribute_id'] - name = slice_attribute['name'] - slice_id = slice_attribute['slice_id'] - - # Convert bps to 80% and 100% of 
max bytes per day - bps = int(slice_attribute['value']) - max_kbyte = bps * 24 * 60 * 60 / 8 / 1000 - thresh_kbyte = int(0.8 * max_kbyte) - - # Add the new attribute - if GetSlices([slice_id]): - AddSliceTag(slice_id, rename[name]['max'], str(max_kbyte)) - AddSliceTag(slice_id, rename[name]['thresh'], str(thresh_kbyte)) - - # Delete the old attribute - DeleteSliceTag(id) - -# Convert plc_slice_state -for slice_attribute in GetSliceTags({'name': 'plc_slice_state'}): - id = slice_attribute['slice_attribute_id'] - name = slice_attribute['name'] - slice_id = slice_attribute['slice_id'] - - # Add the new attribute - if GetSlices([slice_id]): - if slice_attribute['value'] == "suspended": - AddSliceTag(slice_id, 'enabled', "0") - else: - AddSliceTag(slice_id, 'enabled', "1") - - # Delete the old attribute - DeleteSliceTag(id) - -# Straight renames -rename = {'nm_cpu_share': 'cpu_share', - 'nm_disk_quota': 'disk_max', - 'nm_net_share': 'net_share', - 'nm_net_exempt_share': 'net_i2_share', - 'nm_net_max_byte': 'net_max_kbyte', - 'nm_net_max_thresh_byte': 'net_thresh_kbyte', - 'nm_net_max_exempt_byte': 'net_i2_max_kbyte', - 'nm_net_max_thresh_exempt_byte': 'net_i2_thresh_kbyte'} -for slice_attribute in GetSliceTags({'name': rename.keys()}): - id = slice_attribute['slice_attribute_id'] - name = slice_attribute['name'] - slice_id = slice_attribute['slice_id'] - - # Pass straight through - value = slice_attribute['value'] - - # Add the new attribute - if GetSlices([slice_id]): - AddSliceTag(slice_id, rename[name], value) - - # Delete the old attribute - DeleteSliceTag(id) - -# Update plc_ticket_pubkey attribute -for slice_attribute in GetSliceTags({'name': "plc_ticket_pubkey"}): - id = slice_attribute['slice_attribute_id'] - - UpdateSliceTag(id, """ -MIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQDKXa72MEKDAnVyzEpKOB1ot2eW -xG/TG2aa7q/2oy1xf5XMmU9H9uKwO+GoUeinp1BSxgkVRF0VhEGGaqKR9kYQzX0k -ht4+P2hAr+UyU4cp0NxV4xfmyAbrNKuHVjawMUCu5BH0IkBUC/89ckxk71oROnak -FbI7ojUezSGr4aVabQIDAQAB 
-""".lstrip()) - -# Delete _deleted and deprecated slice attributes and types -for attribute_type in GetSliceTagTypes(): - id = attribute_type['attribute_type_id'] - name = attribute_type['name'] - - if name == 'general_prop_share' or \ - re.match('nm_', name) or \ - re.search('_deleted$', name): - DeleteSliceTagType(id) - # N.B. Automatically deletes all slice attributes of this type - -# Add Proper ops -proper_ops = [ - # give Stork permission to mount and unmount client dirs - ('arizona_stork', 'mount_dir'), - ('arizona_stork', 'set_file_flags pass, "1"'), - ('arizona_stork', 'set_file_flags_list "1"'), - ('arizona_stork', 'bind_socket sockname=64?:*'), - ('arizona_stork2', 'mount_dir'), - ('arizona_stork2', 'set_file_flags pass, "1"'), - ('arizona_stork2', 'set_file_flags_list "1"'), - ('arizona_stork2', 'bind_socket sockname=64?:*'), - - # give CoMon the necessary permissions to run slicestat - ('princeton_slicestat', 'exec "root", pass, "/usr/local/planetlab/bin/pl-ps", none'), - ('princeton_slicestat', 'exec "root", pass, "/usr/sbin/vtop", "bn1", none'), - ('princeton_slicestat', 'open_file file=/proc/virtual/*/cacct'), - ('princeton_slicestat', 'open_file file=/proc/virtual/*/limit'), - ('princeton_comon', 'open_file file=/var/log/secure'), - ('princeton_comon', 'exec "root", pass, "/bin/df", "/vservers", none'), - - # give pl_slicedir access to /etc/passwd - ('pl_slicedir', 'open_file pass, "/etc/passwd"'), - - # nyu_d are building a DNS demux so give them access to port 53 - ('nyu_d', 'bind_socket'), - ('nyu_oasis', 'bind_socket'), - - # QA slices need to be able to create and delete bind-mounts - ('pl_qa_0', 'mount_dir'), - ('pl_qa_1', 'mount_dir'), - - # irb_snort needs packet sockets for tcpdump - ('irb_snort', 'create_socket'), - - # uw_ankur is using netlink sockets to do the same thing as netflow - ('uw_ankur', 'create_socket'), - - # cornell_codons gets access to port 53 for now - ('cornell_codons', 'create_socket'), - - # give Mic Bowman's 
conf-monitor service read-only access to root fs - # and the ability to run df - ('idsl_monitor', 'mount_dir "root:/", pass, "ro"'), - ('idsl_monitor', 'unmount'), - ('idsl_monitor', 'exec "root", pass, "/bin/df", "-P", "/", "/vservers", none'), - - # give Shark access to port 111 to run portmap - # and port 955 to run mount - ('nyu_shkr', 'bind_socket'), - ('nyu_shkr', 'mount_dir "nfs:**:**"'), - ('nyu_shkr', 'exec "root", pass, "/bin/umount", "-l", "/vservers/nyu_shkr/**", none'), - - # give tsinghua_lgh access to restricted ports - ('tsinghua_lgh', 'bind_socket'), - - # CoDeeN needs port 53 too - ('princeton_codeen', 'bind_socket sockname=53:*'), - - # give ucin_load access to /var/log/wtmp - ('ucin_load', 'open_file file=/var/log/wtmp*'), - - # give google_highground permission to bind port 81 (and raw sockets) - ('google_highground', 'bind_socket'), - - # pl_conf needs access to port 814 - ('pl_conf', 'bind_socket sockname=814:*'), - ('pl_conf', 'open file=/home/*/.ssh/authorized_keys'), - - # give princeton_visp permission to read all packets sent through the - # tap0 device - ('princeton_visp', 'open file=/dev/net/tun, flags=rw'), - - # The PLB group needs the BGP port - ('princeton_iias', 'bind_socket sockname=179:*'), - ('princeton_visp', 'bind_socket sockname=179:*'), - ('mit_rcp', 'bind_socket sockname=179:*'), - ('princeton_bgpmux', 'bind_socket sockname=179:*'), - ('princeton_bgpmux2', 'bind_socket sockname=179:*'), - - # PL-VINI group - ('mit_rcp', 'exec "root", pass, "/usr/bin/chrt"'), - ('princeton_iias', 'exec "root", pass, "/usr/bin/chrt"'), - - # Tycoon needs access to /etc/passwd to determine Slicename->XID mappings - ('hplabs_tycoon_aucd', 'open_file file=/etc/passwd'), -] - -for slice, op in proper_ops: - try: - AddSliceTag(slice, 'proper_op', op) - except Exception, err: - print "Warning: %s:" % slice, err - -initscripts = dict([(initscript['initscript_id'], initscript) for initscript in [{'initscript_id': 8, 'script': '#! /bin/sh

# <Program Name>
#    bindscript
#
# <Author>
#    Jeffry Johnston and Jeremy Plichta
#
# <Purpose>
#    Downloads and installs stork on a node.

# save original PWD
OLDPWD=$PWD

# error reporting function
error()
{
   echo
   echo "Please E-mail stork-support@cs.arizona.edu if you believe you have" 
   echo "received this message in error."

   # get rid of CERT file
   if [ -f $CERT ]
   then
      rm -f $CERT > /dev/null
   fi

   # restore original PWD
   cd $OLDPWD
   exit 1
}

CERT=`pwd`/tempcrtfile

#functions

###
### createCertificate()
###    writes the Equifax CA certificate (used as curl's --cacert when
###    talking to nr06.cs.arizona.edu) to the path held in $CERT
###    NOTE(review): $CERT must already be set before this runs; the
###    function writes to that path rather than choosing a file name
###
function createCertificate(){
# unquoted here-doc delimiter is safe here: the PEM body contains no $ or `
cat > $CERT <<EQUIFAX
-----BEGIN CERTIFICATE-----
MIICkDCCAfmgAwIBAgIBATANBgkqhkiG9w0BAQQFADBaMQswCQYDVQQGEwJV
UzEcMBoGA1UEChMTRXF1aWZheCBTZWN1cmUgSW5jLjEtMCsGA1UEAxMkRXF1
aWZheCBTZWN1cmUgR2xvYmFsIGVCdXNpbmVzcyBDQS0xMB4XDTk5MDYyMTA0
MDAwMFoXDTIwMDYyMTA0MDAwMFowWjELMAkGA1UEBhMCVVMxHDAaBgNVBAoT
E0VxdWlmYXggU2VjdXJlIEluYy4xLTArBgNVBAMTJEVxdWlmYXggU2VjdXJl
IEdsb2JhbCBlQnVzaW5lc3MgQ0EtMTCBnzANBgkqhkiG9w0BAQEFAAOBjQAw
gYkCgYEAuucXkAJlsTRVPEnCUdXfp9E3j9HngXNBUmCbnaEXJnitx7HoJpQy
td4zjTov2/KaelpzmKNc6fuKcxtc58O/gGzNqfTWK8D3+ZmqY6KxRwIP1ORR
OhI8bIpaVIRw28HFkM9yRcuoWcDNM50/o5brhTMhHD4ePmBudpxnhcXIw2EC
AwEAAaNmMGQwEQYJYIZIAYb4QgEBBAQDAgAHMA8GA1UdEwEB/wQFMAMBAf8w
HwYDVR0jBBgwFoAUvqigdHJQa0S3ySPY+6j/s1draGwwHQYDVR0OBBYEFL6o
oHRyUGtEt8kj2Puo/7NXa2hsMA0GCSqGSIb3DQEBBAUAA4GBADDiAVGqx+pf
2rnQZQ8w1j7aDRRJbpGTJxQx78T3LUX47Me/okENI7SS+RkAZ70Br83gcfxa
z2TE4JaY0KNA4gGK7ycH8WUBikQtBmV1UsCGECAhX2xrD2yuCRyv8qIYNMR1
pHMc8Y3c7635s3a0kr/clRAevsvIO1qEYBlWlKlV
-----END CERTIFICATE----- 
EQUIFAX
}

###
### overWriteConf()
###	overwrite the default stork.conf file
###     that was installed by the rpm package.
###     NOTE(review): described by the original author as a temporary
###     hack -- the nestport needed changing and repackaging the rpm
###     with the correct settings was not an option at the time
function overWriteConf(){
# unquoted here-doc delimiter is safe: the config body contains no $ or `
cat > /usr/local/stork/etc/stork.conf <<ENDOFFILE
pacman=/usr/local/stork/bin/pacman
dtd-packages=/usr/local/stork/bin/packages.dtd
dtd-groups=/usr/local/stork/bin/groups.dtd
storknestupdatelistenerport=649

#bittorrenttrackerhost=quadrus.cs.arizona.edu
bittorrenttrackerhost=nr06.cs.arizona.edu

bittorrenttrackerport=6880
bittorrentuploadrate=0
bittorrentseedlookuptimeout=30

#packagerepository = quadrus.cs.arizona.edu/PlanetLab/V3|dist, stable
packagerepository = nr06.cs.arizona.edu/PlanetLab/V3|dist, stable
#packageinforepository = quadrus.cs.arizona.edu/PlanetLab/V3/stork.info
packageinforepository = nr06.cs.arizona.edu/PlanetLab/V3/stork.info

username = PlanetLab
publickeyfile = /usr/local/stork/var/keys/PlanetLab.publickey
packagemanagers = nestrpm, rpm, targz
transfermethod= nest,bittorrent,coblitz,coral,http,ftp
nestport=6000
tarpackinfopath=/usr/local/stork/var/tarinfo
ENDOFFILE
} 


###
### downloadNR06()
###    download a file from nr06 using curl, validating the server
###    against the certificate in $CERT
###
### args:
###       - the path of the file you wish to download
###         relative from https://nr06.cs.arizona.edu
###       - the file to save it to
###       - returned value as specified in verifyDownload
function downloadNR06(){
    # quote all expansions so paths containing spaces or glob
    # characters survive as single arguments
    curl --cacert "$CERT" "https://nr06.cs.arizona.edu/$1" -o "$2" 2>/dev/null
    verifyDownload "$2" "$3"
}

###
### verifyDownload()
###     verify that a file that was just downloaded with downloadNR06
###     was downloaded correctly. Since we are getting stuff from an
###     http server we are assuming that if we get a 404 response
###     that the page we want does not exist. Also, if the output file
###     does not exist that means that only headers were returned
###     without any content. This too is an invalid file download.
###
### args:
###       - the file to verify
###       - return variable, will have 1 if fail 0 if good
###
function verifyDownload(){
    # default to success; the file path is quoted everywhere below so
    # names with spaces cannot split into multiple test/grep arguments
    eval "$2=0"
    if [ ! -f "$1" ];
    then
        # curl produced no output file at all (headers only)
        eval "$2=1"
    elif grep '404 Not Found' "$1" > /dev/null
    then
        # the server answered, but with a 404 body: discard it
        rm -f "$1"
        eval "$2=1"
    fi
}


# check for root user ($UID is provided by bash; quoted for safety)
if [ "$UID" -ne 0 ]
then
   echo "You must run this program with root permissions..."
   error
fi

# clean up in case this script was run before and failed
# ('&>' is a bashism; the shebang is /bin/sh, so use portable redirection)
rm -rf /tmp/stork > /dev/null 2>&1

# create /tmp/stork directory
mkdir /tmp/stork
if [ $? -ne 0 ]
then
   echo
   echo "Could not create the /tmp/stork directory..."
   error
fi

# export our root directory to Stork so the stork slice can serve us
echo "arizona_stork2" > /.exportdir
if [ $? -ne 0 ]
then
   echo
   echo "Could not create the /.exportdir file..."
   error
fi
 
# tell stork that we want to be served: determine our slice name and
# fetch our bind script from the local stork server on port 648
if [ -f /etc/slicename ]
then
   SLICENAME=`cat /etc/slicename`
else
   SLICENAME=$USER
fi
wget -O "/tmp/stork/$SLICENAME" "http://localhost:648/$SLICENAME\$bindscript"
WGET_STATUS=$?

# verify that the download was successful; the wget status is saved
# explicitly instead of relying on $? still holding it when the
# compound test below runs
if [ ! -f "/tmp/stork/$SLICENAME" -o $WGET_STATUS -ne 0 ]
then
   echo
   echo "Stork doesn't seem to be running on this node..."
   error
fi

# block until the stork slice drops its go-ahead marker file
echo "Waiting for Stork to accept our binding..."
until [ -f /tmp/stork/stork_says_go ]
do
   sleep 1
done

# change PWD to the /tmp/stork directory
if ! cd /tmp/stork
then
   echo
   echo "Could not access the /tmp/stork directory..."
   error
fi

# confirm that packages to be installed actually exist: with no *.rpm
# files present the glob stays unexpanded and a literal '*' is echoed
if echo *.rpm | grep '*' > /dev/null
then
   echo
   echo "Error: Stork package download failed..."
   error
fi

# remove Stork packages and files
echo
echo "Removing Stork files..."

# build a list of installed package names from the freshly downloaded
# *.rpm files; files rpm cannot read are silently skipped
packages=""
for filename in *.rpm
do
  # convert filename to a package name
  pack=`rpm -qp --qf "%{NAME}\n" "$filename"`
  if [ $? -eq 0 ]
  then
    packages="$packages $pack"
  fi
done

# remove old Stork packages; $packages is deliberately unquoted so each
# name becomes its own argument ('&>' is a bashism -- shebang is /bin/sh)
rpm -e $packages > /dev/null 2>&1

# remove anything left in /usr/local/stork/bin
rm -rf /usr/local/stork/bin/* > /dev/null 2>&1

# install Stork packages
echo
echo "Installing packages..."

# collect every downloaded rpm into a single argument list
rpm_args=""
for rpm_file in *.rpm
do
  rpm_args="$rpm_args $rpm_file"
done

# install the new stork packages in one rpm transaction
# (deliberately unquoted: each file name must be its own argument)
rpm -i $rpm_args

# report package installation errors
if [ $? -ne "0" ]
then
  echo "Warning: Possible error installing Stork packages..."
fi

# restore original PWD (quoted in case the path contains spaces)
cd "$OLDPWD"

# clean up temporary files ('&>' is a bashism; the shebang is /bin/sh)
rm -rf /tmp/stork > /dev/null 2>&1

# SEE TO-DO 1
#create the equifax certificate to use for curl
#createCertificate

# TO-DO 1
# implement the below in the beginning of stork.py
#attempt to download the users public key from the repository
#downloadNR06 "user-upload/pubkeys/$SLICENAME.publickey" "/usr/local/stork/var/$SLICENAME.publickey" RET

#if [ $RET -ne 0 ];
#then
#   echo
#   echo "Could not fetch your public key from the repository."
#   echo "If you want to upload one for the next time you run"
#   echo "the initscript please visit"
#   echo "http://nr06.cs.arizona.edu/testphp/upload.php"
#   echo
#fi

#attempt to download the users stork.conf file from the repository
#downloadNR06 "user-upload/conf/$SLICENAME.stork.conf" "/usr/local/stork/etc/stork.conf.users" RET

#if [ $RET -ne 0 ];
#then
#   echo
#   echo "Could not fetch your stork.conf file from the repository."
#   echo "If you want to upload one for the next time you run"
#   echo "the initscript please visit"
#   echo "http://nr06.cs.arizona.edu/testphp/upload.php"
#   echo "Stork will work without a configuration file but to make one"
#   echo "please place a file named stork.conf in /usr/local/stork/etc"
#   echo "refer to the manual for more directions or email:"
#   echo "stork-support@cs.arizona.edu for additional assistance."
#   echo
#fi

#dont need to overwrite the default conf file
#because it should be fixed in the new rpms
#overWriteConf

# run stork to update keyfiles and download package lists; stork's own
# exit status decides whether the binding succeeded
echo
echo "Attempting to communicate with stork..."
if stork 
then
   # success path: point the user at the built-in help and project site
   echo
   echo "Congratulations, you have successfully bound to stork!"
   echo
   echo "For help, you may type stork --help"
   echo
   #echo "There is also a storkquery command that will provide information"
   #echo "about packages in the repository."
   echo
   echo "For more help, visit the stork project online at"
   echo "http://www.cs.arizona.edu/stork/.  Please contact"
   echo "stork-support@cs.arizona.edu for additional assistance." 
   #rm -f $CERT > /dev/null
else
   # stork itself failed: surface a support address and propagate failure
   echo
   echo "An error occurred during install finalization...  Please contact"
   echo "stork-support@cs.arizona.edu for assistance."
   #rm -f $CERT > /dev/null
   exit 1
fi

# done
exit 0
', 'name': 'arizona_stork_2', 'encoding': 'base64'}, {'initscript_id': 9, 'script': 'IyEvYmluL2Jhc2gNCmNkIC8NCnJtIC1mIHN0YXJ0X3B1cnBsZQ0Kd2dldCBodHRwOi8vd3d3LmNzLnByaW5jZXRvbi5lZHUvfmRlaXNlbnN0L3B1cnBsZS9zdGFydF9wdXJwbGUNCmNobW9kIDc1NSBzdGFydF9wdXJwbGUNCnN1IHByaW5jZXRvbl9wdXJwbGUgLWMgJy4vc3RhcnRfcHVycGxlJw0K', 'name': 'princeton_purple', 'encoding': 'base64'}, {'initscript_id': 6, 'script': 'IyEgL2Jpbi9zaA0KDQojIHNhdmUgb3JpZ2luYWwgUFdEDQpPTERQV0Q9JFBXRA0KDQojIGVycm9yIHJlcG9ydGluZyBmdW5jdGlvbg0KZXJyb3IoKQ0Kew0KICAgZWNobw0KICAgZWNobyAiUGxlYXNlIEUtbWFpbCBzdG9yay1zdXBwb3J0QGNzLmFyaXpvbmEuZWR1IGlmIHlvdSBiZWxpZXZlIHlvdSBoYXZlIiANCiAgIGVjaG8gInJlY2VpdmVkIHRoaXMgbWVzc2FnZSBpbiBlcnJvci4iDQoNCiAgICMgcmVzdG9yZSBvcmlnaW5hbCBQV0QNCiAgIGNkICRPTERQV0QNCiAgIGV4aXQgMQ0KfQ0KDQojIGNoZWNrIGZvciByb290IHVzZXINCmlmIFsgJFVJRCAtbmUgIjAiIF0NCnRoZW4NCiAgIGVjaG8gJ1lvdSBtdXN0IGJlIHJvb3QgdG8gcnVuIHRoaXMgcHJvZ3JhbS4uLicNCiAgIGVycm9yDQpmaSAgIA0KIA0KIyBDbGVhbiB1cCBpbiBjYXNlIEkgcmFuIHRoaXMgYmVmb3JlDQpybSAtZiAvdG1wL3N0b3JrKiA+IC9kZXYvbnVsbCAyPiYxDQoNCiMgRmlyc3Qgb2YgYWxsIGV4cG9ydCBvdXIgcm9vdCBkaXJlY3RvcnkgdG8gU3RvcmsNCmVjaG8gImFyaXpvbmFfc3RvcmsiID4gLy5leHBvcnRkaXINCiANCiMgTm93IHRlbGwgc3RvcmsgdGhhdCB3ZSB3YW50IHRvIGJlIHNlcnZlZA0KaWYgWyAtZiAvZXRjL3NsaWNlbmFtZSBdDQp0aGVuDQogICBTTElDRU5BTUU9YGNhdCAvZXRjL3NsaWNlbmFtZWANCmVsc2UgDQogICBTTElDRU5BTUU9JFVTRVINCmZpDQoNCndnZXQgaHR0cDovL2xvY2FsaG9zdDo2NDAvJFNMSUNFTkFNRQ0KDQojIGNoZWNrIHRvIG1ha2Ugc3VyZSB0aGUgZG93bmxvYWQgd2FzIHN1Y2Nlc3NmdWwNCmlmIFsgISAtZiAkU0xJQ0VOQU1FIC1vICQ/IC1uZSAwIF0NCnRoZW4NCiAgIGVjaG8NCiAgIGVjaG8gIlN0b3JrIGRvZXNuJ3Qgc2VlbSB0byBiZSBydW5uaW5nIG9uIHRoaXMgbm9kZS4uLiINCiAgIGVycm9yDQpmaQ0KDQojIHdhaXQgZm9yIHN0b3JrIHNsaWNlIA0KZWNobyAiV2FpdGluZyBmb3IgU3RvcmsgdG8gYWNjZXB0IG91ciBiaW5kaW5nLi4uIg0Kd2hpbGUgWyAhIC1mIC90bXAvc3Rvcmtfc2F5c19nbyBdDQpkbw0KICAgc2xlZXAgMQ0KZG9uZQ0KDQojIGNoYW5nZSBQV0QgdG8gdGhlIC90bXAgZGlyZWN0b3J5IA0KY2QgL3RtcA0KaWYgWyAkPyAtbmUgIjAiIF0NCnRoZW4NCiAgIGVjaG8NCiAgIGVjaG8gIkNvdWxkIG5vdCBhY2Nlc3MgdGhlIC90bXAgZGlyZWN0b3J5Li4uIg0KICAgZXJyb3INC
mZpDQoNCiMgY29uZmlybSB0aGF0IHBhY2thZ2VzIHRvIGJlIGluc3RhbGxlZCBhY3R1YWxseSBleGlzdA0KaWYgZWNobyAqLnJwbSB8IGdyZXAgJyonID4gL2Rldi9udWxsDQp0aGVuDQogICBlY2hvDQogICBlY2hvICJFcnJvcjogU3RvcmsgcGFja2FnZSBkb3dubG9hZCBmYWlsZWQuLi4iDQogICBlcnJvcg0KZmkNCg0KIyBpbnN0YWxsIFN0b3JrIHBhY2thZ2VzDQplY2hvICJJbnN0YWxsaW5nIHBhY2thZ2VzLi4uIiANCmZvciBwYWNrIGluICoucnBtDQpkbw0KICAgIyByZW1vdmUgdGhlIG9sZCBzdG9yayBwYWNrYWdlLCBpZiBhbnkNCiAgIHJwbSAtZSBgcnBtIC1xcCAtLXFmICIle05BTUV9XG4iICRwYWNrYCA+IC9kZXYvbnVsbCAyPiYxDQoNCiAgICMgcmVtb3ZlIGFueXRoaW5nIGxlZnQgaW4gL3Vzci9sb2NhbC9zdG9yay9iaW4NCiAgIHJtIC1yZiAvdXNyL2xvY2FsL3N0b3JrL2Jpbi8qID4gL2Rldi9udWxsIDI+JjENCg0KICAgIyBpbnN0YWxsIHRoZSBuZXcgc3RvcmsgcGFja2FnZQ0KICAgcnBtIC1pICRwYWNrDQoNCiAgICMgcmVwb3J0IHBhY2thZ2UgaW5zdGFsbGF0aW9uIGVycm9ycw0KICAgaWYgWyAkPyAtbmUgIjAiIF0NCiAgIHRoZW4NCiAgICAgZWNobyAiV2FybmluZzogUG9zc2libGUgZXJyb3IgaW5zdGFsbGluZyBTdG9yayBwYWNrYWdlOiAkcGFjay4uLiINCiAgIGZpDQpkb25lDQoNCiMgcmVzdG9yZSBvcmlnaW5hbCBQV0QNCmNkICRPTERQV0QNCg0KIyBjbGVhbiB1cCB0ZW1wb3JhcnkgZmlsZXMNCnJtIC1mIC90bXAvc3RvcmsqID4gL2Rldi9udWxsIDI+JjENCnJtICRTTElDRU5BTUUqIA0KDQojIHJ1biBzdG9yayB0byB1cGRhdGUga2V5ZmlsZXMgYW5kIGRvd25sb2FkIHBhY2thZ2UgbGlzdHMNCmVjaG8gIkF0dGVtcHRpbmcgdG8gY29tbXVuaWNhdGUgd2l0aCBzdG9yay4uLiINCmlmIHN0b3JrIA0KdGhlbg0KICAgZWNobw0KICAgZWNobyAiQ29uZ3JhdHVsYXRpb25zLCB5b3UgaGF2ZSBzdWNjZXNzZnVsbHkgYm91bmQgdG8gc3RvcmshIg0KICAgZWNobw0KICAgZWNobyAiRm9yIGhlbHAsIHlvdSBtYXkgdHlwZSBzdG9yayAtLWhlbHAgIg0KICAgZWNobw0KICAgZWNobyAiVGhlcmUgaXMgYWxzbyBhIHN0b3JrcXVlcnkgY29tbWFuZCB0aGF0IHdpbGwgcHJvdmlkZSBpbmZvcm1hdGlvbiINCiAgIGVjaG8gImFib3V0IHBhY2thZ2VzIGluIHRoZSByZXBvc2l0b3J5LiINCiAgIGVjaG8NCiAgIGVjaG8gIkZvciBtb3JlIGhlbHAsIHZpc2l0IHRoZSBzdG9yayBwcm9qZWN0IG9ubGluZSBhdCINCiAgIGVjaG8gImh0dHA6Ly93d3cuY3MuYXJpem9uYS5lZHUvc3RvcmsvLiAgUGxlYXNlIGNvbnRhY3QiDQogICBlY2hvICJzdG9yay1zdXBwb3J0QGNzLmFyaXpvbmEuZWR1IGZvciBhZGRpdGlvbmFsIGFzc2lzdGFuY2UuIiANCmVsc2UNCiAgIGVjaG8NCiAgIGVjaG8gIkFuIGVycm9yIG9jY3VycmVkIGR1cmluZyBpbnN0YWxsIGZpbmFsaXphdGlvbi4uLiAgUGxlYXNlIGNvbnRhY3QiDQogICBlY2hvICJzdG9yay1zd
XBwb3J0QGNzLmFyaXpvbmEuZWR1IGZvciBhc3Npc3RhbmNlLiINCiAgIGV4aXQgMQ0KZmkNCg0KIw0KIyBIZWxsbyBXb3JsZCBkZW1vIGNvZGUNCiMNCg0KIyBQdWJsaWMga2V5IGZvciB0aGlzIGRlbW8NCmNhdCA+L3Vzci9sb2NhbC9zdG9yay92YXIva2V5cy9oZWxsby5wdWJsaWNrZXkgPDwiRU9GIg0KLS0tLS1CRUdJTiBQVUJMSUMgS0VZLS0tLS0NCk1Gd3dEUVlKS29aSWh2Y05BUUVCQlFBRFN3QXdTQUpCQU1XcVE3K2VxQVljNlRPSUJPbkJyRnZqYjlnRVViaWgNCkkxd0Nyeld4a09aa01BcXFmY1RuMW9tcCtLMGd0cUtBK3VaNEIzRGlQRXI0Q0V0Myt5MmJlMGtDQXdFQUFRPT0NCi0tLS0tRU5EIFBVQkxJQyBLRVktLS0tLQ0KRU9GDQpzZWQgLWkgLWUgJ3MvXnVzZXJuYW1lLiovdXNlcm5hbWUgPSBoZWxsby8nIC91c3IvbG9jYWwvc3RvcmsvZXRjL3N0b3JrLmNvbmYNCg0KIyBJbnN0YWxsIFJQTQ0Kc3RvcmsgdXBncmFkZSBoZWxsbw0KDQojIGVuZA0KZXhpdCAwDQo=', 'name': 'princeton_hello_stork', 'encoding': 'base64'}, {'initscript_id': 10, 'script': 'IyEvYmluL2Jhc2gNCg0KIyBJbml0IHNjcmlwdCBmb3IgdGhlIFBsYW5ldExhYiAiSGVsbG8gV29ybGQiIGRlbW8gdXNpbmcgR29vZ2xlIEVhcnRoLg0KIyBJbnN0YWxscyBhIGNyb250YWIgZW50cnkgb24gdGhlIG5vZGUgdGhhdCBwaG9uZXMgaG9tZSB0byB0aGUgc2VydmVyDQojIGV2ZXJ5IHRocmVlIG1pbnV0ZXMuDQoNClNFUlZFUj0xMjguMTEyLjEzOS43Mzo4MDQyCQkjIHBsYW5ldGxhYi0zLmNzLnByaW5jZXRvbi5lZHUNCg0KL3Vzci9iaW4vY3VybCAtcyBodHRwOi8vJFNFUlZFUi8NCmVjaG8gIiovNSAqICogKiAqIC91c3IvYmluL2N1cmwgLXMgaHR0cDovLyRTRVJWRVIvIiB8IGNyb250YWIgLQ0KL3NiaW4vY2hrY29uZmlnIGNyb25kIG9uDQo=', 'name': 'princeton_hello', 'encoding': 'base64'}]]) - -# Convert plc_initscript.initscript_id to raw initscript attribute -for slice_attribute in GetSliceTags({'name': 'plc_initscript'}): - id = slice_attribute['slice_attribute_id'] - slice_id = slice_attribute['slice_id'] - initscript_id = int(slice_attribute['value']) - - # Delete old attribute - DeleteSliceTag(id) - - if initscript_id not in initscripts: - print "Warning: Missing initscript %d" % initscript_id - continue - - initscript = base64.b64decode(initscripts[initscript_id]['script']) - - # Add as initscript attribute - AddSliceTag(slice_id, 'initscript', initscript) - -# Add our custom yum.conf entries -conf_file_id = AddConfFile({ - 'enabled': True, - 'source': 
'PlanetLabConf/yum.conf.php?gpgcheck=1&alpha', - 'dest': '/etc/yum.conf', - 'file_permissions': '644', - 'file_owner': 'root', - 'file_group': 'root', - 'preinstall_cmd': '', - 'postinstall_cmd': '', - 'error_cmd': '', - 'ignore_cmd_errors': False, - 'always_update': False}) -AddConfFileToNodeGroup(conf_file_id, 'Alpha') - -conf_file_id = AddConfFile({ - 'enabled': True, - 'source': 'PlanetLabConf/yum.conf.php?gpgcheck=1&beta', - 'dest': '/etc/yum.conf', - 'file_permissions': '644', - 'file_owner': 'root', - 'file_group': 'root', - 'preinstall_cmd': '', - 'postinstall_cmd': '', - 'error_cmd': '', - 'ignore_cmd_errors': False, - 'always_update': False}) -AddConfFileToNodeGroup(conf_file_id, 'Beta') - -conf_file_id = AddConfFile({ - 'enabled': True, - 'source': 'PlanetLabConf/yum.conf.php?gpgcheck=1&rollout', - 'dest': '/etc/yum.conf', - 'file_permissions': '644', - 'file_owner': 'root', - 'file_group': 'root', - 'preinstall_cmd': '', - 'postinstall_cmd': '', - 'error_cmd': '', - 'ignore_cmd_errors': False, - 'always_update': False}) -AddConfFileToNodeGroup(conf_file_id, 'Rollout') - -# Add OneLab as a peer -onelab = {'peername': u'OneLab', 'peer_url': u'https://onelab-plc.inria.fr/PLCAPI/', 'key': u'-----BEGIN PGP PUBLIC KEY BLOCK-----\nVersion: GnuPG v1.4.5 
(GNU/Linux)\n\nmQGiBEW0kJMRBACaTlrW0eYlQwkzRuMFfEYMwyqBT9Bm6R4g68SJ5GdjCRu3XCnd\nGTGCFF4ewOu6IcUmZDv39eqxShBWyx+JqBogYPGNvPrj07jXXKaSBCM7TPk+9kMW\nPziIxSClvO15XaPKv89c6kFaEBe0z1xsoMB/TNoLmhFUxmc24O7JnEqmYwCgjzIS\nHP7u9KIOYk1ZlTdOtwyRxVkD/1uYbPzD0Qigf8uF9ADzx7I4F1ATd2ezYq0EfzhD\nTDa15FPWwA7jm+Mye//ovT01Ju6JQtCU4N9wRsV2Yy2tWcWFZiYt+BISPVS0lJDx\nQ2Cd2+kEWyl9ByL9/ACHmCUz0OOaz9j1x+GpJLArjUdZSJOs68kPw90F62mrLHfg\nYCHpA/0ZcdJQG9QYNZ67KMFqNPho+uRww5/7kxQ4wkSyP7EK3QUVgXG5OWZ/1mPZ\njon9N04nnjrL9qoQv7m04ih3rmqyGy1MsicNCoys0RNh1eavPdAsXD1ZEXnWPA7z\naC37hxUaRPP3hH+1ifjPpAWQX1E89MK2y2zQpZipvEOAO2Lw8LRCT25lTGFiIENl\nbnRyYWwgKGh0dHA6Ly9vbmVsYWItcGxjLmlucmlhLmZyLykgPHN1cHBvcnRAb25l\nLWxhYi5vcmc+iGAEExECACAFAkW0kJMCGyMGCwkIBwMCBBUCCAMEFgIDAQIeAQIX\ngAAKCRBuu7E0vzFd9fvbAJ9QB2neTSbAN5HuoigIbuKzTUCTjQCeM/3h7/OmjD+z\n6yXtWD4Fzyfr7fSIYAQTEQIAIAUCRbibbAIbIwYLCQgHAwIEFQIIAwQWAgMBAh4B\nAheAAAoJEG67sTS/MV31w3AAn2t6qb94HIPmqCoD/ptK34Dv+VW0AJ4782ffPPnk\nbVXHU/Sx31QCoFmj34hgBBMRAgAgBQJFtJJBAhsjBgsJCAcDAgQVAggDBBYCAwEC\nHgECF4AACgkQbruxNL8xXfU5UQCeKqXWeNzTqdMqj/qHPkp1JCb+isEAn2AzDnde\nITF0aYd02RAKsU4sKePEtEJPbmVMYWIgQ2VudHJhbCAoaHR0cDovL29uZWxhYi1w\nbGMuaW5yaWEuZnIvKSA8c3VwcG9ydEBvbmUtbGFiLm9yZz6IYAQTEQIAIAUCRbi2\npgIbIwYLCQgHAwIEFQIIAwQWAgMBAh4BAheAAAoJEG67sTS/MV31W4AAn0rW5yjR\n2a8jPP/V44gw1JhqnE8jAKCMAEh0nPjvle5oLEGectC3Es9Pm7kBDQRFtJCUEAQA\nhp38fNVy/aJiPg2lUKKnA6KjrRm3LxD66N8MSWfxGCIYzQRJHhmZWnS+m1DDOjdu\nFG9FM6QrsCRRcEQuvhKI2ORFfK75D24lj4QaXzw7vfBbAibTaDsYa0b5LxfR5pGj\nYPCQ5LrRex+Ws3DrB3acJE5/XnYJZ+rUO1ZJlm00FTMAAwUD/Ai4ZUunVB8F0VqS\nhJgDYQF08/OlAnDAcbL//P5dtXdztUNSgXZM4wW/XFnDvAsBuRnbfkT/3BeptM9L\neEbdrMi4eThLstSl13ITOsZbSL3i/2OO9sPAxupWzRWOXcQILpqR2YMRK1EapO+M\nNhjrgxU9JpMXz24FESocczSyywDXiEkEGBECAAkFAkW0kJQCGwwACgkQbruxNL8x\nXfXGxQCfZqzSqinohParWaHv+4XNoIz2B7IAn2Ge0O5wjYZeV/joulkTXfPKm7Iu\n=SsZg\n-----END PGP PUBLIC KEY BLOCK-----\n', 'cacert': u'Certificate:\r\n Data:\r\n Version: 3 (0x2)\r\n Serial Number: 67109883 (0x40003fb)\r\n Signature Algorithm: sha1WithRSAEncryption\r\n Issuer: 
C=US, O=GTE Corporation, OU=GTE CyberTrust Solutions, Inc., CN=G\r\n Validity\r\n Not Before: Mar 14 20:30:00 2006 GMT\r\n Not After : Mar 14 23:59:00 2013 GMT\r\n Subject: C=BE, O=Cybertrust, OU=Educational CA, CN=Cybertrust Educationa\r\n Subject Public Key Info:\r\n Public Key Algorithm: rsaEncryption\r\n RSA Public Key: (2048 bit)\r\n Modulus (2048 bit):\r\n 00:95:22:a1:10:1d:4a:46:60:6e:05:91:9b:df:83:\r\n c2:ed:12:b2:5a:7c:f8:ab:e1:f8:50:5c:28:2c:7e:\r\n 7e:00:38:93:b0:8b:4a:f1:c2:4c:3c:10:2c:3c:ef:\r\n b0:ec:a1:69:2f:b9:fc:cc:08:14:6b:8d:4f:18:f3:\r\n 83:d2:fa:a9:37:08:20:aa:5c:aa:80:60:a2:d5:a5:\r\n 22:00:cf:5a:e5:b4:97:df:ba:1e:be:5c:8e:17:19:\r\n 66:fd:af:9f:7c:7b:89:b2:0e:24:d8:c7:ab:63:c4:\r\n 95:32:8d:48:e6:63:59:7d:04:b8:33:a8:bd:d7:5d:\r\n 64:bc:63:b5:f7:4d:28:fd:f9:06:72:31:5c:ba:45:\r\n 94:65:a3:d2:b4:58:ec:3b:61:58:44:a3:2f:62:b3:\r\n 9b:80:b4:82:fd:d5:c7:cc:51:25:e5:95:3f:47:2f:\r\n 30:7b:ac:c8:78:6e:e2:e1:6d:27:eb:3d:cc:01:82:\r\n e8:35:77:8d:ab:58:bb:55:d1:d5:a4:81:56:8d:1c:\r\n d0:14:b1:b0:06:de:a0:91:22:f3:f0:a8:34:17:47:\r\n c6:e0:3e:f6:0c:5a:ac:7e:50:4b:cd:e1:69:6e:06:\r\n fc:06:7e:6a:4d:b4:95:99:a0:59:5c:35:66:ec:d9:\r\n 49:d4:17:e0:60:b0:5d:a5:d7:1a:e2:2a:6e:66:f2:\r\n af:1d\r\n Exponent: 65537 (0x10001)\r\n X509v3 extensions:\r\n X509v3 CRL Distribution Points: \r\n URI:http://www.public-trust.com/cgi-bin/CRL/2018/cdp.crl\r\n\r\n X509v3 Subject Key Identifier: \r\n 65:65:A3:3D:D7:3B:11:A3:0A:07:25:37:C9:42:4A:5B:76:77:50:E1\r\n X509v3 Certificate Policies: \r\n Policy: 1.3.6.1.4.1.6334.1.0\r\n CPS: http://www.public-trust.com/CPS/OmniRoot.html\r\n\r\n X509v3 Authority Key Identifier: \r\n DirName:/C=US/O=GTE Corporation/OU=GTE CyberTrust Solutions, Inc\r\n serial:01:A5\r\n\r\n X509v3 Key Usage: critical\r\n Certificate Sign, CRL Sign\r\n X509v3 Basic Constraints: critical\r\n CA:TRUE, pathlen:0\r\n Signature Algorithm: sha1WithRSAEncryption\r\n 43:b3:45:83:54:71:c4:1f:dc:b2:3c:6b:4e:bf:26:f2:4e:f2:\r\n 
ad:9a:5b:fa:86:37:88:e8:14:6c:41:18:42:5f:ef:65:3e:eb:\r\n 03:77:a0:b7:9e:75:7a:51:7c:bb:15:5b:b8:af:91:a0:34:92:\r\n 53:ed:7f:2a:49:84:ac:b9:80:4b:b5:c7:b2:23:22:fb:eb:d8:\r\n fb:6e:c9:3c:f3:d2:d1:bb:be:c9:1c:ff:6d:01:db:69:80:0e:\r\n 99:a5:ea:9e:7b:97:98:8f:b7:cf:22:9c:b3:b8:5d:e5:a9:33:\r\n 17:74:c6:97:37:0f:b4:e9:26:82:5f:61:0b:3f:1e:3d:64:e9:\r\n 2b:9b\r\n-----BEGIN CERTIFICATE-----\r\nMIIEQjCCA6ugAwIBAgIEBAAD+zANBgkqhkiG9w0BAQUFADB1MQswCQYDVQQGEwJV\r\nUzEYMBYGA1UEChMPR1RFIENvcnBvcmF0aW9uMScwJQYDVQQLEx5HVEUgQ3liZXJU\r\ncnVzdCBTb2x1dGlvbnMsIEluYy4xIzAhBgNVBAMTGkdURSBDeWJlclRydXN0IEds\r\nb2JhbCBSb290MB4XDTA2MDMxNDIwMzAwMFoXDTEzMDMxNDIzNTkwMFowXzELMAkG\r\nA1UEBhMCQkUxEzARBgNVBAoTCkN5YmVydHJ1c3QxFzAVBgNVBAsTDkVkdWNhdGlv\r\nbmFsIENBMSIwIAYDVQQDExlDeWJlcnRydXN0IEVkdWNhdGlvbmFsIENBMIIBIjAN\r\nBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAlSKhEB1KRmBuBZGb34PC7RKyWnz4\r\nq+H4UFwoLH5+ADiTsItK8cJMPBAsPO+w7KFpL7n8zAgUa41PGPOD0vqpNwggqlyq\r\ngGCi1aUiAM9a5bSX37oevlyOFxlm/a+ffHuJsg4k2MerY8SVMo1I5mNZfQS4M6i9\r\n111kvGO1900o/fkGcjFcukWUZaPStFjsO2FYRKMvYrObgLSC/dXHzFEl5ZU/Ry8w\r\ne6zIeG7i4W0n6z3MAYLoNXeNq1i7VdHVpIFWjRzQFLGwBt6gkSLz8Kg0F0fG4D72\r\nDFqsflBLzeFpbgb8Bn5qTbSVmaBZXDVm7NlJ1BfgYLBdpdca4ipuZvKvHQIDAQAB\r\no4IBbzCCAWswRQYDVR0fBD4wPDA6oDigNoY0aHR0cDovL3d3dy5wdWJsaWMtdHJ1\r\nc3QuY29tL2NnaS1iaW4vQ1JMLzIwMTgvY2RwLmNybDAdBgNVHQ4EFgQUZWWjPdc7\r\nEaMKByU3yUJKW3Z3UOEwUwYDVR0gBEwwSjBIBgkrBgEEAbE+AQAwOzA5BggrBgEF\r\nBQcCARYtaHR0cDovL3d3dy5wdWJsaWMtdHJ1c3QuY29tL0NQUy9PbW5pUm9vdC5o\r\ndG1sMIGJBgNVHSMEgYEwf6F5pHcwdTELMAkGA1UEBhMCVVMxGDAWBgNVBAoTD0dU\r\nRSBDb3Jwb3JhdGlvbjEnMCUGA1UECxMeR1RFIEN5YmVyVHJ1c3QgU29sdXRpb25z\r\nLCBJbmMuMSMwIQYDVQQDExpHVEUgQ3liZXJUcnVzdCBHbG9iYWwgUm9vdIICAaUw\r\nDgYDVR0PAQH/BAQDAgEGMBIGA1UdEwEB/wQIMAYBAf8CAQAwDQYJKoZIhvcNAQEF\r\nBQADgYEAQ7NFg1RxxB/csjxrTr8m8k7yrZpb+oY3iOgUbEEYQl/vZT7rA3egt551\r\nelF8uxVbuK+RoDSSU+1/KkmErLmAS7XHsiMi++vY+27JPPPS0bu+yRz/bQHbaYAO\r\nmaXqnnuXmI+3zyKcs7hd5akzF3TGlzcPtOkmgl9hCz8ePWTpK5s=\r\n-----END CERTIFICATE-----\r\nCertificate:\r\n 
Data:\r\n Version: 1 (0x0)\r\n Serial Number: 421 (0x1a5)\r\n Signature Algorithm: md5WithRSAEncryption\r\n Issuer: C=US, O=GTE Corporation, OU=GTE CyberTrust Solutions, Inc., CN=GTE CyberTrust Global Root\r\n Validity\r\n Not Before: Aug 13 00:29:00 1998 GMT\r\n Not After : Aug 13 23:59:00 2018 GMT\r\n Subject: C=US, O=GTE Corporation, OU=GTE CyberTrust Solutions, Inc., CN=GTE CyberTrust Global Root\r\n Subject Public Key Info:\r\n Public Key Algorithm: rsaEncryption\r\n RSA Public Key: (1024 bit)\r\n Modulus (1024 bit):\r\n 00:95:0f:a0:b6:f0:50:9c:e8:7a:c7:88:cd:dd:17:\r\n 0e:2e:b0:94:d0:1b:3d:0e:f6:94:c0:8a:94:c7:06:\r\n c8:90:97:c8:b8:64:1a:7a:7e:6c:3c:53:e1:37:28:\r\n 73:60:7f:b2:97:53:07:9f:53:f9:6d:58:94:d2:af:\r\n 8d:6d:88:67:80:e6:ed:b2:95:cf:72:31:ca:a5:1c:\r\n 72:ba:5c:02:e7:64:42:e7:f9:a9:2c:d6:3a:0d:ac:\r\n 8d:42:aa:24:01:39:e6:9c:3f:01:85:57:0d:58:87:\r\n 45:f8:d3:85:aa:93:69:26:85:70:48:80:3f:12:15:\r\n c7:79:b4:1f:05:2f:3b:62:99\r\n Exponent: 65537 (0x10001)\r\n Signature Algorithm: md5WithRSAEncryption\r\n 6d:eb:1b:09:e9:5e:d9:51:db:67:22:61:a4:2a:3c:48:77:e3:\r\n a0:7c:a6:de:73:a2:14:03:85:3d:fb:ab:0e:30:c5:83:16:33:\r\n 81:13:08:9e:7b:34:4e:df:40:c8:74:d7:b9:7d:dc:f4:76:55:\r\n 7d:9b:63:54:18:e9:f0:ea:f3:5c:b1:d9:8b:42:1e:b9:c0:95:\r\n 4e:ba:fa:d5:e2:7c:f5:68:61:bf:8e:ec:05:97:5f:5b:b0:d7:\r\n a3:85:34:c4:24:a7:0d:0f:95:93:ef:cb:94:d8:9e:1f:9d:5c:\r\n 85:6d:c7:aa:ae:4f:1f:22:b5:cd:95:ad:ba:a7:cc:f9:ab:0b:\r\n 7a:7f\r\n-----BEGIN 
CERTIFICATE-----\r\nMIICWjCCAcMCAgGlMA0GCSqGSIb3DQEBBAUAMHUxCzAJBgNVBAYTAlVTMRgwFgYD\r\nVQQKEw9HVEUgQ29ycG9yYXRpb24xJzAlBgNVBAsTHkdURSBDeWJlclRydXN0IFNv\r\nbHV0aW9ucywgSW5jLjEjMCEGA1UEAxMaR1RFIEN5YmVyVHJ1c3QgR2xvYmFsIFJv\r\nb3QwHhcNOTgwODEzMDAyOTAwWhcNMTgwODEzMjM1OTAwWjB1MQswCQYDVQQGEwJV\r\nUzEYMBYGA1UEChMPR1RFIENvcnBvcmF0aW9uMScwJQYDVQQLEx5HVEUgQ3liZXJU\r\ncnVzdCBTb2x1dGlvbnMsIEluYy4xIzAhBgNVBAMTGkdURSBDeWJlclRydXN0IEds\r\nb2JhbCBSb290MIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQCVD6C28FCc6HrH\r\niM3dFw4usJTQGz0O9pTAipTHBsiQl8i4ZBp6fmw8U+E3KHNgf7KXUwefU/ltWJTS\r\nr41tiGeA5u2ylc9yMcqlHHK6XALnZELn+aks1joNrI1CqiQBOeacPwGFVw1Yh0X4\r\n04Wqk2kmhXBIgD8SFcd5tB8FLztimQIDAQABMA0GCSqGSIb3DQEBBAUAA4GBAG3r\r\nGwnpXtlR22ciYaQqPEh346B8pt5zohQDhT37qw4wxYMWM4ETCJ57NE7fQMh017l9\r\n3PR2VX2bY1QY6fDq81yx2YtCHrnAlU66+tXifPVoYb+O7AWXX1uw16OFNMQkpw0P\r\nlZPvy5TYnh+dXIVtx6quTx8itc2VrbqnzPmrC3p/\r\n-----END CERTIFICATE-----\r\n'} - -AddPeer(onelab) diff --git a/tools/upgrade-db.py b/tools/upgrade-db.py deleted file mode 100755 index 4c9d1d5..0000000 --- a/tools/upgrade-db.py +++ /dev/null @@ -1,457 +0,0 @@ -#!/usr/bin/python -# -# Tool for upgrading/converting a db -# Requirements: -# 1) Databse Schema - schema for the new database you what to upgrade to -# 2) Config File - the config file that describes how to convert the db -# -# Notes: -# 1) Will attempt to convert the db defined in /etc/planetlab/plc_config -# 2) Does not automatically drop archived database. 
They must be removed -# manually - -import sys -import os -import getopt -import pgdb - -config = {} -config_file = "/etc/planetlab/plc_config" -execfile(config_file, config) -upgrade_config_file = "plcdb.3-4.conf" -schema_file = "planetlab4.sql" -temp_dir = "/tmp" - - -def usage(): - print "Usage: %s [OPTION] UPGRADE_CONFIG_FILE " % sys.argv[0] - print "Options:" - print " -s, --schema=FILE Upgraded Database Schema" - print " -t, --temp-dir=DIR Temp Directory" - print " --help This message" - sys.exit(1) - -try: - (opts, argv) = getopt.getopt(sys.argv[1:], - "s:d:", - ["schema=", - "temp-dir=", - "help"]) -except getopt.GetoptError, err: - print "Error: ", err.msg - usage() - -for (opt, optval) in opts: - if opt == "-s" or opt == "--schema": - schema_file = optval - elif opt == "-d" or opt == "--temp-dir": - temp_dir = optval - elif opt == "--help": - usage() -try: - upgrade_config_file = argv[0] -except IndexError: - print "Error: too few arguments" - usage() - -schema = {} -inserts = [] -schema_items_ordered = [] -sequences = {} -temp_tables = {} - - -# load conf file for this upgrade -try: - upgrade_config = {} - execfile(upgrade_config_file, upgrade_config) - upgrade_config.pop('__builtins__') - db_version_previous = upgrade_config['DB_VERSION_PREVIOUS'] - db_version_new = upgrade_config['DB_VERSION_NEW'] - -except IOError, fault: - print "Error: upgrade config file (%s) not found. Exiting" % \ - (fault) - sys.exit(1) -except KeyError, fault: - print "Error: %s not set in upgrade confing (%s). 
Exiting" % \ - (fault, upgrade_config_file) - sys.exit(1) - - - - -def connect(): - db = pgdb.connect(user = config['PLC_DB_USER'], - database = config['PLC_DB_NAME']) - return db - -def archive_db(database, archived_database): - - archive_db = " dropdb -U postgres %s > /dev/null 2>&1;" \ - " psql template1 postgres -qc " \ - " 'ALTER DATABASE %s RENAME TO %s;';" % \ - (archived_database, database, archived_database) - exit_status = os.system(archive_db) - if exit_status: - print "Error: unable to archive database. Upgrade failed" - sys.exit(1) - #print "Status: %s has been archived. now named %s" % (database, archived_database) - - -def encode_utf8(inputfile_name, outputfile_name): - # rewrite a iso-8859-1 encoded file in utf8 - try: - inputfile = open(inputfile_name, 'r') - outputfile = open(outputfile_name, 'w') - for line in inputfile: - if line.upper().find('SET CLIENT_ENCODING') > -1: - continue - outputfile.write(unicode(line, 'iso-8859-1').encode('utf8')) - inputfile.close() - outputfile.close() - except: - print 'error encoding file' - raise - -def create_item_from_schema(item_name): - - try: - (type, body_list) = schema[item_name] - exit_status = os.system('psql %s %s -qc "%s" > /dev/null 2>&1' % \ - (config['PLC_DB_NAME'], config['PLC_DB_USER'],"".join(body_list) ) ) - if exit_status: - raise Exception - except Exception, fault: - print 'Error: create %s failed. Check schema.' % item_name - sys.exit(1) - raise fault - - except KeyError: - print "Error: cannot create %s. 
definition not found in %s" % \ - (key, schema_file) - return False - -def fix_row(row, table_name, table_fields): - - if table_name in ['interfaces']: - # convert str bwlimit to bps int - bwlimit_index = table_fields.index('bwlimit') - if isinstance(row[bwlimit_index], int): - pass - elif row[bwlimit_index].find('mbit') > -1: - row[bwlimit_index] = int(row[bwlimit_index].split('mbit')[0]) \ - * 1000000 - elif row[bwlimit_index].find('kbit') > -1: - row[bwlimit_index] = int(row[bwlimit_index].split('kbit')[0]) \ - * 1000 - elif table_name in ['slice_attribute']: - # modify some invalid foreign keys - attribute_type_index = table_fields.index('attribute_type_id') - if row[attribute_type_index] == 10004: - row[attribute_type_index] = 10016 - elif row[attribute_type_index] == 10006: - row[attribute_type_index] = 10017 - elif row[attribute_type_index] in [10031, 10033]: - row[attribute_type_index] = 10037 - elif row[attribute_type_index] in [10034, 10035]: - row[attribute_type_index] = 10036 - elif table_name in ['slice_attribute_types']: - type_id_index = table_fields.index('attribute_type_id') - if row[type_id_index] in [10004, 10006, 10031, 10033, 10034, 10035]: - return None - return row - -def fix_table(table, table_name, table_fields): - if table_name in ['slice_attribute_types']: - # remove duplicate/redundant primary keys - type_id_index = table_fields.index('attribute_type_id') - for row in table: - if row[type_id_index] in [10004, 10006, 10031, 10033, 10034, 10035]: - table.remove(row) - return table - -def remove_temp_tables(): - # remove temp_tables - try: - for temp_table in temp_tables: - os.remove(temp_tables[temp_table]) - except: - raise - -def generate_temp_table(table_name, db): - cursor = db.cursor() - try: - # get upgrade directions - table_def = upgrade_config[table_name].replace('(', '').replace(')', '').split(',') - table_fields, old_fields, joins, wheres = [], [], set(), set() - for field in table_def: - field_parts = field.strip().split(':') - 
table_fields.append(field_parts[0]) - old_fields.append(field_parts[1]) - if field_parts[2:]: - joins.update(set(filter(lambda x: not x.find('=') > -1, field_parts[2:]))) - wheres.update(set(filter(lambda x: x.find('=') > -1, field_parts[2:]))) - - # get indices of fields that cannot be null - (type, body_list) = schema[table_name] - not_null_indices = [] - for field in table_fields: - for body_line in body_list: - if body_line.find(field) > -1 and \ - body_line.upper().find("NOT NULL") > -1: - not_null_indices.append(table_fields.index(field)) - # get index of primary key - primary_key_indices = [] - for body_line in body_list: - if body_line.find("PRIMARY KEY") > -1: - primary_key = body_line - for field in table_fields: - if primary_key.find(" "+field+" ") > -1: - primary_key_indices.append(table_fields.index(field)) - #break - - # get old data - get_old_data = "SELECT DISTINCT %s FROM %s" % \ - (", ".join(old_fields), old_fields[0].split(".")[0]) - for join in joins: - get_old_data = get_old_data + " INNER JOIN %s USING (%s) " % \ - (join.split('.')[0], join.split('.')[1]) - if wheres: - get_old_data = get_old_data + " WHERE " - for where in wheres: - get_old_data = get_old_data + " %s" % where - cursor.execute(get_old_data) - rows = cursor.fetchall() - - # write data to a temp file - temp_file_name = '%s/%s.tmp' % (temp_dir, table_name) - temp_file = open(temp_file_name, 'w') - for row in rows: - # attempt to make any necessary fixes to data - row = fix_row(row, table_name, table_fields) - # do not attempt to write null rows - if row == None: - continue - # do not attempt to write rows with null primary keys - if filter(lambda x: row[x] == None, primary_key_indices): - continue - for i in range(len(row)): - # convert nulls into something pg can understand - if row[i] == None: - if i in not_null_indices: - # XX doesnt work if column is int type - row[i] = "" - else: - row[i] = "\N" - if isinstance(row[i], int) or isinstance(row[i], float): - row[i] = 
str(row[i]) - # escape whatever can mess up the data format - if isinstance(row[i], str): - row[i] = row[i].replace('\t', '\\t') - row[i] = row[i].replace('\n', '\\n') - row[i] = row[i].replace('\r', '\\r') - data_row = "\t".join(row) - temp_file.write(data_row + "\n") - temp_file.write("\.\n") - temp_file.close() - temp_tables[table_name] = temp_file_name - - except KeyError: - #print "WARNING: cannot upgrade %s. upgrade def not found. skipping" % \ - # (table_name) - return False - except IndexError, fault: - print "Error: error found in upgrade config file. " \ - "check %s configuration. Aborting " % \ - (table_name) - sys.exit(1) - except: - print "Error: configuration for %s doesnt match db schema. " \ - " Aborting" % (table_name) - try: - db.rollback() - except: - pass - raise - - -# Connect to current db -db = connect() -cursor = db.cursor() - -# determin current db version -try: - cursor.execute("SELECT relname from pg_class where relname = 'plc_db_version'") - rows = cursor.fetchall() - if not rows: - print "Warning: current db has no version. Unable to validate config file." - else: - cursor.execute("SELECT version FROM plc_db_version") - rows = cursor.fetchall() - if not rows or not rows[0]: - print "Warning: current db has no version. Unable to validate config file." - elif rows[0][0] == db_version_new: - print "Status: Versions are the same. No upgrade necessary." 
- sys.exit() - elif not rows[0][0] == db_version_previous: - print "Stauts: DB_VERSION_PREVIOUS in config file (%s) does not" \ - " match current db version %d" % (upgrade_config_file, rows[0][0]) - sys.exit() - else: - print "STATUS: attempting upgrade from %d to %d" % \ - (db_version_previous, db_version_new) - - # check db encoding - sql = " SELECT pg_catalog.pg_encoding_to_char(d.encoding)" \ - " FROM pg_catalog.pg_database d " \ - " WHERE d.datname = '%s' " % config['PLC_DB_NAME'] - cursor.execute(sql) - rows = cursor.fetchall() - if rows[0][0] not in ['UTF8', 'UNICODE']: - print "WARNING: db encoding is not utf8. Attempting to encode" - db.close() - # generate db dump - dump_file = '%s/dump.sql' % (temp_dir) - dump_file_encoded = dump_file + ".utf8" - dump_cmd = 'pg_dump -i %s -U postgres -f %s > /dev/null 2>&1' % \ - (config['PLC_DB_NAME'], dump_file) - if os.system(dump_cmd): - print "ERROR: during db dump. Exiting." - sys.exit(1) - # encode dump to utf8 - print "Status: encoding database dump" - encode_utf8(dump_file, dump_file_encoded) - # archive original db - archive_db(config['PLC_DB_NAME'], config['PLC_DB_NAME']+'_sqlascii_archived') - # create a utf8 database and upload encoded data - recreate_cmd = 'createdb -U postgres -E UTF8 %s > /dev/null; ' \ - 'psql -a -U %s %s < %s > /dev/null 2>&1;' % \ - (config['PLC_DB_NAME'], config['PLC_DB_USER'], \ - config['PLC_DB_NAME'], dump_file_encoded) - print "Status: recreating database as utf8" - if os.system(recreate_cmd): - print "Error: database encoding failed. 
Aborting" - sys.exit(1) - - os.remove(dump_file_encoded) - os.remove(dump_file) -except: - raise - - -db = connect() -cursor = db.cursor() - -# parse the schema user wishes to upgrade to -try: - file = open(schema_file, 'r') - index = 0 - lines = file.readlines() - while index < len(lines): - line = lines[index] - if line.find("--") > -1: - line_parts = line.split("--") - line = line_parts[0] - # find all created objects - if line.startswith("CREATE"): - line_parts = line.split(" ") - if line_parts[1:3] == ['OR', 'REPLACE']: - line_parts = line_parts[2:] - item_type = line_parts[1] - item_name = line_parts[2] - schema_items_ordered.append(item_name) - if item_type in ['INDEX']: - schema[item_name] = (item_type, line) - - # functions, tables, views span over multiple lines - # handle differently than indexes - elif item_type in ['AGGREGATE', 'TABLE', 'VIEW']: - fields = [line] - while index < len(lines): - index = index + 1 - nextline =lines[index] - if nextline.find("--") > -1: - new_line_parts = nextline.split("--") - nextline = new_line_parts[0] - # look for any sequences - if item_type in ['TABLE'] and nextline.find('serial') > -1: - sequences[item_name] = nextline.strip().split()[0] - fields.append(nextline) - if nextline.find(";") >= 0: - break - schema[item_name] = (item_type, fields) - else: - print "Error: unknown type %s" % item_type - elif line.startswith("INSERT"): - inserts.append(line) - index = index + 1 - -except: - raise - -print "Status: generating temp tables" -# generate all temp tables -for key in schema_items_ordered: - (type, body_list) = schema[key] - if type == 'TABLE': - generate_temp_table(key, db) - -# disconenct from current database and archive it -cursor.close() -db.close() - -print "Status: archiving database" -archive_db(config['PLC_DB_NAME'], config['PLC_DB_NAME']+'_archived') -os.system('createdb -U postgres -E UTF8 %s > /dev/null; ' % config['PLC_DB_NAME']) - -print "Status: upgrading database" -# attempt to create and load all 
items from schema into temp db -try: - for key in schema_items_ordered: - (type, body_list) = schema[key] - create_item_from_schema(key) - if type == 'TABLE': - if upgrade_config.has_key(key): - # attempt to populate with temp table data - table_def = upgrade_config[key].replace('(', '').replace(')', '').split(',') - table_fields = [field.strip().split(':')[0] for field in table_def] - insert_cmd = "psql %s %s -c " \ - " 'COPY %s (%s) FROM stdin;' < %s " % \ - (config['PLC_DB_NAME'], config['PLC_DB_USER'], key, - ", ".join(table_fields), temp_tables[key] ) - exit_status = os.system(insert_cmd) - if exit_status: - print "Error: upgrade %s failed" % key - sys.exit(1) - # update the primary key sequence - if sequences.has_key(key): - sequence = key +"_"+ sequences[key] +"_seq" - update_seq = "psql %s %s -c " \ - " \"select setval('%s', max(%s)) FROM %s;\" > /dev/null" % \ - (config['PLC_DB_NAME'], config['PLC_DB_USER'], sequence, - sequences[key], key) - exit_status = os.system(update_seq) - if exit_status: - print "Error: sequence %s update failed" % sequence - sys.exit(1) - else: - # check if there are any insert stmts in schema for this table - print "Warning: %s has no temp data file. Unable to populate with old data" % key - for insert_stmt in inserts: - if insert_stmt.find(key) > -1: - insert_cmd = 'psql %s postgres -qc "%s;" > /dev/null 2>&1' % \ - (config['PLC_DB_NAME'], insert_stmt) - os.system(insert_cmd) -except: - print "Error: failed to populate db. Unarchiving original database and aborting" - undo_command = "dropdb -U postgres %s > /dev/null; psql template1 postgres -qc" \ - " 'ALTER DATABASE %s RENAME TO %s;'; > /dev/null" % \ - (config['PLC_DB_NAME'], config['PLC_DB_NAME']+'_archived', config['PLC_DB_NAME']) - os.system(undo_command) - #remove_temp_tables() - raise - -#remove_temp_tables() - -print "upgrade complete"