#!/usr/bin/python
#
-# Tool for upgrading a db based on db version #
+# Tool for upgrading/converting a db
+# Requirements:
+# 1) Database Schema - schema for the new database you want to upgrade to
+# 2) Config File - the config file that describes how to convert the db
+#
+# Notes:
+# 1) Will attempt to convert the db defined in /etc/planetlab/plc_config
+# 2) Does not automatically drop archived databases. They must be removed
+# manually
+
import sys
import os
import getopt
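+# parsed state: schema maps item name -> (type, body lines),
+# schema_items_ordered preserves creation order, sequences maps
+# table name -> its serial column, temp_tables maps table name -> temp data file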
schema = {}
inserts = []
schema_items_ordered = []
+sequences = {}
temp_tables = {}
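+# archive by renaming: drop any stale archive of the same name, then
+# rename the live db to the archive name (it is recreated empty later)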
archive_db = " dropdb -U postgres %s > /dev/null 2>&1;" \
" psql template1 postgres -qc " \
- " 'ALTER DATABASE %s RENAME TO %s;';" \
- " createdb -U postgres %s > /dev/null; " % \
- (archived_database, database, archived_database, database)
- exit_status = os.system(archive_db)
+ " 'ALTER DATABASE %s RENAME TO %s;';" % \
+ (archived_database, database, archived_database)
+ exit_status = os.system(archive_db)
if exit_status:
print "Error: unable to archive database. Upgrade failed"
sys.exit(1)
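+# re-encode the dump from iso-8859-1 to utf8 line by line, dropping any
+# SET CLIENT_ENCODING statements so the restore uses the new db's encoding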
inputfile = open(inputfile_name, 'r')
outputfile = open(outputfile_name, 'w')
for line in inputfile:
- outputfile.write(unicode(line, 'iso-8859-1').encode('utf8'))
+ if line.upper().find('SET CLIENT_ENCODING') > -1:
+ continue
+ outputfile.write(unicode(line, 'iso-8859-1').encode('utf8'))
inputfile.close()
outputfile.close()
except:
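+# patch individual rows as they move from the old schema to the new one;
+# returning None drops the row entirely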
def fix_row(row, table_name, table_fields):
- if table_name in ['nodenetworks']:
+ if table_name in ['interfaces']:
# convert str bwlimit to bps int
bwlimit_index = table_fields.index('bwlimit')
if isinstance(row[bwlimit_index], int):
row[attribute_type_index] = 10016
elif row[attribute_type_index] == 10006:
row[attribute_type_index] = 10017
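+ # remap attribute types 10031/10033 to 10037 and 10034/10035 to 10036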
+ elif row[attribute_type_index] in [10031, 10033]:
+ row[attribute_type_index] = 10037
+ elif row[attribute_type_index] in [10034, 10035]:
+ row[attribute_type_index] = 10036
elif table_name in ['slice_attribute_types']:
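+ # drop rows for the attribute types that were remapped away above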
type_id_index = table_fields.index('attribute_type_id')
- if row[type_id_index] in [10004, 10006]:
+ if row[type_id_index] in [10004, 10006, 10031, 10033, 10034, 10035]:
return None
return row
# remove duplicate/redundant primary keys
type_id_index = table_fields.index('attribute_type_id')
for row in table:
- if row[type_id_index] in [10004, 10006]:
+ if row[type_id_index] in [10004, 10006, 10031, 10033, 10034, 10035]:
table.remove(row)
return table
try:
# get upgrade directions
table_def = upgrade_config[table_name].replace('(', '').replace(')', '').split(',')
- table_fields, old_fields, required_joins = [], [], set()
+ table_fields, old_fields, joins, wheres = [], [], set(), set()
for field in table_def:
field_parts = field.strip().split(':')
table_fields.append(field_parts[0])
old_fields.append(field_parts[1])
- if field_parts[2:]:
- required_joins.update(set(field_parts[2:]))
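+ # extra parts of a field spec: entries containing '=' become WHERE
+ # conditions, anything else names a table.column to join on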
+ if field_parts[2:]:
+ joins.update(set([x for x in field_parts[2:] if '=' not in x]))
+ wheres.update(set([x for x in field_parts[2:] if '=' in x]))
# get indices of fields that cannot be null
(type, body_list) = schema[table_name]
if body_line.find(field) > -1 and \
body_line.upper().find("NOT NULL") > -1:
not_null_indices.append(table_fields.index(field))
-
# get index of primary key
primary_key_indices = []
for body_line in body_list:
if body_line.find("PRIMARY KEY") > -1:
primary_key = body_line
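+# pad field names with spaces so e.g. 'id' cannot match inside 'node_id',
+# and keep scanning to collect every column of a composite primary key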
for field in table_fields:
- if primary_key.find(field) > -1:
+ if primary_key.find(" "+field+" ") > -1:
primary_key_indices.append(table_fields.index(field))
- break
-
+ #break
+
# get old data
get_old_data = "SELECT DISTINCT %s FROM %s" % \
(", ".join(old_fields), old_fields[0].split(".")[0])
- for join in required_joins:
+ for join in joins:
get_old_data = get_old_data + " INNER JOIN %s USING (%s) " % \
(join.split('.')[0], join.split('.')[1])
-
+ if wheres:
+ get_old_data = get_old_data + " WHERE " + " AND ".join(wheres)
cursor.execute(get_old_data)
rows = cursor.fetchall()
# check db encoding
sql = " SELECT pg_catalog.pg_encoding_to_char(d.encoding)" \
" FROM pg_catalog.pg_database d " \
- " LEFT JOIN pg_catalog.pg_user u ON d.datdba = u.usesysid " \
" WHERE d.datname = '%s' " % config['PLC_DB_NAME']
cursor.execute(sql)
rows = cursor.fetchall()
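+# pre-8.1 versions of postgres report the utf8 encoding as 'UNICODE'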
- if rows[0][0] not in ['UTF8']:
+ if rows[0][0] not in ['UTF8', 'UNICODE']:
print "WARNING: db encoding is not utf8. Attempting to encode"
db.close()
# generate db dump
dump_file = '%s/dump.sql' % (temp_dir)
dump_file_encoded = dump_file + ".utf8"
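+# dump as the postgres superuser so table permissions cannot block the dump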
- dump_cmd = 'pg_dump -i %s -U %s -f %s > /dev/null 2>&1' % \
- (config['PLC_DB_NAME'], config['PLC_DB_USER'], dump_file)
+ dump_cmd = 'pg_dump -i %s -U postgres -f %s > /dev/null 2>&1' % \
+ (config['PLC_DB_NAME'], dump_file)
if os.system(dump_cmd):
print "ERROR: during db dump. Exiting."
sys.exit(1)
# archive original db
archive_db(config['PLC_DB_NAME'], config['PLC_DB_NAME']+'_sqlascii_archived')
# create a utf8 database and upload encoded data
- recreate_cmd = 'createdb -U postgres -E UTF8 %s > /dev/null 2>&1; ' \
+ recreate_cmd = 'createdb -U postgres -E UTF8 %s > /dev/null; ' \
'psql -a -U %s %s < %s > /dev/null 2>&1;' % \
(config['PLC_DB_NAME'], config['PLC_DB_USER'], \
config['PLC_DB_NAME'], dump_file_encoded)
lines = file.readlines()
while index < len(lines):
line = lines[index]
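+# strip inline sql comments before parsing the line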
+ if line.find("--") > -1:
+ line_parts = line.split("--")
+ line = line_parts[0]
# find all created objects
if line.startswith("CREATE"):
line_parts = line.split(" ")
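+# for CREATE OR REPLACE, drop two tokens so type and name stay at indices 1 and 2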
+ if line_parts[1:3] == ['OR', 'REPLACE']:
+ line_parts = line_parts[2:]
item_type = line_parts[1]
item_name = line_parts[2]
schema_items_ordered.append(item_name)
while index < len(lines):
index = index + 1
nextline = lines[index]
+ if nextline.find("--") > -1:
+ new_line_parts = nextline.split("--")
+ nextline = new_line_parts[0]
+ # look for any sequences
+ if item_type in ['TABLE'] and nextline.find('serial') > -1:
+ sequences[item_name] = nextline.strip().split()[0]
fields.append(nextline)
if nextline.find(";") >= 0:
break
print "Status: archiving database"
archive_db(config['PLC_DB_NAME'], config['PLC_DB_NAME']+'_archived')
-
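+# the live db was renamed away by archive_db, so recreate it empty (utf8)
+# before loading the new schema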
+os.system('createdb -U postgres -E UTF8 %s > /dev/null; ' % config['PLC_DB_NAME'])
print "Status: upgrading database"
# attempt to create and load all items from schema into temp db
create_item_from_schema(key)
if type == 'TABLE':
if upgrade_config.has_key(key):
+ # attempt to populate with temp table data
table_def = upgrade_config[key].replace('(', '').replace(')', '').split(',')
table_fields = [field.strip().split(':')[0] for field in table_def]
insert_cmd = "psql %s %s -c " \
" 'COPY %s (%s) FROM stdin;' < %s " % \
- (config['PLC_DB_NAME'], config['PLC_DB_USER'], key, ", ".join(table_fields), temp_tables[key] )
+ (config['PLC_DB_NAME'], config['PLC_DB_USER'], key,
+ ", ".join(table_fields), temp_tables[key] )
exit_status = os.system(insert_cmd)
if exit_status:
print "Error: upgrade %s failed" % key
- raise
+ sys.exit(1)
+ # update the primary key sequence
+ if sequences.has_key(key):
+ sequence = key +"_"+ sequences[key] +"_seq"
+ update_seq = "psql %s %s -c " \
+ " \"select setval('%s', max(%s)) FROM %s;\" > /dev/null" % \
+ (config['PLC_DB_NAME'], config['PLC_DB_USER'], sequence,
+ sequences[key], key)
+ exit_status = os.system(update_seq)
+ if exit_status:
+ print "Error: sequence %s update failed" % sequence
+ sys.exit(1)
else:
# check if there are any insert stmts in schema for this table
print "Warning: %s has no temp data file. Unable to populate with old data" % key
" 'ALTER DATABASE %s RENAME TO %s;'; > /dev/null" % \
(config['PLC_DB_NAME'], config['PLC_DB_NAME']+'_archived', config['PLC_DB_NAME'])
os.system(undo_command)
- remove_temp_tables()
+ #remove_temp_tables()
raise
-remove_temp_tables()
+#remove_temp_tables()
print "upgrade complete"