else
+$(RSYNC) plcsh PLC planetlab5.sql migrations $(SSHURL)/usr/share/plc_api/
+$(RSYNC) db-config.d/ $(SSHURL)/etc/planetlab/db-config.d/
+$(RSYNC) plc.d/ $(SSHURL)/etc/plc.d/
$(SSHCOMMAND) exec apachectl graceful
endif
filter should be a dictionary of field names and values
representing the criteria for filtering.
example : filter = { 'hostname' : '*.edu' , site_id : [34,54] }
+
+
Whether the filter represents an intersection (AND) or a union (OR)
- of these criteria is determined by the join_with argument
- provided to the sql method below
+ of these criteria is determined as follows:
+ * if the dictionary has the '-AND' or the '-OR' key, this is chosen
+ * otherwise, the join_with argument, as provided to the sql method below,
+ is expected to hold the 'AND' or 'OR' string
+ this argument defaults to 'AND' and in most of the code, this default applies
+ as the join_with argument is left unspecified
+
Special features:
* '-LIMIT' : the amount of rows to be returned
example : filter = { '-OFFSET' : 100, '-LIMIT':25}
+
Here are a few realistic examples
- GetNodes ( { 'node_type' : 'regular' , 'hostname' : '*.edu' , '-SORT' : 'hostname' , '-OFFSET' : 30 , '-LIMIT' : 25 } )
+ GetNodes ( { 'node_type' : 'regular' , 'hostname' : '*.edu' ,
+ '-SORT' : 'hostname' , '-OFFSET' : 30 , '-LIMIT' : 25 } )
would return regular (usual) nodes matching '*.edu' in alphabetical order from 31th to 55th
+ GetNodes ( { '~peer_id' : None } )
+ returns the foreign nodes - that have an integer peer_id
+
GetPersons ( { '|role_ids' : [ 20 , 40] } )
would return all persons that have either pi (20) or tech (40) roles
all 4 forms are equivalent and would return all admin users in the system
"""
+ debug=False
+# debug=True
+
def __init__(self, fields = {}, filter = {}, doc = "Attribute filter"):
# Store the filter in our dict instance
dict.__init__(self, filter)
Returns a SQL conditional that represents this filter.
"""
+ if self.has_key('-AND'):
+ del self['-AND']
+ join_with='AND'
+ if self.has_key('-OR'):
+ del self['-OR']
+ join_with='OR'
+
+ self.join_with=join_with
+
# So that we always return something
if join_with == "AND":
conditionals = ["True"]
clip_part += " ORDER BY " + ",".join(sorts)
if clips:
clip_part += " " + " ".join(clips)
-# print 'where_part=',where_part,'clip_part',clip_part
+ if Filter.debug: print 'Filter.sql: where_part=',where_part,'clip_part',clip_part
return (where_part,clip_part)
--- /dev/null
+#
+# $Id$
+# $URL$
+# Thierry Parmentelat -- INRIA
+#
+# Utilities for filtering on leases
+#
+
+from types import StringTypes
+from PLC.Faults import *
+from PLC.Filter import Filter
+from PLC.Parameter import Parameter, Mixed
+from PLC.Timestamp import Timestamp
+
+# supersede the generic Filter class to support time intersection
class LeaseFilter (Filter):
    """
    A Filter subclass for the leases table that understands two extra
    (virtual) keys, 'alive' and 'clip', which select leases by time
    intersection rather than by plain column comparison.

    These keys are extracted before the generic Filter.sql runs, and the
    corresponding SQL conditions are appended to the WHERE part afterwards.
    Negation is available through the usual '~' prefix ('~alive', '~clip').
    """

    # general notes on input parameters
    # int_timestamp: number of seconds since the epoch
    # str_timestamp: see Timestamp.sql_validate
    # timeslot: a tuple (from,until), each being either int_timestamp or str_timestamp

    # the extra keys handled locally by this subclass (hidden from Filter.sql)
    local_fields = { 'alive': Mixed ( Parameter (int, "int_timestamp: leases alive at that time"),
                                      Parameter (str, "str_timestamp: leases alive at that time"),
                                      Parameter (tuple,"timeslot: the leases alive during this timeslot")),
                     'clip': Mixed ( Parameter (int, "int_timestamp: leases alive after that time"),
                                     Parameter (str, "str_timestamp: leases alive after at that time"),
                                     Parameter (tuple,"timeslot: the leases alive during this timeslot")),
                     }

    def __init__(self, fields = {}, filter = {}, 
                 doc = "Lease filter -- adds the 'alive' and 'clip' capabilities for filtering on leases"):
        # initialize the generic Filter first, then advertise our extra fields
        Filter.__init__(self, fields, filter, doc)
        self.fields.update (LeaseFilter.local_fields)

    ## canonical type
    # normalize any accepted timestamp flavor into a long (seconds since the epoch)
    @staticmethod
    def quote (timestamp): return Timestamp.cast_long(timestamp)

    ## basic SQL utilities
    @staticmethod
    def sql_time_intersect (f1, u1, f2, u2):
        # SQL text for: do intervals [f1,u1] and [f2,u2] intersect ?
        # either f2 is in [f1,u1], or u2 is in [f1,u1], or f2<=f1<=u1<=u2
        return ("((%(f1)s <= %(f2)s) AND (%(f2)s <= %(u1)s)) " + \
                "OR ((%(f1)s <= %(u2)s) AND (%(u2)s <= %(u1)s)) " + \
                "OR ((%(f2)s<=%(f1)s) AND (%(u1)s<=%(u2)s))") % locals()

    # python-side (not SQL) check: is timestamp within [f1,u1] ?
    @staticmethod
    def time_in_range (timestamp, f1, u1):
        return Timestamp.cast_long(f1) <= Timestamp.cast_long(timestamp) \
            and Timestamp.cast_long(timestamp) <= Timestamp.cast_long(u1)

    @staticmethod
    def sql_time_in_range (timestamp, f1, u1):
        # SQL text for: is timestamp in [f1,u1] ?
        return "((%(f1)s <= %(timestamp)s) AND (%(timestamp)s <= %(u1)s))" % locals()

    @staticmethod
    def sql_timeslot_after (f1, u1, mark):
        # SQL text for: is the lease alive after mark, i.e. u1 >= mark
        return "(%(u1)s >= %(mark)s)" % locals()

    ## hooks for the local fields
    def sql_alive (self, alive):
        # 'alive' is either a single timestamp (int or str) or a (from,until) tuple
        if isinstance (alive, int) or isinstance (alive, StringTypes):
            # the lease is alive at that time if from <= alive <= until
            alive = LeaseFilter.quote(alive)
            return LeaseFilter.sql_time_in_range(alive, 't_from', 't_until')
        elif isinstance (alive, tuple):
            (f, u) = alive
            f = LeaseFilter.quote(f)
            u = LeaseFilter.quote(u)
            return LeaseFilter.sql_time_intersect (f, u, 't_from', 't_until')
        else: raise PLCInvalidArgument ("LeaseFilter: alive field %r" % alive)

    def sql_clip (self, clip):
        # single timestamp: leases still running at or after that time
        if isinstance (clip, int) or isinstance (clip, StringTypes):
            start = LeaseFilter.quote(clip)
            return LeaseFilter.sql_timeslot_after('t_from', 't_until', start)
        # timeslot tuple: same intersection semantics as 'alive'
        elif isinstance (clip, tuple):
            (f, u) = clip
            f = LeaseFilter.quote(f)
            u = LeaseFilter.quote(u)
            return LeaseFilter.sql_time_intersect(f, u, 't_from', 't_until')
        else: raise PLCInvalidArgument ("LeaseFilter: clip field %r" % clip)

    ## supersede the generic Filter 'sql' method
    def sql(self, api, join_with = "AND"):
        """
        Returns the (where_part, clip_part) pair, like Filter.sql, with
        the 'alive'/'clip' conditions appended to where_part.
        """
        # preserve locally what belongs to us, hide it from the superclass
        # self.local is a dict local_key : user_value
        # self.negation is a dict local_key : string ("" or "NOT ")
        self.local = {}
        self.negation = {}
        for (k, v) in LeaseFilter.local_fields.items():
            if self.has_key(k):
                self.local[k] = self[k]
                del self[k]
                self.negation[k] = ""
            elif self.has_key('~' + k):
                # '~alive' / '~clip' mean: negate the condition
                self.local[k] = self['~' + k]
                del self['~' + k]
                self.negation[k] = "NOT "
        # run the generic filtering code
        # (Filter.sql also sets self.join_with, which we reuse below)
        (where_part, clip_part) = Filter.sql(self, api, join_with)
        for (k, v) in self.local.items():
            try:
                # locate hook function associated with key (sql_alive / sql_clip)
                method = LeaseFilter.__dict__['sql_' + k]
                where_part += " %s %s(%s)" % (self.join_with, self.negation[k], method(self, self.local[k]))
            except Exception, e:
                raise PLCInvalidArgument, "LeaseFilter: something wrong with filter key %s, val was %r -- %r" % (k, v, e)
        if Filter.debug: print 'LeaseFilter.sql: where_part=', where_part, 'clip_part', clip_part
        return (where_part, clip_part)
+
+######## xxx not sure where this belongs yet
+# given a set of nodes, and a timeslot,
+# returns the available leases that have at least a given duration
def free_leases (api, node_ids, t_from, t_until, min_duration):
    """
    Given a set of nodes and a timeslot, is meant to return the free
    timeslots of at least min_duration on these nodes.

    NOTE(review): unfinished stub -- the per-node scan is not implemented
    yet and the function returns a placeholder string instead of the
    'result' list it prepares.
    """
    # get the leases for these nodes and timeslot
    filter = {'node_id': node_ids,
              'clip': (t_from, t_until),
              # sort by node, and inside one node, chronologically
              '-SORT' : ('node_id', 't_from'),
              }
    leases = Leases (api, filter)

    result = []

    # sort node_ids
    node_ids.sort()

    # scan nodes from the input
    input_node_id = 0
    # scan nodes from the leases
    lease_node_id = 0

    # TODO(review): merge-scan node_ids against leases, delegating each
    # node's computation to node_free_leases below
    return '?? what now ??'
+
def node_free_leases (node_id, node_leases, t_from, t_until):
    """
    Computes the free timeslots on one node over [t_from, t_until].

    node_leases: that node's leases, sorted chronologically (presumably
    already clipped to the timeslot -- TODO confirm against the caller).
    Returns a list of {'node_id','t_from','t_until'} dicts, one per free slot.

    NOTE(review): node_leases is consumed (emptied) as a side effect.
    NOTE(review): when the last lease ends before t_until, the trailing
    free slot [last t_until, t_until] does not appear to be reported
    (the 'is_on' branch returns as soon as node_leases empties) -- confirm.
    """
    # no lease yet : return one solid lease
    if not node_leases:
        return [ {'node_id': node_id,
                  't_from': t_from,
                  't_until': t_until} ]

    result = []
    current_time = t_from
    # is_on: whether the first lease starts within the window, i.e. a lease
    # is considered active at current_time
    is_on = LeaseFilter.time_in_range(node_leases[0]['t_from'], t_from, t_until)

    while True:
#        print 'DBG','current_time',current_time,'is_on',is_on,'result',result
        # lease is active : skip to its end and consume it
        if is_on:
            current_time = node_leases[0]['t_until']
            is_on = False
            del node_leases[0]
            if not node_leases: return result
        # free, has no remaining lease : free until the end of the window
        elif not node_leases:
            result.append( {'node_id': node_id, 't_from': current_time, 't_until': t_until} )
            return result
        # free and has remaining leases : free until the next lease starts
        else:
            next_time = node_leases[0]['t_from']
            result.append( {'node_id': node_id, 't_from': current_time, 't_until': next_time})
            current_time = next_time
            is_on = True
+
+
--- /dev/null
+#
+# Functions for interacting with the leases table in the database
+#
+# $Id$
+# $URL$
+# Thierry Parmentelat -- INRIA
+#
+
+from datetime import datetime
+
+from PLC.Faults import *
+from PLC.Parameter import Parameter, Mixed
+from PLC.Filter import Filter
+from PLC.Table import Row, Table
+from PLC.Nodes import Node, Nodes
+from PLC.Slices import Slice, Slices
+from PLC.LeaseFilter import LeaseFilter
+from PLC.Timestamp import Timestamp
+
class Lease(Row):
    """
    Representation of a row in the leases table. To use, optionally
    instantiate with a dict of values. Update as you would a
    dict. Commit to the database with sync().
    """

    table_name = 'leases'
    primary_key = 'lease_id'
    join_tables = [ ]
    fields = {
        # native
        'lease_id': Parameter(int, "Lease identifier"),
        't_from': Timestamp.Parameter("timeslot start"),
        't_until': Timestamp.Parameter("timeslot end"),
        'node_id': Node.fields['node_id'],
        'slice_id': Slice.fields['slice_id'],

        # derived
        'hostname': Node.fields['hostname'],
        'node_type': Node.fields['node_type'],
        'name': Slice.fields['name'],
        'site_id': Slice.fields['site_id'],
        'duration': Parameter(int, "duration in seconds"),
        'expired' : Parameter(bool, "time slot is over"),
        }

    related_fields = { }

    # leases do not have arbitrary boundaries
    # t_from and t_until are rounded to this period of time
    # initial model is one hour
    granularity = 60*60

    def validate_time (self, timestamp, round_up):
        """
        Round timestamp to the lease granularity and return it as an
        SQL-ready UTC string; round_up selects the rounding direction.
        """
        # convert to long
        timestamp = Timestamp.cast_long(timestamp)
        # the trick for rounding up rather than down
        if round_up: timestamp += (Lease.granularity-1)
        # round down
        timestamp = (timestamp/Lease.granularity) * Lease.granularity
        # return a SQL string
        return Timestamp.sql_validate_utc(timestamp)

    # round UP
    # (validate_* hooks are presumably invoked by the Row machinery when
    #  the corresponding field is set -- confirm in PLC.Table.Row)
    def validate_t_from(self, timestamp):
        return self.validate_time (timestamp, round_up=True)
    # round DOWN
    def validate_t_until (self, timestamp):
        return self.validate_time (timestamp, round_up=False)
+
+class Leases(Table):
+ """
+ Representation of row(s) from the leases table in the
+ database.
+ """
+
+ def __init__(self, api, lease_filter = None, columns = None):
+ Table.__init__(self, api, Lease, columns)
+
+ # the view that we're selecting upon: start with view_leases
+ view = "view_leases"
+ sql = "SELECT %s FROM %s WHERE true" % (", ".join(self.columns.keys()),view)
+
+
+ if lease_filter is not None:
+
+ if isinstance (lease_filter, int):
+ lease_filter = Filter (Lease.fields, {'lease_id': lease_filter})
+ elif isinstance(lease_filter, (list, tuple, set)):
+ lease_filter = Filter(Lease.fields, {'lease_id': lease_filter})
+ elif isinstance(lease_filter, dict):
+ lease_filter = LeaseFilter(Lease.fields, lease_filter)
+ else:
+ raise PLCInvalidArgument, "Wrong lease filter %r"%lease_filter
+ sql += " AND (%s) %s" % lease_filter.sql(api)
+
+ self.selectall(sql)
--- /dev/null
+# $Id$
+# $URL$
+# Thierry Parmentelat -- INRIA
+
+from PLC.Faults import *
+from PLC.Auth import Auth
+from PLC.Method import Method
+from PLC.Parameter import Parameter, Mixed
+from PLC.Table import Row
+
+from PLC.Leases import Leases, Lease
+from PLC.Nodes import Nodes, Node
+from PLC.Slices import Slices, Slice
+from PLC.Timestamp import Timestamp
+
# NOTE(review): unused in this file, and these are slice fields, not lease
# fields -- looks like a copy-paste leftover from a slice method; confirm
# before removing
can_update = ['name', 'instantiation', 'url', 'description', 'max_nodes']
+
+class AddLeases(Method):
+ """
+ Adds a new lease.
+ Mandatory arguments are node(s), slice, t_from and t_until
+ times can be either integers, datetime's, or human readable (see Timestamp)
+
+ PIs may only add leases associated with their own sites (i.e.,
+ to a slice that belongs to their site).
+ Users may only add leases associated with their own slices.
+
+ Returns the new lease_ids if successful, faults otherwise.
+ """
+
+ roles = ['admin', 'pi', 'user']
+
+ accepts = [
+ Auth(),
+ Mixed(Node.fields['node_id'],[Node.fields['node_id']],
+ Node.fields['hostname'],[Node.fields['hostname']],),
+ Mixed(Slice.fields['slice_id'],
+ Slice.fields['name']),
+ Mixed(Lease.fields['t_from']),
+ Mixed(Lease.fields['t_until']),
+ ]
+
+ returns = Parameter(dict, " 'new_ids' is the list of newly created ids, 'errors' is a list of error strings")
+
+ def call(self, auth, node_id_or_hostname_s, slice_id_or_name, t_from, t_until):
+
+ # xxx - round to plain hours somewhere
+
+ # Get node information
+ nodes = Nodes(self.api, node_id_or_hostname_s)
+ if not nodes:
+ raise PLCInvalidArgument, "No such node(s) %r"%node_id_or_hostname_s
+ for node in nodes:
+ if node['node_type'] != 'reservable':
+ raise PLCInvalidArgument, "Node %s is not reservable"%node['hostname']
+
+ # Get slice information
+ slices = Slices(self.api, [slice_id_or_name])
+ if not slices:
+ raise PLCInvalidArgument, "No such slice %r"%slice_id_or_name
+ slice = slices[0]
+
+ # check access
+ if 'admin' not in self.caller['roles']:
+ if self.caller['person_id'] in slice['person_ids']:
+ pass
+ elif 'pi' not in self.caller['roles']:
+ raise PLCPermissionDenied, "Not a member of the specified slice"
+ elif slice['site_id'] not in self.caller['site_ids']:
+ raise PLCPermissionDenied, "Specified slice not associated with any of your sites"
+
+ # normalize timestamps
+ t_from = Timestamp.sql_validate_utc(t_from)
+ t_until = Timestamp.sql_validate_utc(t_until)
+
+ ########## create stuff
+ errors=[]
+ result_ids=[]
+ for node in nodes:
+ if node['peer_id'] is not None:
+ errors.append("Cannot set lease on remote node %r"%node['hostname'])
+ continue
+ # let the DB check for time consistency
+ try:
+ lease = Lease (self.api, {'node_id':node['node_id'], 'slice_id': slice['slice_id'],
+ 't_from':t_from, 't_until':t_until})
+ lease.sync()
+ result_ids.append(lease['lease_id'])
+ except Exception,e:
+ errors.append("Could not create lease on n=%s s=%s [%s .. %s] -- %r" % \
+ (node['hostname'],slice['name'],t_from,t_until,e))
+ nodes.remove(node)
+
+ self.event_objects = {'Slice': [slice['slice_id']],
+ 'Node': [node['node_id'] for node in nodes]}
+ self.message = "New leases %r on n=%r s=%s [%s -> %s]" % \
+ (result_ids,[node['hostname'] for node in nodes],slice['name'],t_from,t_until)
+
+ return {'new_ids': result_ids,
+ 'errors': errors}
UpdateNodeTag(self.api).__call__(auth,node_tags[0]['node_tag_id'],value)
self.event_objects = {'Site': [site['site_id']],
- 'Node': [node['node_id']]}
- self.message = "Node %s created" % node['node_id']
+ 'Node': [node['node_id']]}
+ self.message = "Node %d=%s created" % (node['node_id'],node['hostname'])
return node['node_id']
--- /dev/null
+# $Id$
+# $URL$
+from PLC.Faults import *
+from PLC.Method import Method
+from PLC.Parameter import Parameter, Mixed
+from PLC.Auth import Auth
+from PLC.Leases import Lease, Leases
+from PLC.Slices import Slice, Slices
+
+class DeleteLeases(Method):
+ """
+ Deletes a lease.
+
+ Users may only delete leases attached to their slices.
+ PIs may delete any of the leases for slices at their sites, or any
+ slices of which they are members. Admins may delete any lease.
+
+ Returns 1 if successful, faults otherwise.
+ """
+
+ roles = ['admin', 'pi', 'tech', 'user']
+
+ accepts = [
+ Auth(),
+ Mixed(Lease.fields['lease_id'],[ Lease.fields['lease_id']]),
+ ]
+
+ returns = Parameter(int, '1 if successful')
+
+
+ def call(self, auth, lease_ids):
+ # Get associated lease details
+ leases = Leases(self.api, lease_ids)
+ if len(leases) != len(lease_ids):
+ raise PLCInvalidArgument, "Could not find all leases %r"%lease_ids
+
+ # fetch related slices
+ slices = Slices(self.api, [ lease['slice_id'] for lease in leases],['slice_id','person_ids'])
+ # create hash on slice_id
+ slice_map = dict ( [ (slice['slice_id'],slice) for slice in slices ] )
+
+ lease_ids=[lease['lease_id'] for lease in leases]
+ for lease in leases:
+ if 'admin' not in self.caller['roles']:
+ slice=slice_map[lease['slice_id']]
+ # check slices only once
+ if not slice.has_key('verified'):
+ if self.caller['person_id'] in slice['person_ids']:
+ pass
+ elif 'pi' not in self.caller['roles']:
+ raise PLCPermissionDenied, "Not a member of slice %r"%slice['name']
+ elif slice['site_id'] not in self.caller['site_ids']:
+ raise PLCPermissionDenied, "Slice %r not associated with any of your sites"%slice['name']
+ slice['verified']=True
+
+ lease.delete()
+
+ # Logging variables
+ self.event_objects = {'Lease': lease_ids }
+ self.message = 'Leases %r deleted' % lease_ids
+
+ return 1
--- /dev/null
+# $Id$
+# $URL$
+# Thierry Parmentelat -- INRIA
+
+from PLC.Method import Method
+from PLC.Parameter import Parameter, Mixed
+from PLC.Filter import Filter
+from PLC.Auth import Auth
+from PLC.Leases import Lease, Leases, LeaseFilter
+
class GetLeases(Method):
    """
    Returns an array of structs containing details about leases. If
    lease_filter is specified and is an array of lease identifiers or
    lease names, or a struct of lease attributes, only leases matching
    the filter will be returned. If return_fields is specified, only the
    specified details will be returned.

    All leases are exposed to all users.

    In addition to the usual filter capabilities, the following are supported:
    * GetLeases ({ 'alive' : '2010-02-20 20:00' , <regular_filter_fields...> })
      returns the leases that are active at that point in time
    * GetLeases ({ 'alive' : ('2010-02-20 20:00' , '2010-02-20 21:00' ) , ... })
      ditto for a time range

    This is implemented in the LeaseFilter class; negation actually is supported
    through the usual '~alive' form, although maybe not really useful.

    """

    roles = ['admin', 'pi', 'user', 'node']

    accepts = [
        Auth(),
        Mixed(Lease.fields['lease_id'],
              [Lease.fields['lease_id']],
              LeaseFilter(Lease.fields)),
        Parameter([str], "List of fields to return", nullok = True)
        ]

    returns = [Lease.fields]

    def call(self, auth, lease_filter = None, return_fields = None):

        # 'lease_id' is needed for the post-processing below; force it into
        # the query when the caller asked for fields but left it out
        forced_lease_id = return_fields is not None and 'lease_id' not in return_fields
        if forced_lease_id:
            return_fields.append('lease_id')

        leases = Leases(self.api, lease_filter, return_fields)

        # strip the field we forced in, so the caller gets exactly
        # the fields that were requested
        if forced_lease_id:
            for lease in leases:
                lease.pop('lease_id', None)

        return leases
--- /dev/null
+# $Id$
+# $URL$
+from PLC.Faults import *
+from PLC.Method import Method
+from PLC.Parameter import Parameter, Mixed
+from PLC.Auth import Auth
+
+from PLC.Timestamp import Timestamp, Duration
+
+from PLC.Leases import Lease, Leases
+from PLC.Slices import Slice, Slices
+
# predicate for dict(filter(can_update, ...)): keep only the lease fields
# that this method is allowed to update (py2 tuple-unpacking lambda)
can_update = lambda (field, value): field in ['t_from', 't_until', 'duration']
+
+class UpdateLeases(Method):
+ """
+ Updates the parameters of a (set of) existing lease(s) with the values in
+ lease_fields; specifically this applies to the timeslot definition.
+ As a convenience you may, in addition to the t_from and t_until fields,
+ you can also set the 'duration' field.
+
+ Users may only update leases attached to their slices.
+ PIs may update any of the leases for slices at their sites, or any
+ slices of which they are members. Admins may update any lease.
+
+ Returns a dict of successfully updated lease_ids and error messages.
+ """
+
+ roles = ['admin', 'pi', 'tech', 'user']
+
+ lease_fields = dict(filter(can_update, Lease.fields.items()))
+
+ accepts = [
+ Auth(),
+ Mixed (Lease.fields['lease_id'],
+ [Lease.fields['lease_id']]),
+ lease_fields
+ ]
+
+ returns = Parameter(dict, " 'updated_ids' is the list ids updated, 'errors' is a list of error strings")
+
+ debug=False
+# debug=True
+
+ def call(self, auth, lease_ids, input_fields):
+ input_fields = dict(filter(can_update, input_fields.items()))
+
+ if 'duration' in input_fields:
+ if 't_from' in input_fields and 't_until' in input_fields:
+ raise PLCInvalidArgument, "Cannot set t_from AND t_until AND duration"
+ # specify 'duration':0 to keep duration unchanged
+ if input_fields['duration'] : input_fields['duration']=Duration.validate(input_fields['duration'])
+
+ # Get lease information
+ leases = Leases(self.api, lease_ids)
+ if not leases:
+ raise PLCInvalidArgument, "No such leases %r"%lease_ids
+
+ # fetch related slices
+ slices = Slices(self.api, [ lease['slice_id'] for lease in leases],['slice_id','person_ids'])
+ # create hash on slice_id
+ slice_map = dict ( [ (slice['slice_id'],slice) for slice in slices ] )
+
+ updated_ids=[]
+ errors=[]
+
+ lease_ids=[lease['lease_id'] for lease in leases]
+ for lease in leases:
+
+ if 'admin' not in self.caller['roles']:
+ slice=slice_map[lease['slice_id']]
+ # check slices only once
+ if not slice.has_key('verified'):
+ if self.caller['person_id'] in slice['person_ids']:
+ pass
+ elif 'pi' not in self.caller['roles']:
+ raise PLCPermissionDenied, "Not a member of slice %r"%slice['name']
+ elif slice['site_id'] not in self.caller['site_ids']:
+ raise PLCPermissionDenied, "Slice %r not associated with any of your sites"%slice['name']
+ slice['verified']=True
+
+ try:
+ # we've ruled out already the case where all 3 (from, to, duration) where specified
+ if 'duration' not in input_fields:
+ lease_fields=input_fields
+ else:
+ # all arithmetics on longs..
+ duration=Duration.validate(input_fields['duration'])
+ # specify 'duration':0 to keep duration unchanged
+ if not duration:
+ duration = Timestamp.cast_long(lease['t_until'])-Timestamp.cast_long(lease['t_from'])
+ if 't_from' in input_fields:
+ lease_fields={'t_from':input_fields['t_from'],
+ 't_until':Timestamp.cast_long(input_fields['from'])+duration}
+ elif 't_until' in input_fields:
+ lease_fields={'t_from':Timestamp.cast_long(input_fields['t_until'])-duration,
+ 't_until':input_fields['t_until']}
+ else:
+ lease_fields={'t_until':Timestamp.cast_long(lease['t_from'])+duration}
+ if UpdateLeases.debug:
+ print 'lease_fields',lease_fields
+ for k in [ 't_from', 't_until'] :
+ if k in lease_fields: print k,'aka',Timestamp.sql_validate_utc(lease_fields[k])
+
+ lease.update(lease_fields)
+ lease.sync()
+ updated_ids.append(lease['lease_id'])
+ except Exception,e:
+ errors.append("Could not update lease %d - check new time limits ? -- %r"%(lease['lease_id'],e))
+
+ # Logging variables
+ self.event_objects = {'Lease': updated_ids}
+ self.message = 'lease %r updated: %s' % (lease_ids, ", ".join(input_fields.keys()))
+
+ return {'updated_ids' : updated_ids,
+ 'errors' : errors }
AddInterface
AddInterfaceTag
AddKeyType
+AddLeases
AddMessage
AddNetworkMethod
AddNetworkType
DeleteInterfaceTag
DeleteKey
DeleteKeyType
+DeleteLeases
DeleteMessage
DeleteNetworkMethod
DeleteNetworkType
GetInterfaces
GetKeyTypes
GetKeys
+GetLeases
GetMessages
GetNetworkMethods
GetNetworkTypes
UpdateInterface
UpdateInterfaceTag
UpdateKey
+UpdateLeases
UpdateMessage
UpdateNode
UpdateNodeGroup
primary_key = 'node_id'
join_tables = [ 'slice_node', 'peer_node', 'slice_tag',
'node_session', 'node_slice_whitelist',
- 'node_tag', 'conf_file_node', 'pcu_node', ]
+ 'node_tag', 'conf_file_node', 'pcu_node', 'leases', ]
fields = {
'node_id': Parameter(int, "Node identifier"),
'node_type': Parameter(str,"Node type",max=20),
def __new__(cls, *types):
return tuple.__new__(cls, types)
-
def python_type(arg):
"""
Returns the Python type of the specified argument, which may be a
#
from types import StringTypes
-from datetime import datetime
try:
from hashlib import md5
except ImportError:
from PLC.Debug import profile, log
from PLC.Faults import *
-if not psycopg2:
- is8bit = re.compile("[\x80-\xff]").search
-
- def unicast(typecast):
- """
- pgdb returns raw UTF-8 strings. This function casts strings that
- appear to contain non-ASCII characters to unicode objects.
- """
-
- def wrapper(*args, **kwds):
- value = typecast(*args, **kwds)
-
- # pgdb always encodes unicode objects as UTF-8 regardless of
- # the DB encoding (and gives you no option for overriding
- # the encoding), so always decode 8-bit objects as UTF-8.
- if isinstance(value, str) and is8bit(value):
- value = unicode(value, "utf-8")
-
- return value
-
- return wrapper
-
- pgdb.pgdbTypeCache.typecast = unicast(pgdb.pgdbTypeCache.typecast)
-
class PostgreSQL:
def __init__(self, api):
self.api = api
def cursor(self):
if self.connection is None:
# (Re)initialize database connection
- if psycopg2:
- try:
- # Try UNIX socket first
- self.connection = psycopg2.connect(user = self.api.config.PLC_DB_USER,
- password = self.api.config.PLC_DB_PASSWORD,
- database = self.api.config.PLC_DB_NAME)
- except psycopg2.OperationalError:
- # Fall back on TCP
- self.connection = psycopg2.connect(user = self.api.config.PLC_DB_USER,
- password = self.api.config.PLC_DB_PASSWORD,
- database = self.api.config.PLC_DB_NAME,
- host = self.api.config.PLC_DB_HOST,
- port = self.api.config.PLC_DB_PORT)
- self.connection.set_client_encoding("UNICODE")
- else:
- self.connection = pgdb.connect(user = self.api.config.PLC_DB_USER,
- password = self.api.config.PLC_DB_PASSWORD,
- host = "%s:%d" % (api.config.PLC_DB_HOST, api.config.PLC_DB_PORT),
- database = self.api.config.PLC_DB_NAME)
+ try:
+ # Try UNIX socket first
+ self.connection = psycopg2.connect(user = self.api.config.PLC_DB_USER,
+ password = self.api.config.PLC_DB_PASSWORD,
+ database = self.api.config.PLC_DB_NAME)
+ except psycopg2.OperationalError:
+ # Fall back on TCP
+ self.connection = psycopg2.connect(user = self.api.config.PLC_DB_USER,
+ password = self.api.config.PLC_DB_PASSWORD,
+ database = self.api.config.PLC_DB_NAME,
+ host = self.api.config.PLC_DB_HOST,
+ port = self.api.config.PLC_DB_PORT)
+ self.connection.set_client_encoding("UNICODE")
(self.rowcount, self.description, self.lastrowid) = \
(None, None, None)
from PLC.Nodes import Node
from PLC.Persons import Person, Persons
from PLC.SliceTags import SliceTag
+from PLC.Timestamp import Timestamp
class Slice(Row):
"""
table_name = 'slices'
primary_key = 'slice_id'
- join_tables = ['slice_node', 'slice_person', 'slice_tag', 'peer_slice', 'node_slice_whitelist']
+ join_tables = ['slice_node', 'slice_person', 'slice_tag', 'peer_slice', 'node_slice_whitelist', 'leases', ]
fields = {
'slice_id': Parameter(int, "Slice identifier"),
'site_id': Parameter(int, "Identifier of the site to which this slice belongs"),
# N.B.: Responsibility of the caller to ensure that expires is
# not too far into the future.
check_future = not ('is_deleted' in self and self['is_deleted'])
- return Row.validate_timestamp(self, expires, check_future = check_future)
+ return Timestamp.sql_validate( expires, check_future = check_future)
add_person = Row.add_object(Person, 'slice_person')
remove_person = Row.remove_object(Person, 'slice_person')
import time
import calendar
+from PLC.Timestamp import Timestamp
from PLC.Faults import *
from PLC.Parameter import Parameter
+
class Row(dict):
"""
Representation of a row in a database table. To use, optionally
else:
raise PLCInvalidArguemnt, "No such associate function associate_%s" % args[1]
- def validate_timestamp(self, timestamp, check_future = False):
- """
- Validates the specified GMT timestamp string (must be in
- %Y-%m-%d %H:%M:%S format) or number (seconds since UNIX epoch,
- i.e., 1970-01-01 00:00:00 GMT). If check_future is True,
- raises an exception if timestamp is not in the future. Returns
- a GMT timestamp string.
- """
-
- time_format = "%Y-%m-%d %H:%M:%S"
-
- if isinstance(timestamp, StringTypes):
- # calendar.timegm() is the inverse of time.gmtime()
- timestamp = calendar.timegm(time.strptime(timestamp, time_format))
-
- # Human readable timestamp string
- human = time.strftime(time_format, time.gmtime(timestamp))
-
- if check_future and timestamp < time.time():
- raise PLCInvalidArgument, "'%s' not in the future" % human
-
- return human
    def validate_timestamp (self, timestamp):
        # delegate to the Timestamp helper; replaces the former local
        # implementation (removed in the hunk above) that parsed and
        # formatted GMT timestamps by hand
        return Timestamp.sql_validate(timestamp)
def add_object(self, classobj, join_table, columns = None):
"""
--- /dev/null
+#
+# Utilities to handle timestamps / durations from/to integers and strings
+#
+# $Id$
+# $URL$
+#
+
+#
+# datetime.{datetime,timedelta} are powerful tools, but these objects are not
+# natively marshalled over xmlrpc
+#
+
+from types import StringTypes
+import time, calendar
+import datetime
+
+from PLC.Faults import *
+from PLC.Parameter import Parameter, Mixed
+
+# a dummy class mostly used as a namespace
+class Timestamp:
+
+ debug=False
+# debug=True
+
+ # this is how we expose times to SQL
+ sql_format = "%Y-%m-%d %H:%M:%S"
+ sql_format_utc = "%Y-%m-%d %H:%M:%S UTC"
+ # this one (datetime.isoformat) would work too but that's less readable - we support this input though
+ iso_format = "%Y-%m-%dT%H:%M:%S"
+ # sometimes it's convenient to understand more formats
+ input_formats = [ sql_format,
+ sql_format_utc,
+ iso_format,
+ "%Y-%m-%d %H:%M",
+ "%Y-%m-%d %H:%M UTC",
+ ]
+
+ # for timestamps we usually accept either an int, or an ISO string,
+ # the datetime.datetime stuff can in general be used locally,
+ # but not sure it can be marshalled over xmlrpc though
+
+ @staticmethod
+ def Parameter (doc):
+ return Mixed (Parameter (int, doc + " (unix timestamp)"),
+ Parameter (str, doc + " (formatted as %s)"%Timestamp.sql_format),
+ )
+
+ @staticmethod
+ def sql_validate (input, timezone=False, check_future = False):
+ """
+ Validates the specified GMT timestamp, returns a
+ standardized string suitable for SQL input.
+
+ Input may be a number (seconds since UNIX epoch back in 1970,
+ or a string (in one of the supported input formats).
+
+ If timezone is True, the resulting string contains
+ timezone information, which is hard-wired as 'UTC'
+
+ If check_future is True, raises an exception if timestamp is in
+ the past.
+
+ Returns a GMT timestamp string suitable to feed SQL.
+ """
+
+ if not timezone: output_format = Timestamp.sql_format
+ else: output_format = Timestamp.sql_format_utc
+
+ if Timestamp.debug: print 'sql_validate, in:',input,
+ if isinstance(input, StringTypes):
+ sql=''
+ # calendar.timegm() is the inverse of time.gmtime()
+ for time_format in Timestamp.input_formats:
+ try:
+ timestamp = calendar.timegm(time.strptime(input, time_format))
+ sql = time.strftime(output_format, time.gmtime(timestamp))
+ break
+ # wrong format: ignore
+ except ValueError: pass
+ # could not parse it
+ if not sql:
+ raise PLCInvalidArgument, "Cannot parse timestamp %r - not in any of %r formats"%(input,Timestamp.input_formats)
+ elif isinstance (input,(int,long,float)):
+ try:
+ timestamp = long(input)
+ sql = time.strftime(output_format, time.gmtime(timestamp))
+ except Exception,e:
+ raise PLCInvalidArgument, "Timestamp %r not recognized -- %r"%(input,e)
+ else:
+ raise PLCInvalidArgument, "Timestamp %r - unsupported type %r"%(input,type(input))
+
+        if check_future and timestamp < time.time():
+ raise PLCInvalidArgument, "'%s' not in the future" % sql
+
+ if Timestamp.debug: print 'sql_validate, out:',sql
+ return sql
+
+ @staticmethod
+ def sql_validate_utc (timestamp):
+        "For convenience, return sql_validate(input, timezone=True, check_future=False)"
+ return Timestamp.sql_validate (timestamp, timezone=True, check_future=False)
+
+
+ @staticmethod
+ def cast_long (input):
+ """
+ Translates input timestamp as a unix timestamp.
+
+ Input may be a number (seconds since UNIX epoch, i.e., 1970-01-01
+ 00:00:00 GMT), a string (in one of the supported input formats above).
+
+ """
+ if Timestamp.debug: print 'cast_long, in:',input,
+ if isinstance(input, StringTypes):
+ timestamp=0
+ for time_format in Timestamp.input_formats:
+ try:
+ result=calendar.timegm(time.strptime(input, time_format))
+ if Timestamp.debug: print 'out:',result
+ return result
+ # wrong format: ignore
+ except ValueError: pass
+ raise PLCInvalidArgument, "Cannot parse timestamp %r - not in any of %r formats"%(input,Timestamp.input_formats)
+ elif isinstance (input,(int,long,float)):
+ result=long(input)
+ if Timestamp.debug: print 'out:',result
+ return result
+ else:
+ raise PLCInvalidArgument, "Timestamp %r - unsupported type %r"%(input,type(input))
+
+
+# utility for displaying durations
+# be consistent in avoiding the datetime stuff
+class Duration:
+
+ MINUTE = 60
+ HOUR = 3600
+ DAY = 3600*24
+
+ @staticmethod
+ def to_string(duration):
+ result=[]
+ left=duration
+ (days,left) = divmod(left,Duration.DAY)
+        if days: result.append("%d d"%days)
+ (hours,left) = divmod (left,Duration.HOUR)
+ if hours: result.append("%d h"%hours)
+ (minutes, seconds) = divmod (left, Duration.MINUTE)
+ if minutes: result.append("%d m"%minutes)
+ if seconds: result.append("%d s"%seconds)
+ if not result: result = ['void']
+ return "-".join(result)
+
+ @staticmethod
+ def validate (duration):
+ # support seconds only for now, works for int/long/str
+ try:
+ return long (duration)
+ except:
+ raise PLCInvalidArgument, "Could not parse duration %r"%duration
## Please use make index to update this file
all = """
API
+Accessor
AddressTypes
Addresses
Auth
Interfaces
KeyTypes
Keys
+LeaseFilter
+Leases
Messages
Method
+Namespace
NetworkMethods
NetworkTypes
NodeGroups
Slices
Table
TagTypes
+Timestamp
sendmail
""".split()
--- /dev/null
+-- $Id$
+-- $URL$
+
+-- revert cleanup on node_types
+INSERT INTO node_types VALUES ('dummynet');
+
+UPDATE nodes SET node_type='regular' WHERE node_type='reservable';
+DELETE FROM node_types WHERE node_type='reservable';
+
+-- drop new tables
+DROP VIEW view_leases;
+DROP VIEW view_all_leases;
+DROP TABLE leases;
+
+DROP FUNCTION IF EXISTS overlapping_trigger();
+
+--------------------------------------------------
+UPDATE plc_db_version SET subversion = 100;
--- /dev/null
+-- $Id$
+-- $URL$
+
+-- we're using the 'lease' nodetype to model reservable nodes
+INSERT INTO node_types VALUES ('reservable');
+-- also the dummynet node_type is obsolete
+DELETE FROM node_types WHERE node_type='dummynet';
+
+SET TIMEZONE TO 'UTC';
+
+CREATE TABLE leases (
+ lease_id serial PRIMARY KEY, -- id
+ t_from timestamp with time zone NOT NULL, -- from
+ t_until timestamp with time zone NOT NULL, -- until
+ node_id integer REFERENCES nodes NOT NULL, -- subject node
+ slice_id integer REFERENCES slices, -- slice owning the node
+-- xxx for testing
+-- CONSTRAINT future CHECK (t_from > CURRENT_TIMESTAMP),
+ CONSTRAINT start_before_end CHECK (t_until > t_from)
+) WITH OIDS;
+
+--
+-- hook to check for overlapping time slots on a given node_id
+-- xxx might use the builtin OVERLAPS feature
+-- http://www.postgresql.org/docs/8.3/interactive/functions-datetime.html
+--
+CREATE language plpgsql;
+CREATE FUNCTION overlapping_trigger() RETURNS trigger AS $overlapping_trigger$
+BEGIN
+ PERFORM lease_id FROM leases WHERE
+ -- consider only leases on the same node
+ NEW.node_id = node_id
+ -- consider only non expired leases
+ AND t_until > CURRENT_TIMESTAMP
+ -- useful for updates
+ AND NEW.lease_id <> lease_id
+ -- new start date is in range
+ AND ( (NEW.t_from >= t_from AND NEW.t_from < t_until)
+ -- new end date is in range
+ OR (NEW.t_until > t_from AND NEW.t_until <= t_until)
+ -- complete overlap: new from before from, new until after until
+ OR (NEW.t_from <= t_from AND NEW.t_until >= t_until));
+ IF FOUND THEN
+ RAISE EXCEPTION 'overlapping error: node % - slice %, % -> %', NEW.node_id, NEW.slice_id, NEW.t_from, NEW.t_until;
+ END IF;
+ RETURN NEW;
+END;
+$overlapping_trigger$ LANGUAGE plpgsql;
+
+CREATE
+ TRIGGER overlapping_trigger BEFORE INSERT OR UPDATE
+ ON leases FOR EACH ROW EXECUTE PROCEDURE overlapping_trigger();
+
+
+-- this is to let the API a chance to check for leases attached
+-- to a node that is not 'reservable'
+CREATE OR REPLACE VIEW view_all_leases AS
+SELECT
+leases.lease_id,
+CAST(date_part('epoch', leases.t_from) AS bigint) AS t_from,
+CAST(date_part('epoch', leases.t_until) AS bigint) AS t_until,
+-- dbg
+leases.t_from as s_from,
+leases.t_until as s_until,
+leases.node_id,
+leases.slice_id,
+nodes.hostname,
+nodes.node_type,
+slices.name,
+slices.site_id,
+CAST( date_part ('epoch',leases.t_until-leases.t_from) AS bigint) AS duration,
+leases.t_until < CURRENT_TIMESTAMP as expired
+FROM slices INNER JOIN leases USING (slice_id)
+JOIN nodes USING (node_id);
+
+-- only the relevant leases
+CREATE OR REPLACE VIEW view_leases AS
+SELECT * FROM view_all_leases
+WHERE node_type = 'reservable';
+
+
+--------------------------------------------------
+UPDATE plc_db_version SET subversion = 101;
# PostgreSQL 7.x uses tcpip_socket.
if grep -q listen_addresses $postgresql_conf ; then
sed -i -e '/^listen_addresses/d' $postgresql_conf
- echo "listen_addresses = '*'" >>$postgresql_conf
- elif grep -q tcpip_socket $postgresql_conf ; then
- sed -i -e '/^tcpip_socket/d' $postgresql_conf
- echo "tcpip_socket = true" >>$postgresql_conf
+ echo "listen_addresses = '*'" >> $postgresql_conf
+ # tweak timezone to be 'UTC'
+ sed -i -e '/^timezone=/d' $postgresql_conf
+ echo "timezone='UTC'" >> $postgresql_conf
+ else
+ dialog "PostgreSQL <= 7.x - not supported"
+ /bin/false
+ check
fi
# Disable access to all DBs from all hosts