# Mark Huang <mlhuang@cs.princeton.edu>
# Copyright (C) 2006 The Trustees of Princeton University
#
-
+from datetime import datetime
from types import StringTypes
import re
from PLC.Parameter import Parameter, Mixed
from PLC.Filter import Filter
from PLC.Debug import profile
-from PLC.Table import Row, Table
+from PLC.Storage.AlchemyObject import AlchemyObj
from PLC.NodeTypes import NodeTypes
from PLC.BootStates import BootStates
from PLC.Interfaces import Interface, Interfaces
+from PLC.ConfFileNodes import ConfFileNode
+from PLC.SliceNodes import SliceNode
+from PLC.SliceNodeWhitelists import SliceNodeWhitelist
+from PLC.PCUNodes import PCUNode
+from PLC.PCUNodePorts import PCUNodePort
+from PLC.NodeTags import NodeTag
+from PLC.NodeGroups import NodeGroup
def valid_hostname(hostname):
    """
    Return a truthy value when hostname matches the module-level
    good_hostname pattern (case-insensitive), a falsy value otherwise.
    """
    # 1. Each part begins and ends with a letter or number.
    if not hostname:
        return hostname
    return re.match(good_hostname, hostname, re.IGNORECASE)
class Node(AlchemyObj):
    """
    Representation of a row in the nodes table. To use, optionally
    instantiate with a dict of values. Update as you would a
    dict. Commit to the database with sync().
    """

    tablename = 'nodes'

    # Tables holding rows that reference a node and must be cleaned up
    # when the node is removed.
    join_tables = [ 'slice_node', 'peer_node', 'slice_tag',
                    'node_session', 'node_slice_whitelist',
                    'node_tag', 'conf_file_node', 'pcu_node', 'leases', ]

    fields = {
        'node_id': Parameter(int, "Node identifier", primary_key=True),
        'node_type': Parameter(str, "Node type", max=20),
        'hostname': Parameter(str, "Fully qualified hostname", max = 255),
        'site_id': Parameter(int, "Site at which this node is located"),
        'boot_state': Parameter(str, "Boot state", max = 20, nullok=True),
        'run_level': Parameter(str, "Run level", max = 20, nullok=True),
        'model': Parameter(str, "Make and model of the actual machine", max = 255, nullok = True),
        'boot_nonce': Parameter(str, "(Admin only) Random value generated by the node at last boot", max = 128, nullok=True),
        'version': Parameter(str, "Apparent Boot CD version", max = 64, nullok=True),
        'ssh_rsa_key': Parameter(str, "Last known SSH host key", max = 1024, nullok=True),
        'date_created': Parameter(datetime, "Date and time when node entry was created", ro = True),
        'last_updated': Parameter(datetime, "Date and time when node entry was last updated", ro = True),
        'last_contact': Parameter(datetime, "Date and time when node last contacted plc", ro = True, nullok=True),
        'last_boot': Parameter(datetime, "Date and time when node last booted", ro = True, nullok=True),
        'last_download': Parameter(datetime, "Date and time when node boot image was created", ro = True, nullok=True),
        'last_pcu_reboot': Parameter(datetime, "Date and time when PCU reboot was attempted", ro = True, nullok=True),
        'last_pcu_confirmation': Parameter(datetime, "Date and time when PCU reboot was confirmed", ro = True, nullok=True),
        # NOTE(review): these two are declared as datetime but described
        # as durations -- confirm the intended column type.
        'last_time_spent_online': Parameter(datetime, "Length of time the node was last online before shutdown/failure", ro = True, nullok=True),
        'last_time_spent_offline': Parameter(datetime, "Length of time the node was last offline after failure and before reboot", ro = True, nullok=True),
        'verified': Parameter(bool, "Whether the node configuration is verified correct", ro=False, nullok=True),
        'key': Parameter(str, "(Admin only) Node key", max = 256, nullok=True),
        'session': Parameter(str, "(Admin only) Node session value", max = 256, ro = True, nullok=True),
        # joined=True fields are not columns of 'nodes'; they are filled
        # in from join tables by Nodes.__init__ below.
        'interface_ids': Parameter([int], "List of network interfaces that this node has", joined=True),
        'conf_file_ids': Parameter([int], "List of configuration files specific to this node", joined=True),
#        'root_person_ids': Parameter([int], "(Admin only) List of people who have root access to this node"),
        'slice_ids': Parameter([int], "List of slices on this node", joined=True),
        'slice_ids_whitelist': Parameter([int], "List of slices allowed on this node", joined=True),
        'pcu_ids': Parameter([int], "List of PCUs that control this node", joined=True),
        'ports': Parameter([int], "List of PCU ports that this node is connected to", joined=True),
        'peer_id': Parameter(int, "Peer to which this node belongs", nullok = True),
        'peer_node_id': Parameter(int, "Foreign node identifier at peer", nullok = True),
        'node_tag_ids' : Parameter ([int], "List of tags attached to this node", joined=True),
        'nodegroup_ids': Parameter([int], "List of node groups that this node is in", joined=True),
    }

    # tags are used by the Add/Get/Update methods to expose tags;
    # initialized here and updated by the accessors factory
    tags = { }
def validate_hostname(self, hostname):
if not valid_hostname(hostname):
raise PLCInvalidArgument, "Invalid hostname"
- conflicts = Nodes(self.api, [hostname])
+ conflicts = Node().select(filter={'hostname': hostname})
for node in conflicts:
if 'node_id' not in self or self['node_id'] != node['node_id']:
raise PLCInvalidArgument, "Hostname already in use"
return hostname
def validate_node_type(self, node_type):
- node_types = [row['node_type'] for row in NodeTypes(self.api)]
- if node_type not in node_types:
- raise PLCInvalidArgument, "Invalid node type %r"%node_type
+ # Make sure node type does not alredy exist
+ conflicts = NodeTypes(self.api, [name])
+ if not conflicts:
+ raise PLCInvalidArgument, "Invalid node_type"
return node_type
def validate_boot_state(self, boot_state):
raise PLCInvalidArgument, "Invalid boot state %r"%boot_state
return boot_state
- validate_date_created = Row.validate_timestamp
- validate_last_updated = Row.validate_timestamp
- validate_last_contact = Row.validate_timestamp
- validate_last_boot = Row.validate_timestamp
- validate_last_download = Row.validate_timestamp
- validate_last_pcu_reboot = Row.validate_timestamp
- validate_last_pcu_confirmation = Row.validate_timestamp
+ validate_date_created = AlchemyObj.validate_timestamp
+ validate_last_updated = AlchemyObj.validate_timestamp
+ validate_last_contact = AlchemyObj.validate_timestamp
+ validate_last_boot = AlchemyObj.validate_timestamp
+ validate_last_download = AlchemyObj.validate_timestamp
+ validate_last_pcu_reboot = AlchemyObj.validate_timestamp
+ validate_last_pcu_confirmation = AlchemyObj.validate_timestamp
- def update_timestamp(self, col_name, commit = True):
- """
- Update col_name field with current time
- """
+ def update_readonly_int(self, col_name, commit = True):
assert 'node_id' in self
assert self.table_name
- self.api.db.do("UPDATE %s SET %s = CURRENT_TIMESTAMP " % (self.table_name, col_name) + \
- " where node_id = %d" % (self['node_id']) )
+ self.api.db.do("UPDATE %s SET %s = %s" % (self.table_name, col_name, self[col_name]) + \
+ " where node_id = %d" % (self['node_id']) )
self.sync(commit)
+ def update_timestamp(self, col_name, commit = True):
+ """
+ Update col_name field with current time
+ """
+ assert 'node_id' in self
+ self[col_name] = datetime.now()
+ fields = {
+ 'node_id': self['node_id'],
+ col_name: datetime.now()
+ }
+ Node(self.api, fields).sync()
+
    def update_last_boot(self, commit = True):
        # Record the current time as this node's last boot time.
        self.update_timestamp('last_boot', commit)
def update_last_download(self, commit = True):
DeleteSliceFromNodesWhitelist.__call__(DeleteSliceFromNodesWhitelist(self.api), auth, stale_slice, [self['node_id']])
+
+ def sync(self, commit=True, validate=True):
+ AlchemyObj.sync(self, commit=commit, validate=validate)
+ ts = datetime.now()
+ self['last_updated'] = ts
+ if 'node_id' not in self:
+ self['date_created'] = ts
+ AlchemyObj.insert(self, dict(self))
+ else:
+ AlchemyObj.update(self, {'node_id': self['node_id']}, dict(self))
+
def delete(self, commit = True):
"""
Delete existing node.
"""
assert 'node_id' in self
+ assert 'interface_ids' in self
+ Interface().delete(filter={'interface_id': self['interface_ids']})
+ AlchemyObj.delete(self, dict(self))
- # we need to clean up InterfaceTags, so handling interfaces as part of join_tables does not work
- # federated nodes don't have interfaces though so for smooth transition from 4.2 to 4.3
- if 'peer_id' in self and self['peer_id']:
- pass
- else:
- assert 'interface_ids' in self
- for interface in Interfaces(self.api,self['interface_ids']):
- interface.delete()
-
- # Clean up miscellaneous join tables
- for table in self.join_tables:
- self.api.db.do("DELETE FROM %s WHERE node_id = %d" % \
- (table, self['node_id']))
-
- # Mark as deleted
- self['deleted'] = True
- self.sync(commit)
-
-
-class Nodes(Table):
+class Nodes(list):
"""
Representation of row(s) from the nodes table in the
database.
"""
def __init__(self, api, node_filter = None, columns = None):
- Table.__init__(self, api, Node, columns)
-
- # the view that we're selecting upon: start with view_nodes
- view = "view_nodes"
+ self.api = api
+ self.refresh(api)
# as many left joins as requested tags
- for tagname in self.tag_columns:
- view= "%s left join %s using (%s)"%(view,Node.tagvalue_view_name(tagname),
- Node.primary_key)
-
- sql = "SELECT %s FROM %s WHERE deleted IS False" % \
- (", ".join(self.columns.keys()+self.tag_columns.keys()),view)
-
- if node_filter is not None:
- if isinstance(node_filter, (list, tuple, set)):
- # Separate the list into integers and strings
- ints = filter(lambda x: isinstance(x, (int, long)), node_filter)
- strs = filter(lambda x: isinstance(x, StringTypes), node_filter)
- node_filter = Filter(Node.fields, {'node_id': ints, 'hostname': strs})
- sql += " AND (%s) %s" % node_filter.sql(api, "OR")
- elif isinstance(node_filter, dict):
- allowed_fields=dict(Node.fields.items()+Node.tags.items())
- node_filter = Filter(allowed_fields, node_filter)
- sql += " AND (%s) %s" % node_filter.sql(api, "AND")
- elif isinstance (node_filter, StringTypes):
- node_filter = Filter(Node.fields, {'hostname':node_filter})
- sql += " AND (%s) %s" % node_filter.sql(api, "AND")
- elif isinstance (node_filter, (int, long)):
- node_filter = Filter(Node.fields, {'node_id':node_filter})
- sql += " AND (%s) %s" % node_filter.sql(api, "AND")
- else:
- raise PLCInvalidArgument, "Wrong node filter %r"%node_filter
-
- self.selectall(sql)
+ if not node_filter:
+ nodes = Node().select()
+ elif isinstance(node_filter, (list, tuple, set)):
+ # Separate the list into integers and strings
+ ints = filter(lambda x: isinstance(x, (int, long)), node_filter)
+ strs = filter(lambda x: isinstance(x, StringTypes), node_filter)
+ nodes = Node().select(filter={'node_id': ints, 'hostname': strs})
+ elif isinstance(node_filter, dict):
+ nodes = Node().select(filter={'node_id': ints, 'hostname': strs})
+ elif isinstance (node_filter, StringTypes):
+ nodes = Node().select(filter={'hostname': strs})
+ elif isinstance (node_filter, (int, long)):
+ nodes = Node().select(filter={'node_id': ints})
+ else:
+ raise PLCInvalidArgument, "Wrong node filter %r"%node_filter
+
+ for node in nodes:
+ node = Node(api, object=node)
+ if not columns or 'interface_ids' in columns:
+ interfaces = Interface().select(filter={'node_id': node['node_id']})
+ node['interface_ids'] = [rec.interface_id for rec in interfaces]
+ if not columns or 'conf_file_ids' in columns:
+ conf_files = ConfFileNode().select(filter={'node_id': node['node_id']})
+ node['conf_file_ids'] = [rec.conf_file_id for rec in conf_files]
+ if not columns or 'slice_ids' in columns:
+ slice_nodes = SliceNode().select(filter={'node_id': node['node_id']})
+ node['slice_ids'] = [rec.slice_id for rec in slices_nodes]
+ if not columns or 'slice_ids_whitelist' in columns:
+ slice_whitelist = SliceNodeWhitelist().select(filter={'node_id': node['node_id']})
+ node['slice_ids_whitelist'] = [rec.slice_id for rec in slice_whitelist]
+ if not columns or 'pcu_ids' in columns:
+ pcus = PCUNode().select(filter={'node_id': node['node_id']})
+ node['pcu_ids'] = [rec.pcu_id for rec in pcus]
+ if not columns or 'pcu_ports' in columns:
+ pcu_ports = PCUNodePort().select(filter={'node_id': node['node_id']})
+ node['pcu_ports'] = [rec.port for rec in pcu_ports]
+ if not columns or 'node_tag_ids' in columns:
+ node_tags = NodeTag().select(filter={'node_id': node['node_id']})
+ node['node_tag_ids'] = [rec.node_tag_id for rec in node_tags]
+ if not columns or 'nodegroup_ids' in columns:
+ nodegroups = NodeGroup().select(filter={'node_id': node['node_id']})
+ node['nodegroup_ids'] = [rec.nodegroup_id for rec in nodegroups]
+ self.append(node)
+
+ def refresh(self, api):
+ from PLC.Sites import Sites
+ default_site = Sites(api, site_filter={'login_base': 'default'})[0]
+ # get current list of compute nodes
+ hypervisors = api.client_shell.nova.hypervisors.list()
+ compute_hosts = [h.hypervisor_hostname for h in hypervisors]
+
+ nodes = Node().select()
+ hostnames = [node.hostname for node in nodes]
+
+ added_nodes = set(compute_hosts).difference(hostnames)
+ for added_node in added_nodes:
+ node = Node(api, {'hostname': added_node,
+ 'node_type': 'regular',
+ 'site_id': default_site['site_id']})
+ node.sync()
+
+
+