# Mark Huang <mlhuang@cs.princeton.edu>
# Copyright (C) 2006 The Trustees of Princeton University
#
-# $Id: Nodes.py,v 1.14 2006/10/25 14:29:13 mlhuang Exp $
+# $Id: Nodes.py 5654 2007-11-06 03:43:55Z tmack $
#
from types import StringTypes
import re
from PLC.Faults import *
-from PLC.Parameter import Parameter
+from PLC.Parameter import Parameter, Mixed
+from PLC.Filter import Filter
from PLC.Debug import profile
from PLC.Table import Row, Table
from PLC.NodeNetworks import NodeNetwork, NodeNetworks
table_name = 'nodes'
primary_key = 'node_id'
+ # Thierry -- we use delete on nodenetworks so the related NodeNetworkSettings get deleted too
+ join_tables = ['nodegroup_node', 'conf_file_node', 'pcu_node', 'slice_node', 'slice_attribute', 'node_session', 'peer_node','node_slice_whitelist']
fields = {
'node_id': Parameter(int, "Node identifier"),
'hostname': Parameter(str, "Fully qualified hostname", max = 255),
'site_id': Parameter(int, "Site at which this node is located"),
'boot_state': Parameter(str, "Boot state", max = 20),
- 'model': Parameter(str, "Make and model of the actual machine", max = 255),
+ 'model': Parameter(str, "Make and model of the actual machine", max = 255, nullok = True),
'boot_nonce': Parameter(str, "(Admin only) Random value generated by the node at last boot", max = 128),
'version': Parameter(str, "Apparent Boot CD version", max = 64),
'ssh_rsa_key': Parameter(str, "Last known SSH host key", max = 1024),
'date_created': Parameter(int, "Date and time when node entry was created", ro = True),
'last_updated': Parameter(int, "Date and time when node entry was created", ro = True),
+ 'last_contact': Parameter(int, "Date and time when node last contacted plc", ro = True),
'key': Parameter(str, "(Admin only) Node key", max = 256),
'session': Parameter(str, "(Admin only) Node session value", max = 256, ro = True),
- 'nodenetwork_ids': Parameter([int], "List of network interfaces that this node has", ro = True),
- 'nodegroup_ids': Parameter([int], "List of node groups that this node is in", ro = True),
- 'conf_file_ids': Parameter([int], "List of configuration files specific to this node", ro = True),
- # 'root_person_ids': Parameter([int], "(Admin only) List of people who have root access to this node", ro = True),
- 'slice_ids': Parameter([int], "List of slices on this node", ro = True),
- 'pcu_ids': Parameter([int], "List of PCUs that control this node", ro = True),
- 'ports': Parameter([int], "List of PCU ports that this node is connected to", ro = True),
+ 'nodenetwork_ids': Parameter([int], "List of network interfaces that this node has"),
+ 'nodegroup_ids': Parameter([int], "List of node groups that this node is in"),
+ 'conf_file_ids': Parameter([int], "List of configuration files specific to this node"),
+ # 'root_person_ids': Parameter([int], "(Admin only) List of people who have root access to this node"),
+ 'slice_ids': Parameter([int], "List of slices on this node"),
+ 'slice_ids_whitelist': Parameter([int], "List of slices allowed on this node"),
+ 'pcu_ids': Parameter([int], "List of PCUs that control this node"),
+ 'ports': Parameter([int], "List of PCU ports that this node is connected to"),
+ 'peer_id': Parameter(int, "Peer to which this node belongs", nullok = True),
+ 'peer_node_id': Parameter(int, "Foreign node identifier at peer", nullok = True),
}
+ related_fields = {
+ 'nodenetworks': [Mixed(Parameter(int, "NodeNetwork identifier"),
+ Filter(NodeNetwork.fields))],
+ 'nodegroups': [Mixed(Parameter(int, "NodeGroup identifier"),
+ Parameter(str, "NodeGroup name"))],
+ 'conf_files': [Parameter(int, "ConfFile identifier")],
+ 'slices': [Mixed(Parameter(int, "Slice identifier"),
+ Parameter(str, "Slice name"))],
+ 'slices_whitelist': [Mixed(Parameter(int, "Slice identifier"),
+ Parameter(str, "Slice name"))]
+ }
+ # for Cache
+ class_key = 'hostname'
+ foreign_fields = ['boot_state','model','version']
+ # forget about these ones, they are read-only anyway
+ # handling them causes Cache to re-sync all over again
+ # 'date_created','last_updated'
+ foreign_xrefs = [
+ # in this case, we dont need the 'table' but Cache will look it up, so...
+ {'field' : 'site_id' , 'class' : 'Site' , 'table' : 'unused-on-direct-refs' } ,
+ ]
def validate_hostname(self, hostname):
if not valid_hostname(hostname):
raise PLCInvalidArgument, "Invalid hostname"
conflicts = Nodes(self.api, [hostname])
- for node_id, node in conflicts.iteritems():
- if 'node_id' not in self or self['node_id'] != node_id:
+ for node in conflicts:
+ if 'node_id' not in self or self['node_id'] != node['node_id']:
raise PLCInvalidArgument, "Hostname already in use"
return hostname
def validate_boot_state(self, boot_state):
- if boot_state not in BootStates(self.api):
+ boot_states = [row['boot_state'] for row in BootStates(self.api)]
+ if boot_state not in boot_states:
raise PLCInvalidArgument, "Invalid boot state"
return boot_state
+ validate_date_created = Row.validate_timestamp
+ validate_last_updated = Row.validate_timestamp
+ validate_last_contact = Row.validate_timestamp
+
+ def update_last_contact(self, commit = True):
+ """
+ Update last_contact field with current time
+ """
+
+ assert 'node_id' in self
+ assert self.table_name
+
+ self.api.db.do("UPDATE %s SET last_contact = CURRENT_TIMESTAMP " % (self.table_name) + \
+ " where node_id = %d" % ( self['node_id']) )
+ self.sync(commit)
+
+
+ def update_last_updated(self, commit = True):
+ """
+ Update last_updated field with current time
+ """
+
+ assert 'node_id' in self
+ assert self.table_name
+
+ self.api.db.do("UPDATE %s SET last_updated = CURRENT_TIMESTAMP " % (self.table_name) + \
+ " where node_id = %d" % (self['node_id']) )
+ self.sync(commit)
+
+ def associate_nodenetworks(self, auth, field, value):
+ """
+        Delete nodenetworks not found in value list (using DeleteNodeNetwork)
+ Add nodenetworks found in value list (using AddNodeNetwork)
+ Updates nodenetworks found w/ nodenetwork_id in value list (using UpdateNodeNetwork)
+ """
+
+        assert 'nodenetwork_ids' in self
+ assert 'node_id' in self
+ assert isinstance(value, list)
+
+ (nodenetwork_ids, blank, nodenetworks) = self.separate_types(value)
+
+ if self['nodenetwork_ids'] != nodenetwork_ids:
+ from PLC.Methods.DeleteNodeNetwork import DeleteNodeNetwork
+
+ stale_nodenetworks = set(self['nodenetwork_ids']).difference(nodenetwork_ids)
+
+ for stale_nodenetwork in stale_nodenetworks:
+ DeleteNodeNetwork.__call__(DeleteNodeNetwork(self.api), auth, stale_nodenetwork['nodenetwork_id'])
+
+ def associate_nodegroups(self, auth, field, value):
+ """
+        Add node to nodegroups found in value list (AddNodeToNodeGroup)
+        Delete node from nodegroups not found in value list (DeleteNodeFromNodeGroup)
+ """
+
+ from PLC.NodeGroups import NodeGroups
+
+ assert 'nodegroup_ids' in self
+ assert 'node_id' in self
+ assert isinstance(value, list)
+
+ (nodegroup_ids, nodegroup_names) = self.separate_types(value)[0:2]
+
+ if nodegroup_names:
+ nodegroups = NodeGroups(self.api, nodegroup_names, ['nodegroup_id']).dict('nodegroup_id')
+ nodegroup_ids += nodegroups.keys()
+
+ if self['nodegroup_ids'] != nodegroup_ids:
+ from PLC.Methods.AddNodeToNodeGroup import AddNodeToNodeGroup
+ from PLC.Methods.DeleteNodeFromNodeGroup import DeleteNodeFromNodeGroup
+
+ new_nodegroups = set(nodegroup_ids).difference(self['nodegroup_ids'])
+ stale_nodegroups = set(self['nodegroup_ids']).difference(nodegroup_ids)
+
+ for new_nodegroup in new_nodegroups:
+ AddNodeToNodeGroup.__call__(AddNodeToNodeGroup(self.api), auth, self['node_id'], new_nodegroup)
+ for stale_nodegroup in stale_nodegroups:
+ DeleteNodeFromNodeGroup.__call__(DeleteNodeFromNodeGroup(self.api), auth, self['node_id'], stale_nodegroup)
+
+
+
+ def associate_conf_files(self, auth, field, value):
+ """
+ Add conf_files found in value list (AddConfFileToNode)
+        Delete conf_files not found in value list (DeleteConfFileFromNode)
+ """
+
+ assert 'conf_file_ids' in self
+ assert 'node_id' in self
+ assert isinstance(value, list)
+
+ conf_file_ids = self.separate_types(value)[0]
+
+ if self['conf_file_ids'] != conf_file_ids:
+ from PLC.Methods.AddConfFileToNode import AddConfFileToNode
+ from PLC.Methods.DeleteConfFileFromNode import DeleteConfFileFromNode
+ new_conf_files = set(conf_file_ids).difference(self['conf_file_ids'])
+ stale_conf_files = set(self['conf_file_ids']).difference(conf_file_ids)
+
+ for new_conf_file in new_conf_files:
+ AddConfFileToNode.__call__(AddConfFileToNode(self.api), auth, new_conf_file, self['node_id'])
+ for stale_conf_file in stale_conf_files:
+ DeleteConfFileFromNode.__call__(DeleteConfFileFromNode(self.api), auth, stale_conf_file, self['node_id'])
+
+
+ def associate_slices(self, auth, field, value):
+ """
+        Add slices found in value list (AddSliceToNodes)
+        Delete slices not found in value list (DeleteSliceFromNodes)
+ """
+
+ from PLC.Slices import Slices
+
+ assert 'slice_ids' in self
+ assert 'node_id' in self
+ assert isinstance(value, list)
+
+ (slice_ids, slice_names) = self.separate_types(value)[0:2]
+
+ if slice_names:
+ slices = Slices(self.api, slice_names, ['slice_id']).dict('slice_id')
+ slice_ids += slices.keys()
+
+ if self['slice_ids'] != slice_ids:
+ from PLC.Methods.AddSliceToNodes import AddSliceToNodes
+ from PLC.Methods.DeleteSliceFromNodes import DeleteSliceFromNodes
+ new_slices = set(slice_ids).difference(self['slice_ids'])
+ stale_slices = set(self['slice_ids']).difference(slice_ids)
+
+ for new_slice in new_slices:
+ AddSliceToNodes.__call__(AddSliceToNodes(self.api), auth, new_slice, [self['node_id']])
+ for stale_slice in stale_slices:
+ DeleteSliceFromNodes.__call__(DeleteSliceFromNodes(self.api), auth, stale_slice, [self['node_id']])
+
+ def associate_slices_whitelist(self, auth, field, value):
+ """
+ Add slices found in value list to whitelist (AddSliceToNodesWhitelist)
+ Delete slices not found in value list from whitelist (DeleteSliceFromNodesWhitelist)
+ """
+
+ from PLC.Slices import Slices
+
+ assert 'slice_ids_whitelist' in self
+ assert 'node_id' in self
+ assert isinstance(value, list)
+
+ (slice_ids, slice_names) = self.separate_types(value)[0:2]
+
+ if slice_names:
+ slices = Slices(self.api, slice_names, ['slice_id']).dict('slice_id')
+ slice_ids += slices.keys()
+
+ if self['slice_ids_whitelist'] != slice_ids:
+ from PLC.Methods.AddSliceToNodesWhitelist import AddSliceToNodesWhitelist
+ from PLC.Methods.DeleteSliceFromNodesWhitelist import DeleteSliceFromNodesWhitelist
+ new_slices = set(slice_ids).difference(self['slice_ids_whitelist'])
+ stale_slices = set(self['slice_ids_whitelist']).difference(slice_ids)
+
+ for new_slice in new_slices:
+ AddSliceToNodesWhitelist.__call__(AddSliceToNodesWhitelist(self.api), auth, new_slice, [self['node_id']])
+ for stale_slice in stale_slices:
+ DeleteSliceFromNodesWhitelist.__call__(DeleteSliceFromNodesWhitelist(self.api), auth, stale_slice, [self['node_id']])
+
+
def delete(self, commit = True):
"""
Delete existing node.
"""
assert 'node_id' in self
+ assert 'nodenetwork_ids' in self
- # Delete all nodenetworks
- nodenetworks = NodeNetworks(self.api, self['nodenetwork_ids'])
- for nodenetwork in nodenetworks.values():
- nodenetwork.delete(commit = False)
+ # we need to clean up NodeNetworkSettings, so handling nodenetworks as part of join_tables does not work
+ for nodenetwork in NodeNetworks(self.api,self['nodenetwork_ids']):
+ nodenetwork.delete()
# Clean up miscellaneous join tables
- for table in ['nodegroup_node', 'slice_node', 'slice_attribute', 'node_session']:
- self.api.db.do("DELETE FROM %s" \
- " WHERE node_id = %d" % \
+ for table in self.join_tables:
+ self.api.db.do("DELETE FROM %s WHERE node_id = %d" % \
(table, self['node_id']))
# Mark as deleted
self['deleted'] = True
self.sync(commit)
+
class Nodes(Table):
"""
Representation of row(s) from the nodes table in the
database.
"""
- def __init__(self, api, node_id_or_hostname_list = None):
- self.api = api
+ def __init__(self, api, node_filter = None, columns = None):
+ Table.__init__(self, api, Node, columns)
sql = "SELECT %s FROM view_nodes WHERE deleted IS False" % \
- ", ".join(Node.fields)
-
- if node_id_or_hostname_list:
- # Separate the list into integers and strings
- node_ids = filter(lambda node_id: isinstance(node_id, (int, long)),
- node_id_or_hostname_list)
- hostnames = filter(lambda hostname: isinstance(hostname, StringTypes),
- node_id_or_hostname_list)
- sql += " AND (False"
- if node_ids:
- sql += " OR node_id IN (%s)" % ", ".join(map(str, node_ids))
- if hostnames:
- sql += " OR hostname IN (%s)" % ", ".join(api.db.quote(hostnames)).lower()
- sql += ")"
-
- rows = self.api.db.selectall(sql)
-
- for row in rows:
- self[row['node_id']] = node = Node(api, row)
- for aggregate in ['nodenetwork_ids', 'nodegroup_ids',
- 'conf_file_ids', 'root_person_ids', 'slice_ids',
- 'pcu_ids']:
- if not node.has_key(aggregate) or node[aggregate] is None:
- node[aggregate] = []
- else:
- node[aggregate] = map(int, node[aggregate].split(','))
+ ", ".join(self.columns)
+
+ if node_filter is not None:
+ if isinstance(node_filter, (list, tuple, set)):
+ # Separate the list into integers and strings
+ ints = filter(lambda x: isinstance(x, (int, long)), node_filter)
+ strs = filter(lambda x: isinstance(x, StringTypes), node_filter)
+ node_filter = Filter(Node.fields, {'node_id': ints, 'hostname': strs})
+ sql += " AND (%s) %s" % node_filter.sql(api, "OR")
+ elif isinstance(node_filter, dict):
+ node_filter = Filter(Node.fields, node_filter)
+ sql += " AND (%s) %s" % node_filter.sql(api, "AND")
+ elif isinstance (node_filter, StringTypes):
+ node_filter = Filter(Node.fields, {'hostname':[node_filter]})
+ sql += " AND (%s) %s" % node_filter.sql(api, "AND")
+ elif isinstance (node_filter, int):
+ node_filter = Filter(Node.fields, {'node_id':[node_filter]})
+ sql += " AND (%s) %s" % node_filter.sql(api, "AND")
+ else:
+ raise PLCInvalidArgument, "Wrong node filter %r"%node_filter
+
+ self.selectall(sql)