# Mark Huang <mlhuang@cs.princeton.edu>
# Copyright (C) 2006 The Trustees of Princeton University
#
-# $Id: Nodes.py,v 1.1 2006/09/06 15:36:07 mlhuang Exp $
+# $Id: Nodes.py,v 1.15 2006/10/27 15:32:43 mlhuang Exp $
#
from types import StringTypes
from PLC.NodeNetworks import NodeNetwork, NodeNetworks
from PLC.BootStates import BootStates
+def valid_hostname(hostname):
+ # Syntax-check a fully qualified hostname (DNS label rules, enumerated
+ # below). Returns a truthy value when hostname is well-formed -- the
+ # re match object -- and a falsy value (None, or the empty/None
+ # hostname itself) otherwise.
+ # NOTE(review): relies on the re module being imported elsewhere in
+ # this file -- confirm, since the visible import block does not show it.
+ # 1. Each part begins and ends with a letter or number.
+ # 2. Each part except the last can contain letters, numbers, or hyphens.
+ # 3. Each part is between 1 and 64 characters, including the trailing dot.
+ # 4. At least two parts.
+ # 5. Last part can only contain between 2 and 6 letters.
+ good_hostname = r'^([a-z0-9]([a-z0-9-]{0,61}[a-z0-9])?\.)+' \
+ r'[a-z]{2,6}$'
+ return hostname and \
+ re.match(good_hostname, hostname, re.IGNORECASE)
+
class Node(Row):
"""
Representation of a row in the nodes table. To use, optionally
instantiate with a dict of values. Update as you would a
- dict. Commit to the database with flush().
+ dict. Commit to the database with sync().
"""
+ table_name = 'nodes'
+ primary_key = 'node_id'
fields = {
'node_id': Parameter(int, "Node identifier"),
'hostname': Parameter(str, "Fully qualified hostname", max = 255),
+ 'site_id': Parameter(int, "Site at which this node is located"),
'boot_state': Parameter(str, "Boot state", max = 20),
- 'model': Parameter(str, "Make and model of the actual machine", max = 255),
+ 'model': Parameter(str, "Make and model of the actual machine", max = 255, nullok = True),
'boot_nonce': Parameter(str, "(Admin only) Random value generated by the node at last boot", max = 128),
'version': Parameter(str, "Apparent Boot CD version", max = 64),
'ssh_rsa_key': Parameter(str, "Last known SSH host key", max = 1024),
- 'date_created': Parameter(str, "Date and time when node entry was created"),
- 'deleted': Parameter(bool, "Has been deleted"),
+ 'date_created': Parameter(int, "Date and time when node entry was created", ro = True),
+ 'last_updated': Parameter(int, "Date and time when node entry was last updated", ro = True),
'key': Parameter(str, "(Admin only) Node key", max = 256),
- 'session': Parameter(str, "(Admin only) Node session value", max = 256),
- }
-
- # These fields are derived from join tables and are not actually
- # in the nodes table.
- join_fields = {
- 'nodenetwork_ids': Parameter([int], "List of network interfaces that this node has"),
- }
-
- # These fields are derived from join tables and are not returned
- # by default unless specified.
- extra_fields = {
- 'nodegroup_ids': Parameter([int], "List of node groups that this node is in"),
- 'conf_file_ids': Parameter([int], "List of configuration files specific to this node"),
- 'root_person_ids': Parameter([int], "(Admin only) List of people who have root access to this node"),
- # XXX Too inefficient
- # 'slice_ids': Parameter([int], "List of slices on this node"),
- 'pcu_ids': Parameter([int], "List of PCUs that control this node"),
- 'site_id': Parameter([int], "Site at which this node is located"),
+ 'session': Parameter(str, "(Admin only) Node session value", max = 256, ro = True),
+ 'nodenetwork_ids': Parameter([int], "List of network interfaces that this node has", ro = True),
+ 'nodegroup_ids': Parameter([int], "List of node groups that this node is in", ro = True),
+ 'conf_file_ids': Parameter([int], "List of configuration files specific to this node", ro = True),
+ # 'root_person_ids': Parameter([int], "(Admin only) List of people who have root access to this node", ro = True),
+ 'slice_ids': Parameter([int], "List of slices on this node", ro = True),
+ 'pcu_ids': Parameter([int], "List of PCUs that control this node", ro = True),
+ 'ports': Parameter([int], "List of PCU ports that this node is connected to", ro = True),
}
- # Primary interface values
- primary_nodenetwork_fields = dict(filter(lambda (key, value): \
- key not in ['node_id', 'is_primary', 'hostname'],
- NodeNetwork.fields.items()))
-
- extra_fields.update(primary_nodenetwork_fields)
-
- default_fields = dict(fields.items() + join_fields.items())
- all_fields = dict(default_fields.items() + extra_fields.items())
-
- def __init__(self, api, fields):
- Row.__init__(self, fields)
- self.api = api
-
def validate_hostname(self, hostname):
- # 1. Each part begins and ends with a letter or number.
- # 2. Each part except the last can contain letters, numbers, or hyphens.
- # 3. Each part is between 1 and 64 characters, including the trailing dot.
- # 4. At least two parts.
- # 5. Last part can only contain between 2 and 6 letters.
- good_hostname = r'^([a-z0-9]([a-z0-9-]{0,61}[a-z0-9])?\.)+' \
- r'[a-z]{2,6}$'
- if not hostname or \
- not re.match(good_hostname, hostname, re.IGNORECASE):
+ if not valid_hostname(hostname):
raise PLCInvalidArgument, "Invalid hostname"
conflicts = Nodes(self.api, [hostname])
for node_id, node in conflicts.iteritems():
- if not node['deleted'] and ('node_id' not in self or self['node_id'] != node_id):
- raise PLCInvalidArgument, "Hostname already in use"
-
- # Check for conflicts with a nodenetwork hostname
- conflicts = NodeNetworks(self.api, [hostname])
- for nodenetwork_id in conflicts:
- if 'nodenetwork_ids' not in self or nodenetwork_id not in self['nodenetwork_ids']:
+ if 'node_id' not in self or self['node_id'] != node_id:
raise PLCInvalidArgument, "Hostname already in use"
return hostname
return boot_state
- def flush(self, commit = True):
- """
- Flush changes back to the database.
- """
-
- self.validate()
-
- # Fetch a new node_id if necessary
- if 'node_id' not in self:
- rows = self.api.db.selectall("SELECT NEXTVAL('nodes_node_id_seq') AS node_id")
- if not rows:
- raise PLCDBError, "Unable to fetch new node_id"
- self['node_id'] = rows[0]['node_id']
- insert = True
- else:
- insert = False
-
- # Filter out fields that cannot be set or updated directly
- fields = dict(filter(lambda (key, value): key in self.fields,
- self.items()))
-
- # Parameterize for safety
- keys = fields.keys()
- values = [self.api.db.param(key, value) for (key, value) in fields.items()]
-
- if insert:
- # Insert new row in nodes table
- sql = "INSERT INTO nodes (%s) VALUES (%s)" % \
- (", ".join(keys), ", ".join(values))
- else:
- # Update existing row in nodes table
- columns = ["%s = %s" % (key, value) for (key, value) in zip(keys, values)]
- sql = "UPDATE nodes SET " + \
- ", ".join(columns) + \
- " WHERE node_id = %(node_id)d"
-
- self.api.db.do(sql, fields)
-
- if commit:
- self.api.db.commit()
-
def delete(self, commit = True):
"""
Delete existing node.
nodenetwork.delete(commit = False)
# Clean up miscellaneous join tables
- for table in ['nodegroup_nodes', 'pod_hash', 'conf_assoc',
- 'node_root_access', 'dslice03_slicenode',
- 'pcu_ports']:
+ for table in ['nodegroup_node', 'slice_node', 'slice_attribute', 'node_session']:
self.api.db.do("DELETE FROM %s" \
" WHERE node_id = %d" % \
(table, self['node_id']))
# Mark as deleted
self['deleted'] = True
- self.flush(commit)
+ self.sync(commit)
class Nodes(Table):
"""
database.
"""
- def __init__(self, api, node_id_or_hostname_list = None, extra_fields = []):
+ def __init__(self, api, node_id_or_hostname_list = None):
self.api = api
- sql = "SELECT nodes.*, node_nodenetworks.nodenetwork_id"
-
- # For compatibility and convenience, support returning primary
- # interface values directly in the Node structure.
- extra_nodenetwork_fields = set(extra_fields).intersection(Node.primary_nodenetwork_fields)
-
- # N.B.: Joined IDs may be marked as deleted in their primary tables
- join_tables = {
- # extra_field: (extra_table, extra_column, join_using)
- 'nodegroup_ids': ('nodegroup_nodes', 'nodegroup_id', 'node_id'),
- 'conf_file_ids': ('conf_assoc', 'conf_file_id', 'node_id'),
- 'root_person_ids': ('node_root_access', 'person_id AS root_person_id', 'node_id'),
- 'slice_ids': ('dslice03_slicenode', 'slice_id', 'node_id'),
- 'pcu_ids': ('pcu_ports', 'pcu_id', 'node_id'),
- }
-
- extra_fields = filter(join_tables.has_key, extra_fields)
- extra_tables = ["%s USING (%s)" % \
- (join_tables[field][0], join_tables[field][2]) \
- for field in extra_fields]
- extra_columns = ["%s.%s" % \
- (join_tables[field][0], join_tables[field][1]) \
- for field in extra_fields]
-
- if extra_columns:
- sql += ", " + ", ".join(extra_columns)
-
- sql += " FROM nodes" \
- " LEFT JOIN node_nodenetworks USING (node_id)"
-
- if extra_tables:
- sql += " LEFT JOIN " + " LEFT JOIN ".join(extra_tables)
-
- sql += " WHERE deleted IS False"
+ sql = "SELECT %s FROM view_nodes WHERE deleted IS False" % \
+ ", ".join(Node.fields)
if node_id_or_hostname_list:
# Separate the list into integers and strings
sql += " OR hostname IN (%s)" % ", ".join(api.db.quote(hostnames)).lower()
sql += ")"
- # So that if the node has a primary interface, it is listed
- # first.
- if 'nodenetwork_ids' in extra_fields:
- sql += " ORDER BY node_nodenetworks.is_primary DESC"
-
rows = self.api.db.selectall(sql)
- for row in rows:
- if self.has_key(row['node_id']):
- node = self[row['node_id']]
- node.update(row)
- else:
- self[row['node_id']] = Node(api, row)
-
- # XXX Should instead have a site_node join table that is
- # magically taken care of above.
- if rows:
- sql = "SELECT node_id, sites.site_id FROM nodegroup_nodes" \
- " INNER JOIN sites USING (nodegroup_id)" \
- " WHERE node_id IN (%s)" % ", ".join(map(str, self.keys()))
-
- rows = self.api.db.selectall(sql, self)
- for row in rows:
- assert self.has_key(row['node_id'])
- node = self[row['node_id']]
- node.update(row)
- # Fill in optional primary interface fields for each node
- if extra_nodenetwork_fields:
- # More efficient to get all the nodenetworks at once
- nodenetwork_ids = []
- for node in self.values():
- nodenetwork_ids += node['nodenetwork_ids']
-
- # Remove duplicates
- nodenetwork_ids = set(nodenetwork_ids)
-
- # Get all nodenetwork information
- nodenetworks = NodeNetworks(self.api, nodenetwork_ids)
-
- for node in self.values():
- for nodenetwork_id in node['nodenetwork_ids']:
- nodenetwork = nodenetworks[nodenetwork_id]
- if nodenetwork['is_primary']:
- for field in extra_nodenetwork_fields:
- node[field] = nodenetwork[field]
- break
+ for row in rows:
+ self[row['node_id']] = node = Node(api, row)
+ for aggregate in ['nodenetwork_ids', 'nodegroup_ids',
+ 'conf_file_ids', 'root_person_ids', 'slice_ids',
+ 'pcu_ids']:
+ if not node.has_key(aggregate) or node[aggregate] is None:
+ node[aggregate] = []
+ else:
+ node[aggregate] = map(int, node[aggregate].split(','))