# Mark Huang <mlhuang@cs.princeton.edu>
# Copyright (C) 2006 The Trustees of Princeton University
#
-# $Id$
-#
-
+from datetime import datetime
from types import StringTypes
import re
from PLC.Faults import *
-from PLC.Parameter import Parameter
+from PLC.Parameter import Parameter, Mixed
+from PLC.Filter import Filter
from PLC.Debug import profile
-from PLC.Table import Row, Table
-from PLC.NodeNetworks import NodeNetwork, NodeNetworks
+from PLC.Storage.AlchemyObject import AlchemyObj
+from PLC.NodeTypes import NodeTypes
from PLC.BootStates import BootStates
-
-class Node(Row):
+from PLC.Interfaces import Interface, Interfaces
+from PLC.ConfFileNodes import ConfFileNode
+from PLC.SliceNodes import SliceNode
+from PLC.SliceNodeWhitelists import SliceNodeWhitelist
+from PLC.PCUNodes import PCUNode
+from PLC.PCUNodePorts import PCUNodePort
+from PLC.NodeTags import NodeTag
+from PLC.NodeGroups import NodeGroup
+
+def valid_hostname(hostname):
+    """Return a truthy value (a match object) iff hostname is a syntactically valid FQDN."""
+    # 1. Each part begins and ends with a letter or number.
+    # 2. Each part except the last can contain letters, numbers, or hyphens.
+    # 3. Each part is between 1 and 64 characters, including the trailing dot.
+    # 4. At least two parts.
+    # 5. Last part can only contain between 2 and 6 letters.
+    good_hostname = r'^([a-z0-9]([a-z0-9-]{0,61}[a-z0-9])?\.)+' \
+                    r'[a-z]{2,6}$'
+    return hostname and \
+           re.match(good_hostname, hostname, re.IGNORECASE)
+
+class Node(AlchemyObj):
    """
    Representation of a row in the nodes table. To use, optionally
    instantiate with a dict of values. Update as you would a
-    dict. Commit to the database with flush().
+    dict. Commit to the database with sync().
    """
+    tablename = 'nodes'
+    join_tables = [ 'slice_node', 'peer_node', 'slice_tag',
+                    'node_session', 'node_slice_whitelist',
+                    'node_tag', 'conf_file_node', 'pcu_node', 'leases', ]
    fields = {
-        'node_id': Parameter(int, "Node identifier"),
-        'hostname': Parameter(str, "Fully qualified hostname"),
-        'boot_state': Parameter(str, "Boot state"),
-        'model': Parameter(str, "Make and model of the actual machine"),
-        'boot_nonce': Parameter(str, "(Admin only) Random value generated by the node at last boot"),
-        'version': Parameter(str, "Apparent Boot CD version"),
-        'ssh_rsa_key': Parameter(str, "Last known SSH host key"),
-        'date_created': Parameter(str, "Date and time when node entry was created"),
-        'deleted': Parameter(bool, "Has been deleted"),
-        'key': Parameter(str, "(Admin only) Node key"),
-        'session': Parameter(str, "(Admin only) Node session value"),
-        }
-
-    # These fields are derived from join tables and are not actually
-    # in the nodes table.
-    join_fields = {
-        'nodenetwork_ids': Parameter([int], "List of network interfaces that this node has"),
-        }
-
-    # These fields are derived from join tables and are not returned
-    # by default unless specified.
-    extra_fields = {
-        'nodegroup_ids': Parameter([int], "List of node groups that this node is in"),
-        'conf_file_ids': Parameter([int], "List of configuration files specific to this node"),
-        'root_person_ids': Parameter([int], "(Admin only) List of people who have root access to this node"),
-        # XXX Too inefficient
-        # 'slice_ids': Parameter([int], "List of slices on this node"),
-        'pcu_ids': Parameter([int], "List of PCUs that control this node"),
-        'site_id': Parameter([int], "Site at which this node is located"),
+        'node_id': Parameter(int, "Node identifier", primary_key=True),
+        'node_type': Parameter(str,"Node type",max=20),
+        'hostname': Parameter(str, "Fully qualified hostname", max = 255),
+        'site_id': Parameter(int, "Site at which this node is located"),
+        'boot_state': Parameter(str, "Boot state", max = 20, nullok=True),
+        'run_level': Parameter(str, "Run level", max = 20, nullok=True),
+        'model': Parameter(str, "Make and model of the actual machine", max = 255, nullok = True),
+        'boot_nonce': Parameter(str, "(Admin only) Random value generated by the node at last boot", max = 128, nullok=True),
+        'version': Parameter(str, "Apparent Boot CD version", max = 64, nullok=True),
+        'ssh_rsa_key': Parameter(str, "Last known SSH host key", max = 1024, nullok=True),
+        'date_created': Parameter(datetime, "Date and time when node entry was created", ro = True),
+        'last_updated': Parameter(datetime, "Date and time when node entry was last updated", ro = True),
+        'last_contact': Parameter(datetime, "Date and time when node last contacted plc", ro = True, nullok=True),
+        'last_boot': Parameter(datetime, "Date and time when node last booted", ro = True, nullok=True),
+        'last_download': Parameter(datetime, "Date and time when node boot image was created", ro = True, nullok=True),
+        'last_pcu_reboot': Parameter(datetime, "Date and time when PCU reboot was attempted", ro = True, nullok=True),
+        'last_pcu_confirmation': Parameter(datetime, "Date and time when PCU reboot was confirmed", ro = True, nullok=True),
+        'last_time_spent_online': Parameter(datetime, "Length of time the node was last online before shutdown/failure", ro = True, nullok=True),
+        'last_time_spent_offline': Parameter(datetime, "Length of time the node was last offline after failure and before reboot", ro = True, nullok=True),
+        'verified': Parameter(bool, "Whether the node configuration is verified correct", ro=False, nullok=True),
+        'key': Parameter(str, "(Admin only) Node key", max = 256, nullok=True),
+        'session': Parameter(str, "(Admin only) Node session value", max = 256, ro = True, nullok=True),
+        'interface_ids': Parameter([int], "List of network interfaces that this node has", joined=True),
+        'conf_file_ids': Parameter([int], "List of configuration files specific to this node", joined=True),
+        # 'root_person_ids': Parameter([int], "(Admin only) List of people who have root access to this node"),
+        'slice_ids': Parameter([int], "List of slices on this node", joined=True),
+        'slice_ids_whitelist': Parameter([int], "List of slices allowed on this node", joined=True),
+        'pcu_ids': Parameter([int], "List of PCUs that control this node", joined=True),
+        'ports': Parameter([int], "List of PCU ports that this node is connected to", joined=True),
+        'peer_id': Parameter(int, "Peer to which this node belongs", nullok = True),
+        'peer_node_id': Parameter(int, "Foreign node identifier at peer", nullok = True),
+        'node_tag_ids' : Parameter ([int], "List of tags attached to this node", joined=True),
+        'nodegroup_ids': Parameter([int], "List of node groups that this node is in", joined=True),
    }
-
-    # Primary interface values
-    primary_nodenetwork_fields = dict(filter(lambda (key, value): \
-        key not in ['node_id', 'is_primary', 'hostname'],
-        NodeNetwork.fields.items()))
-
-    extra_fields.update(primary_nodenetwork_fields)
-
-    default_fields = dict(fields.items() + join_fields.items())
-    all_fields = dict(default_fields.items() + extra_fields.items())
-
-    def __init__(self, api, fields):
-        Row.__init__(self, fields)
-        self.api = api
+    tags = { }
    def validate_hostname(self, hostname):
-        # 1. Each part begins and ends with a letter or number.
-        # 2. Each part except the last can contain letters, numbers, or hyphens.
-        # 3. Each part is between 1 and 64 characters, including the trailing dot.
-        # 4. At least two parts.
-        # 5. Last part can only contain between 2 and 6 letters.
-        good_hostname = r'^([a-z0-9]([a-z0-9-]{0,61}[a-z0-9])?\.)+' \
-                        r'[a-z]{2,6}$'
-        if not hostname or \
-           not re.match(good_hostname, hostname, re.IGNORECASE):
+        # hostnames are normalized to lowercase; syntax rules live in valid_hostname()
+        hostname = hostname.lower()
+        if not valid_hostname(hostname):
            raise PLCInvalidArgument, "Invalid hostname"
-        conflicts = Nodes(self.api, [hostname])
-        for node_id, node in conflicts.iteritems():
-            if not node['deleted'] and ('node_id' not in self or self['node_id'] != node_id):
-                raise PLCInvalidArgument, "Hostname already in use"
-
-        # Check for conflicts with a nodenetwork hostname
-        conflicts = NodeNetworks(self.api, [hostname])
-        for nodenetwork_id in conflicts:
-            if 'nodenetwork_ids' not in self or nodenetwork_id not in self['nodenetwork_ids']:
+        # reject the hostname only when some *other* node already uses it
+        conflicts = Node().select(filter={'hostname': hostname})
+        for node in conflicts:
+            if 'node_id' not in self or self['node_id'] != node['node_id']:
                raise PLCInvalidArgument, "Hostname already in use"
        return hostname
-    def validate_boot_state(self, boot_state):
-        if boot_state not in BootStates(self.api):
-            raise PLCInvalidArgument, "Invalid boot state"
+    def validate_node_type(self, node_type):
+        # Make sure the node type is one of the registered node types
+        conflicts = NodeTypes(self.api, [node_type])
+        if not conflicts:
+            raise PLCInvalidArgument, "Invalid node_type"
+        return node_type
+    def validate_boot_state(self, boot_state):
+        # Accept only boot states present in the boot_states table
+        boot_states = [row['boot_state'] for row in BootStates(self.api)]
+        if boot_state not in boot_states:
+            raise PLCInvalidArgument, "Invalid boot state %r"%boot_state
        return boot_state
-    def flush(self, commit = True):
+    # all datetime columns share the generic timestamp validator
+    validate_date_created = AlchemyObj.validate_timestamp
+    validate_last_updated = AlchemyObj.validate_timestamp
+    validate_last_contact = AlchemyObj.validate_timestamp
+    validate_last_boot = AlchemyObj.validate_timestamp
+    validate_last_download = AlchemyObj.validate_timestamp
+    validate_last_pcu_reboot = AlchemyObj.validate_timestamp
+    validate_last_pcu_confirmation = AlchemyObj.validate_timestamp
+
+    def update_readonly_int(self, col_name, commit = True):
+        """
+        Write a read-only integer column directly via SQL, then sync the row.
+        """
+        assert 'node_id' in self
+        # the class attribute is spelled 'tablename' (see the fields block above)
+        assert self.tablename
+
+        self.api.db.do("UPDATE %s SET %s = %s" % (self.tablename, col_name, self[col_name]) + \
+                       " where node_id = %d" % (self['node_id']) )
+        self.sync(commit)
+
+    def update_timestamp(self, col_name, commit = True):
        """
-        Flush changes back to the database.
+        Update col_name field with current time
        """
+        assert 'node_id' in self
+        # take a single timestamp so the in-memory object and the stored
+        # row cannot disagree, and honor the caller's commit flag
+        ts = datetime.now()
+        self[col_name] = ts
+        fields = {
+            'node_id': self['node_id'],
+            col_name: ts
+        }
+        Node(self.api, fields).sync(commit)
+
+    # convenience wrappers: each stamps the named column with the current time
+    def update_last_boot(self, commit = True):
+        self.update_timestamp('last_boot', commit)
+    def update_last_download(self, commit = True):
+        self.update_timestamp('last_download', commit)
+    def update_last_pcu_reboot(self, commit = True):
+        self.update_timestamp('last_pcu_reboot', commit)
+    def update_last_pcu_confirmation(self, commit = True):
+        self.update_timestamp('last_pcu_confirmation', commit)
+
+    def update_last_contact(self, commit = True):
+        self.update_timestamp('last_contact', commit)
+    def update_last_updated(self, commit = True):
+        self.update_timestamp('last_updated', commit)
+
+    def update_tags(self, tags):
+        """
+        Create or update this node's tags from a {tagname: value} dict.
+        """
+        from PLC.Shell import Shell
+        from PLC.NodeTags import NodeTags
+        from PLC.TagTypes import TagTypes
+        from PLC.Methods.AddNodeTag import AddNodeTag
+        from PLC.Methods.UpdateNodeTag import UpdateNodeTag
+        shell = Shell()
+        for (tagname,value) in tags.iteritems():
+            # the tagtype instance is assumed to exist, just check that
+            if not TagTypes(self.api,{'tagname':tagname}):
+                raise PLCInvalidArgument,"No such TagType %s"%tagname
+            node_tags=NodeTags(self.api,{'tagname':tagname,'node_id':self['node_id']})
+            if not node_tags:
+                AddNodeTag(self.api).__call__(shell.auth,self['node_id'],tagname,value)
+            else:
+                UpdateNodeTag(self.api).__call__(shell.auth,node_tags[0]['node_tag_id'],value)
- self.validate()
+    def associate_interfaces(self, auth, field, value):
+        """
+        Delete interfaces not found in value list (using DeleteInterface)
+        Add interfaces found in value list (using AddInterface)
+        Updates interfaces found w/ interface_id in value list (using UpdateInterface)
+        """
-        # Fetch a new node_id if necessary
-        if 'node_id' not in self:
-            rows = self.api.db.selectall("SELECT NEXTVAL('nodes_node_id_seq') AS node_id")
-            if not rows:
-                raise PLCDBError, "Unable to fetch new node_id"
-            self['node_id'] = rows[0]['node_id']
-            insert = True
-        else:
-            insert = False
+        assert 'interface_ids' in self
+        assert 'node_id' in self
+        assert isinstance(value, list)
-        # Filter out fields that cannot be set or updated directly
-        fields = dict(filter(lambda (key, value): key in self.fields,
-                             self.items()))
+        (interface_ids, blank, interfaces) = self.separate_types(value)
-        # Parameterize for safety
-        keys = fields.keys()
-        values = [self.api.db.param(key, value) for (key, value) in fields.items()]
+        if self['interface_ids'] != interface_ids:
+            from PLC.Methods.DeleteInterface import DeleteInterface
-        if insert:
-            # Insert new row in nodes table
-            sql = "INSERT INTO nodes (%s) VALUES (%s)" % \
-                  (", ".join(keys), ", ".join(values))
-        else:
-            # Update existing row in nodes table
-            columns = ["%s = %s" % (key, value) for (key, value) in zip(keys, values)]
-            sql = "UPDATE nodes SET " + \
-                  ", ".join(columns) + \
-                  " WHERE node_id = %(node_id)d"
+            stale_interfaces = set(self['interface_ids']).difference(interface_ids)
-        self.api.db.do(sql, fields)
+            # stale_interfaces holds plain interface ids (interface_ids is a
+            # list of ints), so pass each id directly
+            for stale_interface in stale_interfaces:
+                DeleteInterface.__call__(DeleteInterface(self.api), auth, stale_interface)
-        if commit:
-            self.api.db.commit()
+    def associate_conf_files(self, auth, field, value):
+        """
+        Add conf_files found in value list (AddConfFileToNode)
+        Deletes conf_files not found in value list (DeleteConfFileFromNode)
+        """
-    def delete(self, commit = True):
+        assert 'conf_file_ids' in self
+        assert 'node_id' in self
+        assert isinstance(value, list)
+
+        conf_file_ids = self.separate_types(value)[0]
+
+        # only touch the database when the requested set actually differs
+        if self['conf_file_ids'] != conf_file_ids:
+            from PLC.Methods.AddConfFileToNode import AddConfFileToNode
+            from PLC.Methods.DeleteConfFileFromNode import DeleteConfFileFromNode
+            new_conf_files = set(conf_file_ids).difference(self['conf_file_ids'])
+            stale_conf_files = set(self['conf_file_ids']).difference(conf_file_ids)
+
+            for new_conf_file in new_conf_files:
+                AddConfFileToNode.__call__(AddConfFileToNode(self.api), auth, new_conf_file, self['node_id'])
+            for stale_conf_file in stale_conf_files:
+                DeleteConfFileFromNode.__call__(DeleteConfFileFromNode(self.api), auth, stale_conf_file, self['node_id'])
+
+    def associate_slices(self, auth, field, value):
        """
-        Delete existing node.
+        Add slices found in value list (AddSliceToNodes)
+        Delete slices not found in value list (DeleteSliceFromNodes)
        """
+        from PLC.Slices import Slices
+
+        assert 'slice_ids' in self
        assert 'node_id' in self
+        assert isinstance(value, list)
-        # Delete all nodenetworks
-        nodenetworks = NodeNetworks(self.api, self['nodenetwork_ids'])
-        for nodenetwork in nodenetworks.values():
-            nodenetwork.delete(commit = False)
+        (slice_ids, slice_names) = self.separate_types(value)[0:2]
-        # Clean up miscellaneous join tables
-        for table in ['nodegroup_nodes', 'pod_hash', 'conf_assoc',
-                      'node_root_access', 'dslice03_slicenode',
-                      'pcu_ports']:
-            self.api.db.do("DELETE FROM %s" \
-                           " WHERE node_id = %d" % \
-                           (table, self['node_id']))
+        # resolve any slice names in the value list to slice ids first
+        if slice_names:
+            slices = Slices(self.api, slice_names, ['slice_id']).dict('slice_id')
+            slice_ids += slices.keys()
-        # Mark as deleted
-        self['deleted'] = True
-        self.flush(commit)
+        if self['slice_ids'] != slice_ids:
+            from PLC.Methods.AddSliceToNodes import AddSliceToNodes
+            from PLC.Methods.DeleteSliceFromNodes import DeleteSliceFromNodes
+            new_slices = set(slice_ids).difference(self['slice_ids'])
+            stale_slices = set(self['slice_ids']).difference(slice_ids)
-class Nodes(Table):
-    """
-    Representation of row(s) from the nodes table in the
-    database.
-    """
+            for new_slice in new_slices:
+                AddSliceToNodes.__call__(AddSliceToNodes(self.api), auth, new_slice, [self['node_id']])
+            for stale_slice in stale_slices:
+                DeleteSliceFromNodes.__call__(DeleteSliceFromNodes(self.api), auth, stale_slice, [self['node_id']])
- def __init__(self, api, node_id_or_hostname_list = None, extra_fields = []):
- self.api = api
+    def associate_slices_whitelist(self, auth, field, value):
        """
+        Add slices found in value list to whitelist (AddSliceToNodesWhitelist)
+        Delete slices not found in value list from whitelist (DeleteSliceFromNodesWhitelist)
        """
-        sql = "SELECT nodes.*, node_nodenetworks.nodenetwork_id"
+        from PLC.Slices import Slices
-        # For compatibility and convenience, support returning primary
-        # interface values directly in the Node structure.
-        extra_nodenetwork_fields = set(extra_fields).intersection(Node.primary_nodenetwork_fields)
+        assert 'slice_ids_whitelist' in self
        assert 'node_id' in self
+        assert isinstance(value, list)
-        # N.B.: Joined IDs may be marked as deleted in their primary tables
-        join_tables = {
-            # extra_field: (extra_table, extra_column, join_using)
-            'nodegroup_ids': ('nodegroup_nodes', 'nodegroup_id', 'node_id'),
-            'conf_file_ids': ('conf_assoc', 'conf_file_id', 'node_id'),
-            'root_person_ids': ('node_root_access', 'person_id AS root_person_id', 'node_id'),
-            'slice_ids': ('dslice03_slicenode', 'slice_id', 'node_id'),
-            'pcu_ids': ('pcu_ports', 'pcu_id', 'node_id'),
-            }
+        (slice_ids, slice_names) = self.separate_types(value)[0:2]
-        extra_fields = filter(join_tables.has_key, extra_fields)
-        extra_tables = ["%s USING (%s)" % \
-                        (join_tables[field][0], join_tables[field][2]) \
-                        for field in extra_fields]
-        extra_columns = ["%s.%s" % \
-                         (join_tables[field][0], join_tables[field][1]) \
-                         for field in extra_fields]
+        # resolve any slice names to ids before computing the delta
+        if slice_names:
+            slices = Slices(self.api, slice_names, ['slice_id']).dict('slice_id')
+            slice_ids += slices.keys()
-        if extra_columns:
-            sql += ", " + ", ".join(extra_columns)
+        if self['slice_ids_whitelist'] != slice_ids:
+            from PLC.Methods.AddSliceToNodesWhitelist import AddSliceToNodesWhitelist
+            from PLC.Methods.DeleteSliceFromNodesWhitelist import DeleteSliceFromNodesWhitelist
+            new_slices = set(slice_ids).difference(self['slice_ids_whitelist'])
+            stale_slices = set(self['slice_ids_whitelist']).difference(slice_ids)
-        sql += " FROM nodes" \
-               " LEFT JOIN node_nodenetworks USING (node_id)"
+            for new_slice in new_slices:
+                AddSliceToNodesWhitelist.__call__(AddSliceToNodesWhitelist(self.api), auth, new_slice, [self['node_id']])
+            for stale_slice in stale_slices:
+                DeleteSliceFromNodesWhitelist.__call__(DeleteSliceFromNodesWhitelist(self.api), auth, stale_slice, [self['node_id']])
- if extra_tables:
- sql += " LEFT JOIN " + " LEFT JOIN ".join(extra_tables)
- sql += " WHERE deleted IS False"
- if node_id_or_hostname_list:
+    def sync(self, commit=True, validate=True):
+        """
+        Validate this object and write it to the database: insert a new row
+        when no node_id is set yet, update the existing row otherwise.
+        last_updated (and date_created on insert) are stamped with the
+        current time.
+        """
+        AlchemyObj.sync(self, commit=commit, validate=validate)
+        ts = datetime.now()
+        self['last_updated'] = ts
+        if 'node_id' not in self:
+            self['date_created'] = ts
+            AlchemyObj.insert(self, dict(self))
+        else:
+            AlchemyObj.update(self, {'node_id': self['node_id']}, dict(self))
+
+    def delete(self, commit = True):
+        """
+        Delete existing node.
+        """
+
+        assert 'node_id' in self
+        assert 'interface_ids' in self
+        # drop this node's interfaces first, then the node row itself
+        # NOTE(review): rows in join_tables are not explicitly cleaned here --
+        # confirm AlchemyObj.delete takes care of them.
+        Interface().delete(filter={'interface_id': self['interface_ids']})
+        AlchemyObj.delete(self, dict(self))
+
+class Nodes(list):
+    """
+    Representation of row(s) from the nodes table in the
+    database.
+    """
+
+    def __init__(self, api, node_filter = None, columns = None):
+        """
+        Build the list of Node objects matching node_filter.
+
+        node_filter may be None (all nodes), a list/tuple/set mixing node
+        ids and hostnames, a filter dict, a single hostname, or a single
+        node id.  columns restricts which joined id-lists are filled in.
+        """
+        self.api = api
+        self.refresh(api)
+        # as many left joins as requested tags
+        if not node_filter:
+            nodes = Node().select()
+        elif isinstance(node_filter, (list, tuple, set)):
            # Separate the list into integers and strings
-            node_ids = filter(lambda node_id: isinstance(node_id, (int, long)),
-                              node_id_or_hostname_list)
-            hostnames = filter(lambda hostname: isinstance(hostname, StringTypes),
-                               node_id_or_hostname_list)
-            sql += " AND (False"
-            if node_ids:
-                sql += " OR node_id IN (%s)" % ", ".join(map(str, node_ids))
-            if hostnames:
-                sql += " OR hostname IN (%s)" % ", ".join(api.db.quote(hostnames)).lower()
-            sql += ")"
-
-        # So that if the node has a primary interface, it is listed
-        # first.
-        if 'nodenetwork_ids' in extra_fields:
-            sql += " ORDER BY node_nodenetworks.is_primary DESC"
-
-        rows = self.api.db.selectall(sql)
-        for row in rows:
-            if self.has_key(row['node_id']):
-                node = self[row['node_id']]
-                node.update(row)
-            else:
-                self[row['node_id']] = Node(api, row)
-
-        # XXX Should instead have a site_node join table that is
-        # magically taken care of above.
-        if rows:
-            sql = "SELECT node_id, sites.site_id FROM nodegroup_nodes" \
-                  " INNER JOIN sites USING (nodegroup_id)" \
-                  " WHERE node_id IN (%s)" % ", ".join(map(str, self.keys()))
-
-            rows = self.api.db.selectall(sql, self)
-            for row in rows:
-                assert self.has_key(row['node_id'])
-                node = self[row['node_id']]
-                node.update(row)
-
-        # Fill in optional primary interface fields for each node
-        if extra_nodenetwork_fields:
-            # More efficient to get all the nodenetworks at once
-            nodenetwork_ids = []
-            for node in self.values():
-                nodenetwork_ids += node['nodenetwork_ids']
-
-            # Remove duplicates
-            nodenetwork_ids = set(nodenetwork_ids)
-
-            # Get all nodenetwork information
-            nodenetworks = NodeNetworks(self.api, nodenetwork_ids)
-
-            for node in self.values():
-                for nodenetwork_id in node['nodenetwork_ids']:
-                    nodenetwork = nodenetworks[nodenetwork_id]
-                    if nodenetwork['is_primary']:
-                        for field in extra_nodenetwork_fields:
-                            node[field] = nodenetwork[field]
-                        break
+            ints = filter(lambda x: isinstance(x, (int, long)), node_filter)
+            strs = filter(lambda x: isinstance(x, StringTypes), node_filter)
+            nodes = Node().select(filter={'node_id': ints, 'hostname': strs})
+        elif isinstance(node_filter, dict):
+            # a dict is already a filter; pass it through unchanged
+            nodes = Node().select(filter=node_filter)
+        elif isinstance (node_filter, StringTypes):
+            nodes = Node().select(filter={'hostname': node_filter})
+        elif isinstance (node_filter, (int, long)):
+            nodes = Node().select(filter={'node_id': node_filter})
+        else:
+            raise PLCInvalidArgument, "Wrong node filter %r"%node_filter
+
+        for node in nodes:
+            node = Node(api, object=node)
+            if not columns or 'interface_ids' in columns:
+                interfaces = Interface().select(filter={'node_id': node['node_id']})
+                node['interface_ids'] = [rec.interface_id for rec in interfaces]
+            if not columns or 'conf_file_ids' in columns:
+                conf_files = ConfFileNode().select(filter={'node_id': node['node_id']})
+                node['conf_file_ids'] = [rec.conf_file_id for rec in conf_files]
+            if not columns or 'slice_ids' in columns:
+                slice_nodes = SliceNode().select(filter={'node_id': node['node_id']})
+                node['slice_ids'] = [rec.slice_id for rec in slice_nodes]
+            if not columns or 'slice_ids_whitelist' in columns:
+                slice_whitelist = SliceNodeWhitelist().select(filter={'node_id': node['node_id']})
+                node['slice_ids_whitelist'] = [rec.slice_id for rec in slice_whitelist]
+            if not columns or 'pcu_ids' in columns:
+                pcus = PCUNode().select(filter={'node_id': node['node_id']})
+                node['pcu_ids'] = [rec.pcu_id for rec in pcus]
+            if not columns or 'pcu_ports' in columns:
+                pcu_ports = PCUNodePort().select(filter={'node_id': node['node_id']})
+                # NOTE(review): Node.fields declares this column as 'ports' --
+                # confirm which key callers expect before renaming either side.
+                node['pcu_ports'] = [rec.port for rec in pcu_ports]
+            if not columns or 'node_tag_ids' in columns:
+                node_tags = NodeTag().select(filter={'node_id': node['node_id']})
+                node['node_tag_ids'] = [rec.node_tag_id for rec in node_tags]
+            if not columns or 'nodegroup_ids' in columns:
+                nodegroups = NodeGroup().select(filter={'node_id': node['node_id']})
+                node['nodegroup_ids'] = [rec.nodegroup_id for rec in nodegroups]
+            self.append(node)
+
+    def refresh(self, api):
+        """
+        Create a nodes row for any nova hypervisor (compute host) that does
+        not have one yet, attached to the 'default' site.  Existing rows
+        are left untouched.
+        """
+        from PLC.Sites import Sites
+        default_site = Sites(api, site_filter={'login_base': 'default'})[0]
+        # get current list of compute nodes
+        hypervisors = api.client_shell.nova.hypervisors.list()
+        compute_hosts = [h.hypervisor_hostname for h in hypervisors]
+
+        nodes = Node().select()
+        hostnames = [node.hostname for node in nodes]
+
+        # only hosts not already present in the nodes table are added
+        added_nodes = set(compute_hosts).difference(hostnames)
+        for added_node in added_nodes:
+            node = Node(api, {'hostname': added_node,
+                              'node_type': 'regular',
+                              'site_id': default_site['site_id']})
+            node.sync()
+
+
+