+ if len(nodes_id) == 0:
+ # first attribute being matched
+ for node_tag in node_tags:
+
+ # check that matches the min or max restriction
+ if 'min' in attr_name and node_tag['value'] != 'n/a' and \
+ float(node_tag['value']) > attr_value:
+ nodes_id.append(node_tag['node_id'])
+
+ elif 'max' in attr_name and node_tag['value'] != 'n/a' and \
+ float(node_tag['value']) < attr_value:
+ nodes_id.append(node_tag['node_id'])
+ else:
+
+ # remove the nodes ids that don't match the new attribute
+ # that is being match
+ nodes_id_tmp = []
+ for node_tag in node_tags:
+
+ # check that matches the min or max restriction and was a
+ # matching previous filters
+ if 'min' in attr_name and node_tag['value'] != 'n/a' and \
+ float(node_tag['value']) > attr_value and \
+ node_tag['node_id'] in nodes_id:
+ nodes_id_tmp.append(node_tag['node_id'])
+
+ elif 'max' in attr_name and node_tag['value'] != 'n/a' and \
+ float(node_tag['value']) < attr_value and \
+ node_tag['node_id'] in nodes_id:
+ nodes_id_tmp.append(node_tag['node_id'])
+
+ if len(nodes_id_tmp):
+ nodes_id = set(nodes_id) & set(nodes_id_tmp)
+ else:
+ # no node from before match the new constraint
+ self.fail_discovery()
+
+ else: #TODO CHECK
+ # no nodes match the filter applied
+ self.fail_discovery()
+
+ return nodes_id
+
def _choose_random_node(self, nodes):
    """
    Pick one node id from *nodes* at random and reserve it for provision.

    The random order decreases the probability of different RMs choosing
    the same node for provision at the same time.

    :param nodes: candidate node ids; NOTE the list is mutated in place
        (sampled elements are swapped out, selection without replacement).
    :return: the chosen node id, or None (implicitly) when every candidate
        was blacklisted, already reserved, or failed the ping check.
    """
    # Partial Fisher-Yates shuffle: each iteration draws a random index in
    # [0, size] (randint is inclusive), then moves the last unsampled
    # element into that slot so the same candidate is never drawn twice.
    size = len(nodes)
    while size:
        size = size - 1
        index = randint(0, size)
        node_id = nodes[index]
        nodes[index] = nodes[size]

        # check the node is not blacklisted or being provisioned by another
        # RM, and perform a ping to check that it is really alive; the
        # class-wide lock makes the blacklist/reserved check and the
        # subsequent reservation atomic across RMs in this process
        with PlanetlabNode.lock:

            blist = self.plapi.blacklisted()
            plist = self.plapi.reserved()
            if node_id not in blist and node_id not in plist:
                ping_ok = self._do_ping(node_id)
                if not ping_ok:
                    # hostname is set first so the warning/blacklist entry
                    # refers to a resolvable name, not just the node id
                    self._set_hostname_attr(node_id)
                    self.warning(" Node not responding PING ")
                    self._blacklist_node(node_id)
                else:
                    # discovered node for provision, added to provision list
                    self._put_node_in_provision(node_id)
                    return node_id
+
+ def _get_nodes_id(self, filters=None):
+ return self.plapi.get_nodes(filters, fields=['node_id'])
+
def _add_node_to_slice(self, node_id):
    """
    Append *node_id* to the user's slice through PLCAPI.

    The class-wide lock serializes concurrent RMs so the read-modify-write
    of the slice node list is not interleaved.
    """
    self.info(" Adding node to slice ")
    slicename = self.get("username")
    with PlanetlabNode.lock:
        current = self.plapi.get_slice_nodes(slicename)
        self.debug(" Previous slice nodes %s " % current)
        current.append(node_id)
        self.plapi.add_slice_nodes(slicename, current)
+
+ def _delete_node_from_slice(self, node):
+ self.warning(" Deleting node from slice ")
+ slicename = self.get("username")
+ self.plapi.delete_slice_node(slicename, [node])
+
+ def _get_hostname(self):
+ hostname = self.get("hostname")
+ if hostname:
+ return hostname
+ ip = self.get("ip")
+ if ip:
+ hostname = socket.gethostbyaddr(ip)[0]
+ self.set('hostname', hostname)
+ return hostname
+ else:
+ return None
+
+ def _set_hostname_attr(self, node):
+ """
+ Query PLCAPI for the hostname of a certain node id and sets the
+ attribute hostname, it will over write the previous value
+ """
+ hostname = self.plapi.get_nodes(node, ['hostname'])
+ self.set("hostname", hostname[0]['hostname'])
+
+ def _check_if_in_slice(self, nodes_id):
+ """
+ Query PLCAPI to find out if any node id from nodes_id is in the user's
+ slice
+ """
+ slicename = self.get("username")
+ slice_nodes = self.plapi.get_slice_nodes(slicename)
+ nodes_inslice = list(set(nodes_id) & set(slice_nodes))
+ return nodes_inslice
+
+ def _do_ping(self, node_id):
+ """
+ Perform ping command on node's IP matching node id
+ """
+ ping_ok = False
+ ip = self._get_ip(node_id)
+ if ip:
+ command = "ping -c4 %s" % ip
+ (out, err) = lexec(command)
+
+ m = re.search("(\d+)% packet loss", str(out))
+ if m and int(m.groups()[0]) < 50:
+ ping_ok = True
+
+ return ping_ok
+
+ def _blacklist_node(self, node):
+ """
+ Add node mal functioning node to blacklist
+ """
+ self.warning(" Blacklisting malfunctioning node ")
+ self.plapi.blacklist_host(node)
+ if not self._hostname:
+ self.set('hostname', None)
+
+ def _put_node_in_provision(self, node):
+ """
+ Add node to the list of nodes being provisioned, in order for other RMs
+ to not try to provision the same one again
+ """
+ self.plapi.reserve_host(node)
+
+ def _get_ip(self, node_id):
+ """
+ Query PLCAPI for the IP of a node with certain node id
+ """
+ hostname = self.get("hostname") or \
+ self.plapi.get_nodes(node_id, ['hostname'])[0]['hostname']
+ try:
+ ip = sshfuncs.gethostbyname(hostname)
+ except:
+ # Fail while trying to find the IP
+ return None
+ return ip
+
def fail_discovery(self):
    """Log and raise: no candidate node satisfied the discovery filters."""
    message = "Discovery failed. No candidates found for node"
    self.error(message)
    raise RuntimeError(message)
+
def fail_node_not_alive(self, hostname=None):
    """Raise because *hostname* did not answer the liveness check."""
    raise RuntimeError("Node %s not alive" % hostname)
+
def fail_node_not_available(self, hostname):
    """Raise because *hostname* cannot currently be provisioned."""
    raise RuntimeError("Node %s not available for provisioning" % hostname)
+
def fail_not_enough_nodes(self):
    """Raise because fewer nodes than requested can be provisioned."""
    raise RuntimeError("Not enough nodes available for provisioning")
+
def fail_plapi(self):
    """
    Raise because the PLC API client could not be created, typically due
    to missing credentials.

    Fixes the typo "instanciate" -> "instantiate" in the user-facing
    error message.
    """
    msg = ("Failing while trying to instantiate the PLC API.\nSet the"
           " attributes pluser and plpassword.")
    raise RuntimeError(msg)