#
# $Id$
+import os
+import sys
+import fcntl
import time
from PLC.Debug import log
def message_verbose(to_print=None):
    """Forward *to_print* to message() as a verbose-only log entry."""
    message(to_print, verbose_only=True)
+
class FileLock:
    """
    Advisory file lock built on fcntl.flock().

    A lock file older than *expire* seconds (default: 2 hours) is
    considered stale — presumably left behind by a crashed holder —
    and is removed before attempting to acquire the lock.
    """
    def __init__(self, file_path, expire = 60 * 60 * 2):
        # expire: stale-lock timeout in seconds
        self.expire = expire
        # fpath: path of the lock file
        self.fpath = file_path
        # fd: open file object while the lock is held, else None
        self.fd = None

    def lock(self):
        """
        Try to acquire the lock without blocking.
        Returns True on success, False if the lock is held elsewhere
        or the stale lock file could not be removed.
        """
        # Break a stale lock left behind by a dead holder.
        if os.path.exists(self.fpath):
            if (time.time() - os.stat(self.fpath).st_ctime) > self.expire:
                try:
                    os.unlink(self.fpath)
                except Exception as e:
                    message('FileLock.lock(%s) : %s' % (self.fpath, e))
                    return False
        try:
            self.fd = open(self.fpath, 'w')
            fcntl.flock(self.fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
        except IOError as e:
            # Don't leak the descriptor when flock() fails (e.g. lock
            # already held): close it and reset state.
            if self.fd is not None:
                self.fd.close()
                self.fd = None
            message('FileLock.lock(%s) : %s' % (self.fpath, e))
            return False
        return True

    def unlock(self):
        """Release the lock; a no-op if lock() never succeeded."""
        # Guard: flock(None, ...) would raise an uncaught TypeError.
        if self.fd is None:
            return
        try:
            # LOCK_NB is meaningless for LOCK_UN — unlock never blocks.
            fcntl.flock(self.fd, fcntl.LOCK_UN)
            self.fd.close()
        except IOError as e:
            message('FileLock.unlock(%s) : %s' % (self.fpath, e))
        self.fd = None
+
+
class RefreshPeer(Method):
"""
Fetches site, node, slice, person and key data from the specified peer
returns = Parameter(int, "1 if successful")
def call(self, auth, peer_id_or_peername):
+ ret_val = None
+ peername = Peers(self.api, [peer_id_or_peername], ['peername'])[0]['peername']
+ file_lock = FileLock("/tmp/refresh-peer-%s.lock" % peername)
+ if not file_lock.lock():
+ raise Exception, "Another instance of RefreshPeer is running."
+ try:
+ ret_val = self.real_call(auth, peer_id_or_peername)
+ except Exception, e:
+ file_lock.unlock()
+ raise Exception, e
+ file_lock.unlock()
+ return ret_val
+
+
+ def real_call(self, auth, peer_id_or_peername):
# Get peer
peers = Peers(self.api, [peer_id_or_peername])
if not peers:
message('RefreshPeer starting up (commit_mode=%r)'%commit_mode)
message('Issuing GetPeerData')
peer_tables = peer.GetPeerData()
+ # for smooth federation with 4.2 - ignore fields that are useless anyway, and rewrite boot_state
+ boot_state_rewrite={'dbg':'safeboot','diag':'safeboot','disable':'disabled',
+ 'inst':'reinstall','rins':'reinstall','new':'reinstall','rcnf':'reinstall'}
+ for node in peer_tables['Nodes']:
+ for key in ['nodenetwork_ids','dummybox_id']:
+ if key in node:
+ del node[key]
+ if node['boot_state'] in boot_state_rewrite: node['boot_state']=boot_state_rewrite[node['boot_state']]
+ for slice in peer_tables['Slices']:
+ for key in ['slice_attribute_ids']:
+ if key in slice:
+ del slice[key]
timers['transport'] = time.time() - start - peer_tables['db_time']
timers['peer_db'] = peer_tables['db_time']
message_verbose('GetPeerData returned -> db=%d transport=%d'%(timers['peer_db'],timers['transport']))
for peer_object_id, peer_object in peer_objects.iteritems():
message_verbose ('DBG %s peer_object_id=%d (%d/%d)'%(classname,peer_object_id,count,total))
count += 1
+ if peer_object_id in synced:
+ message("Warning: %s Skipping already added %s: %r"%(
+ peer['peername'], classname, peer_object))
+ continue
if classname == 'Node':
message_verbose ('DBG>> hostname=%s'%peer_object['hostname'])
elif classname == "Slice":
if peer_tables['Nodes']:
columns = peer_tables['Nodes'][0].keys()
else:
- columns = None
+ # smooth federation with a 4.2 peer - ignore these fields that are useless anyway
+ columns = Node.fields
+ if 'interface_ids' in columns: columns.remove('interface_ids')
+ if 'dummybox_id' in columns: columns.remove('dummybox_id')
# Keyed on foreign node_id
old_peer_nodes = Nodes(self.api, {'peer_id': peer_id}, columns).dict('peer_node_id')
# Nodes that are currently part of the slice
old_slice_node_ids = [ node_transcoder[node_id] for node_id in slice['node_ids'] \
- if node_transcoder[node_id] in peer_nodes]
+ if node_id in node_transcoder and node_transcoder[node_id] in peer_nodes]
# Nodes that should be part of the slice
slice_node_ids = [ node_id for node_id in peer_slice['node_ids'] if node_id in peer_nodes]
# Update peer itself and commit
peer.sync(commit = True)
-
+
return timers