Bound the queue so it doesn't grow without bound.
[nodemanager.git] / accounts.py
index cb41b67..1fd264f 100644 (file)
@@ -23,6 +23,7 @@ maximum stack size.
 import Queue
 import os
 import pwd
+from grp import getgrnam
 import threading
 
 import logger
@@ -86,12 +87,14 @@ class Account:
         if new_keys != self.keys:
             self.keys = new_keys
             dot_ssh = '/home/%s/.ssh' % self.name
-            def do_installation():
-                if not os.access(dot_ssh, os.F_OK): os.mkdir(dot_ssh)
-                os.chmod(dot_ssh, 0700)
-                tools.write_file(dot_ssh + '/authorized_keys', lambda f: f.write(new_keys))
-            logger.verbose('%s: installing ssh keys' % self.name)
-            tools.fork_as(self.name, do_installation)
+            if not os.access(dot_ssh, os.F_OK): os.mkdir(dot_ssh)
+            os.chmod(dot_ssh, 0700)
+            tools.write_file(dot_ssh + '/authorized_keys', lambda f: f.write(new_keys))
+            logger.log('%s: installing ssh keys' % self.name)
+            user = pwd.getpwnam(self.name)[2]
+            group = getgrnam("slices")[2]
+            os.chown(dot_ssh, user, group)
+            os.chown(dot_ssh + '/authorized_keys', user, group)
 
     def start(self, delay=0): pass
     def stop(self): pass
@@ -108,14 +111,14 @@ class Worker:
         # task list
         # outsiders request operations by putting (fn, args...) tuples on _q
         # the worker thread (created below) will perform these operations in order
-        self._q = Queue.Queue()
+        self._q = Queue.Queue(maxsize=4) # keep from overflowing and backing up.
         tools.as_daemon_thread(self._run)
 
     def ensure_created(self, rec):
         """Cause the account specified by <rec> to exist if it doesn't already."""
         if rec.has_key('name'):
             logger.verbose('Worker.ensure_created with name=%s'%rec['name'])
-        self._q.put((self._ensure_created, rec.copy(), Startingup))
+        self._enqueue((self._ensure_created, rec.copy(), Startingup))
         logger.verbose('Worker queue has %d item(s)'%self._q.qsize())
 
     def _ensure_created(self, rec, startingup):
@@ -138,18 +141,21 @@ class Worker:
         elif next_class != curr_class or self._acct.initscriptchanged:
             self._acct.start()
 
-    def ensure_destroyed(self): self._q.put((self._ensure_destroyed,))
+    def ensure_destroyed(self): self._enqueue((self._ensure_destroyed,))
     def _ensure_destroyed(self): self._destroy(self._get_class())
 
-    def start(self, delay=0): self._q.put((self._start, delay))
+    def start(self, delay=0): self._enqueue((self._start, delay))
     def _start(self, d): self._acct.start(delay=d)
 
-    def stop(self): self._q.put((self._stop,))
+    def stop(self): self._enqueue((self._stop,))
     def _stop(self): self._acct.stop()
 
     def is_running(self): 
-        status = self._acct.is_running()
-        if not status:  logger.verbose("Worker(%s): is not running" % self.name)
+        if self._acct.is_running():
+            status = True
+        else:
+            status = False
+            logger.verbose("Worker(%s): is not running" % self.name)
         return status
 
     def _destroy(self, curr_class):
@@ -173,3 +179,7 @@ class Worker:
                 cmd[0](*cmd[1:])
             except:
                 logger.log_exc(self.name)
+
+    def _enqueue(self, cmds):
+        try: self._q.put_nowait(cmds)
+        except Queue.Full:  logger.log("%s Worker queue full." % self.name)