1 """Functionality common to all account classes.
3 Each subclass of Account must provide five methods: create() and
4 destroy(), which are static; configure(), start(), and stop(), which
5 are not. configure(), which takes a record as its only argument, does
6 things like set up ssh keys. In addition, an Account subclass must
7 provide static member variables SHELL, which contains the unique shell
8 that it uses; and TYPE, a string that is used by the account creation
9 code. For no particular reason, TYPE is divided hierarchically by
periods; at the moment the only convention is that all sliver accounts
have a type that begins with 'sliver.'.
13 There are any number of race conditions that may result from the fact
14 that account names are not unique over time. Moreover, it's a bad
15 idea to perform lengthy operations while holding the database lock.
16 In order to deal with both of these problems, we use a worker thread
17 for each account name that ever exists. On 32-bit systems with large
18 numbers of accounts, this may cause the NM process to run out of
19 *virtual* memory! This problem may be remedied by decreasing the
# When this variable is true, start after any ensure_created
# (the flag itself is assigned on lines not shown in this excerpt).
# Cumulative delay for starts when Startingup is true
# (the counter is defined elsewhere; csd_lock below serializes access to it).
csd_lock = threading.Lock()
# shell path -> account class association
# account type -> account class association
def register_class(acct_class):
    """Call once for each account class.

    Records <acct_class> in the lookup tables so it can later be found
    either by its login shell (acct_class.SHELL) or by its type string
    (acct_class.TYPE)."""
    for table, key in ((shell_acct_class, acct_class.SHELL),
                       (type_acct_class, acct_class.TYPE)):
        table[key] = acct_class
# private account name -> worker object association and associated lock;
# the lock serializes lazy creation of Worker objects.
name_worker_lock = threading.Lock()
54 return [pw_ent for pw_ent in pwd.getpwall() if pw_ent[6] in shell_acct_class]
    """Return the names of all accounts on the system with recognized shells."""
    # pw_ent[0] is the username field of the passwd entry.
    return [pw_ent[0] for pw_ent in allpwents()]
61 """Return the worker object for a particular username. If no such object exists, create it first."""
62 name_worker_lock.acquire()
64 if name not in name_worker: name_worker[name] = Worker(name)
65 return name_worker[name]
66 finally: name_worker_lock.release()
70 def __init__(self, rec):
71 logger.verbose('Initing account %s'%rec['name'])
72 self.name = rec['name']
74 self.initscriptchanged = False
78 def create(name, vref = None): abstract
80 def destroy(name): abstract
82 def configure(self, rec):
83 """Write <rec['keys']> to my authorized_keys file."""
84 logger.verbose('in accounts:configure for %s'%self.name)
85 new_keys = rec['keys']
86 if new_keys != self.keys:
88 dot_ssh = '/home/%s/.ssh' % self.name
89 if not os.access(dot_ssh, os.F_OK): os.mkdir(dot_ssh)
90 os.chmod(dot_ssh, 0700)
91 tools.write_file(dot_ssh + '/authorized_keys', lambda f: f.write(new_keys))
92 logger.verbose('%s: installing ssh keys' % self.name)
93 os.chown(dot_ssh + '/authorized_keys', pwd.getpwnam(self.name)[2], 504)
95 def start(self, delay=0): pass
97 def is_running(self): pass
    # These semaphores serialize account creation/destruction across ALL
    # workers: at most one create and one destroy may be in flight at a time.
    # They are acquired before creating/destroying an account.
    _create_sem = threading.Semaphore(1)
    _destroy_sem = threading.Semaphore(1)
104 def __init__(self, name):
105 self.name = name # username
106 self._acct = None # the account object currently associated with this worker
108 # outsiders request operations by putting (fn, args...) tuples on _q
109 # the worker thread (created below) will perform these operations in order
110 self._q = Queue.Queue()
111 tools.as_daemon_thread(self._run)
113 def ensure_created(self, rec):
114 """Cause the account specified by <rec> to exist if it doesn't already."""
115 if rec.has_key('name'):
116 logger.verbose('Worker.ensure_created with name=%s'%rec['name'])
117 self._q.put((self._ensure_created, rec.copy(), Startingup))
118 logger.verbose('Worker queue has %d item(s)'%self._q.qsize())
    def _ensure_created(self, rec, startingup):
        # Runs on the worker thread.  Make the account described by <rec>
        # exist, switching account classes if its type changed.
        # NOTE(review): several interior lines of this method are not shown
        # in this excerpt; the flow below is partial.
        curr_class = self._get_class()
        next_class = type_acct_class[rec['type']]
        if next_class != curr_class:
            # Type changed (or no account yet): destroy the old account and
            # create one of the new class under the creation semaphore.
            self._destroy(curr_class)
            self._create_sem.acquire()
            try: next_class.create(self.name, rec['vref'])
            finally: self._create_sem.release()
        if not isinstance(self._acct, next_class): self._acct = next_class(rec)
        else: self._acct.configure(rec)
        if startingup or not self.is_running():
            # NOTE(review): cumstartdelay appears to stagger starts during
            # node startup; its locking/increment lines are not shown here.
            delay = cumstartdelay
            self._acct.start(delay=delay)
        elif next_class != curr_class or self._acct.initscriptchanged:
            # NOTE(review): branch body not shown in this excerpt.
140 def ensure_destroyed(self): self._q.put((self._ensure_destroyed,))
141 def _ensure_destroyed(self): self._destroy(self._get_class())
143 def start(self, delay=0): self._q.put((self._start, delay))
144 def _start(self, d): self._acct.start(delay=d)
146 def stop(self): self._q.put((self._stop,))
147 def _stop(self): self._acct.stop()
    def is_running(self):
        # Probe the currently bound account object; the negative case is
        # logged for debugging.
        # NOTE(review): the lines between the test and the log below (the
        # 'is running' branch body and the return) are not shown in this
        # excerpt, so control flow here is partial.
        if self._acct.is_running():
        logger.verbose("Worker(%s): is not running" % self.name)
    def _destroy(self, curr_class):
        # Remove the on-system account via its class's static destroy(),
        # serialized globally by the destruction semaphore.
        # NOTE(review): some lines of this method are not shown in this
        # excerpt (e.g. any guard for curr_class being None).
        self._destroy_sem.acquire()
        try: curr_class.destroy(self.name)
        finally: self._destroy_sem.release()
164 def _get_class(self):
165 try: shell = pwd.getpwnam(self.name)[6]
166 except KeyError: return None
167 return shell_acct_class[shell]
    """Repeatedly pull commands off the queue and execute. If memory usage becomes an issue, it might be wise to terminate after a while."""
    # NOTE(review): the surrounding loop and try structure of this method
    # are not shown in this excerpt; only these statements are visible.
    logger.verbose('Worker:_run : getting - size is %d'%self._q.qsize())
    # Log any exception raised by a queued operation, tagged with my name.
    logger.log_exc(self.name)