%define url $URL: svn+ssh://svn.planet-lab.org/svn/monitor/trunk/monitor.spec $
%define name monitor
+# keep this version in sync with monitor/monitor_version.py
%define version 3.0
%define taglevel 25
Requires: python
Requires: python-setuptools-devel
Requires: python-peak-util-extremes
-Requires: TurboGears
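+# (TurboGears is no longer a yum-level Requires: it is bundled into
+#  %{python_sitearch} via the easy_install calls in %install below)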
Requires: compat-libstdc++-296
Requires: openssh-clients
#################### CLIENT
#install -D -m 755 monitor-client.init $RPM_BUILD_ROOT/%{_initrddir}/monitor
#install -D -m 644 monitor.cron $RPM_BUILD_ROOT/%{_sysconfdir}/cron.d/monitor
-install -D -m 755 timeout.pl $RPM_BUILD_ROOT/usr/bin/timeout.pl
+install -D -m 755 tools/timeout.pl $RPM_BUILD_ROOT/usr/bin/timeout.pl
#################### SERVER
install -d $RPM_BUILD_ROOT/data/var/lib/%{name}/archive-pdb
install -d $RPM_BUILD_ROOT/var/lib/%{name}
install -d $RPM_BUILD_ROOT/var/lib/%{name}/archive-pdb
-#install -d $RPM_BUILD_ROOT/var/www/cgi-bin/monitor/
install -d $RPM_BUILD_ROOT/var/www/html/monitorlog/
+install -d $RPM_BUILD_ROOT/etc/httpd/conf.d/
+install -d $RPM_BUILD_ROOT/%{python_sitearch}/monitor
+
+# pack monitor's dependencies into the RPM to make deployment easier.
+export PYTHONPATH=$PYTHONPATH:$RPM_BUILD_ROOT/%{python_sitearch}/
+easy_install --build-directory /var/tmp -d $RPM_BUILD_ROOT/%{python_sitearch}/ -UZ http://files.turbogears.org/eggs/TurboGears-1.0.7-py2.5.egg
+easy_install --build-directory /var/tmp -d $RPM_BUILD_ROOT/%{python_sitearch}/ -UZ http://pypi.python.org/packages/source/S/SQLAlchemy/SQLAlchemy-0.5.3.tar.gz
+easy_install --build-directory /var/tmp -d $RPM_BUILD_ROOT/%{python_sitearch}/ -UZ Elixir
+rm -rf $RPM_BUILD_ROOT/%{python_sitearch}/site.py*
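+# sanity check (illustrative sketch, not part of the original recipe): confirm
+# the bundled eggs import from the buildroot before packaging; this assumes the
+# usual turbogears/sqlalchemy/elixir top-level module names
+#   PYTHONPATH=$RPM_BUILD_ROOT/%{python_sitearch} python -c "import turbogears, sqlalchemy, elixir"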
+# plc.d scripts
install -D -m 644 monitor.functions $RPM_BUILD_ROOT/%{_sysconfdir}/plc.d/monitor.functions
install -D -m 755 monitor-server.init $RPM_BUILD_ROOT/%{_sysconfdir}/plc.d/monitor
install -D -m 755 zabbix/monitor-zabbix.init $RPM_BUILD_ROOT/%{_sysconfdir}/plc.d/zabbix
-echo " * Installing core scripts"
-rsync -a --exclude www --exclude archive-pdb --exclude .svn --exclude CVS \
- ./ $RPM_BUILD_ROOT/usr/share/%{name}/
+# cron job for automated polling
+install -D -m 644 monitor-server.cron $RPM_BUILD_ROOT/%{_sysconfdir}/cron.d/monitor-server.cron
-echo " * Installing web pages"
-#rsync -a www/ $RPM_BUILD_ROOT/var/www/cgi-bin/monitor/
-rsync -a log/ $RPM_BUILD_ROOT/var/www/html/monitorlog/
+# apache configuration
+install -D -m 644 web/monitorweb-httpd.conf $RPM_BUILD_ROOT/etc/httpd/conf.d/
-echo " * Installing cron job for automated polling"
-install -D -m 644 monitor-server.cron $RPM_BUILD_ROOT/%{_sysconfdir}/cron.d/monitor-server.cron
-echo " * TODO: Setting up Monitor account in local MyPLC"
-# TODO:
+# we'll install monitor and pcucontrol in site-packages
+# and install the rest to /usr/share/monitor
+rsync -a --exclude archive-pdb --exclude .svn --exclude CVS \
+	--exclude monitor/ \
+	--exclude pcucontrol/ \
+	./ $RPM_BUILD_ROOT/usr/share/%{name}/
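+# note: the trailing-slash excludes above skip only the monitor/ and
+# pcucontrol/ directories, which go to site-packages instead; to preview what
+# lands in /usr/share, a dry run works (illustrative only, target path is made up):
+#   rsync -avn --exclude archive-pdb --exclude .svn --exclude CVS \
+#     --exclude monitor/ --exclude pcucontrol/ ./ /tmp/monitor-share-preview/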
-install -d $RPM_BUILD_ROOT/%{python_sitearch}/monitor
-install -d -D -m 755 monitor $RPM_BUILD_ROOT/%{python_sitearch}/monitor
-# TODO: need a much better way to do this.
-rsync -a monitor/ $RPM_BUILD_ROOT/%{python_sitearch}/monitor/
-#for file in __init__.py database.py config.py ; do
-# install -D -m 644 monitor/$file $RPM_BUILD_ROOT/%{python_sitearch}/monitor/$file
-#done
-rsync -a pcucontrol/ $RPM_BUILD_ROOT/%{python_sitearch}/pcucontrol/
+# install monitor python package
+rsync -a --exclude .svn ./monitor/ $RPM_BUILD_ROOT/%{python_sitearch}/monitor/
+
+# and pcucontrol
+rsync -a --exclude .svn ./pcucontrol/ $RPM_BUILD_ROOT/%{python_sitearch}/pcucontrol/
+
+# install third-party module to site-packages
install -D -m 755 threadpool.py $RPM_BUILD_ROOT/%{python_sitearch}/threadpool.py
+# TODO: set up the Monitor account in the local MyPLC
+echo " * TODO: Setting up Monitor account in local MyPLC"
+
#touch $RPM_BUILD_ROOT/var/www/cgi-bin/monitor/monitorconfig.php
#chmod 777 $RPM_BUILD_ROOT/var/www/cgi-bin/monitor/monitorconfig.php
%{python_sitearch}/threadpool.pyc
%{python_sitearch}/threadpool.pyo
%{python_sitearch}/monitor
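+# the entries below are the eggs, .pth file and tg-admin script produced by
+# the easy_install bundling in %install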
+%{python_sitearch}/Turbo*
+%{python_sitearch}/SQLAlchemy*
+%{python_sitearch}/Elixir*
+%{python_sitearch}/easy-install.pth
+%{python_sitearch}/tg-admin
%{_sysconfdir}/plc.d/monitor
%{_sysconfdir}/plc.d/monitor.functions
%{_sysconfdir}/plc.d/zabbix
+%{_sysconfdir}/httpd/conf.d
%files client
%defattr(-,root,root)
/%{_initrddir}/monitor-runlevelagent
%post server-deps
-#easy_install --build-directory /var/tmp -UZ ElementTree
-##easy_install --build-directory /var/tmp -UZ http://pypi.python.org/packages/2.5/E/Extremes/Extremes-1.1-py2.5.egg
-
-
-## TODO: something is bad wrong with this approach.
-easy_install --build-directory /var/tmp -UZ http://files.turbogears.org/eggs/TurboGears-1.0.7-py2.5.egg
-easy_install --build-directory /var/tmp -UZ http://pypi.python.org/packages/source/S/SQLAlchemy/SQLAlchemy-0.5.3.tar.gz
-easy_install --build-directory /var/tmp -UZ Elixir
# crazy openssl libs for racadm binary
ln -s /lib/libssl.so.0.9.8b /usr/lib/libssl.so.2
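+# (illustrative check, not from the original spec: racadm's install location is
+#  site-specific, so `which racadm` is an assumption)
+#   ldd $(which racadm) | grep libssl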
+++ /dev/null
-from NetProxy import NetProxyWrapper\r
-from Lib import raise_exception\r
-\r
-\r
-class InvalidAsyncResultState(Exception):\r
- pass\r
-\r
-\r
-class AsyncNetProxy(NetProxyWrapper):\r
- """wraps an exiting synchronous netproxy to make is asynchronous \r
- (remote operations return AsyncResult objects)"""\r
- __doc__ = NetProxyWrapper.__doc__\r
-\r
- def __request__(self, handler, *args):\r
- res = AsyncResult(self.__dict__["____conn"])\r
- self.__dict__["____conn"].async_request(res.callback, handler, self.__dict__["____oid"], *args)\r
- return res\r
-\r
- # must return a string... and it's not meaningful to return the repr of an async result\r
- def __repr__(self, *args):\r
- return self.__request__("handle_repr", *args).result\r
- def __str__(self, *args):\r
- return self.__request__("handle_str", *args).result \r
- \r
-\r
-class AsyncResult(object):\r
- """represents the result of an asynchronous operation"""\r
- STATE_PENDING = "pending"\r
- STATE_READY = "ready"\r
- STATE_EXCEPTION = "exception"\r
- STATE_UNKNOWN = "unknown"\r
- \r
- def __init__(self, conn):\r
- self.conn = conn\r
- self._state = self.STATE_PENDING\r
- self._result = None\r
- self._on_ready = None\r
- \r
- def __repr__(self):\r
- return "<AsyncResult (%s) at 0x%08x>" % (self._state, id(self))\r
- \r
- def callback(self, event, obj):\r
- if event == "result":\r
- self._state = self.STATE_READY\r
- self._result = obj\r
- elif event == "exception":\r
- self._state = self.STATE_EXCEPTION\r
- self._result = obj\r
- else:\r
- self._state = self.STATE_UNKNOWN\r
- self._result = obj\r
- \r
- if self._on_ready is not None:\r
- self._on_ready(self)\r
- \r
- def _get_on_ready(self):\r
- return self._ready_callback\r
-\r
- def _set_on_ready(self, obj):\r
- self._on_ready = obj\r
- if self._state != self.STATE_PENDING:\r
- self._on_ready(self)\r
- \r
- def _get_is_ready(self):\r
- if self._state == self.STATE_PENDING:\r
- self.conn.poll()\r
- return self._state != self.STATE_PENDING\r
- \r
- def _get_result(self):\r
- while self._state == self.STATE_PENDING:\r
- self.conn.serve()\r
- if self._state == self.STATE_READY:\r
- return self._result\r
- elif self._state == self.STATE_EXCEPTION:\r
- raise_exception(*self._result)\r
- else:\r
- raise InvalidAsyncResultState(self._state)\r
- \r
- is_ready = property(_get_is_ready, doc = "indicates whether or not the result is ready")\r
- result = property(_get_result, doc = "the value of the async result (may block)")\r
- on_ready = property(_get_on_ready, _set_on_ready, doc =\r
- "if not None, specifies a callback which is called when the result is ready")\r
+++ /dev/null
-"""\r
-Challenge-Response authentication algorithm over channels: the client sends the\r
-username, recvs a challenge, generates a response based on the challenge and \r
-password, sends it back to the server, which also calculates the expected response.\r
-the server returns "WRONG" or "OKAY". this way the password is never sent over\r
-the wire.\r
-\r
-* servers should call the accept function over a channel, with a dict of \r
-(username, password) pairs.\r
-* clients should call login over a channel, with the username and password\r
-"""\r
-import md5\r
-import random\r
-\r
-def get_bytes(num_bytes):\r
- ret = []\r
- for n in xrange(num_bytes):\r
- ret.append( chr(random.randint(0,255)) )\r
- return ''.join(ret)\r
-\r
-def accept(chan, users):\r
- username = chan.recv()\r
- challenge = get_bytes(16)\r
- chan.send(challenge)\r
- response = chan.recv()\r
- if username not in users:\r
- chan.send("WRONG")\r
- return False\r
- expected_response = md5.new(users[username] + challenge).digest()\r
- if response != expected_response:\r
- chan.send("WRONG")\r
- return False\r
- chan.send("OKAY")\r
- return True\r
-\r
-def login(chan, username, password):\r
- chan.send(username)\r
- challenge = chan.recv()\r
- response = md5.new(password + challenge).digest()\r
- chan.send(response)\r
- return chan.recv() == "OKAY"\r
-\r
+++ /dev/null
-import sys\r
-import traceback\r
-import cPickle as pickle\r
-from weakref import WeakValueDictionary\r
-from Lib import ImmDict\r
-from NetProxy import NetProxy, SyncNetProxy\r
-\r
-\r
-class BoxingError(Exception):\r
- pass\r
-class NestedException(Exception): \r
- pass\r
-\r
-PICKLE_PROTOCOL = pickle.HIGHEST_PROTOCOL\r
-TYPE_SIMPLE = 0\r
-TYPE_PROXY = 1\r
-TYPE_TUPLE = 2\r
-TYPE_SLICE = 3\r
-TYPE_LOCAL_PROXY = 4\r
-TYPE_IMMDICT = 5\r
-simple_types = (\r
- bool, \r
- int, \r
- long, \r
- float, \r
- complex, \r
- basestring, \r
- type(None),\r
-)\r
-\r
-def dump_exception(typ, val, tb):\r
- """dumps the given exception using pickle (since not all exceptions are picklable)"""\r
- tbtext = "".join(traceback.format_exception(typ, val, tb))\r
- sys.last_type, sys.last_value, sys.last_traceback = typ, val, tb\r
- try:\r
- pickled_exc = pickle.dumps((typ, val, tbtext), PICKLE_PROTOCOL)\r
- except pickle.PicklingError, ex:\r
- newval = NestedException("pickling error %s\nexception type: %r\nexception object: %s" % (ex, typ, val))\r
- pickled_exc = pickle.dumps((NestedException, newval, tbtext), PICKLE_PROTOCOL)\r
- return pickled_exc\r
-\r
-def load_exception(package):\r
- """returns an exception object"""\r
- try:\r
- return pickle.loads(package)\r
- except pickle.PicklingError, ex:\r
- return NestedException("failed to unpickle remote exception -- %r" % (ex,))\r
-\r
-class Box(object):\r
- """a box is where local objects are stored, and remote proxies are created"""\r
- def __init__(self, conn):\r
- self.conn = conn\r
- self.objects = {}\r
- self.proxy_cache = WeakValueDictionary()\r
-\r
- def close(self):\r
- del self.conn\r
- del self.objects\r
- del self.proxy_cache\r
- \r
- def __getitem__(self, oid):\r
- return self.objects[oid][1]\r
-\r
- def _box(self, obj):\r
- if isinstance(obj, simple_types):\r
- return TYPE_SIMPLE, obj\r
- elif isinstance(obj, slice):\r
- return TYPE_SLICE, (obj.start, obj.stop, obj.step)\r
- elif isinstance(obj, NetProxy) and obj.__dict__["____conn"] is self.conn:\r
- return TYPE_LOCAL_PROXY, obj.__dict__["____oid"]\r
- elif isinstance(obj, tuple):\r
- if obj:\r
- return TYPE_TUPLE, [self._box(subobj) for subobj in obj]\r
- else:\r
- return TYPE_SIMPLE, ()\r
- elif isinstance(obj, ImmDict):\r
- if not obj.dict:\r
- return TYPE_SIMPLE, {}\r
- else:\r
- return TYPE_IMMDICT, [(self._box(k), self._box(v)) for k, v in obj.items()]\r
- else:\r
- oid = id(obj)\r
- if oid not in self.objects:\r
- self.objects[oid] = [0, obj]\r
- return TYPE_PROXY, oid\r
- \r
- def _unbox(self, (type, value)):\r
- if type == TYPE_SIMPLE:\r
- return value\r
- elif type == TYPE_TUPLE:\r
- return tuple([self._unbox(subobj) for subobj in value])\r
- elif type == TYPE_SLICE:\r
- return slice(*value)\r
- elif type == TYPE_LOCAL_PROXY:\r
- return self[value]\r
- elif type == TYPE_IMMDICT:\r
- return dict([(self._unbox(k), self._unbox(v)) for k, v in value])\r
- elif type == TYPE_PROXY:\r
- if value in self.proxy_cache:\r
- proxy = self.proxy_cache[value]\r
- else:\r
- proxy = SyncNetProxy(self.conn, value)\r
- self.proxy_cache[value] = proxy\r
- return proxy\r
- else:\r
- raise BoxingError("invalid boxed object type", type, value)\r
- \r
- def incref(self, oid):\r
- self.objects[oid][0] += 1\r
-\r
- def decref(self, oid):\r
- self.objects[oid][0] -= 1\r
- if self.objects[oid][0] <= 0:\r
- del self.objects[oid]\r
- \r
- def pack(self, obj):\r
- """packs an object (returns a package)"""\r
- return pickle.dumps(self._box(obj), PICKLE_PROTOCOL)\r
-\r
- def unpack(self, package):\r
- """unpacks a package (returns an object)"""\r
- return self._unbox(pickle.loads(package))\r
-\r
-\r
+++ /dev/null
-from threading import RLock\r
-import struct\r
-\r
-\r
-class Channel(object):\r
- """a channel transfers packages over a stream. a package is any blob of data,\r
- up to 4GB in size. channels are gauranteed to be thread-safe"""\r
- HEADER_FORMAT = ">L" # byte order must be the same at both sides!\r
- HEADER_SIZE = struct.calcsize(HEADER_FORMAT)\r
-\r
- def __init__(self, stream):\r
- self.lock = RLock()\r
- self.stream = stream\r
-\r
- def __repr__(self):\r
- return "<%s(%r)>" % (self.__class__.__name__, self.stream)\r
-\r
- def send(self, data):\r
- """sends a package"""\r
- try:\r
- self.lock.acquire()\r
- header = struct.pack(self.HEADER_FORMAT, len(data))\r
- self.stream.write(header + data)\r
- finally:\r
- self.lock.release()\r
- \r
- def recv(self):\r
- """receives a package (blocking)"""\r
- try:\r
- self.lock.acquire()\r
- length, = struct.unpack(self.HEADER_FORMAT, self.stream.read(self.HEADER_SIZE))\r
- return self.stream.read(length)\r
- finally:\r
- self.lock.release()\r
- \r
- def close(self):\r
- return self.stream.close()\r
-\r
- def fileno(self):\r
- return self.stream.fileno()\r
-\r
- def is_available(self):\r
- return self.stream.is_available()\r
-\r
- def wait(self):\r
- return self.stream.wait()\r
-\r
-\r
+++ /dev/null
-import sys\r
-from Boxing import Box, dump_exception, load_exception\r
-from ModuleNetProxy import RootImporter\r
-from Lib import raise_exception, AttrFrontend\r
-\r
-\r
-class Connection(object):\r
- """\r
- the rpyc connection layer (protocol and APIs). generally speaking, the only \r
- things you'll need to access directly from this object are:\r
- * modules - represents the remote python interprerer's modules namespace\r
- * execute - executes the given code on the other side of the connection\r
- * namespace - the namespace in which the code you `execute` resides\r
-\r
- the rest of the attributes should be of no intresent to you, except maybe for \r
- `remote_conn`, which represents the other side of the connection. it is unlikely,\r
- however, you'll need to use it (it is used interally).\r
- \r
- when you are done using a connection, and wish to release the resources it\r
- uses, you should call close(). you don't have to, but if you don't, the gc\r
- can't release the memory because of cyclic references.\r
- """\r
- \r
- def __init__(self, channel):\r
- self._closed = False\r
- self._local_namespace = {}\r
- self.channel = channel\r
- self.box = Box(self)\r
- self.async_replies = {}\r
- self.sync_replies = {}\r
- self.request_seq = 0\r
- self.module_cache = {}\r
- # user APIs:\r
- self.modules = RootImporter(self)\r
- self.remote_conn = self.sync_request("handle_getconn")\r
- self.namespace = AttrFrontend(self.remote_conn._local_namespace)\r
- \r
- def __repr__(self):\r
- if self._closed:\r
- return "<%s - closed>" % (self.__class__.__name__,)\r
- else:\r
- return "<%s(%r)>" % (self.__class__.__name__, self.channel)\r
-\r
- # \r
- # file api layer\r
- #\r
- def close(self):\r
- if self._closed:\r
- return\r
- self._closed = True\r
- self.box.close()\r
- self.channel.close()\r
- # untangle circular references\r
- del self._local_namespace\r
- del self.channel\r
- del self.box\r
- del self.async_replies\r
- del self.sync_replies\r
- del self.request_seq\r
- del self.module_cache\r
- del self.modules\r
- del self.remote_conn\r
- del self.namespace\r
- \r
- def fileno(self):\r
- return self.channel.fileno()\r
-\r
- #\r
- # protocol layer\r
- #\r
- def _recv(self):\r
- return self.box.unpack(self.channel.recv())\r
-\r
- def _send(self, *args):\r
- return self.channel.send(self.box.pack(args))\r
- \r
- def send_request(self, handlername, *args):\r
- try:\r
- self.channel.lock.acquire()\r
- # this must be atomic {\r
- self.request_seq += 1 \r
- self._send(handlername, self.request_seq, args)\r
- return self.request_seq\r
- # }\r
- finally:\r
- self.channel.lock.release()\r
-\r
- def send_exception(self, seq, exc_info):\r
- self._send("exception", seq, dump_exception(*exc_info))\r
-\r
- def send_result(self, seq, obj):\r
- self._send("result", seq, obj)\r
-\r
- def dispatch_result(self, seq, obj):\r
- if seq in self.async_replies:\r
- self.async_replies.pop(seq)("result", obj)\r
- else: \r
- self.sync_replies[seq] = obj\r
- \r
- def dispatch_exception(self, seq, obj):\r
- excobj = load_exception(obj)\r
- if seq in self.async_replies:\r
- self.async_replies.pop(seq)("exception", excobj)\r
- else:\r
- raise_exception(*excobj)\r
-\r
- def dispatch_request(self, handlername, seq, args):\r
- try:\r
- res = getattr(self, handlername)(*args)\r
- except SystemExit:\r
- raise\r
- except:\r
- self.send_exception(seq, sys.exc_info())\r
- else:\r
- self.send_result(seq, res)\r
-\r
- def sync_request(self, *args):\r
- """performs a synchronous (blocking) request"""\r
- seq = self.send_request(*args)\r
- while seq not in self.sync_replies:\r
- self.serve()\r
- return self.sync_replies.pop(seq)\r
- \r
- def async_request(self, callback, *args):\r
- """performs an asynchronous (non-blocking) request"""\r
- seq = self.send_request(*args)\r
- self.async_replies[seq] = callback\r
- \r
- #\r
- # servers api\r
- #\r
- def poll(self):\r
- """if available, serves a single request, otherwise returns (non-blocking serve)"""\r
- if self.channel.is_available():\r
- self.serve()\r
- return True\r
- else:\r
- return False\r
- \r
- def serve(self):\r
- """serves a single request or reply (may block)"""\r
- self.channel.wait()\r
- handler, seq, obj = self._recv()\r
- if handler == "result":\r
- self.dispatch_result(seq, obj)\r
- elif handler == "exception":\r
- self.dispatch_exception(seq, obj)\r
- else:\r
- self.dispatch_request(handler, seq, obj) \r
-\r
- #\r
- # root requests\r
- #\r
- def rimport(self, modulename):\r
- """imports a module by name (as a string)"""\r
- if modulename not in self.module_cache:\r
- module = self.sync_request("handle_import", modulename)\r
- self.module_cache[modulename] = module\r
- return self.module_cache[modulename] \r
-\r
- def execute(self, expr, mode = "exec"):\r
- """execute the given code at the remote side of the connection"""\r
- return self.sync_request("handle_execute", expr, mode)\r
-\r
- #\r
- # handlers layer\r
- #\r
- def handle_incref(self, oid):\r
- self.box.incref(oid)\r
- \r
- def handle_decref(self, oid):\r
- self.box.decref(oid)\r
- \r
- def handle_delattr(self, oid, name):\r
- delattr(self.box[oid], name)\r
-\r
- def handle_getattr(self, oid, name):\r
- return getattr(self.box[oid], name)\r
-\r
- def handle_setattr(self, oid, name, value):\r
- setattr(self.box[oid], name, value)\r
-\r
- def handle_delitem(self, oid, index):\r
- del self.box[oid][index]\r
-\r
- def handle_getitem(self, oid, index):\r
- return self.box[oid][index]\r
-\r
- def handle_setitem(self, oid, index, value):\r
- self.box[oid][index] = value\r
-\r
- def handle_call(self, oid, args, kwargs):\r
- return self.box[oid](*args, **kwargs)\r
-\r
- def handle_repr(self, oid):\r
- return repr(self.box[oid])\r
-\r
- def handle_str(self, oid):\r
- return str(self.box[oid])\r
-\r
- def handle_bool(self, oid):\r
- return bool(self.box[oid])\r
-\r
- def handle_import(self, modulename):\r
- return __import__(modulename, None, None, modulename.split(".")[-1])\r
-\r
- def handle_getconn(self):\r
- return self\r
-\r
- def handle_execute(self, expr, mode):\r
- codeobj = compile(expr, "<from %s>" % (self,), mode)\r
- return eval(codeobj, self._local_namespace)\r
-\r
-\r
-\r
+++ /dev/null
-#\r
-# welcome to RPyC. this demo serves as an introduction. i believe in learning through\r
-# showcases, and that's why this package comes with a demo subpackage, instead of\r
-# documentation\r
-# \r
-# so, the first thing we're gonna do is import the SocketConnection. this is a factory\r
-# function that returns us a new Connection object over a socket stream. we dont need\r
-# to get into details here.\r
-#\r
-from Rpyc.Factories import SocketConnection\r
-\r
-#\r
-# next, we'll get all the helpful utilities. the utilities include wrappers for builtin\r
-# functions, like dir(), so they'd work as expected with netproxies. \r
-#\r
-from Rpyc.Utils import *\r
-\r
-#\r
-# by now you should have an rpyc server running. if you dont, go to the Servers directory\r
-# and choose your favorite version of a socket server. for unixes i'd recommend the \r
-# forking server; for windows -- the threaded server.\r
-#\r
-# so let's connect to the server\r
-#\r
-c = SocketConnection("localhost", 22222)\r
-\r
-#\r
-# now it's time to explain a little about how rpyc works. it's quite simple really. the\r
-# magic comes from a concept called NetProxy. a NetProxy object delivers all of the\r
-# operations performed on it to the remote object. so if you get a list from your host,\r
-# what you're are really getting is a NetProxy to that list. it looks and works just\r
-# like a real list -- but everytime you do something on it, it actually performs a \r
-# request on the list object stored on the host. this is called boxing. this means\r
-# you can change the object you get locally, and the remote object changes, etc.\r
-#\r
-# however, for efficiency and other reason, not all objects you get are NetProxies.\r
-# all immutable and pickle-able objects pass by value (through pickle). these types\r
-# of objects include ints, longs, strings, and some other types. all other types are\r
-# passed by boxing.\r
-#\r
-# this boxing mechanism works on everything -- objects, functions, classes, and modules,\r
-# which is why rpyc is considered transparent. your code looks just as if it was meant \r
-# to run locally.\r
-#\r
-\r
-#\r
-# let's start with something simple -- getting a remote module. accessing the remote \r
-# namespace always starts with the `modules` attribute, then the module (or package) \r
-# name, and then the attribute you want to get.\r
-#\r
-\r
-print c.modules.sys\r
-print c.modules.sys.path \r
-c.modules.sys.path.append("lucy")\r
-print c.modules.sys.path[-1]\r
-\r
-#\r
-# these remote objects are first class objects, like all others in python. this means\r
-# you can store them in variables, pass them as parameters, etc.\r
-#\r
-rsys = c.modules.sys\r
-rpath = rsys.path\r
-rpath.pop(-1)\r
-\r
-#\r
-# and as you might expect, netproxies also look like the real objects\r
-#\r
-print dir(rpath)\r
-\r
-#\r
-# but there are a couple of issues with netproxies. the type(), isinstance(), and \r
-# issubclass() classes dont work on them... as they query the underlying object, not\r
-# the remote one. so:\r
-#\r
-print type(rsys.maxint) # <int> -- because it's a simple type which is passed by value)\r
-print type(rsys.path) # <SyncNetProxy> -- because, after all, it's a netproxy, not a list\r
-\r
-#\r
-# now for a demo of packages\r
-# (which looks very much like 'from xml.dom.minidom import parseString')\r
-#\r
-parseString = c.modules.xml.dom.minidom.parseString\r
-x = parseString("<a>lala</a>")\r
-print x\r
-x.toxml()\r
-print x.firstChild.nodeName\r
-\r
-#\r
-# however, there's a catch when working with packages like that. the way it works is\r
-# trying to find an attribute with that name, and if not found, trying to import a sub-\r
-# module. \r
-#\r
-# now in english:\r
-# c.module.xml is the xml module of the server. when you do c.module.xml.dom, rpyc looks\r
-# for an attribute named 'dom' inside the xml module. since there's no such attribute,\r
-# it tries to import a subpackage called xml.dom, which succeeds. then it does the same\r
-# for xml.dom.minidom, and xml.dom.minidom.parseString.\r
-#\r
-# but there are times when that's ambiguous. this mean that the module has both a sub-\r
-# module called 'X', and an attribute called 'X'. according to rpyc's algorithm, the\r
-# attribute 'X' is returned, not the sub-module.\r
-#\r
-# but if you need to be explicit, you can, and it works like this:\r
-#\r
-\r
-c.modules["xml.dom.minidom"].parseString("<a></a>")\r
-\r
-#\r
-# this will make sure the module 'xml.dom.minidom' is returned, and not an attribute.\r
-# in general, it's better to use this form, unless you know there are no such conflicts.\r
-# remeber that "Explicit is better than implicit", although it requires four more key\r
-# strokes. perhaps in a later version it will raise an exception if there's a conflict.\r
-#\r
-\r
-#\r
-# and now for a little demo of working with files (a common task)\r
-#\r
-f = c.modules.__builtin__.open("lala.txt", "w")\r
-f.write("lucy")\r
-f.close()\r
-c.modules.os.remove("lala.txt")\r
-\r
-#\r
-# now to a bitter part of life: exceptions. as you could expect, they work just like\r
-# regular exceptions\r
-#\r
-try:\r
- a = c.modules.sys.nonexistent_attribute\r
-except AttributeError:\r
- pass\r
-else:\r
- assert False\r
-\r
-try:\r
- a = c.modules.__builtin__.open("**\\//##~~..::!@#$%^&*()_+\n <,>?")\r
-except IOError:\r
- pass\r
-else:\r
- assert False\r
-\r
-print "goodbye"\r
-\r
-\r
-\r
-\r
-\r
-\r
-\r
-\r
-\r
-\r
-\r
-\r
-\r
-\r
-\r
+++ /dev/null
-#\r
-# okay, this demo is more advanced. here we'll learn about:\r
-# * redirecting standard files\r
-# * synchronous callbacks\r
-# * the ulitities module\r
-#\r
-import sys\r
-import os \r
-from Rpyc.Factories import SocketConnection\r
-from Rpyc.Utils import hasattr, getattr, reload, upload, remote_interpreter\r
-\r
-c = SocketConnection("localhost", 22222)\r
-\r
-#\r
-# redirect our stdout to the server\r
-#\r
-sys.stdout = c.modules.sys.stdout\r
-print "this time we focus on `the seatle music`"\r
-\r
-#\r
-# and the other way round\r
-#\r
-sys.stdout = sys.__stdout__\r
-c.modules.sys.stdout = sys.stdout\r
-c.modules.sys.stdout.write("alice in chains\n")\r
-\r
-#\r
-# but you dont believe me, so \r
-#\r
-c.modules.Rpyc.Demo.testmodule.printer("tool")\r
-\r
-#\r
-# and restore that\r
-#\r
-c.modules.sys.stdout = c.modules.sys.__stdout__\r
-\r
-#\r
-# now let's play with callbacks\r
-#\r
-def f(text):\r
- print text\r
-\r
-c.modules.Rpyc.Demo.testmodule.caller(f, "nirvana")\r
-\r
-#\r
-# and if you insist\r
-#\r
-def g(func, text):\r
- c.modules.Rpyc.Demo.testmodule.caller(func, text)\r
-\r
-c.modules.Rpyc.Demo.testmodule.caller(g, f, "soundgarden")\r
-\r
-#\r
-# now for the utilities module. it gives us the following cool functions:\r
-# * dir, getattr, hasattr, help, reload -- overriding builtins \r
-# * upload, download -- transfering files/directories to/from the client/server (all the permutations)\r
-# * remote_shell, remote_interpreter -- running remote processess and debugging\r
-#\r
-print hasattr(sys, "path")\r
-print hasattr(c.modules.sys, "path")\r
-\r
-print getattr(sys, "maxint")\r
-print getattr(c.modules.sys, "maxint")\r
-\r
-print reload(sys)\r
-print reload(c.modules.sys)\r
-\r
-f=open("lala.txt", "w")\r
-f.write("king crimson")\r
-f.close()\r
-upload(c, "lala.txt", "../lala.txt")\r
-os.remove("lala.txt")\r
-c.modules.os.remove("../lala.txt")\r
-\r
-remote_interpreter(c)\r
-\r
-\r
-print "goodbye"\r
-\r
-\r
-\r
+++ /dev/null
-#\r
-# this is the grand finale: asynchronous proxies as super-events\r
-#\r
-from Rpyc.Factories import SocketConnection, Async\r
-from Rpyc.Utils import *\r
-\r
-c = SocketConnection("localhost")\r
-\r
-#\r
-# this is the remote int type\r
-#\r
-rint = c.modules.__builtin__.int\r
-\r
-#\r
-# and we'll wrap it in an asynchronous wrapper\r
-#\r
-rint = Async(rint)\r
-\r
-#\r
-# now it still looks like a normal proxy... but operations on it return something called\r
-# an AsyncResult -- it's an object that represents the would-be result of the operation.\r
-# it has a .is_ready property, which indicates whether or not the result is ready, and \r
-# a .result property, which holds the result of the operations. when you access the .result\r
-# property, it will block until the result is returned\r
-#\r
-a = rint("123")\r
-b = rint("metallica")\r
-print a\r
-print b.is_ready\r
-print a.result\r
-print a\r
-\r
-#\r
-# and when an exception occurs, it looks like that\r
-#\r
-try:\r
- b.result\r
-except ValueError:\r
- pass\r
-\r
-#\r
-# only when you access the result you get the exception, which may look weird, but hey,\r
-# it's an asynchronous world out there.\r
-#\r
-\r
-#\r
-# there's another methodology for async proxies -- on_ready callbacks. instead of \r
-# getting the async result, you can register a callback to collect it, when it arrives.\r
-#\r
-def f(res):\r
- print "the result is",\r
- try:\r
- print res.result\r
- except:\r
- print "an exception"\r
-\r
-rint = Async(c.modules.__builtin__.int)\r
-\r
-ar = rint("123")\r
-ar.on_ready = f\r
-\r
-# this will cause an exception\r
-ar = rint("a perfect circle")\r
-ar.on_ready = f\r
-\r
-# or when you dont need to keep the async result \r
-rint("456").on_ready = f\r
-\r
-# and it's not limited to calling it. anything you do to the async proxy is asynchronous.\r
-# for example, you can also get attributes asynchronously:\r
-ar = rint.__str__\r
-\r
-#\r
-# now we'll do some other request, which will cause the results to arrive, and the callback \r
-# to be called. \r
-#\r
-print c.modules.sys\r
-\r
-############################################################################################\r
-#\r
-# this is where we get hardcore: threads and event callbacks\r
-#\r
-xxx = 0\r
-def blah():\r
- global xxx\r
- xxx += 1\r
-\r
-#\r
-# we'll start a thread on the server which on threadfunc (which is defined in the testmodule).\r
-# this function will call the callback we give it every second, but will ignore the result.\r
-# this practically means it's like an event -- trigger and forget. on the client side, the\r
-# callback will increment `xxx` every time it's called\r
-#\r
-c.modules.thread.start_new_thread(c.modules.Rpyc.Demo.testmodule.threadfunc, (blah,))\r
-\r
-#\r
-# we'll wait a little\r
-#\r
-import time\r
-time.sleep(5)\r
-\r
-#\r
-# and do some operation, which, along with it, will pull all incoming requests\r
-#\r
-print c.modules.sys\r
-print xxx\r
-\r
-#\r
-# and we can start a thread of our own to pull the requests in the background\r
-#\r
-import thread\r
-worker_running = True\r
-\r
-def worker(conn):\r
- while worker_running:\r
- conn.serve()\r
-\r
-thread.start_new_thread(worker, (c,))\r
-\r
-time.sleep(5)\r
-worker_running = False\r
-\r
-print xxx\r
-print "goodbye"\r
-\r
-#\r
-# L33TN3SS\r
-#\r
-\r
-\r
+++ /dev/null
-import time\r
-from Rpyc.Factories import SocketConnection, Async\r
-\r
-c = SocketConnection("localhost")\r
-c2 = SocketConnection("localhost")\r
-\r
-huge_xml = "<blah a='5' b='6'> " * 50000 + " </blah> " * 50000\r
-parseString = Async(c.modules.xml.dom.minidom.parseString)\r
-res = parseString(huge_xml)\r
-\r
-print "while we're waiting for the server to complete, we do other stuff"\r
-t = time.time()\r
-while not res.is_ready:\r
- time.sleep(0.5)\r
- # we dont want to use `c`, because it would block us (as the server is blocking)\r
- # but `c2` runs on another thread/process, so it wouldn't block\r
- print c2.modules.os.getpid()\r
-\r
-t = time.time() - t\r
-print "it took %d seconds" % (t,)\r
-\r
-print res.result\r
-\r
-\r
-#\r
-# note: to improve performance, delete the result when you no longer need it.\r
-# this should be done because the server might (as in this case) hold enormous \r
-# amounts of memory, which will slow it down\r
-#\r
-# if you do this:\r
-# res = parseString(huge_xml)\r
-# res = parseString(huge_xml)\r
-# res will be deleted only after the second operation finishes, because only when\r
-# the second result is assigned, the first is released -- server still holds \r
-# around 160MB of the old xml tree for nothing. so it's a good idea to `del res` \r
-# when you dont need it.\r
-#\r
-# also, there's a memory leak on the server, which i'm working on solving.\r
-#\r
-\r
-\r
+++ /dev/null
-#\r
-# this demo will show you working with asynch proxies and callback\r
-# verison 2.3 removes the AsyncCallback factory, and instead provides a mechanism\r
-# where async results can provide a callback. it simplifies the design, so i\r
-# went for it.\r
-#\r
-import time\r
-from Rpyc.Factories import SocketConnection, Async\r
-\r
-c1 = SocketConnection("localhost")\r
-\r
-# f1 is an async proxy to the server's sleep function\r
-f1 = Async(c1.modules.time.sleep)\r
-\r
-# this would block the server for 9 seconds\r
-r1 = f1(9)\r
-# and this would block it for 11\r
-r2 = f1(11)\r
-\r
-# of course the client isnt affected (that's the whole point of Async)\r
-# but since the same server can't block simultaneously, the second request is\r
-# queued. this is a good example of queuing.\r
-\r
-# now we'll wait for both results to finish. this should print around 20 lines\r
-# (more or less, depending on the phase)\r
-while not r1.is_ready or not r2.is_ready:\r
- print "!"\r
- time.sleep(1)\r
-\r
-print "---"\r
-\r
-# now we'll dig in the h4xx0r shit -- running things simultaneously\r
-# for this, we'll need another connection, and another proxy:\r
-c2 = SocketConnection("localhost")\r
-f2 = Async(c2.modules.time.sleep)\r
-\r
-# now we'll do the same as the above, but this time, it will happen simulatenously\r
-# becuase f1 and f2 work on different connections\r
-r1 = f1(9)\r
-r2 = f2(11)\r
-\r
-# so this time, it will print around 11 lines\r
-while not r1.is_ready or not r2.is_ready:\r
- print "!"\r
- time.sleep(1)\r
-\r
-print "---"\r
-\r
-# very haxxor indeed. now, we'll see how to use the on_ready callback\r
-r1 = f1(9)\r
-r2 = f2(11)\r
-\r
-def blah(res):\r
- print "look mama, no hands! res = %r" % (res.result,)\r
-\r
-# set the on_ready callback -- when r1 is becomes ready, the callback will\r
-# be called automagically\r
-r1.on_ready = blah \r
-\r
-# this should print 9 "!", then "look mama", then two more "!"\r
-while not r1.is_ready or not r2.is_ready:\r
- print "!"\r
- time.sleep(1)\r
-\r
-\r
-\r
+++ /dev/null
-# as you can see - the import line now requires even less typing!\r
-from Rpyc import *\r
-c = SocketConnection("localhost")\r
-\r
-#------------------------------------------------------------------------------ \r
-# this demo shows the new `execute` and `namespace` features of rpyc\r
-#------------------------------------------------------------------------------ \r
-\r
-\r
-# the code below will run AT THE OTHER SIDE OF THE CONNECTION... so you'll see\r
-# 'hello world' on the server's console\r
-c.execute("print 'hello world'")\r
-\r
-import sys\r
-c.modules.sys.stdout = sys.stdout\r
-\r
-# and this time, on our console\r
-c.execute("print 'brave new world'")\r
-\r
-# restore that\r
-c.modules.sys.stdout = c.modules.sys.__stdout__\r
-\r
-# anyway, the `execute` method runs the given code at the other side of the connection\r
-# and works in the `namespace` dict. what?\r
-c.execute("x = [1,2,3]")\r
-print c.namespace.x\r
-\r
-# now it makes sense, doesn't it? the 'namespace' attribute is something i called \r
-# AttrFrontend -- it wraps a dict with the attribute protocol, so you can access\r
-# it with the dot notation, instead of the braces notation (more intuitive).\r
-# this namespace works both ways -- executing code affects the namespace, while\r
-# altering the namespace directly also affects it:\r
-c.namespace.x.append(4)\r
-c.execute("x.append(5)")\r
-print c.namespace.x\r
-\r
-# but you should not assign complex objects (not int/float/str, etc) to this namespace\r
-# directy, or NetProxies will be created. there's nothing wrong with that, but keep\r
-# in mind it might cause blocking (and even deadlocks), as i'll explain later.\r
-\r
-# another cool thing i want to show is the second, optional parameter to execute: mode.\r
-# the mode controls how the code is compiled. the default mode is "exec", which means \r
-# it executes the code as a module. the other option is "eval" which returns a value.\r
-# so if you want to _do_ something, like printing of assigning a variable, you do it \r
-# with "exec", and if you want to evaluate something, you do it with "eval"\r
-# for example:\r
-\r
-# this will print None\r
-print c.execute("1+2")\r
-\r
-# while this will print 3\r
-print c.execute("1+2", "eval")\r
-\r
-# but there's a time in a man's life when he asks himself, why the heck? you can, as i \r
-# showed in other places, just do this:\r
-# c.modules.__builtin__.eval("1+2")\r
-# so what's the point? \r
-#\r
-# well, i've been waiting for this question. the rationale behind this seemingly useless \r
-# feature is for times you NEED to have the code executing remotely, but writing a \r
-# dedicated module for it is overdoing it:\r
-# * more files to update ==> more chance that you'll forget to update\r
-# * distributing the module to all of the machines\r
-# * making a mess on the file system\r
-# * it's really not a module... it's just some code that logically belongs to one single \r
-# module, but technical difficulties prevent it\r
-#\r
-# and to show you what i mean -- i want to start a thread on the server, like it did in \r
-# several places over the demos. this thread will send me an event every second. what i \r
-# used to do was, creating another module, like testmodule.py to define the thread \r
-# function, so it will exist on the server, and i could call it.\r
-# if i defined thread_func at the client side, then the thread will block when trying \r
-# to execute the code, because the client holds it. so this new mechanism lets you \r
-# distribute code in a volatile fashion:\r
-# * when the connection is closed, everything you defined is gone\r
-# * no file-system mess\r
-# * no need to distribute files across the network\r
-# * only one place to maintain\r
-\r
-c.execute("""\r
-my_thread_active = True\r
-\r
-def my_thread_func(callback):\r
- import time\r
- from Rpyc import Async\r
-\r
- callback = Async(callback)\r
- while my_thread_active:\r
- callback(time.time())\r
- time.sleep(1)\r
- print "the thread says goodbye"\r
-""")\r
-\r
-def callback(timestamp):\r
- print "the timestamp is", timestamp\r
-\r
-c.modules.thread.start_new_thread(c.namespace.my_thread_func, (callback,))\r
-c.modules.time.sleep(5)\r
-c.namespace.my_thread_active = False\r
-c.close()\r
-\r
-# it's not only for threads of course. there are many times when you NEED the code/objects \r
-# on the remote side. for example:\r
-# * situations that would block (like having the thread func on the client)\r
-# * code that check the type of the object (type or isinstance), and a NetProxy would make\r
-# it cry. DONT CHECK THE TYPE OF OBJECTS, PEOPLE, JUST USE THEM! that's why they invented \r
-# duck-typing. argh.\r
-# * other places i didnt think of as of yet. i want to sleep. leave me alone ;) zzzZZZ\r
-#\r
-# so enjoy!\r
-\r
-\r
-\r
-\r
-\r
-\r
-\r
-\r
-\r
-\r
-\r
-\r
-\r
-\r
-\r
-\r
-\r
-\r
-\r
-\r
+++ /dev/null
-import sys\r
-from Rpyc import PipeConnection\r
-\r
-c = PipeConnection(sys.stdin, sys.stdout)\r
-c.modules.sys.path.append("i love lucy")\r
-\r
-\r
-# child dies
\ No newline at end of file
+++ /dev/null
-# a demo for parent/child over pipes
-
-import sys
-from popen2 import popen3
-from Rpyc import PipeConnection
-
-cout, cin, cerr = popen3("python pipe-child.py")
-conn = PipeConnection(cout, cin)
-
-try:
- while True:
- conn.serve()
-except EOFError:
- print "goodbye child"
-
-print sys.path[-1]
-
+++ /dev/null
-import time\r
-from Rpyc.Factories import Async\r
-\r
-def threadfunc(callback):\r
- """this function will call the callback every second"""\r
- callback = Async(callback)\r
- try:\r
- while True:\r
- callback()\r
- time.sleep(1)\r
- except:\r
- print "thread exiting"\r
-\r
-def printer(text):\r
- print text\r
-\r
-def caller(func, *args):\r
- func(*args)\r
-
\ No newline at end of file
+++ /dev/null
-python demo-1.py\r
-python demo-2.py\r
-python demo-3.py\r
-python demo-4.py\r
-python demo-5.py\r
-python demo-6.py\r
+++ /dev/null
-"""\r
-the factory: \r
-exposes a nice and easy interface to the internals of rpyc. \r
-this module, along with Utils, are the only modules most clients will need.\r
-"""\r
-\r
-from Stream import SocketStream, PipeStream\r
-from Channel import Channel\r
-from Connection import Connection\r
-from AsyncNetProxy import AsyncNetProxy\r
-from weakref import WeakValueDictionary\r
-from Lib import DEFAULT_PORT\r
-\r
-\r
-__all__ = ["SocketConnection", "AuthSocketConnection", "PipeConnection", "Async"]\r
-_async_proxy_cache = WeakValueDictionary()\r
-\r
-class LoginError(Exception):\r
- pass\r
-\r
-def SocketConnection(host, port = DEFAULT_PORT, **kw):\r
- """shorthand for creating a conneciton over a socket to a server"""\r
- return Connection(Channel(SocketStream.from_new_socket(host, port, **kw)))\r
-\r
-def _create_auth_connection(chan, username, password):\r
- from Authentication import login\r
- if not login(chan, username, password):\r
- raise LoginError("the server did not accept the login")\r
- return Connection(chan)\r
- \r
-def AuthSocketConnection(host, username, password, port = DEFAULT_PORT, **kw):\r
- """shorthand for creating a conneciton over a socket to a server, with authentication"""\r
- chan = Channel(SocketStream.from_new_socket(host, port, **kw))\r
- return _create_auth_connection(chan, username, password)\r
-\r
-def PipeConnection(incoming, outgoing):\r
- """shorthand for creating a conneciton over a pipe"""\r
- return Connection(Channel(PipeStream(incoming, outgoing)))\r
-\r
-def AuthPipeConnection(incoming, outgoing, username, password):\r
- """shorthand for creating a conneciton over a pipe"""\r
- chan = Channel(PipeStream(incoming, outgoing))\r
- return _create_auth_connection(chan, username, password)\r
-\r
-def Async(proxy):\r
- """a factory for creating asynchronous proxies for existing synchronous ones"""\r
- key = id(proxy)\r
- if key in _async_proxy_cache:\r
- return _async_proxy_cache[key]\r
- else:\r
- new_proxy = AsyncNetProxy(proxy)\r
- _async_proxy_cache[key] = new_proxy\r
- return new_proxy\r
-\r
-\r
-\r
-\r
+++ /dev/null
-"""\r
-shared types, functions and constants\r
-"""\r
-import sys\r
-\r
-\r
-DEFAULT_PORT = 18812\r
-\r
-def raise_exception(typ, val, tbtext):\r
- """a helper for raising remote exceptions"""\r
- if type(typ) == str:\r
- raise typ\r
- else:\r
- val._remote_traceback = tbtext\r
- raise val\r
-\r
-class ImmDict(object):\r
- """immutable dict (passes by value)"""\r
- def __init__(self, dict):\r
- self.dict = dict\r
- def items(self):\r
- return self.dict.items()\r
-\r
-class AttrFrontend(object):\r
- """a wrapper that implements the attribute protocol for a dict backend"""\r
- def __init__(self, dict):\r
- self.__dict__["____dict"] = dict\r
- def __delattr__(self, name):\r
- del self.__dict__["____dict"][name]\r
- def __getattr__(self, name):\r
- return self.__dict__["____dict"][name]\r
- def __setattr__(self, name, value):\r
- self.__dict__["____dict"][name] = value\r
- def __repr__(self):\r
- return "<AttrFrontend %s>" % (self.__dict__["____dict"].keys(),)\r
-\r
-# installs an rpyc-enabled exception hook. this happens automatically when the module\r
-# is imported. also, make sure the current excepthook is the original one, so we dont \r
-# install our hook twice (and thus cause infinite recursion) in case the module is reloaded \r
-def rpyc_excepthook(exctype, value, traceback):\r
- if hasattr(value, "_remote_traceback"):\r
- print >> sys.stderr, "======= Remote traceback ======="\r
- print >> sys.stderr, value._remote_traceback\r
- print >> sys.stderr, "======= Local exception ======="\r
- orig_excepthook(exctype, value, traceback)\r
- else:\r
- orig_excepthook(exctype, value, traceback)\r
-\r
-if sys.excepthook.__name__ != "rpyc_excepthook": \r
- orig_excepthook = sys.excepthook\r
- sys.excepthook = rpyc_excepthook\r
-\r
-\r
+++ /dev/null
-from NetProxy import NetProxyWrapper\r
-\r
-\r
-class ModuleNetProxy(NetProxyWrapper):\r
- """a netproxy specialzied for exposing remote modules (first tries to getattr,\r
- if it fails tries to import)"""\r
- __doc__ = NetProxyWrapper.__doc__\r
- \r
- def __init__(self, proxy, base):\r
- NetProxyWrapper.__init__(self, proxy)\r
- self.__dict__["____base"] = base\r
- self.__dict__["____cache"] = {}\r
-\r
- def __request__(self, handler, *args):\r
- return self.__dict__["____conn"].sync_request(handler, self.__dict__["____oid"], *args)\r
-\r
- def __getattr__(self, name):\r
- if name in self.__dict__["____cache"]:\r
- return self.__dict__["____cache"][name]\r
-\r
- try:\r
- return self.__request__("handle_getattr", name)\r
- except AttributeError:\r
- pass\r
- \r
- try:\r
- fullname = self.__dict__["____base"] + "." + name\r
- obj = self.__dict__["____conn"].rimport(fullname)\r
- module = ModuleNetProxy(obj, fullname)\r
- self.__dict__["____cache"][name] = module\r
- return module\r
- except ImportError:\r
- raise AttributeError("'module' object has not attribute or submodule %r" % (name,))\r
-\r
-class RootImporter(object):\r
- """the root of the interpreter's import hierarchy"""\r
- \r
- def __init__(self, conn):\r
- self.__dict__["____conn"] = conn\r
- \r
- def __getitem__(self, name):\r
- return self.__dict__["____conn"].rimport(name)\r
-\r
- def __getattr__(self, name):\r
- return ModuleNetProxy(self[name], name)\r
-\r
+++ /dev/null
-from Lib import ImmDict\r
-\r
-\r
-def fully_dynamic_metaclass(name, bases, dict):\r
- """\r
- a meta class that enables special methods to be accessed like regular names \r
- (via __getattr__), like it used to be in old-style classes.\r
- """\r
-\r
- special_methods = [\r
- "__hash__", "__len__", "__iter__", "next", "__reversed__",\r
- "__add__", "__iadd__", "__radd__", "__sub__", "__isub__", "__rsub__", "__mul__",\r
- "__imul__", "__rmul__", "__div__", "__idiv__", "__rdiv__", "__truediv__", \r
- "__itruediv__", "__rtruediv__", "__floordiv__", "__ifloordiv__", "__rfloorfiv__", \r
- "__pow__", "__ipow__", "__rpow__", "__lshift__", "__ilshift__", "__rlshift__",\r
- "__rshift__", "__irshift__", "__rrshift__", "__and__", "__iand__", "__rand__",\r
- "__or__", "__ior__", "__ror__", "__xor__", "__ixor__", "__rxor__", "__mod__", \r
- "__imod__", "__rmod__", "__divmod__", "__idivmod__", "__rdivmod__", "__pos__", \r
- "__neg__", "__int__", "__float__", "__long__", "__oct__", "__hex__", "__coerce__",\r
- "__eq__", "__ne__", "__le__", "__ge__", "__lt__", "__gt__", "__cmp__",\r
- ]\r
- \r
- # i added '__class__' to the special attributes, but it broke some things \r
- # (like dir), so we'll have to live without it\r
- special_attributes = ["__doc__", "__module__"] \r
-\r
- def make_method(name):\r
- def caller(self, *a, **k):\r
- return self.__getattr__(name)(*a, **k)\r
- return caller\r
-\r
- def make_property(name):\r
- def getter(self):\r
- return self.__getattr__(name)\r
- def setter(self, value):\r
- self.__setattr__(name, value)\r
- def deller(self):\r
- self.__delattr__(name)\r
- return property(getter, setter, deller)\r
-\r
- classdict = {}\r
- for sn in special_methods:\r
- classdict[sn] = make_method(sn)\r
- classdict.update(dict)\r
- for sa in special_attributes:\r
- classdict[sa] = make_property(sa)\r
- return type(name, bases, classdict)\r
-\r
-class NetProxy(object):\r
- """NetProxy - convey local operations to the remote object. this is an abstract class"""\r
- __metaclass__ = fully_dynamic_metaclass\r
- \r
- def __init__(self, conn, oid):\r
- self.__dict__["____conn"] = conn\r
- self.__dict__["____oid"] = oid\r
-\r
- def __request__(self, handler, *args):\r
- raise NotImplementedError()\r
-\r
- def __call__(self, *args, **kwargs):\r
- return self.__request__("handle_call", args, ImmDict(kwargs))\r
-\r
- def __delattr__(self, *args):\r
- return self.__request__("handle_delattr", *args)\r
- def __getattr__(self, *args):\r
- return self.__request__("handle_getattr", *args)\r
- def __setattr__(self, *args):\r
- return self.__request__("handle_setattr", *args)\r
- \r
- def __delitem__(self, *args):\r
- return self.__request__("handle_delitem", *args)\r
- def __getitem__(self, *args):\r
- return self.__request__("handle_getitem", *args)\r
- def __setitem__(self, *args):\r
- return self.__request__("handle_setitem", *args)\r
- \r
- # special cases\r
- def __repr__(self, *args):\r
- return self.__request__("handle_repr", *args)\r
- def __str__(self, *args):\r
- return self.__request__("handle_str", *args)\r
- def __nonzero__(self, *args):\r
- return self.__request__("handle_bool", *args)\r
-\r
-class NetProxyWrapper(NetProxy):\r
- """a netproxy that wraps an inner netproxy"""\r
- __doc__ = NetProxy.__doc__\r
-\r
- def __init__(self, proxy):\r
- NetProxy.__init__(self, proxy.__dict__["____conn"], proxy.__dict__["____oid"])\r
- # we must keep the original proxy alive\r
- self.__dict__["____original_proxy"] = proxy \r
-\r
-def _dummy_callback(*args, **kw):\r
- pass\r
-\r
-class SyncNetProxy(NetProxy):\r
- """the default, synchronous netproxy"""\r
- __doc__ = NetProxy.__doc__\r
-\r
- def __init__(self, conn, oid):\r
- NetProxy.__init__(self, conn, oid)\r
- self.__dict__["____conn"].sync_request("handle_incref", self.__dict__["____oid"])\r
-\r
- def __del__(self):\r
- try:\r
- # decref'ing is done asynchronously, because we dont need to wait for the remote \r
- # object to die. moreover, we dont care if it fails, because that would mean the \r
- # connection is broken, so the remote object is already dead\r
- self.__dict__["____conn"].async_request(_dummy_callback, "handle_decref", self.__dict__["____oid"])\r
- except:\r
- pass\r
- \r
- def __request__(self, handler, *args):\r
- return self.__dict__["____conn"].sync_request(handler, self.__dict__["____oid"], *args)\r
-\r
-\r
-\r
-\r
+++ /dev/null
-import os\r
-import socket\r
-import sys\r
-import gc\r
-from threading import Thread\r
-from Rpyc.Connection import Connection\r
-from Rpyc.Stream import SocketStream, PipeStream\r
-from Rpyc.Channel import Channel\r
-from Rpyc.Lib import DEFAULT_PORT\r
-\r
-\r
-class Logger(object):\r
- def __init__(self, logfile = None, active = True):\r
- self.logfile = logfile\r
- self.active = active\r
- def __call__(self, *args):\r
- if not self.logfile:\r
- return\r
- if not self.active:\r
- return\r
- text = " ".join([str(a) for a in args])\r
- self.logfile.write("[%d] %s\n" % (os.getpid(), text))\r
- self.logfile.flush()\r
- \r
-log = Logger(sys.stdout)\r
-\r
-def _serve(chan):\r
- conn = Connection(chan)\r
- try:\r
- try:\r
- while True:\r
- conn.serve()\r
- except EOFError:\r
- pass\r
- finally:\r
- conn.close()\r
- gc.collect()\r
-\r
-def serve_stream(stream, authenticate = False, users = None):\r
- chan = Channel(stream)\r
- \r
- if authenticate:\r
- from Rpyc.Authentication import accept\r
- log("requiring authentication")\r
- if accept(chan, users):\r
- log("authenication successful")\r
- else:\r
- log("authentication failed")\r
- return\r
- \r
- _serve(chan)\r
-\r
-def create_listener_socket(port):\r
- sock = socket.socket()\r
- sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\r
- #sock.bind(("", port))\r
- sock.bind(("localhost", port))\r
- sock.listen(4)\r
- log("listening on", sock.getsockname())\r
- return sock\r
-\r
-def serve_socket(sock, **kw):\r
- sockname = sock.getpeername()\r
- log("welcome", sockname)\r
- try:\r
- try:\r
- serve_stream(SocketStream(sock), **kw)\r
- except socket.error:\r
- pass\r
- finally:\r
- log("goodbye", sockname)\r
-\r
-def serve_pipes(incoming, outgoing, **kw):\r
- serve_stream(PipeStream(incoming, outgoing), **kw)\r
-\r
-def threaded_server(port = DEFAULT_PORT, **kwargs):\r
- sock = create_listener_socket(port)\r
- while True:\r
- newsock, name = sock.accept()\r
- t = Thread(target = serve_socket, args = (newsock,), kwargs = kwargs)\r
- t.setDaemon(True)\r
- t.start()\r
-\r
-def start_threaded_server(*args, **kwargs):\r
- """starts the threaded_server on a separate thread. this turns the \r
- threaded_server into a mix-in you can place anywhere in your code"""\r
- t = Thread(target = threaded_server, args = args, kwargs = kwargs)\r
- t.setDaemon(True)\r
- t.start()\r
-\r
+++ /dev/null
-from ServerUtils import DEFAULT_PORT, threaded_server\r
-\r
-#\r
-# define user:password pairs of your own\r
-#\r
-users = {\r
- "johnnash" : "t3hgam3r",\r
- "tolkien" : "1ring",\r
- "yossarian" : "catch22",\r
-}\r
-\r
-def main(port = DEFAULT_PORT):\r
- threaded_server(port, authenticate = True, users = users)\r
-\r
-if __name__ == "__main__":\r
- main()\r
-\r
-\r
-\r
+++ /dev/null
-import sys\r
-import os\r
-from ServerUtils import serve_socket, create_listener_socket, DEFAULT_PORT\r
-\r
-\r
-def serve_in_child(sock):\r
- """forks a child to run the server in. the parent doesnt wait() for the child, \r
- so if you do a `ps`, you'll see zombies. but who cares. i used to do a doublefork()\r
- for that, but it's really meaningless. anyway, when the parent dies, the zombies\r
- die as well."""\r
- if os.fork() == 0:\r
- try:\r
- serve_socket(sock)\r
- finally:\r
- sys.exit()\r
-\r
-def main(port = DEFAULT_PORT):\r
- sock = create_listener_socket(port)\r
- while True:\r
- newsock, name = sock.accept()\r
- serve_in_child(newsock)\r
-\r
-if __name__ == "__main__":\r
- main()\r
-\r
-
\ No newline at end of file
+++ /dev/null
-import select\r
-import socket\r
-from ServerUtils import log, create_listener_socket, DEFAULT_PORT, SocketStream, Connection\r
-\r
-\r
-def main(port = DEFAULT_PORT):\r
- sock = create_listener_socket(port)\r
- connections = []\r
- \r
- while True:\r
- rlist, wlist, xlist = select.select([sock] + connections, [], [])\r
- \r
- if sock in rlist:\r
- rlist.remove(sock)\r
- newsock, name = sock.accept()\r
- conn = Connection(SocketStream(newsock))\r
- conn.sockname = name\r
- connections.append(conn)\r
- log("welcome", conn.sockname)\r
- \r
- for conn in rlist:\r
- try:\r
- conn.serve()\r
- except (EOFError, socket.error):\r
- connections.remove(conn)\r
- log("goodbyte", conn.sockname)\r
-\r
-if __name__ == "__main__":\r
- main()\r
+++ /dev/null
-from ServerUtils import serve_socket, create_listener_socket, DEFAULT_PORT\r
-\r
-\r
-def main(port = DEFAULT_PORT):\r
- sock = create_listener_socket(port)\r
- while True:\r
- newsock, name = sock.accept()\r
- serve_socket(newsock)\r
-\r
-if __name__ == "__main__":\r
- main()\r
-\r
-
\ No newline at end of file
+++ /dev/null
-#!/usr/bin/env python
-#
-# installation instructions
-# * add a service in /etc/services for rpyc: tcp port 18812
-# * add "rpyc .... /usr/lib/pythonXX/site-packages/Rpyc/Servers/std_server.py"
-# to /etc/inetd.conf (i dont remember syntax, rtfm)
-# * dont forget to chmod +x this file
-# * restart inetd with sighup
-#
-import sys
-import time
-from traceback import format_exception
-from ServerUtils import log, serve_pipes
-
-
-def main(filename = "/tmp/rpyc-server.log"):
- log.logfile = open(filename, "a")
- log("-" * 80)
- log("started serving at", time.asctime())
- try:
- try:
- serve_pipes(sys.stdin, sys.stdout)
- except:
- log(*format_exception(*sys.exc_info()))
- finally:
- log("server exits at", time.asctime())
-
-if __name__ == "__main__":
- main()
-
-
\ No newline at end of file
+++ /dev/null
-from ServerUtils import DEFAULT_PORT, threaded_server\r
-\r
-\r
-def main(port = DEFAULT_PORT):\r
- threaded_server(port)\r
-\r
-if __name__ == "__main__":\r
- main()\r
-\r
+++ /dev/null
-import select\r
-import socket\r
-\r
-\r
-class Stream(object):\r
- """\r
- a stream is a file-like object that is used to expose a consistent and uniform interface\r
- to the 'physical' file-like objects (like sockets and pipes), which have many quirks (sockets\r
- may recv() less than `count`, pipes are simplex and don't flush, etc.).\r
- a stream is always in blocking mode.\r
- """\r
- \r
- def close(self):\r
- raise NotImplementedError()\r
-\r
- def fileno(self):\r
- raise NotImplementedError()\r
-\r
- def is_available(self):\r
- rlist, wlist, xlist = select.select([self], [], [], 0)\r
- return bool(rlist)\r
-\r
- def wait(self):\r
- select.select([self], [], [])\r
-\r
- def read(self, count):\r
- raise NotImplementedError()\r
-\r
- def write(self, data):\r
- raise NotImplementedError()\r
- \r
- \r
-class SocketStream(Stream):\r
- """\r
- a stream that operates over a socket. note: \r
- * the socket is expected to be reliable (i.e., TCP)\r
- * the socket is expected to be in blocking mode\r
- """\r
- def __init__(self, sock):\r
- self.sock = sock\r
- \r
- def __repr__(self):\r
- host, port = self.sock.getpeername()\r
- return "<%s(%s:%d)>" % (self.__class__.__name__, host, port)\r
-\r
- def from_new_socket(cls, host, port, **kw):\r
- sock = socket.socket(**kw)\r
- sock.connect((host, port))\r
- return cls(sock)\r
- from_new_socket = classmethod( from_new_socket )\r
-\r
- def fileno(self):\r
- return self.sock.fileno()\r
- \r
- def close(self):\r
- self.sock.close()\r
- \r
- def read(self, count):\r
- data = []\r
- while count > 0:\r
- buf = self.sock.recv(count)\r
- if not buf:\r
- raise EOFError()\r
- count -= len(buf)\r
- data.append(buf)\r
- return "".join(data)\r
- \r
- def write(self, data):\r
- while data:\r
- count = self.sock.send(data)\r
- data = data[count:]\r
-\r
-\r
-class PipeStream(Stream):\r
- """\r
- a stream that operates over two simplex pipes. \r
- note: the pipes are expected to be in blocking mode\r
- """\r
- \r
- def __init__(self, incoming, outgoing):\r
- self.incoming = incoming\r
- self.outgoing = outgoing\r
-\r
- def fileno(self):\r
- return self.incoming.fileno()\r
- \r
- def close(self):\r
- self.incoming.close()\r
- self.outgoing.close()\r
- \r
- def read(self, count):\r
- data = []\r
- while count > 0:\r
- buf = self.incoming.read(count)\r
- if not buf:\r
- raise EOFError()\r
- count -= len(buf)\r
- data.append(buf)\r
- return "".join(data)\r
- \r
- def write(self, data):\r
- self.outgoing.write(data)\r
- self.outgoing.flush()\r
-\r
- # win32: stubs\r
- import sys\r
- if sys.platform == "win32":\r
- def is_available(self):\r
- return True\r
-\r
- def wait(self):\r
- pass\r
-\r
-\r
-\r
-\r
-\r
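
The read()/write() loops above are the crux of SocketStream: recv() may
return fewer bytes than requested and send() may accept fewer than given, so
both must iterate. The same exact-read pattern as a standalone sketch:

    import socket

    def recv_exact(sock, count):
        # mirror of SocketStream.read: loop until exactly `count` bytes
        # have arrived; an empty recv() means the peer closed the socket
        data = []
        while count > 0:
            buf = sock.recv(count)
            if not buf:
                raise EOFError("connection closed mid-message")
            count -= len(buf)
            data.append(buf)
        return "".join(data)
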
+++ /dev/null
-"""\r
-convenience utilities:\r
- * provides dir(), getattr(), hasattr(), help() and reload() that support netproxies\r
- * provides obtain() for actually transferring remote objects\r
- * provides upload() and download() for working with files\r
- * provides a nice interface for remote shell operations (like os.system)\r
- and for opening a remote python interpreter (for debugging, etc.)\r
-\r
-i removed lots of stuff from the __all__, keeping only the useful things, \r
-so that import * wouldn't mess up your namespace too much\r
-"""\r
-import __builtin__\r
-import sys\r
-import os\r
-import inspect\r
-from NetProxy import NetProxy\r
-\r
-__all__ = [\r
- "dir", "getattr", "hasattr", "help", "reload", "obtain",\r
- "upload", "download",\r
-]\r
-\r
-CHUNK_SIZE = 4096\r
-\r
-class UtilityError(Exception): \r
- pass\r
-\r
-#\r
-# working with netproxies\r
-#\r
-def dir(*obj):\r
- """a version of dir() that supports NetProxies"""\r
- if not obj:\r
-        inspect_stack = inspect.stack()[1][0].f_locals.keys()\r
-        inspect_stack.sort()\r
-        return inspect_stack\r
-    if len(obj) != 1:\r
-        raise TypeError("dir expected at most 1 argument, got %d" % (len(obj),))\r
- obj = obj[0]\r
- if isinstance(obj, NetProxy):\r
- return obj.__dict__["____conn"].modules["__builtin__"].dir(obj)\r
- else:\r
- return __builtin__.dir(obj)\r
-\r
-def getattr(obj, name, *default):\r
- """a version of getattr() that supports NetProxies"""\r
- if len(default) > 1:\r
- raise TypeError("getattr expected at most 3 arguments, got %d" % (2 + len(default),))\r
- if isinstance(obj, NetProxy):\r
- try:\r
- return obj.__getattr__(name)\r
- except AttributeError:\r
- if not default:\r
- raise\r
- return default[0]\r
- else:\r
- return __builtin__.getattr(obj, name, *default)\r
-\r
-def hasattr(obj, name):\r
- """a version of hasattr() that supports NetProxies"""\r
- try:\r
- getattr(obj, name)\r
- except AttributeError:\r
- return False\r
- else:\r
- return True\r
-\r
-class _Helper(object):\r
- """a version of help() that supports NetProxies"""\r
- def __repr__(self):\r
- return repr(__builtin__.help)\r
- def __call__(self, obj = None):\r
- if isinstance(obj, NetProxy):\r
- print "Help on NetProxy object for an instance of %r:" % (obj.__getattr__("__class__").__name__,)\r
- print\r
- print "Doc:"\r
- print obj.__doc__\r
- print\r
- print "Members:"\r
- print dir(obj)\r
- else:\r
- __builtin__.help(obj)\r
-help = _Helper()\r
-\r
-def reload(module):\r
- """a version of reload() that supports NetProxies"""\r
- if isinstance(module, NetProxy):\r
- return module.__dict__["____conn"].modules["__builtin__"].reload(module)\r
- else:\r
- return __builtin__.reload(module)\r
-\r
-def obtain(proxy):\r
- """transfers a remote object to this process. this is done by pickling it, so it\r
- must be a picklable object, and should be immutable (otherwise the local object\r
- may be different from the remote one, which may cause problems). generally speaking, \r
- you should not obtain remote objects, as NetProxies provide a stronger mechanism.\r
- but there are times when you want to get the real object in your hand, for pickling\r
- it locally (e.g., storing test results in a file), or if the connection is too slow."""\r
- import cPickle\r
- dumped = proxy.__dict__["____conn"].modules.cPickle.dumps(proxy)\r
- return cPickle.loads(dumped)\r
-\r
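
As the docstring says, obtain() copies a remote object by value via pickling,
so the result is an ordinary local object rather than a NetProxy. A hedged
usage sketch (connection setup omitted):

    remote_path = conn.modules.sys.path    # NetProxy to the remote list
    local_path = obtain(remote_path)       # pickled across; now a real list
    assert type(local_path) is list
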
-def getconn(obj):\r
- """returns the connection of a NetProxy"""\r
- if "____conn" not in obj.__dict__:\r
- raise TypeError("`obj` is not a NetProxy")\r
-    return obj.__dict__["____conn"]\r
-\r
-#\r
-# working with files\r
-#\r
-def upload(conn, localpath, remotepath, *a, **k):\r
- """uploads a file or a directory recursively (depending on what `localpath` is)"""\r
- if os.path.isdir(localpath):\r
- upload_dir(conn, localpath, remotepath, *a, **k)\r
- elif os.path.isfile(localpath):\r
- upload_file(conn, localpath, remotepath, *a, **k)\r
- else:\r
- raise UtilityError("can only upload files or directories")\r
-\r
-def download(conn, remotepath, localpath, *a, **k):\r
- """downloads a file or a directory recursively (depending on what `remotepath` is)"""\r
- if conn.modules.os.path.isdir(remotepath):\r
- download_dir(conn, remotepath, localpath, *a, **k)\r
- elif conn.modules.os.path.isfile(remotepath):\r
- download_file(conn, remotepath, localpath, *a, **k)\r
- else:\r
- raise UtilityError("can only download files or directories")\r
-\r
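
upload() and download() dispatch on whether the path names a file or a
directory, recursing for directories. A hedged sketch; the paths are
hypothetical and conn is an open connection:

    upload(conn, "/etc/hosts", "/tmp/hosts.copy")       # single file
    download(conn, "/var/log/httpd", "./httpd-logs")    # whole directory tree
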
-def upload_file(conn, localpath, remotepath):\r
- lf = open(localpath, "rb")\r
- rf = conn.modules.__builtin__.open(remotepath, "wb")\r
- while True:\r
- chunk = lf.read(CHUNK_SIZE)\r
- if not chunk:\r
- break\r
- rf.write(chunk)\r
- lf.close()\r
- rf.close()\r
-\r
-def download_file(conn, remotepath, localpath):\r
- lf = open(localpath, "wb")\r
- rf = conn.modules.__builtin__.open(remotepath, "rb")\r
- while True:\r
- chunk = rf.read(CHUNK_SIZE)\r
- if not chunk:\r
- break\r
- lf.write(chunk)\r
- lf.close()\r
- rf.close()\r
- \r
-def upload_dir(conn, localpath, remotepath, extensions = [""]):\r
- if not conn.modules.os.path.exists(remotepath):\r
- conn.modules.os.makedirs(remotepath)\r
- for fn in os.listdir(localpath):\r
- lfn = os.path.join(localpath, fn)\r
- rfn = conn.modules.os.path.join(remotepath, fn)\r
- if os.path.isdir(lfn):\r
- upload_dir(conn, lfn, rfn, extensions)\r
- elif os.path.isfile(lfn):\r
- for ext in extensions:\r
- if fn.endswith(ext):\r
- upload_file(conn, lfn, rfn)\r
- break\r
-\r
-def download_dir(conn, remotepath, localpath, extensions = [""]):\r
- if not os.path.exists(localpath):\r
- os.makedirs(localpath)\r
- for fn in conn.modules.os.listdir(remotepath):\r
- lfn = os.path.join(localpath, fn)\r
- rfn = conn.modules.os.path.join(remotepath, fn)\r
-        if conn.modules.os.path.isdir(rfn):\r
-            download_dir(conn, rfn, lfn, extensions)\r
-        elif conn.modules.os.path.isfile(rfn):\r
- for ext in extensions:\r
- if fn.endswith(ext):\r
- download_file(conn, rfn, lfn)\r
- break\r
-\r
-#\r
-# distributing modules between hosts\r
-#\r
-def upload_package(conn, module, remotepath = None):\r
- """uploads the given package to the server, storing it in `remotepath`. if \r
- remotepath is None, it defaults to the server's site-packages. if the package\r
- already exists, it is overwritten.\r
- usage:\r
- import xml\r
- upload_package(conn, xml)"""\r
- if remotepath is None:\r
- remotepath = conn.modules["distutils.sysconfig"].get_python_lib()\r
- localpath = os.path.dirname(module.__file__)\r
- upload_dir(conn, localpath, remotepath, [".py", ".pyd", ".dll", ".so", ".zip"])\r
-\r
-def update_module(conn, module):\r
- """updates an existing module on the server. the local module is transfered to the\r
- server, overwriting the old one, and is reloaded. \r
- usage:\r
- import xml.dom.minidom\r
-        update_module(conn, xml.dom.minidom)"""\r
- remote_module = conn.modules[module.__name__]\r
- local_file = inspect.getsourcefile(module)\r
- remote_file = inspect.getsourcefile(remote_module)\r
-    upload_file(conn, local_file, remote_file)\r
- reload(remote_module)\r
-\r
-#\r
-# remote shell and interpreter\r
-#\r
-def _redirect_std(conn):\r
- rsys = conn.modules.sys\r
- orig = (rsys.stdin, rsys.stdout, rsys.stderr)\r
- rsys.stdin = sys.stdin\r
- rsys.stdout = sys.stdout\r
- rsys.stderr = sys.stderr\r
- return orig\r
-\r
-def _restore_std(conn, (stdin, stdout, stderr)):\r
- rsys = conn.modules.sys\r
- rsys.stdin = stdin\r
- rsys.stdout = stdout\r
- rsys.stderr = stderr\r
- \r
-def remote_shell(conn, command = None):\r
- """runs the given command on the server, just like os.system, but takes care\r
- of redirecting the server's stdout/stdin to the client"""\r
- # BUG: on windows, there's a problem with redirecting the output of spawned command.\r
- # it runs fine and all, just the client can't see the output. again, windows sucks.\r
- if command is None:\r
- if sys.platform == "win32":\r
- command = "%COMSPEC%"\r
- else:\r
- command = "/bin/sh"\r
- try:\r
- orig = _redirect_std(conn)\r
- return conn.modules.os.system(command)\r
- finally:\r
- _restore_std(conn, orig)\r
- \r
-def remote_interpreter(conn, namespace = None):\r
- """starts an interactive interpreter on the server"""\r
- if namespace is None:\r
- #namespace = inspect.stack()[1][0].f_globals.copy()\r
- #namespace.update(inspect.stack()[1][0].f_locals)\r
- namespace = {"conn" : conn}\r
- try:\r
- orig = _redirect_std(conn)\r
- return conn.modules["Rpyc.Utils"]._remote_interpreter_server_side(**namespace)\r
- finally:\r
- _restore_std(conn, orig)\r
-\r
-def _remote_interpreter_server_side(**namespace):\r
- import code\r
- namespace.update(globals())\r
- code.interact(local = namespace)\r
-\r
-def remote_post_mortem(conn):\r
- """a version of pdb.pm() that operates on exceptions at the remote side of the connection"""\r
- import pdb\r
-    pdb.post_mortem(conn.modules.sys.last_traceback)\r
-\r
-\r
-\r
-\r
-\r
+++ /dev/null
-"""\r
-RPyC \r
-by tomer filiba (tomerfiliba at gmail dot com)\r
-"""\r
-from Factories import *\r
-from Utils import *\r
-from Factories import __all__ as Factories_exports\r
-from Utils import __all__ as Utils_exports\r
-\r
-__all__ = Factories_exports + Utils_exports\r
+++ /dev/null
-1.2:\r
------\r
-the first 1.XX release. supports synchronous operations\r
-\r
-1.21:\r
------\r
-bugfix release: fixed the problem with objects that don't have a __repr__ or \r
-__str__\r
-\r
-1.6:\r
------\r
-this version never came public. added thread synchronization and events,\r
-which allow the server to inform the client of events, without blocking\r
-for client response.\r
-\r
-2.2:\r
------\r
-non backwards-compatible!\r
-first 2.XX release, added asynchronous proxies and thread synchronization\r
-this has brought with it a new kind of server -- the threaded_server,\r
-which performs well on both linux and windows. the older 'events' mechanism\r
-was removed as asynchronous proxies are much better, and also allow \r
-distributed computing.\r
-also, added the Utils module, which provides many convenience functions.\r
-in version 1.XX, i just overrode __builtin__.xxx, which was something\r
-you might not have wanted. so now you do "from Utils import *"\r
-also: revised demos\r
-note: the selecting and simple servers are deprecated, and are there only\r
-for systems that don't support threads (some older flavors of unix).\r
-\r
-known bugs:\r
- * windows bug: the pipe parent/child don't work on windows\r
-\r
-2.22:\r
------\r
-some bug fixes to the servers, etc.\r
-the selecting server turned out buggy. don't use it.\r
-added a new demo\r
-\r
-known bugs:\r
- * the selecting server\r
- * windows bug: the Utils.remote_shell doesn't redirect the output of the\r
-spawned command.\r
-\r
-2.25:\r
------\r
-fixed the selecting server\r
-fixed a bug in download (the problem with copy-paste). thanks go to steff.\r
-added two new utils: upload_package and update_module. they allow you to\r
-upload packages to the server, and update an existing module. i don't think \r
-they are very useful, but what the heck.\r
-\r
-2.26:\r
------\r
-fixed a bug when the remote object does not provide __nonzero__\r
-\r
-2.30:\r
------\r
-fixed several minor bugs (mostly semantic)\r
-added protection for installing excepthook more than once\r
-added the `obtain` function, which "brings forth" a remote object \r
-added remote_post_mortem (debugging the traceback of a remote exception)\r
-added an optional callback for Async proxies (which is called when they are ready)\r
-therefore, the AsyncCallback mechanism was removed. \r
-changed demo-3 and added demo-5.\r
-fixed a bug: __del__ should not be synchronous\r
-connection objects now have a `remote_conn` property, letting you mess around with\r
-the remote side of the connection\r
-\r
-2.31:\r
------\r
-when you close() a connection, it releases all resources as well (this is done to \r
-untangle cyclic-dependencies and make the gc happy)\r
-thanks to that, servers now release resources better and sooner\r
-\r
-2.32:\r
------\r
-fixed a bug in __all__ of factories.py\r
-removed all the stuff from __init__.py (just useless)\r
-cleanups\r
-semantic corrections\r
-\r
-2.35:\r
------\r
-fixed a potential bug in Connection.close\r
-converted FullyDyanmicMetaClass to a function (instead of a class). metaclasses are \r
-too magical by themselves, so i didn't want to over-do it. this caused trouble with\r
-__doc__ of netproxies, but i found a way around it.\r
-added docstrings throughout the code\r
-updated the servers\r
-i will add an ssl-enabled server when i have time\r
-w00t! rpyc is getting very stable (no real bugs)\r
-\r
-2.36:\r
------\r
-added 'threaded_server' to ServerUtils. it's a mix-in you can use wherever you\r
-like, instead of writing a server by yourself.\r
-improved the logging mechanism of ServerUtils\r
-\r
-2.40:\r
------\r
-added imports to __init__.py, so you can now do "from Rpyc import *". this\r
-is backwards compatible, however: "from Rpyc.Factories import SocketConnection"\r
-still works. \r
-cleaned a little the __all__ of Utils.py\r
-new feature: added 'execute' and 'namespace'. 'execute' lets you execute code\r
-on the remote side of the connection, and 'namespace' is the namespace in which\r
-'execute' evaluates.\r
-added demo-6.py to show how to use it\r
-fixed demo-2.py (now that remote_interpreter isn't a part of Utils.__all__)\r
-\r
-2.45:\r
------\r
-cleanups: improved the unboxing of ImmDicts, some other minor things\r
-bugfix: PipeStream.write expected write to return the number of bytes written,\r
-as is the case with sockets. this is not the case with file objects, however,\r
-which meant the operation blocked indefinitely. thanks to rotem yaari for\r
-reporting the bug\r
-this also solves a long time bug with windows: the pipe demo now works with\r
-windows. i had to stub stream.wait() and stream.is_available() on windows,\r
-so it's less efficient and Async wouldn't work properly, but that's life.\r
-changed a little the semantics of Stream and Channel\r
-added authentication: this will make several people happy. the auth_server \r
-was added, which supports authentication, so you can run the server over the\r
-internet. only authentication is added, not encryption. you are still encouraged\r
-to use SSL/VPNs. this was added because many people had trouble with python SSL.\r
-\r
-\r
-\r
PID=`cat $MONITOR_PID`
rm -f $MONITOR_PID
if [ -n "$PID" ] ; then
- ${MONITOR_SCRIPT_ROOT}/kill.cmd.sh $PID
+ ${MONITOR_SCRIPT_ROOT}/tools/kill.cmd.sh $PID
echo "done."
else
echo "No PID to be killed."
#TODO: should add a call to ssh-add -l to check if the keys are loaded or not.
source ${MONITOR_SCRIPT_ROOT}/agent.sh
-#${MONITOR_SCRIPT_ROOT}/checksync.py $DATE || :
-${MONITOR_SCRIPT_ROOT}/syncwithplc.py $DATE || :
+${MONITOR_SCRIPT_ROOT}/tools/syncwithplc.py $DATE || :
service plc restart monitor
echo "Performing FindAll Nodes"
#########################
# 1. FINDBAD NODES
-${MONITOR_SCRIPT_ROOT}/findall.py $DATE || :
+${MONITOR_SCRIPT_ROOT}/commands/findall.py $DATE || :
ps ax | grep BatchMode | grep -v grep | awk '{print $1}' | xargs -r kill || :
# clean up stray 'locfg' processes that hang around inappropriately...
ps ax | grep locfg | grep -v grep | awk '{print $1}' | xargs -r kill || :
${MONITOR_SCRIPT_ROOT}/policy.py $DATE || :
-${MONITOR_SCRIPT_ROOT}/statistics/add-record.py || :
curl -s 'http://summer.cs.princeton.edu/status/tabulator.cgi?table=table_nodeview&formatcsv' > /var/lib/monitor/comon/$DATE.comon.csv || :
cp ${MONITOR_SCRIPT_ROOT}/monitor.log ${MONITOR_ARCHIVE_ROOT}/`date +%F-%H:%M`.monitor.log
loginbases_q = BlacklistRecord.getLoginbaseBlacklist()
hostnames = [ h.hostname for h in hostnames_q ]
loginbases = [ h.loginbase for h in loginbases_q ]
- hostnames_exp = [ (h.hostname,h.date_created+timedelta(0,h.expires),h.date_created+timedelta(0,h.expires) < datetime.now() and h.expires != 0) for h in hostnames_q ]
- #loginbases_exp = [ (h.loginbase,h.date_created+timedelta(0,h.expires)) for h in loginbases_q ]
- loginbases_exp = [ (h.loginbase,h.date_created+timedelta(0,h.expires),h.date_created+timedelta(0,h.expires) < datetime.now() and h.expires != 0) for h in loginbases_q ]
+ hostnames_exp = [ (h.hostname,h.date_created+timedelta(0,h.expires)) for h in hostnames_q ]
+ loginbases_exp = [ (h.loginbase,h.date_created+timedelta(0,h.expires)) for h in loginbases_q ]
if config.add:
print "Blacklisting nodes: ", l_nodes
objlist = hostnames_exp
for i in objlist:
- if i[2]:
- print i[0], i[1], "<-- expired"
- elif i[1] > datetime.now():
+ if i[1] > datetime.now():
print i[0], i[1]
else:
print i[0]
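
The removed lines above computed expiration as date_created +
timedelta(0, expires), with expires == 0 meaning the entry never expires.
That test, isolated as a sketch:

    from datetime import datetime, timedelta

    def is_expired(date_created, expires):
        # `expires` is a lifetime in seconds; 0 means never expire
        if expires == 0:
            return False
        return date_created + timedelta(0, expires) < datetime.now()
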
--- /dev/null
+#!/usr/bin/python
+
+import sys
+import logging
+
+from monitor.reboot import *
+
+def main():
+ logger.setLevel(logging.DEBUG)
+ ch = logging.StreamHandler()
+ ch.setLevel(logging.DEBUG)
+ formatter = logging.Formatter('LOGGER - %(message)s')
+ ch.setFormatter(formatter)
+ logger.addHandler(ch)
+
+ try:
+ if "test" in sys.argv:
+ dryrun = True
+ else:
+ dryrun = False
+
+ for node in sys.argv[1:]:
+ if node == "test": continue
+
+ print "Rebooting %s" % node
+ if reboot_policy(node, True, dryrun):
+ print "success"
+ else:
+ print "failed"
+ except Exception, err:
+ import traceback; traceback.print_exc()
+ from monitor.common import email_exception
+ email_exception(node)
+ print err
+
+if __name__ == '__main__':
+ main()
+ f = open("/tmp/rebootlog", 'a')
+ f.write("reboot %s\n" % sys.argv)
+ f.close()
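
The new command treats any argv entry equal to "test" as a dry-run flag and
passes every other argument to reboot_policy(). Calling the policy directly
follows the same shape; a hedged sketch reusing the positional arguments seen
above (the hostname is made up):

    from monitor.reboot import reboot_policy

    node = "node1.example.com"
    if reboot_policy(node, True, True):    # (node, <flag as above>, dryrun)
        print "dry run: %s would be rebooted" % node
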
+++ /dev/null
-#!/usr/bin/python
-
-import os
-import sys
-import string
-import time
-import xml, xmlrpclib
-try:
- from monitor import config
- auth = {'Username' : config.API_AUTH_USER,
- 'AuthMethod' : "password",
- 'AuthString' : config.API_AUTH_PASSWORD}
-except:
- import traceback
- print traceback.print_exc()
- auth = {'AuthMethod' : "anonymous"}
-
-args = {}
-args['known_hosts'] = os.environ['HOME'] + os.sep + ".ssh" + os.sep + "known_hosts"
-try:
- from monitor import config
- args['XMLRPC_SERVER'] = config.API_SERVER
-except:
- args['XMLRPC_SERVER'] = 'https://boot.planet-lab.org/PLCAPI/'
- print "Using default API server %s" % args['XMLRPC_SERVER']
-
-class SSHKnownHosts:
- def __init__(self, args = args):
- self.args = args
- self.read_knownhosts()
- self.auth = auth
- self.api = xmlrpclib.Server(args['XMLRPC_SERVER'], verbose=False, allow_none=True)
- self.nodenetworks = {}
-
- def _split_kh_entry(self, line):
- s = line.split(' ')
- try:
- (host,ip) = s[0].split(',')
- except:
- ip = s[0]
- host = ""
-
- key = ' '.join(s[1:3])
- comment = ' '.join(s[3:])
- return (host, ip, key, comment)
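
Each known_hosts entry has the shape "host,ip keytype key comment", which is
what _split_kh_entry() above takes apart. A worked example with a made-up
host and key:

    line = "node1.example.com,128.112.139.1 ssh-rsa AAAAB3Nza... PlanetLab_2008/10/01_12:00:00"
    # _split_kh_entry(line) would yield:
    #   host    = "node1.example.com"
    #   ip      = "128.112.139.1"
    #   key     = "ssh-rsa AAAAB3Nza..."
    #   comment = "PlanetLab_2008/10/01_12:00:00"
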
-
- def _get_index(self, host, ip):
- index = ""
-        if host != "":
- index = "%s,%s" % (host,ip)
- else:
- index = ip
- return index
-
- def read_knownhosts(self):
- kh_read = open(self.args["known_hosts"], 'r')
- self.pl_keys = {}
- self.other_keys = {}
- for line in kh_read:
- (host, ip, key, comment) = self._split_kh_entry(line[:-1])
- rec = { self._get_index(host, ip) : "%s %s" % (key, comment) }
- if 'PlanetLab' in comment:
- self.pl_keys.update(rec)
- else:
- self.other_keys.update(rec)
-
- #for i in self.pl_keys:
- # print i
- # print self.pl_keys[i]
-
- return
-
- def write(self):
- self.write_knownhosts()
-
- def write_knownhosts(self):
- f = open(self.args['known_hosts'], 'w')
- for index in self.pl_keys:
- print >>f, "%s %s" % (index, self.pl_keys[index])
- for index in self.other_keys:
- print >>f, "%s %s" % (index, self.other_keys[index])
- f.close()
-
- def updateAll(self):
- l_nodes = self.getNodes()
- d_nodes = {}
- nokey_list = []
- for node in l_nodes:
- name = node['hostname']
- d_nodes[name] = node
-
- for host in d_nodes:
- node = d_nodes[host]
- (host, ip, key, comment) = self._record_from_node(node, nokey_list)
- rec = { "%s,%s" % (host,ip) : "%s %s" % (key, comment) }
- self.pl_keys.update(rec)
-
- return nokey_list
-
- def delete(self, host):
- node = self.getNodes(host)
- if len(node) > 0:
- (host, ip, _, _) = self._record_from_node(node[0])
- index = "%s,%s" % (host,ip)
- if index in self.pl_keys:
- del self.pl_keys[index]
- if index in self.other_keys:
- del self.other_keys[index]
- return node
-
- def updateDirect(self, host):
- cmd = os.popen("/usr/bin/ssh-keyscan -t rsa %s 2>/dev/null" % host)
- line = cmd.read()
- (h, ip, key, comment) = self._split_kh_entry(line[:-1])
- node = self.getNodes(host)
- (host2, ip2, x, x) = self._record_from_node(node[0])
- rec = { self._get_index(host2, ip2) : "%s %s" % (key, "DIRECT") }
-
- self.delete(host)
- self.other_keys.update(rec)
-
- def update(self, host):
- node = self.delete(host)
- #node = self.getNodes(host)
-        if node != []:
- ret = self._record_from_node(node[0])
- (host, ip, key, comment) = ret
- if ip == None:
- self.updateDirect(host)
- else:
- rec = { "%s,%s" % (host,ip) : "%s %s" % (key, comment) }
- self.pl_keys.update(rec)
-
- def getNodes(self, host=None):
- if type(host) == type(""): host = [host]
-
- # get the node(s) info
- nodes = self.api.GetNodes(self.auth,host,["hostname","ssh_rsa_key","interface_ids"])
-
- # for each node's node network, update the self.nodenetworks cache
- nodenetworks = []
- for node in nodes:
- for net in node["interface_ids"]:
- nodenetworks.append(net)
-
- plcnodenetworks = self.api.GetInterfaces(self.auth,nodenetworks,["interface_id","ip"])
- for n in plcnodenetworks:
- self.nodenetworks[n["interface_id"]]=n
- return nodes
-
- def _record_from_node(self, node, nokey_list=None):
- host = node['hostname']
- key = node['ssh_rsa_key']
-
- nodenetworks = node['interface_ids']
- if len(nodenetworks)==0: return (host, None, None, None)
-
- # the [0] subscript to node['interface_ids'] means
-        # that this function won't work with multihomed nodes
- l_nw = self.nodenetworks.get(nodenetworks[0],None)
- if l_nw is None: return (host, None, None, None)
- ip = l_nw['ip']
-
- if key == None:
- if nokey_list is not None: nokey_list += [node]
- return (host, ip, None, None)
-
- key = key.strip()
- # TODO: check for '==' at end of key.
- if len(key) > 0 and key[-1] != '=':
-            print "Host with corrupt key: %s %s" % (node['boot_state'], node['hostname'])
-
- s_date = time.strftime("%Y/%m/%d_%H:%M:%S",time.gmtime(time.time()))
- #rec = { "%s,%s" % (host,ip) : "%s %s" % (key, "PlanetLab_%s" % (s_date)) }
- #return rec
- return (host, ip, key, "PlanetLab_%s" % s_date)
-
-
-def main(hosts):
- k = SSHKnownHosts()
- if len (hosts) > 0:
- for host in hosts:
- k.updateDirect(host)
- else:
- k.updateAll()
- k.write()
-
-if __name__ == '__main__':
- main(sys.argv[1:])
# mailing list copied on all out-going messages
cc_email=monitor-list@lists.planet-lab.org
+zabbix_enabled=0
+
[monitordatabase]
monitor_dburi=postgres://user:passwd@localhost:5432/infovacuum
zabbix_dburi=postgres://zabbixuser:<...>@localhost:5432/zabbix
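
Both dburi values are standard SQLAlchemy database URLs; a hedged sketch of
how such a URI is typically consumed (the credentials are the placeholders
from the config above):

    from sqlalchemy import create_engine

    engine = create_engine("postgres://user:passwd@localhost:5432/infovacuum")
    print engine.table_names()    # cheap connectivity check
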
done
}
-function check_monitor_schema_and_data()
+function check_monitor_schema_and_data_init()
{
- # NOTE: call create_all() to setup the database from the info model.
- python -c "from monitor.database.info.model import *; from elixir import create_all; create_all()"
+ # from monitor.functions
+ check_monitor_schema_and_data
+
$MONITORPATH/config.d/init-bootman-sequence.py
}
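
Per the removed lines, the schema check that check_monitor_schema_and_data_init()
now delegates to boils down to building any missing tables from the info
model; a hedged sketch of that step:

    from monitor.database.info.model import *
    from elixir import create_all
    create_all()    # build any tables the info model defines but the DB lacks
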
dialog "$MESSAGE"
fi
- check_monitor_schema_and_data
+ check_monitor_schema_and_data_init
# create /etc/httpd/conf.d/monitorweb.conf
create_httpd_conf
import subprocess
from sets import Set
-from monitor.getsshkeys import SSHKnownHosts
+from monitor.util.sshknownhosts import SSHKnownHosts
from monitor.Rpyc import SocketConnection, Async
from monitor.Rpyc.Utils import *
return bootman_action
-# MAIN -------------------------------------------------------------------
-
-def main():
- from monitor import parser as parsermodule
- parser = parsermodule.getParser()
-
- parser.set_defaults(child=False, collect=False, nosetup=False, verbose=False,
- force=None, quiet=False)
- parser.add_option("", "--child", dest="child", action="store_true",
- help="This is the child mode of this process.")
- parser.add_option("", "--force", dest="force", metavar="boot_state",
- help="Force a boot state passed to BootManager.py.")
- parser.add_option("", "--quiet", dest="quiet", action="store_true",
- help="Extra quiet output messages.")
- parser.add_option("", "--verbose", dest="verbose", action="store_true",
- help="Extra debug output messages.")
- parser.add_option("", "--nonet", dest="nonet", action="store_true",
- help="Do not setup the network, use existing log files to re-run a test pass.")
- parser.add_option("", "--collect", dest="collect", action="store_true",
- help="No action, just collect dmesg, and bm.log")
- parser.add_option("", "--nosetup", dest="nosetup", action="store_true",
- help="Do not perform the orginary setup phase.")
-
- parser = parsermodule.getParser(['nodesets', 'defaults'], parser)
- config = parsermodule.parse_args(parser)
-
- if config.nodelist:
- nodes = config.getListFromFile(config.nodelist)
- elif config.node:
- nodes = [ config.node ]
- else:
- parser.print_help()
- sys.exit(1)
-
- for node in nodes:
- # get sitehist
- lb = plccache.plcdb_hn2lb[node]
- sitehist = SiteInterface.get_or_make(loginbase=lb)
- #reboot(node, config)
- restore(sitehist, node, config=None, forced_action=None)
-
if __name__ == "__main__":
- main()
+    print "ERROR: Cannot execute module as a command! Please use commands/%s.py" % os.path.splitext(os.path.basename(__file__))[0]
from monitor.database.info.findbad import *
from monitor.database.info.history import *
from monitor.database.info.plc import *
+from monitor.database.info.version import *
setup_all()
--- /dev/null
+from elixir import Entity, Field
+from elixir import Integer
+from elixir import options_defaults, using_options, setup_all
+
+from monitor.database.dborm import mon_metadata, mon_session
+__metadata__ = mon_metadata
+__session__ = mon_session
+
+from monitor.monitor_version import monitor_version
+
+major_version = int(monitor_version.split('.')[0])
+minor_version = int(monitor_version.split('.')[-1])
+
+class DBVersion(Entity):
+ major = Field(Integer, required=True, default=major_version)
+ minor = Field(Integer, required=True, default=minor_version)
+
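
DBVersion records the schema version in the database itself, defaulting to
the version of the running code. A hedged usage sketch, assuming the usual
Elixir setup_all()/create_all() have run (Elixir entities expose a .query
property):

    from monitor.database.info.version import DBVersion, mon_session

    v = DBVersion.query.first()
    if v is None:
        v = DBVersion()       # fields default to major_version/minor_version
        mon_session.flush()   # persist the new row
    print v.major, v.minor
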
--- /dev/null
+
+monitor_version="3.0"
+
+pcucontrol_version=monitor_version
print "return true"
return True
-def main():
- logger.setLevel(logging.DEBUG)
- ch = logging.StreamHandler()
- ch.setLevel(logging.DEBUG)
- formatter = logging.Formatter('LOGGER - %(message)s')
- ch.setFormatter(formatter)
- logger.addHandler(ch)
+if __name__ == "__main__":
+    print "ERROR: Cannot execute module as a command! Please use commands/%s.py" % os.path.splitext(os.path.basename(__file__))[0]
- try:
- if "test" in sys.argv:
- dryrun = True
- else:
- dryrun = False
-
- for node in sys.argv[1:]:
- if node == "test": continue
-
- print "Rebooting %s" % node
- if reboot_policy(node, True, dryrun):
- print "success"
- else:
- print "failed"
- except Exception, err:
- import traceback; traceback.print_exc()
- from monitor.common import email_exception
- email_exception(node)
- print err
-
-if __name__ == '__main__':
- logger = logging.getLogger("monitor")
- main()
- f = open("/tmp/rebootlog", 'a')
- f.write("reboot %s\n" % sys.argv)
- f.close()
-__all__=["writepid","removepid","daemonize"]
import time
import math
#return rec
return (host, ip, key, "PlanetLab_%s" % s_date)
-
-def main(hosts):
- k = SSHKnownHosts()
- if len (hosts) > 0:
- for host in hosts:
- k.updateDirect(host)
- else:
- k.updateAll()
- k.write()
-
-if __name__ == '__main__':
- main(sys.argv[1:])
+if __name__ == "__main__":
+    print "ERROR: Cannot execute module as a command! Please use tools/getsshkey.py"
+++ /dev/null
-RT_DB_HOST=rt.planet-lab.org
-RT_DB_USER=rt_ro
-RT_DB_PASSWORD=rtropassword
-RT_DB_NAME=rt3
from distutils.core import setup
-packages=[ 'monitor',
- 'monitor.database',
- 'monitor.Rpyc',
- 'monitor.database.zabbixapi',
- 'monitor.database.info',
- 'monitor.sources',
- 'monitor.util',
- 'monitor.wrapper' ]
+from monitor.monitor_version import *
+
+packages=['monitor',
+ 'monitor.database',
+ 'monitor.Rpyc',
+ 'monitor.database.zabbixapi',
+ 'monitor.database.info',
+ 'monitor.sources',
+ 'monitor.util',
+ 'monitor.wrapper']
print packages
setup(name='MonitorModule',
- version='2.0',
+ version=monitor_version,
description='Monitor Utility Module',
author='Stephen Soltesz',
author_email='soltesz@cs.princeton.edu',
url='http://www.planet-lab.org',
- packages=packages,
-)
+ packages=packages)
packages=['pcucontrol',
- 'pcucontrol.util',
- 'pcucontrol.transports',
- 'pcucontrol.transports.ssh',
- 'pcucontrol.transports.pyssh',
- 'pcucontrol.models',
- 'pcucontrol.models.hpilo',
- 'pcucontrol.models.hpilo.iloxml',
- 'pcucontrol.models.intelamt',
- 'pcucontrol.models.intelamt',
-
- ]
+ 'pcucontrol.util',
+ 'pcucontrol.transports',
+ 'pcucontrol.transports.ssh',
+ 'pcucontrol.transports.pyssh',
+ 'pcucontrol.models',
+ 'pcucontrol.models.hpilo',
+ 'pcucontrol.models.hpilo.iloxml',
+ 'pcucontrol.models.intelamt',
+ 'pcucontrol.models.intelamt']
# TODO: add data dir for intelamt and hpilo stuff
print packages
setup(name='PCUControlModule',
- version='2.0',
+ version=pcucontrol_version,
description='PCU Control Module',
author='Stephen Soltesz',
author_email='soltesz@cs.princeton.edu',
url='http://www.planet-lab.org',
- packages=packages,
-)
+ packages=packages)
--- /dev/null
+#!/usr/bin/python
+
+import sys
+sys.path.append('.')
+sys.path.append('..')
+
+from monitor.util.file import *
+from monitor.util.sshknownhosts import SSHKnownHosts
+
+def main(hosts):
+ k = SSHKnownHosts()
+ if len (hosts) > 0:
+ for host in hosts:
+ k.updateDirect(host)
+ else:
+ k.updateAll()
+ k.write()
+
+if __name__ == '__main__':
+ main(sys.argv[1:])
-NameVirtualHost *
-
-<VirtualHost *>
- ServerName pl-virtual-06.cs.princeton.edu
- ServerAdmin soltesz@cs.princeton.edu
- UseCanonicalName Off
- ServerSignature Off
-
- #DocumentRoot /usr/share/monitor/web/MonitorWeb/monitorweb
-
- #<Directory "/usr/share/monitor/web/MonitorWeb/monitorweb">
- # Options Indexes FollowSymLinks
- # AllowOverride None
- # Order allow,deny
- # Allow from all
- #</Directory>
-
- Errorlog /var/log/httpd/monitorwebapp-error_log
- Customlog /var/log/httpd/monitorwebapp-access_log common
-
- AddDefaultCharset utf-8
-
- #NOTE: This doesn't work as expected.
- # Load everything out of the DocumentRoot that is static
- # ProxyPass /monitor/static !
-
- ProxyPass /tg_js http://127.0.0.1:8080/tg_js
- ProxyPassReverse /tg_js http://127.0.0.1:8080/tg_js
-
- ProxyPass /monitor http://127.0.0.1:8080
- ProxyPassReverse /monitor http://127.0.0.1:8080
-
- ProxyPreserveHost On
- ProxyRequests Off
-
-</VirtualHost>
+# NOTE: I've tried other means of redirection, including mod_rewrite, but did
+# not have any success. The approach below is not ideal, because it does not
+# keep a non-SSL session as non-SSL. But it works.
+
+# NOTE: redirect path without trailing '/' to path with.
+Redirect /monitor /monitor/
+
+# NOTE: this directive strips '/monitor/' from the requested path and appends
+# the remainder to the ProxyPass URL below. All TG URLs should be
+# relative to their current position, or use an absolute path that
+# includes /monitor/ at the beginning.
+# TODO: make location configurable.
+<Location /monitor/>
+ #LogLevel debug
+ #Errorlog /var/log/httpd/monitorwebapp-error_log
+ #Customlog /var/log/httpd/monitorwebapp-access_log common
+
+ ProxyPass http://127.0.0.1:8082/
+ ProxyPassReverse http://127.0.0.1:8082/
+</Location>
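
A quick way to confirm both directives work, once httpd has been restarted,
is to fetch the unslashed path and watch it follow the Redirect into the
proxied TurboGears app; a hedged smoke test:

    import urllib2

    resp = urllib2.urlopen("http://localhost/monitor")
    print resp.geturl()    # should end in /monitor/ after the Redirect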