From: Barış Metin Date: Mon, 14 Dec 2009 15:57:04 +0000 (+0000) Subject: hope I got the merge right... X-Git-Tag: Monitor-3.0-26~4 X-Git-Url: http://git.onelab.eu/?p=monitor.git;a=commitdiff_plain;h=e637272100e8e03884188cb2118b21158e739bb0 hope I got the merge right... svn merge -r 15903:16132 https://svn.planet-lab.org/svn/Monitor/branches/monitor-20091130 . --- diff --git a/Monitor.spec b/Monitor.spec index 9d47dec..a2a5439 100644 --- a/Monitor.spec +++ b/Monitor.spec @@ -5,6 +5,7 @@ %define url $URL: svn+ssh://svn.planet-lab.org/svn/monitor/trunk/monitor.spec $ %define name monitor +# keep this version in sync with monitor/monitor_version.py %define version 3.0 %define taglevel 25 @@ -54,7 +55,6 @@ Group: Applications/System Requires: python Requires: python-setuptools-devel Requires: python-peak-util-extremes -Requires: TurboGears Requires: compat-libstdc++-296 Requires: openssh-clients @@ -131,7 +131,7 @@ rm -rf $RPM_BUILD_ROOT #################### CLIENT #install -D -m 755 monitor-client.init $RPM_BUILD_ROOT/%{_initrddir}/monitor #install -D -m 644 monitor.cron $RPM_BUILD_ROOT/%{_sysconfdir}/cron.d/monitor -install -D -m 755 timeout.pl $RPM_BUILD_ROOT/usr/bin/timeout.pl +install -D -m 755 tools/timeout.pl $RPM_BUILD_ROOT/usr/bin/timeout.pl #################### SERVER @@ -140,36 +140,47 @@ install -d $RPM_BUILD_ROOT/data/var/lib/%{name} install -d $RPM_BUILD_ROOT/data/var/lib/%{name}/archive-pdb install -d $RPM_BUILD_ROOT/var/lib/%{name} install -d $RPM_BUILD_ROOT/var/lib/%{name}/archive-pdb -#install -d $RPM_BUILD_ROOT/var/www/cgi-bin/monitor/ install -d $RPM_BUILD_ROOT/var/www/html/monitorlog/ +install -d $RPM_BUILD_ROOT/etc/httpd/conf.d/ +install -d $RPM_BUILD_ROOT/%{python_sitearch}/monitor + +# pack monitor's dependencies in RPM to make it easier to deploy. 
+export PYTHONPATH=$PYTHONPATH:$RPM_BUILD_ROOT/%{python_sitearch}/ +easy_install --build-directory /var/tmp -d $RPM_BUILD_ROOT/%{python_sitearch}/ -UZ http://files.turbogears.org/eggs/TurboGears-1.0.7-py2.5.egg +easy_install --build-directory /var/tmp -d $RPM_BUILD_ROOT/%{python_sitearch}/ -UZ http://pypi.python.org/packages/source/S/SQLAlchemy/SQLAlchemy-0.5.3.tar.gz +easy_install --build-directory /var/tmp -d $RPM_BUILD_ROOT/%{python_sitearch}/ -UZ Elixir +rm -rf $RPM_BUILD_ROOT/%{python_sitearch}/site.py* +# plc.d scripts install -D -m 644 monitor.functions $RPM_BUILD_ROOT/%{_sysconfdir}/plc.d/monitor.functions install -D -m 755 monitor-server.init $RPM_BUILD_ROOT/%{_sysconfdir}/plc.d/monitor install -D -m 755 zabbix/monitor-zabbix.init $RPM_BUILD_ROOT/%{_sysconfdir}/plc.d/zabbix -echo " * Installing core scripts" -rsync -a --exclude www --exclude archive-pdb --exclude .svn --exclude CVS \ - ./ $RPM_BUILD_ROOT/usr/share/%{name}/ +# cron job for automated polling +install -D -m 644 monitor-server.cron $RPM_BUILD_ROOT/%{_sysconfdir}/cron.d/monitor-server.cron -echo " * Installing web pages" -#rsync -a www/ $RPM_BUILD_ROOT/var/www/cgi-bin/monitor/ -rsync -a log/ $RPM_BUILD_ROOT/var/www/html/monitorlog/ +# apache configuration +install -D -m 644 web/monitorweb-httpd.conf $RPM_BUILD_ROOT/etc/httpd/conf.d/ -echo " * Installing cron job for automated polling" -install -D -m 644 monitor-server.cron $RPM_BUILD_ROOT/%{_sysconfdir}/cron.d/monitor-server.cron -echo " * TODO: Setting up Monitor account in local MyPLC" -# TODO: +# we'll install monitor and pcucontrol in site-packages +# install the rest to /usr/share/monitor rsync -a --exclude archive-pdb --exclude .svn --exclude CVS \ --exclude monitor/ \ --exclude pcucontrol/ \ ./ $RPM_BUILD_ROOT/usr/share/%{name}/ -install -d $RPM_BUILD_ROOT/%{python_sitearch}/monitor -install -d -D -m 755 monitor $RPM_BUILD_ROOT/%{python_sitearch}/monitor -# TODO: need a much better way to do this. 
-rsync -a monitor/ $RPM_BUILD_ROOT/%{python_sitearch}/monitor/ -#for file in __init__.py database.py config.py ; do -# install -D -m 644 monitor/$file $RPM_BUILD_ROOT/%{python_sitearch}/monitor/$file -#done -rsync -a pcucontrol/ $RPM_BUILD_ROOT/%{python_sitearch}/pcucontrol/ +# install monitor python package +rsync -a --exclude .svn ./monitor/ $RPM_BUILD_ROOT/%{python_sitearch}/monitor/ + +# and pcucontrol +rsync -a --exclude .svn ./pcucontrol/ $RPM_BUILD_ROOT/%{python_sitearch}/pcucontrol/ + +# install third-party module to site-packages install -D -m 755 threadpool.py $RPM_BUILD_ROOT/%{python_sitearch}/threadpool.py +# TODO: +echo " * TODO: Setting up Monitor account in local MyPLC" + #touch $RPM_BUILD_ROOT/var/www/cgi-bin/monitor/monitorconfig.php #chmod 777 $RPM_BUILD_ROOT/var/www/cgi-bin/monitor/monitorconfig.php @@ -202,9 +213,15 @@ rm -rf $RPM_BUILD_ROOT %{python_sitearch}/threadpool.pyc %{python_sitearch}/threadpool.pyo %{python_sitearch}/monitor +%{python_sitearch}/Turbo* +%{python_sitearch}/SQLAlchemy* +%{python_sitearch}/Elixir* +%{python_sitearch}/easy-install.pth +%{python_sitearch}/tg-admin %{_sysconfdir}/plc.d/monitor %{_sysconfdir}/plc.d/monitor.functions %{_sysconfdir}/plc.d/zabbix +%{_sysconfdir}/httpd/conf.d %files client %defattr(-,root,root) @@ -222,14 +239,6 @@ rm -rf $RPM_BUILD_ROOT /%{_initrddir}/monitor-runlevelagent %post server-deps -#easy_install --build-directory /var/tmp -UZ ElementTree -##easy_install --build-directory /var/tmp -UZ http://pypi.python.org/packages/2.5/E/Extremes/Extremes-1.1-py2.5.egg - - -## TODO: something is bad wrong with this approach. -easy_install --build-directory /var/tmp -UZ http://files.turbogears.org/eggs/TurboGears-1.0.7-py2.5.egg -easy_install --build-directory /var/tmp -UZ http://pypi.python.org/packages/source/S/SQLAlchemy/SQLAlchemy-0.5.3.tar.gz -easy_install --build-directory /var/tmp -UZ Elixir # crazy openssl libs for racadm binary ln -s /lib/libssl.so.0.9.8b /usr/lib/libssl.so.2 diff --git a/Rpyc/AsyncNetProxy.py b/Rpyc/AsyncNetProxy.py deleted file mode 100644 index 0c1fc05..0000000 --- a/Rpyc/AsyncNetProxy.py +++ /dev/null @@ -1,82 +0,0 @@ -from NetProxy import NetProxyWrapper -from Lib import raise_exception - - -class InvalidAsyncResultState(Exception): - pass - - -class AsyncNetProxy(NetProxyWrapper): - """wraps an existing synchronous netproxy to make it asynchronous - (remote operations return AsyncResult objects)""" - __doc__ = NetProxyWrapper.__doc__ - - def __request__(self, handler, *args): - res = AsyncResult(self.__dict__["____conn"]) - self.__dict__["____conn"].async_request(res.callback, handler, self.__dict__["____oid"], *args) - return res - - # must return a string... 
and it's not meaningful to return the repr of an async result - def __repr__(self, *args): - return self.__request__("handle_repr", *args).result - def __str__(self, *args): - return self.__request__("handle_str", *args).result - - -class AsyncResult(object): - """represents the result of an asynchronous operation""" - STATE_PENDING = "pending" - STATE_READY = "ready" - STATE_EXCEPTION = "exception" - STATE_UNKNOWN = "unknown" - - def __init__(self, conn): - self.conn = conn - self._state = self.STATE_PENDING - self._result = None - self._on_ready = None - - def __repr__(self): - return "<AsyncResult (%s) at 0x%08x>" % (self._state, id(self)) - - def callback(self, event, obj): - if event == "result": - self._state = self.STATE_READY - self._result = obj - elif event == "exception": - self._state = self.STATE_EXCEPTION - self._result = obj - else: - self._state = self.STATE_UNKNOWN - self._result = obj - - if self._on_ready is not None: - self._on_ready(self) - - def _get_on_ready(self): - return self._on_ready - - def _set_on_ready(self, obj): - self._on_ready = obj - if self._state != self.STATE_PENDING: - self._on_ready(self) - - def _get_is_ready(self): - if self._state == self.STATE_PENDING: - self.conn.poll() - return self._state != self.STATE_PENDING - - def _get_result(self): - while self._state == self.STATE_PENDING: - self.conn.serve() - if self._state == self.STATE_READY: - return self._result - elif self._state == self.STATE_EXCEPTION: - raise_exception(*self._result) - else: - raise InvalidAsyncResultState(self._state) - - is_ready = property(_get_is_ready, doc = "indicates whether or not the result is ready") - result = property(_get_result, doc = "the value of the async result (may block)") - on_ready = property(_get_on_ready, _set_on_ready, doc = - "if not None, specifies a callback which is called when the result is ready") diff --git a/Rpyc/Authentication.py b/Rpyc/Authentication.py deleted file mode 100644 index afa1172..0000000 --- a/Rpyc/Authentication.py +++ /dev/null @@ -1,42 +0,0 @@ -""" -Challenge-Response authentication algorithm over channels: the client sends the -username, recvs a challenge, generates a response based on the challenge and -password, sends it back to the server, which also calculates the expected response. -the server returns "WRONG" or "OKAY". this way the password is never sent over -the wire. - -* servers should call the accept function over a channel, with a dict of -(username, password) pairs. 
-* clients should call login over a channel, with the username and password -""" -import md5 -import random - -def get_bytes(num_bytes): - ret = [] - for n in xrange(num_bytes): - ret.append( chr(random.randint(0,255)) ) - return ''.join(ret) - -def accept(chan, users): - username = chan.recv() - challenge = get_bytes(16) - chan.send(challenge) - response = chan.recv() - if username not in users: - chan.send("WRONG") - return False - expected_response = md5.new(users[username] + challenge).digest() - if response != expected_response: - chan.send("WRONG") - return False - chan.send("OKAY") - return True - -def login(chan, username, password): - chan.send(username) - challenge = chan.recv() - response = md5.new(password + challenge).digest() - chan.send(response) - return chan.recv() == "OKAY" - diff --git a/Rpyc/Boxing.py b/Rpyc/Boxing.py deleted file mode 100644 index 93e89ba..0000000 --- a/Rpyc/Boxing.py +++ /dev/null @@ -1,124 +0,0 @@ -import sys -import traceback -import cPickle as pickle -from weakref import WeakValueDictionary -from Lib import ImmDict -from NetProxy import NetProxy, SyncNetProxy - - -class BoxingError(Exception): - pass -class NestedException(Exception): - pass - -PICKLE_PROTOCOL = pickle.HIGHEST_PROTOCOL -TYPE_SIMPLE = 0 -TYPE_PROXY = 1 -TYPE_TUPLE = 2 -TYPE_SLICE = 3 -TYPE_LOCAL_PROXY = 4 -TYPE_IMMDICT = 5 -simple_types = ( - bool, - int, - long, - float, - complex, - basestring, - type(None), -) - -def dump_exception(typ, val, tb): - """dumps the given exception using pickle (since not all exceptions are picklable)""" - tbtext = "".join(traceback.format_exception(typ, val, tb)) - sys.last_type, sys.last_value, sys.last_traceback = typ, val, tb - try: - pickled_exc = pickle.dumps((typ, val, tbtext), PICKLE_PROTOCOL) - except pickle.PicklingError, ex: - newval = NestedException("pickling error %s\nexception type: %r\nexception object: %s" % (ex, typ, val)) - pickled_exc = pickle.dumps((NestedException, newval, tbtext), PICKLE_PROTOCOL) - return pickled_exc - -def load_exception(package): - """returns an exception object""" - try: - return pickle.loads(package) - except pickle.PicklingError, ex: - return NestedException("failed to unpickle remote exception -- %r" % (ex,)) - -class Box(object): - """a box is where local objects are stored, and remote proxies are created""" - def __init__(self, conn): - self.conn = conn - self.objects = {} - self.proxy_cache = WeakValueDictionary() - - def close(self): - del self.conn - del self.objects - del self.proxy_cache - - def __getitem__(self, oid): - return self.objects[oid][1] - - def _box(self, obj): - if isinstance(obj, simple_types): - return TYPE_SIMPLE, obj - elif isinstance(obj, slice): - return TYPE_SLICE, (obj.start, obj.stop, obj.step) - elif isinstance(obj, NetProxy) and obj.__dict__["____conn"] is self.conn: - return TYPE_LOCAL_PROXY, obj.__dict__["____oid"] - elif isinstance(obj, tuple): - if obj: - return TYPE_TUPLE, [self._box(subobj) for subobj in obj] - else: - return TYPE_SIMPLE, () - elif isinstance(obj, ImmDict): - if not obj.dict: - return TYPE_SIMPLE, {} - else: - return TYPE_IMMDICT, [(self._box(k), self._box(v)) for k, v in obj.items()] - else: - oid = id(obj) - if oid not in self.objects: - self.objects[oid] = [0, obj] - return TYPE_PROXY, oid - - def _unbox(self, (type, value)): - if type == TYPE_SIMPLE: - return value - elif type == TYPE_TUPLE: - return tuple([self._unbox(subobj) for subobj in value]) - elif type == TYPE_SLICE: - return slice(*value) - elif type == TYPE_LOCAL_PROXY: - return 
self[value] - elif type == TYPE_IMMDICT: - return dict([(self._unbox(k), self._unbox(v)) for k, v in value]) - elif type == TYPE_PROXY: - if value in self.proxy_cache: - proxy = self.proxy_cache[value] - else: - proxy = SyncNetProxy(self.conn, value) - self.proxy_cache[value] = proxy - return proxy - else: - raise BoxingError("invalid boxed object type", type, value) - - def incref(self, oid): - self.objects[oid][0] += 1 - - def decref(self, oid): - self.objects[oid][0] -= 1 - if self.objects[oid][0] <= 0: - del self.objects[oid] - - def pack(self, obj): - """packs an object (returns a package)""" - return pickle.dumps(self._box(obj), PICKLE_PROTOCOL) - - def unpack(self, package): - """unpacks a package (returns an object)""" - return self._unbox(pickle.loads(package)) - - diff --git a/Rpyc/Channel.py b/Rpyc/Channel.py deleted file mode 100644 index 106f936..0000000 --- a/Rpyc/Channel.py +++ /dev/null @@ -1,48 +0,0 @@ -from threading import RLock -import struct - - -class Channel(object): - """a channel transfers packages over a stream. a package is any blob of data, - up to 4GB in size. channels are guaranteed to be thread-safe""" - HEADER_FORMAT = ">L" # byte order must be the same at both sides! - HEADER_SIZE = struct.calcsize(HEADER_FORMAT) - - def __init__(self, stream): - self.lock = RLock() - self.stream = stream - - def __repr__(self): - return "<%s(%r)>" % (self.__class__.__name__, self.stream) - - def send(self, data): - """sends a package""" - try: - self.lock.acquire() - header = struct.pack(self.HEADER_FORMAT, len(data)) - self.stream.write(header + data) - finally: - self.lock.release() - - def recv(self): - """receives a package (blocking)""" - try: - self.lock.acquire() - length, = struct.unpack(self.HEADER_FORMAT, self.stream.read(self.HEADER_SIZE)) - return self.stream.read(length) - finally: - self.lock.release() - - def close(self): - return self.stream.close() - - def fileno(self): - return self.stream.fileno() - - def is_available(self): - return self.stream.is_available() - - def wait(self): - return self.stream.wait() - - diff --git a/Rpyc/Connection.py b/Rpyc/Connection.py deleted file mode 100644 index 81bed23..0000000 --- a/Rpyc/Connection.py +++ /dev/null @@ -1,215 +0,0 @@ -import sys -from Boxing import Box, dump_exception, load_exception -from ModuleNetProxy import RootImporter -from Lib import raise_exception, AttrFrontend - - -class Connection(object): - """ - the rpyc connection layer (protocol and APIs). generally speaking, the only - things you'll need to access directly from this object are: - * modules - represents the remote python interpreter's modules namespace - * execute - executes the given code on the other side of the connection - * namespace - the namespace in which the code you `execute` resides - - the rest of the attributes should be of no interest to you, except maybe for - `remote_conn`, which represents the other side of the connection. it is unlikely, - however, you'll need to use it (it is used internally). - - when you are done using a connection, and wish to release the resources it - uses, you should call close(). you don't have to, but if you don't, the gc - can't release the memory because of cyclic references. 
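    a short sketch of typical use (illustrative only, not from the original
    file; it assumes a server is already listening on the default port, as in
    the demos):

        from Rpyc.Factories import SocketConnection
        c = SocketConnection("localhost")
        c.execute("x = 40 + 2")          # runs in the remote namespace
        print c.namespace.x              # prints 42, via the AttrFrontend wrapper
        print c.modules.sys.platform     # the remote interpreter's sys module
        c.close()                        # breaks the cyclic references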
- """ - - def __init__(self, channel): - self._closed = False - self._local_namespace = {} - self.channel = channel - self.box = Box(self) - self.async_replies = {} - self.sync_replies = {} - self.request_seq = 0 - self.module_cache = {} - # user APIs: - self.modules = RootImporter(self) - self.remote_conn = self.sync_request("handle_getconn") - self.namespace = AttrFrontend(self.remote_conn._local_namespace) - - def __repr__(self): - if self._closed: - return "<%s - closed>" % (self.__class__.__name__,) - else: - return "<%s(%r)>" % (self.__class__.__name__, self.channel) - - # - # file api layer - # - def close(self): - if self._closed: - return - self._closed = True - self.box.close() - self.channel.close() - # untangle circular references - del self._local_namespace - del self.channel - del self.box - del self.async_replies - del self.sync_replies - del self.request_seq - del self.module_cache - del self.modules - del self.remote_conn - del self.namespace - - def fileno(self): - return self.channel.fileno() - - # - # protocol layer - # - def _recv(self): - return self.box.unpack(self.channel.recv()) - - def _send(self, *args): - return self.channel.send(self.box.pack(args)) - - def send_request(self, handlername, *args): - try: - self.channel.lock.acquire() - # this must be atomic { - self.request_seq += 1 - self._send(handlername, self.request_seq, args) - return self.request_seq - # } - finally: - self.channel.lock.release() - - def send_exception(self, seq, exc_info): - self._send("exception", seq, dump_exception(*exc_info)) - - def send_result(self, seq, obj): - self._send("result", seq, obj) - - def dispatch_result(self, seq, obj): - if seq in self.async_replies: - self.async_replies.pop(seq)("result", obj) - else: - self.sync_replies[seq] = obj - - def dispatch_exception(self, seq, obj): - excobj = load_exception(obj) - if seq in self.async_replies: - self.async_replies.pop(seq)("exception", excobj) - else: - raise_exception(*excobj) - - def dispatch_request(self, handlername, seq, args): - try: - res = getattr(self, handlername)(*args) - except SystemExit: - raise - except: - self.send_exception(seq, sys.exc_info()) - else: - self.send_result(seq, res) - - def sync_request(self, *args): - """performs a synchronous (blocking) request""" - seq = self.send_request(*args) - while seq not in self.sync_replies: - self.serve() - return self.sync_replies.pop(seq) - - def async_request(self, callback, *args): - """performs an asynchronous (non-blocking) request""" - seq = self.send_request(*args) - self.async_replies[seq] = callback - - # - # servers api - # - def poll(self): - """if available, serves a single request, otherwise returns (non-blocking serve)""" - if self.channel.is_available(): - self.serve() - return True - else: - return False - - def serve(self): - """serves a single request or reply (may block)""" - self.channel.wait() - handler, seq, obj = self._recv() - if handler == "result": - self.dispatch_result(seq, obj) - elif handler == "exception": - self.dispatch_exception(seq, obj) - else: - self.dispatch_request(handler, seq, obj) - - # - # root requests - # - def rimport(self, modulename): - """imports a module by name (as a string)""" - if modulename not in self.module_cache: - module = self.sync_request("handle_import", modulename) - self.module_cache[modulename] = module - return self.module_cache[modulename] - - def execute(self, expr, mode = "exec"): - """execute the given code at the remote side of the connection""" - return self.sync_request("handle_execute", 
expr, mode) - - # - # handlers layer - # - def handle_incref(self, oid): - self.box.incref(oid) - - def handle_decref(self, oid): - self.box.decref(oid) - - def handle_delattr(self, oid, name): - delattr(self.box[oid], name) - - def handle_getattr(self, oid, name): - return getattr(self.box[oid], name) - - def handle_setattr(self, oid, name, value): - setattr(self.box[oid], name, value) - - def handle_delitem(self, oid, index): - del self.box[oid][index] - - def handle_getitem(self, oid, index): - return self.box[oid][index] - - def handle_setitem(self, oid, index, value): - self.box[oid][index] = value - - def handle_call(self, oid, args, kwargs): - return self.box[oid](*args, **kwargs) - - def handle_repr(self, oid): - return repr(self.box[oid]) - - def handle_str(self, oid): - return str(self.box[oid]) - - def handle_bool(self, oid): - return bool(self.box[oid]) - - def handle_import(self, modulename): - return __import__(modulename, None, None, modulename.split(".")[-1]) - - def handle_getconn(self): - return self - - def handle_execute(self, expr, mode): - codeobj = compile(expr, "<%s>" % (self,), mode) - return eval(codeobj, self._local_namespace) - - - diff --git a/Rpyc/Demo/__init__.py b/Rpyc/Demo/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/Rpyc/Demo/demo-1.py b/Rpyc/Demo/demo-1.py deleted file mode 100644 index ea1c5a1..0000000 --- a/Rpyc/Demo/demo-1.py +++ /dev/null @@ -1,156 +0,0 @@ -# -# welcome to RPyC. this demo serves as an introduction. i believe in learning through -# showcases, and that's why this package comes with a demo subpackage, instead of -# documentation -# -# so, the first thing we're gonna do is import the SocketConnection. this is a factory -# function that returns us a new Connection object over a socket stream. we dont need -# to get into details here. -# -from Rpyc.Factories import SocketConnection - -# -# next, we'll get all the helpful utilities. the utilities include wrappers for builtin -# functions, like dir(), so they'd work as expected with netproxies. -# -from Rpyc.Utils import * - -# -# by now you should have an rpyc server running. if you dont, go to the Servers directory -# and choose your favorite version of a socket server. for unixes i'd recommend the -# forking server; for windows -- the threaded server. -# -# so let's connect to the server -# -c = SocketConnection("localhost", 22222) - -# -# now it's time to explain a little about how rpyc works. it's quite simple really. the -# magic comes from a concept called NetProxy. a NetProxy object delivers all of the -# operations performed on it to the remote object. so if you get a list from your host, -# what you're really getting is a NetProxy to that list. it looks and works just -# like a real list -- but every time you do something on it, it actually performs a -# request on the list object stored on the host. this is called boxing. this means -# you can change the object you get locally, and the remote object changes, etc. -# -# however, for efficiency and other reasons, not all objects you get are NetProxies. -# all immutable and pickle-able objects pass by value (through pickle). these types -# of objects include ints, longs, strings, and some other types. all other types are -# passed by boxing. -# -# this boxing mechanism works on everything -- objects, functions, classes, and modules, -# which is why rpyc is considered transparent. your code looks just as if it was meant -# to run locally. -# - -# -# let's start with something simple -- getting a remote module. 
accessing the remote -# namespace always starts with the `modules` attribute, then the module (or package) -# name, and then the attribute you want to get. -# - -print c.modules.sys -print c.modules.sys.path -c.modules.sys.path.append("lucy") -print c.modules.sys.path[-1] - -# -# these remote objects are first class objects, like all others in python. this means -# you can store them in variables, pass them as parameters, etc. -# -rsys = c.modules.sys -rpath = rsys.path -rpath.pop(-1) - -# -# and as you might expect, netproxies also look like the real objects -# -print dir(rpath) - -# -# but there are a couple of issues with netproxies. the type(), isinstance(), and -# issubclass() builtins dont work on them... as they query the underlying object, not -# the remote one. so: -# -print type(rsys.maxint) # <type 'int'> (because it's a simple type which is passed by value) -print type(rsys.path) # <SyncNetProxy> -- because, after all, it's a netproxy, not a list - -# -# now for a demo of packages -# (which looks very much like 'from xml.dom.minidom import parseString') -# -parseString = c.modules.xml.dom.minidom.parseString -x = parseString("<a>lala</a>") -print x -x.toxml() -print x.firstChild.nodeName - -# -# however, there's a catch when working with packages like that. the way it works is -# trying to find an attribute with that name, and if not found, trying to import a sub- -# module. -# -# now in english: -# c.modules.xml is the xml module of the server. when you do c.modules.xml.dom, rpyc looks -# for an attribute named 'dom' inside the xml module. since there's no such attribute, -# it tries to import a subpackage called xml.dom, which succeeds. then it does the same -# for xml.dom.minidom, and xml.dom.minidom.parseString. -# -# but there are times when that's ambiguous. this means that the module has both a sub- -# module called 'X', and an attribute called 'X'. according to rpyc's algorithm, the -# attribute 'X' is returned, not the sub-module. -# -# but if you need to be explicit, you can, and it works like this: -# - -c.modules["xml.dom.minidom"].parseString("<a/>") - -# -# this will make sure the module 'xml.dom.minidom' is returned, and not an attribute. -# in general, it's better to use this form, unless you know there are no such conflicts. -# remember that "Explicit is better than implicit", although it requires four more key -# strokes. perhaps in a later version it will raise an exception if there's a conflict. -# - -# -# and now for a little demo of working with files (a common task) -# -f = c.modules.__builtin__.open("lala.txt", "w") -f.write("lucy") -f.close() -c.modules.os.remove("lala.txt") - -# -# now to a bitter part of life: exceptions. as you could expect, they work just like -# regular exceptions -# -try: - a = c.modules.sys.nonexistent_attribute -except AttributeError: - pass -else: - assert False - -try: - a = c.modules.__builtin__.open("**\\//##~~..::!@#$%^&*()_+\n <,>?") -except IOError: - pass -else: - assert False - -print "goodbye" - - - - - - - - - - - - - - - diff --git a/Rpyc/Demo/demo-2.py b/Rpyc/Demo/demo-2.py deleted file mode 100644 index 7ed797c..0000000 --- a/Rpyc/Demo/demo-2.py +++ /dev/null @@ -1,81 +0,0 @@ -# -# okay, this demo is more advanced. 
here we'll learn about: -# * redirecting standard files -# * synchronous callbacks -# * the utilities module -# -import sys -import os -from Rpyc.Factories import SocketConnection -from Rpyc.Utils import hasattr, getattr, reload, upload, remote_interpreter - -c = SocketConnection("localhost", 22222) - -# -# redirect our stdout to the server -# -sys.stdout = c.modules.sys.stdout -print "this time we focus on `the seattle music`" - -# -# and the other way round -# -sys.stdout = sys.__stdout__ -c.modules.sys.stdout = sys.stdout -c.modules.sys.stdout.write("alice in chains\n") - -# -# but you dont believe me, so -# -c.modules.Rpyc.Demo.testmodule.printer("tool") - -# -# and restore that -# -c.modules.sys.stdout = c.modules.sys.__stdout__ - -# -# now let's play with callbacks -# -def f(text): - print text - -c.modules.Rpyc.Demo.testmodule.caller(f, "nirvana") - -# -# and if you insist -# -def g(func, text): - c.modules.Rpyc.Demo.testmodule.caller(func, text) - -c.modules.Rpyc.Demo.testmodule.caller(g, f, "soundgarden") - -# -# now for the utilities module. it gives us the following cool functions: -# * dir, getattr, hasattr, help, reload -- overriding builtins -# * upload, download -- transferring files/directories to/from the client/server (all the permutations) -# * remote_shell, remote_interpreter -- running remote processes and debugging -# -print hasattr(sys, "path") -print hasattr(c.modules.sys, "path") - -print getattr(sys, "maxint") -print getattr(c.modules.sys, "maxint") - -print reload(sys) -print reload(c.modules.sys) - -f=open("lala.txt", "w") -f.write("king crimson") -f.close() -upload(c, "lala.txt", "../lala.txt") -os.remove("lala.txt") -c.modules.os.remove("../lala.txt") - -remote_interpreter(c) - - -print "goodbye" - - - diff --git a/Rpyc/Demo/demo-3.py b/Rpyc/Demo/demo-3.py deleted file mode 100644 index 220681e..0000000 --- a/Rpyc/Demo/demo-3.py +++ /dev/null @@ -1,130 +0,0 @@ -# -# this is the grand finale: asynchronous proxies as super-events -# -from Rpyc.Factories import SocketConnection, Async -from Rpyc.Utils import * - -c = SocketConnection("localhost") - -# -# this is the remote int type -# -rint = c.modules.__builtin__.int - -# -# and we'll wrap it in an asynchronous wrapper -# -rint = Async(rint) - -# -# now it still looks like a normal proxy... but operations on it return something called -# an AsyncResult -- it's an object that represents the would-be result of the operation. -# it has a .is_ready property, which indicates whether or not the result is ready, and -# a .result property, which holds the result of the operation. when you access the .result -# property, it will block until the result is returned -# -a = rint("123") -b = rint("metallica") -print a -print b.is_ready -print a.result -print a - -# -# and when an exception occurs, it looks like that -# -try: - b.result -except ValueError: - pass - -# -# only when you access the result you get the exception, which may look weird, but hey, -# it's an asynchronous world out there. -# - -# -# there's another methodology for async proxies -- on_ready callbacks. instead of -# getting the async result, you can register a callback to collect it, when it arrives. 
# -def f(res): - print "the result is", - try: - print res.result - except: - print "an exception" - -rint = Async(c.modules.__builtin__.int) - -ar = rint("123") -ar.on_ready = f - -# this will cause an exception -ar = rint("a perfect circle") -ar.on_ready = f - -# or when you dont need to keep the async result -rint("456").on_ready = f - -# and it's not limited to calling it. anything you do to the async proxy is asynchronous. -# for example, you can also get attributes asynchronously: -ar = rint.__str__ - -# -# now we'll do some other request, which will cause the results to arrive, and the callback -# to be called. -# -print c.modules.sys - -############################################################################################ -# -# this is where we get hardcore: threads and event callbacks -# -xxx = 0 -def blah(): - global xxx - xxx += 1 - -# -# we'll start a thread on the server which runs threadfunc (which is defined in the testmodule). -# this function will call the callback we give it every second, but will ignore the result. -# this practically means it's like an event -- trigger and forget. on the client side, the -# callback will increment `xxx` every time it's called -# -c.modules.thread.start_new_thread(c.modules.Rpyc.Demo.testmodule.threadfunc, (blah,)) - -# -# we'll wait a little -# -import time -time.sleep(5) - -# -# and do some operation, which, along with it, will pull all incoming requests -# -print c.modules.sys -print xxx - -# -# and we can start a thread of our own to pull the requests in the background -# -import thread -worker_running = True - -def worker(conn): - while worker_running: - conn.serve() - -thread.start_new_thread(worker, (c,)) - -time.sleep(5) -worker_running = False - -print xxx -print "goodbye" - -# -# L33TN3SS -# - - diff --git a/Rpyc/Demo/demo-4.py b/Rpyc/Demo/demo-4.py deleted file mode 100644 index 1a651ae..0000000 --- a/Rpyc/Demo/demo-4.py +++ /dev/null @@ -1,41 +0,0 @@ -import time -from Rpyc.Factories import SocketConnection, Async - -c = SocketConnection("localhost") -c2 = SocketConnection("localhost") - -huge_xml = "<node>" * 50000 + "</node>" * 50000 -parseString = Async(c.modules.xml.dom.minidom.parseString) -res = parseString(huge_xml) - -print "while we're waiting for the server to complete, we do other stuff" -t = time.time() -while not res.is_ready: - time.sleep(0.5) - # we dont want to use `c`, because it would block us (as the server is blocking) - # but `c2` runs on another thread/process, so it wouldn't block - print c2.modules.os.getpid() - -t = time.time() - t -print "it took %d seconds" % (t,) - -print res.result - - -# -# note: to improve performance, delete the result when you no longer need it. -# this should be done because the server might (as in this case) hold enormous -# amounts of memory, which will slow it down -# -# if you do this: -# res = parseString(huge_xml) -# res = parseString(huge_xml) -# res will be deleted only after the second operation finishes, because only when -# the second result is assigned, the first is released -- server still holds -# around 160MB of the old xml tree for nothing. so it's a good idea to `del res` -# when you dont need it. -# -# also, there's a memory leak on the server, which i'm working on solving. 
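# a small sketch of the release-early pattern described above (illustrative,
# not from the original demo), reusing parseString and huge_xml from this file:
#
#   res = parseString(huge_xml)
#   tree = res.result      # blocks until the server finishes parsing
#   del res                # drop our reference so the server can free the tree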
# - - diff --git a/Rpyc/Demo/demo-5.py b/Rpyc/Demo/demo-5.py deleted file mode 100644 index 4ea4491..0000000 --- a/Rpyc/Demo/demo-5.py +++ /dev/null @@ -1,66 +0,0 @@ -# -# this demo will show you working with async proxies and callbacks -# version 2.3 removes the AsyncCallback factory, and instead provides a mechanism -# where async results can provide a callback. it simplifies the design, so i -# went for it. -# -import time -from Rpyc.Factories import SocketConnection, Async - -c1 = SocketConnection("localhost") - -# f1 is an async proxy to the server's sleep function -f1 = Async(c1.modules.time.sleep) - -# this would block the server for 9 seconds -r1 = f1(9) -# and this would block it for 11 -r2 = f1(11) - -# of course the client isnt affected (that's the whole point of Async) -# but since the same server can't serve both requests simultaneously, the second -# request is queued. this is a good example of queuing. - -# now we'll wait for both results to finish. this should print around 20 lines -# (more or less, depending on the phase) -while not r1.is_ready or not r2.is_ready: - print "!" - time.sleep(1) - -print "---" - -# now we'll dig in the h4xx0r shit -- running things simultaneously -# for this, we'll need another connection, and another proxy: -c2 = SocketConnection("localhost") -f2 = Async(c2.modules.time.sleep) - -# now we'll do the same as the above, but this time, it will happen simultaneously -# because f1 and f2 work on different connections -r1 = f1(9) -r2 = f2(11) - -# so this time, it will print around 11 lines -while not r1.is_ready or not r2.is_ready: - print "!" - time.sleep(1) - -print "---" - -# very haxxor indeed. now, we'll see how to use the on_ready callback -r1 = f1(9) -r2 = f2(11) - -def blah(res): - print "look mama, no hands! res = %r" % (res.result,) - -# set the on_ready callback -- when r1 becomes ready, the callback will -# be called automagically -r1.on_ready = blah - -# this should print 9 "!", then "look mama", then two more "!" -while not r1.is_ready or not r2.is_ready: - print "!" - time.sleep(1) - - - diff --git a/Rpyc/Demo/demo-6.py b/Rpyc/Demo/demo-6.py deleted file mode 100644 index 1c34039..0000000 --- a/Rpyc/Demo/demo-6.py +++ /dev/null @@ -1,130 +0,0 @@ -# as you can see - the import line now requires even less typing! -from Rpyc import * -c = SocketConnection("localhost") - -#------------------------------------------------------------------------------ -# this demo shows the new `execute` and `namespace` features of rpyc -#------------------------------------------------------------------------------ - - -# the code below will run AT THE OTHER SIDE OF THE CONNECTION... so you'll see -# 'hello world' on the server's console -c.execute("print 'hello world'") - -import sys -c.modules.sys.stdout = sys.stdout - -# and this time, on our console -c.execute("print 'brave new world'") - -# restore that -c.modules.sys.stdout = c.modules.sys.__stdout__ - -# anyway, the `execute` method runs the given code at the other side of the connection -# and works in the `namespace` dict. what? -c.execute("x = [1,2,3]") -print c.namespace.x - -# now it makes sense, doesn't it? the 'namespace' attribute is something i called -# AttrFrontend -- it wraps a dict with the attribute protocol, so you can access -# it with the dot notation, instead of the braces notation (more intuitive). 
-# this namespace works both ways -- executing code affects the namespace, while -# altering the namespace directly also affects it: -c.namespace.x.append(4) -c.execute("x.append(5)") -print c.namespace.x - -# but you should not assign complex objects (not int/float/str, etc) to this namespace -# directly, or NetProxies will be created. there's nothing wrong with that, but keep -# in mind it might cause blocking (and even deadlocks), as i'll explain later. - -# another cool thing i want to show is the second, optional parameter to execute: mode. -# the mode controls how the code is compiled. the default mode is "exec", which means -# it executes the code as a module. the other option is "eval" which returns a value. -# so if you want to _do_ something, like printing or assigning a variable, you do it -# with "exec", and if you want to evaluate something, you do it with "eval" -# for example: - -# this will print None -print c.execute("1+2") - -# while this will print 3 -print c.execute("1+2", "eval") - -# but there's a time in a man's life when he asks himself, why the heck? you can, as i -# showed in other places, just do this: -# c.modules.__builtin__.eval("1+2") -# so what's the point? -# -# well, i've been waiting for this question. the rationale behind this seemingly useless -# feature is for times you NEED to have the code executing remotely, but writing a -# dedicated module for it is overdoing it: -# * more files to update ==> more chance that you'll forget to update -# * distributing the module to all of the machines -# * making a mess on the file system -# * it's really not a module... it's just some code that logically belongs to one single -# module, but technical difficulties prevent it -# -# and to show you what i mean -- i want to start a thread on the server, like i did in -# several places over the demos. this thread will send me an event every second. what i -# used to do was, creating another module, like testmodule.py to define the thread -# function, so it will exist on the server, and i could call it. -# if i defined thread_func at the client side, then the thread will block when trying -# to execute the code, because the client holds it. so this new mechanism lets you -# distribute code in a volatile fashion: -# * when the connection is closed, everything you defined is gone -# * no file-system mess -# * no need to distribute files across the network -# * only one place to maintain - -c.execute(""" -my_thread_active = True - -def my_thread_func(callback): - import time - from Rpyc import Async - - callback = Async(callback) - while my_thread_active: - callback(time.time()) - time.sleep(1) - print "the thread says goodbye" -""") - -def callback(timestamp): - print "the timestamp is", timestamp - -c.modules.thread.start_new_thread(c.namespace.my_thread_func, (callback,)) -c.modules.time.sleep(5) -c.namespace.my_thread_active = False -c.close() - -# it's not only for threads of course. there are many times when you NEED the code/objects -# on the remote side. for example: -# * situations that would block (like having the thread func on the client) -# * code that checks the type of the object (type or isinstance), and a NetProxy would make -# it cry. DONT CHECK THE TYPE OF OBJECTS, PEOPLE, JUST USE THEM! that's why they invented -# duck-typing. argh. -# * other places i didnt think of as of yet. i want to sleep. leave me alone ;) zzzZZZ -# -# so enjoy! 
- - - - - - - - - - - - - - - - - - - - diff --git a/Rpyc/Demo/pipe-child.py b/Rpyc/Demo/pipe-child.py deleted file mode 100644 index 517d0ef..0000000 --- a/Rpyc/Demo/pipe-child.py +++ /dev/null @@ -1,8 +0,0 @@ -import sys -from Rpyc import PipeConnection - -c = PipeConnection(sys.stdin, sys.stdout) -c.modules.sys.path.append("i love lucy") - - -# child dies \ No newline at end of file diff --git a/Rpyc/Demo/pipe-parent.py b/Rpyc/Demo/pipe-parent.py deleted file mode 100644 index bd8cc89..0000000 --- a/Rpyc/Demo/pipe-parent.py +++ /dev/null @@ -1,17 +0,0 @@ -# a demo for parent/child over pipes - -import sys -from popen2 import popen3 -from Rpyc import PipeConnection - -cout, cin, cerr = popen3("python pipe-child.py") -conn = PipeConnection(cout, cin) - -try: - while True: - conn.serve() -except EOFError: - print "goodbye child" - -print sys.path[-1] - diff --git a/Rpyc/Demo/testmodule.py b/Rpyc/Demo/testmodule.py deleted file mode 100644 index 20d2bc9..0000000 --- a/Rpyc/Demo/testmodule.py +++ /dev/null @@ -1,19 +0,0 @@ -import time -from Rpyc.Factories import Async - -def threadfunc(callback): - """this function will call the callback every second""" - callback = Async(callback) - try: - while True: - callback() - time.sleep(1) - except: - print "thread exiting" - -def printer(text): - print text - -def caller(func, *args): - func(*args) - \ No newline at end of file diff --git a/Rpyc/Demo/testsuite.bat b/Rpyc/Demo/testsuite.bat deleted file mode 100644 index fa46892..0000000 --- a/Rpyc/Demo/testsuite.bat +++ /dev/null @@ -1,6 +0,0 @@ -python demo-1.py -python demo-2.py -python demo-3.py -python demo-4.py -python demo-5.py -python demo-6.py diff --git a/Rpyc/Factories.py b/Rpyc/Factories.py deleted file mode 100644 index 9de3b69..0000000 --- a/Rpyc/Factories.py +++ /dev/null @@ -1,57 +0,0 @@ -""" -the factory: -exposes a nice and easy interface to the internals of rpyc. -this module and Utils are the only modules most clients will need. 
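    a hedged sketch of the factories in action (illustrative only; the host is
    a placeholder and a server must already be running there, as in the demos):

        from Rpyc.Factories import SocketConnection, Async
        c = SocketConnection("localhost")        # plain, unauthenticated connection
        rsleep = Async(c.modules.time.sleep)     # async wrapper around a synchronous proxy
        res = rsleep(3)
        print res.is_ready                       # poll without blocking; res.result blocks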
-""" - -from Stream import SocketStream, PipeStream -from Channel import Channel -from Connection import Connection -from AsyncNetProxy import AsyncNetProxy -from weakref import WeakValueDictionary -from Lib import DEFAULT_PORT - - -__all__ = ["SocketConnection", "AuthSocketConnection", "PipeConnection", "Async"] -_async_proxy_cache = WeakValueDictionary() - -class LoginError(Exception): - pass - -def SocketConnection(host, port = DEFAULT_PORT, **kw): - """shorthand for creating a conneciton over a socket to a server""" - return Connection(Channel(SocketStream.from_new_socket(host, port, **kw))) - -def _create_auth_connection(chan, username, password): - from Authentication import login - if not login(chan, username, password): - raise LoginError("the server did not accept the login") - return Connection(chan) - -def AuthSocketConnection(host, username, password, port = DEFAULT_PORT, **kw): - """shorthand for creating a conneciton over a socket to a server, with authentication""" - chan = Channel(SocketStream.from_new_socket(host, port, **kw)) - return _create_auth_connection(chan, username, password) - -def PipeConnection(incoming, outgoing): - """shorthand for creating a conneciton over a pipe""" - return Connection(Channel(PipeStream(incoming, outgoing))) - -def AuthPipeConnection(incoming, outgoing, username, password): - """shorthand for creating a conneciton over a pipe""" - chan = Channel(PipeStream(incoming, outgoing)) - return _create_auth_connection(chan, username, password) - -def Async(proxy): - """a factory for creating asynchronous proxies for existing synchronous ones""" - key = id(proxy) - if key in _async_proxy_cache: - return _async_proxy_cache[key] - else: - new_proxy = AsyncNetProxy(proxy) - _async_proxy_cache[key] = new_proxy - return new_proxy - - - - diff --git a/Rpyc/Lib.py b/Rpyc/Lib.py deleted file mode 100644 index bff6957..0000000 --- a/Rpyc/Lib.py +++ /dev/null @@ -1,53 +0,0 @@ -""" -shared types, functions and constants -""" -import sys - - -DEFAULT_PORT = 18812 - -def raise_exception(typ, val, tbtext): - """a helper for raising remote exceptions""" - if type(typ) == str: - raise typ - else: - val._remote_traceback = tbtext - raise val - -class ImmDict(object): - """immutable dict (passes by value)""" - def __init__(self, dict): - self.dict = dict - def items(self): - return self.dict.items() - -class AttrFrontend(object): - """a wrapper that implements the attribute protocol for a dict backend""" - def __init__(self, dict): - self.__dict__["____dict"] = dict - def __delattr__(self, name): - del self.__dict__["____dict"][name] - def __getattr__(self, name): - return self.__dict__["____dict"][name] - def __setattr__(self, name, value): - self.__dict__["____dict"][name] = value - def __repr__(self): - return "" % (self.__dict__["____dict"].keys(),) - -# installs an rpyc-enabled exception hook. this happens automatically when the module -# is imported. 
also, make sure the current excepthook is the original one, so we dont -# install our hook twice (and thus cause infinite recursion) in case the module is reloaded -def rpyc_excepthook(exctype, value, traceback): - if hasattr(value, "_remote_traceback"): - print >> sys.stderr, "======= Remote traceback =======" - print >> sys.stderr, value._remote_traceback - print >> sys.stderr, "======= Local exception =======" - orig_excepthook(exctype, value, traceback) - else: - orig_excepthook(exctype, value, traceback) - -if sys.excepthook.__name__ != "rpyc_excepthook": - orig_excepthook = sys.excepthook - sys.excepthook = rpyc_excepthook - - diff --git a/Rpyc/ModuleNetProxy.py b/Rpyc/ModuleNetProxy.py deleted file mode 100644 index 3445088..0000000 --- a/Rpyc/ModuleNetProxy.py +++ /dev/null @@ -1,46 +0,0 @@ -from NetProxy import NetProxyWrapper - - -class ModuleNetProxy(NetProxyWrapper): - """a netproxy specialized for exposing remote modules (first tries to getattr, - if it fails tries to import)""" - __doc__ = NetProxyWrapper.__doc__ - - def __init__(self, proxy, base): - NetProxyWrapper.__init__(self, proxy) - self.__dict__["____base"] = base - self.__dict__["____cache"] = {} - - def __request__(self, handler, *args): - return self.__dict__["____conn"].sync_request(handler, self.__dict__["____oid"], *args) - - def __getattr__(self, name): - if name in self.__dict__["____cache"]: - return self.__dict__["____cache"][name] - - try: - return self.__request__("handle_getattr", name) - except AttributeError: - pass - - try: - fullname = self.__dict__["____base"] + "." + name - obj = self.__dict__["____conn"].rimport(fullname) - module = ModuleNetProxy(obj, fullname) - self.__dict__["____cache"][name] = module - return module - except ImportError: - raise AttributeError("'module' object has no attribute or submodule %r" % (name,)) -class RootImporter(object): - """the root of the interpreter's import hierarchy""" - - def __init__(self, conn): - self.__dict__["____conn"] = conn - - def __getitem__(self, name): - return self.__dict__["____conn"].rimport(name) - - def __getattr__(self, name): - return ModuleNetProxy(self[name], name) - diff --git a/Rpyc/NetProxy.py b/Rpyc/NetProxy.py deleted file mode 100644 index 96ed53e..0000000 --- a/Rpyc/NetProxy.py +++ /dev/null @@ -1,119 +0,0 @@ -from Lib import ImmDict - - -def fully_dynamic_metaclass(name, bases, dict): - """ - a meta class that enables special methods to be accessed like regular names - (via __getattr__), like it used to be in old-style classes. 
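    why this is needed, sketched in plain python (illustrative, not from the
    original file): on new-style classes the interpreter looks special methods
    up on the type, not the instance, so a bare __getattr__ is never consulted
    for operators:

        class Plain(object):
            def __getattr__(self, name):
                return lambda *args: 42
        p = Plain()
        print p.foo()    # 42 -- regular names go through __getattr__
        len(p)           # TypeError -- __len__ is looked up on type(p),
                         # bypassing __getattr__; hence the generated methods below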
- """ - - special_methods = [ - "__hash__", "__len__", "__iter__", "next", "__reversed__", - "__add__", "__iadd__", "__radd__", "__sub__", "__isub__", "__rsub__", "__mul__", - "__imul__", "__rmul__", "__div__", "__idiv__", "__rdiv__", "__truediv__", - "__itruediv__", "__rtruediv__", "__floordiv__", "__ifloordiv__", "__rfloorfiv__", - "__pow__", "__ipow__", "__rpow__", "__lshift__", "__ilshift__", "__rlshift__", - "__rshift__", "__irshift__", "__rrshift__", "__and__", "__iand__", "__rand__", - "__or__", "__ior__", "__ror__", "__xor__", "__ixor__", "__rxor__", "__mod__", - "__imod__", "__rmod__", "__divmod__", "__idivmod__", "__rdivmod__", "__pos__", - "__neg__", "__int__", "__float__", "__long__", "__oct__", "__hex__", "__coerce__", - "__eq__", "__ne__", "__le__", "__ge__", "__lt__", "__gt__", "__cmp__", - ] - - # i added '__class__' to the special attributes, but it broke some things - # (like dir), so we'll have to live without it - special_attributes = ["__doc__", "__module__"] - - def make_method(name): - def caller(self, *a, **k): - return self.__getattr__(name)(*a, **k) - return caller - - def make_property(name): - def getter(self): - return self.__getattr__(name) - def setter(self, value): - self.__setattr__(name, value) - def deller(self): - self.__delattr__(name) - return property(getter, setter, deller) - - classdict = {} - for sn in special_methods: - classdict[sn] = make_method(sn) - classdict.update(dict) - for sa in special_attributes: - classdict[sa] = make_property(sa) - return type(name, bases, classdict) - -class NetProxy(object): - """NetProxy - convey local operations to the remote object. this is an abstract class""" - __metaclass__ = fully_dynamic_metaclass - - def __init__(self, conn, oid): - self.__dict__["____conn"] = conn - self.__dict__["____oid"] = oid - - def __request__(self, handler, *args): - raise NotImplementedError() - - def __call__(self, *args, **kwargs): - return self.__request__("handle_call", args, ImmDict(kwargs)) - - def __delattr__(self, *args): - return self.__request__("handle_delattr", *args) - def __getattr__(self, *args): - return self.__request__("handle_getattr", *args) - def __setattr__(self, *args): - return self.__request__("handle_setattr", *args) - - def __delitem__(self, *args): - return self.__request__("handle_delitem", *args) - def __getitem__(self, *args): - return self.__request__("handle_getitem", *args) - def __setitem__(self, *args): - return self.__request__("handle_setitem", *args) - - # special cases - def __repr__(self, *args): - return self.__request__("handle_repr", *args) - def __str__(self, *args): - return self.__request__("handle_str", *args) - def __nonzero__(self, *args): - return self.__request__("handle_bool", *args) - -class NetProxyWrapper(NetProxy): - """a netproxy that wraps an inner netproxy""" - __doc__ = NetProxy.__doc__ - - def __init__(self, proxy): - NetProxy.__init__(self, proxy.__dict__["____conn"], proxy.__dict__["____oid"]) - # we must keep the original proxy alive - self.__dict__["____original_proxy"] = proxy - -def _dummy_callback(*args, **kw): - pass - -class SyncNetProxy(NetProxy): - """the default, synchronous netproxy""" - __doc__ = NetProxy.__doc__ - - def __init__(self, conn, oid): - NetProxy.__init__(self, conn, oid) - self.__dict__["____conn"].sync_request("handle_incref", self.__dict__["____oid"]) - - def __del__(self): - try: - # decref'ing is done asynchronously, because we dont need to wait for the remote - # object to die. 
moreover, we dont care if it fails, because that would mean the - # connection is broken, so the remote object is already dead - self.__dict__["____conn"].async_request(_dummy_callback, "handle_decref", self.__dict__["____oid"]) - except: - pass - - def __request__(self, handler, *args): - return self.__dict__["____conn"].sync_request(handler, self.__dict__["____oid"], *args) - - - - diff --git a/Rpyc/Servers/ServerUtils.py b/Rpyc/Servers/ServerUtils.py deleted file mode 100644 index e9b361d..0000000 --- a/Rpyc/Servers/ServerUtils.py +++ /dev/null @@ -1,90 +0,0 @@ -import os -import socket -import sys -import gc -from threading import Thread -from Rpyc.Connection import Connection -from Rpyc.Stream import SocketStream, PipeStream -from Rpyc.Channel import Channel -from Rpyc.Lib import DEFAULT_PORT - - -class Logger(object): - def __init__(self, logfile = None, active = True): - self.logfile = logfile - self.active = active - def __call__(self, *args): - if not self.logfile: - return - if not self.active: - return - text = " ".join([str(a) for a in args]) - self.logfile.write("[%d] %s\n" % (os.getpid(), text)) - self.logfile.flush() - -log = Logger(sys.stdout) - -def _serve(chan): - conn = Connection(chan) - try: - try: - while True: - conn.serve() - except EOFError: - pass - finally: - conn.close() - gc.collect() - -def serve_stream(stream, authenticate = False, users = None): - chan = Channel(stream) - - if authenticate: - from Rpyc.Authentication import accept - log("requiring authentication") - if accept(chan, users): - log("authentication successful") - else: - log("authentication failed") - return - - _serve(chan) - -def create_listener_socket(port): - sock = socket.socket() - sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) - #sock.bind(("", port)) - sock.bind(("localhost", port)) - sock.listen(4) - log("listening on", sock.getsockname()) - return sock - -def serve_socket(sock, **kw): - sockname = sock.getpeername() - log("welcome", sockname) - try: - try: - serve_stream(SocketStream(sock), **kw) - except socket.error: - pass - finally: - log("goodbye", sockname) - -def serve_pipes(incoming, outgoing, **kw): - serve_stream(PipeStream(incoming, outgoing), **kw) - -def threaded_server(port = DEFAULT_PORT, **kwargs): - sock = create_listener_socket(port) - while True: - newsock, name = sock.accept() - t = Thread(target = serve_socket, args = (newsock,), kwargs = kwargs) - t.setDaemon(True) - t.start() - -def start_threaded_server(*args, **kwargs): - """starts the threaded_server on a separate thread. 
this turns the - threaded_server into a mix-in you can place anywhere in your code""" - t = Thread(target = threaded_server, args = args, kwargs = kwargs) - t.setDaemon(True) - t.start() - diff --git a/Rpyc/Servers/__init__.py b/Rpyc/Servers/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/Rpyc/Servers/auth_server.py b/Rpyc/Servers/auth_server.py deleted file mode 100644 index b13fb20..0000000 --- a/Rpyc/Servers/auth_server.py +++ /dev/null @@ -1,19 +0,0 @@ -from ServerUtils import DEFAULT_PORT, threaded_server - -# -# define user:password pairs of your own -# -users = { - "johnnash" : "t3hgam3r", - "tolkien" : "1ring", - "yossarian" : "catch22", -} - -def main(port = DEFAULT_PORT): - threaded_server(port, authenticate = True, users = users) - -if __name__ == "__main__": - main() - - - diff --git a/Rpyc/Servers/forking_server.py b/Rpyc/Servers/forking_server.py deleted file mode 100644 index bfe78c8..0000000 --- a/Rpyc/Servers/forking_server.py +++ /dev/null @@ -1,26 +0,0 @@ -import sys -import os -from ServerUtils import serve_socket, create_listener_socket, DEFAULT_PORT - - -def serve_in_child(sock): - """forks a child to run the server in. the parent doesnt wait() for the child, - so if you do a `ps`, you'll see zombies. but who cares. i used to do a doublefork() - for that, but it's really meaningless. anyway, when the parent dies, the zombies - die as well.""" - if os.fork() == 0: - try: - serve_socket(sock) - finally: - sys.exit() - -def main(port = DEFAULT_PORT): - sock = create_listener_socket(port) - while True: - newsock, name = sock.accept() - serve_in_child(newsock) - -if __name__ == "__main__": - main() - - \ No newline at end of file diff --git a/Rpyc/Servers/selecting_server.py b/Rpyc/Servers/selecting_server.py deleted file mode 100644 index 0136d63..0000000 --- a/Rpyc/Servers/selecting_server.py +++ /dev/null @@ -1,29 +0,0 @@ -import select -import socket -from ServerUtils import log, create_listener_socket, DEFAULT_PORT, SocketStream, Connection - - -def main(port = DEFAULT_PORT): - sock = create_listener_socket(port) - connections = [] - - while True: - rlist, wlist, xlist = select.select([sock] + connections, [], []) - - if sock in rlist: - rlist.remove(sock) - newsock, name = sock.accept() - conn = Connection(SocketStream(newsock)) - conn.sockname = name - connections.append(conn) - log("welcome", conn.sockname) - - for conn in rlist: - try: - conn.serve() - except (EOFError, socket.error): - connections.remove(conn) - log("goodbyte", conn.sockname) - -if __name__ == "__main__": - main() diff --git a/Rpyc/Servers/simple_server.py b/Rpyc/Servers/simple_server.py deleted file mode 100644 index 60315a5..0000000 --- a/Rpyc/Servers/simple_server.py +++ /dev/null @@ -1,13 +0,0 @@ -from ServerUtils import serve_socket, create_listener_socket, DEFAULT_PORT - - -def main(port = DEFAULT_PORT): - sock = create_listener_socket(port) - while True: - newsock, name = sock.accept() - serve_socket(newsock) - -if __name__ == "__main__": - main() - - \ No newline at end of file diff --git a/Rpyc/Servers/std_server.py b/Rpyc/Servers/std_server.py deleted file mode 100644 index c556b83..0000000 --- a/Rpyc/Servers/std_server.py +++ /dev/null @@ -1,31 +0,0 @@ -#!/usr/bin/env python -# -# installation instructions -# * add a service in /etc/services for rpyc: tcp port 18812 -# * add "rpyc .... 
/usr/lib/pythonXX/site-packages/Rpyc/Servers/std_server.py" -# to /etc/inetd.conf (i dont remember syntax, rtfm) -# * dont forget to chmod +x this file -# * restart inetd with sighup -# -import sys -import time -from traceback import format_exception -from ServerUtils import log, serve_pipes - - -def main(filename = "/tmp/rpyc-server.log"): - log.logfile = open(filename, "a") - log("-" * 80) - log("started serving at", time.asctime()) - try: - try: - serve_pipes(sys.stdin, sys.stdout) - except: - log(*format_exception(*sys.exc_info())) - finally: - log("server exits at", time.asctime()) - -if __name__ == "__main__": - main() - - \ No newline at end of file diff --git a/Rpyc/Servers/threaded_server.py b/Rpyc/Servers/threaded_server.py deleted file mode 100644 index bc65605..0000000 --- a/Rpyc/Servers/threaded_server.py +++ /dev/null @@ -1,9 +0,0 @@ -from ServerUtils import DEFAULT_PORT, threaded_server - - -def main(port = DEFAULT_PORT): - threaded_server(port) - -if __name__ == "__main__": - main() - diff --git a/Rpyc/Stream.py b/Rpyc/Stream.py deleted file mode 100644 index 66568b6..0000000 --- a/Rpyc/Stream.py +++ /dev/null @@ -1,117 +0,0 @@ -import select -import socket - - -class Stream(object): - """ - a stream is a file-like object that is used to expose a consistent and uniform interface - to the 'physical' file-like objects (like sockets and pipes), which have many quirks (sockets - may recv() less than `count`, pipes are simplex and don't flush, etc.). - a stream is always in blocking mode. - """ - - def close(self): - raise NotImplementedError() - - def fileno(self): - raise NotImplementedError() - - def is_available(self): - rlist, wlist, xlist = select.select([self], [], [], 0) - return bool(rlist) - - def wait(self): - select.select([self], [], []) - - def read(self, count): - raise NotImplementedError() - - def write(self, data): - raise NotImplementedError() - - -class SocketStream(Stream): - """ - a stream that operates over a socket. note: - * the socket is expected to be reliable (i.e., TCP) - * the socket is expected to be in blocking mode - """ - def __init__(self, sock): - self.sock = sock - - def __repr__(self): - host, port = self.sock.getpeername() - return "<%s(%s:%d)>" % (self.__class__.__name__, host, port) - - def from_new_socket(cls, host, port, **kw): - sock = socket.socket(**kw) - sock.connect((host, port)) - return cls(sock) - from_new_socket = classmethod( from_new_socket ) - - def fileno(self): - return self.sock.fileno() - - def close(self): - self.sock.close() - - def read(self, count): - data = [] - while count > 0: - buf = self.sock.recv(count) - if not buf: - raise EOFError() - count -= len(buf) - data.append(buf) - return "".join(data) - - def write(self, data): - while data: - count = self.sock.send(data) - data = data[count:] - - -class PipeStream(Stream): - """ - a stream that operates over two simplex pipes. 
- note: the pipes are expected to be in blocking mode - """ - - def __init__(self, incoming, outgoing): - self.incoming = incoming - self.outgoing = outgoing - - def fileno(self): - return self.incoming.fileno() - - def close(self): - self.incoming.close() - self.outgoing.close() - - def read(self, count): - data = [] - while count > 0: - buf = self.incoming.read(count) - if not buf: - raise EOFError() - count -= len(buf) - data.append(buf) - return "".join(data) - - def write(self, data): - self.outgoing.write(data) - self.outgoing.flush() - - # win32: stubs - import sys - if sys.platform == "win32": - def is_available(self): - return True - - def wait(self): - pass - - - - - diff --git a/Rpyc/Utils.py b/Rpyc/Utils.py deleted file mode 100644 index 00963c2..0000000 --- a/Rpyc/Utils.py +++ /dev/null @@ -1,265 +0,0 @@ -""" -convenience utilities: - * provides dir(), getattr(), hasattr(), help() and reload() that support netproxies - * provides obtain() for really transfering remote objects - * provides upload() and download() for working with files - * provides a nice interface for remote shell operations (like os.system) - and a openning a remote python interpreter (for debugging, etc.) - -i removed lots of stuff from the __all__, keeping only the useful things, -so that import * wouldnt mess your namespace too much -""" -import __builtin__ -import sys -import os -import inspect -from NetProxy import NetProxy - -__all__ = [ - "dir", "getattr", "hasattr", "help", "reload", "obtain", - "upload", "download", -] - -CHUNK_SIZE = 4096 - -class UtilityError(Exception): - pass - -# -# working with netproxies -# -def dir(*obj): - """a version of dir() that supports NetProxies""" - if not obj: - inspect_stack = [inspect.stack()[1][0].f_locals.keys()] - inspect_stack.sort() - return inspect_stack - if not len(obj) == 1: - raise TypeError("dir expected at most 1 arguments, got %d" % (len(obj),)) - obj = obj[0] - if isinstance(obj, NetProxy): - return obj.__dict__["____conn"].modules["__builtin__"].dir(obj) - else: - return __builtin__.dir(obj) - -def getattr(obj, name, *default): - """a version of getattr() that supports NetProxies""" - if len(default) > 1: - raise TypeError("getattr expected at most 3 arguments, got %d" % (2 + len(default),)) - if isinstance(obj, NetProxy): - try: - return obj.__getattr__(name) - except AttributeError: - if not default: - raise - return default[0] - else: - return __builtin__.getattr(obj, name, *default) - -def hasattr(obj, name): - """a version of hasattr() that supports NetProxies""" - try: - getattr(obj, name) - except AttributeError: - return False - else: - return True - -class _Helper(object): - """a version of help() that supports NetProxies""" - def __repr__(self): - return repr(__builtin__.help) - def __call__(self, obj = None): - if isinstance(obj, NetProxy): - print "Help on NetProxy object for an instance of %r:" % (obj.__getattr__("__class__").__name__,) - print - print "Doc:" - print obj.__doc__ - print - print "Members:" - print dir(obj) - else: - __builtin__.help(obj) -help = _Helper() - -def reload(module): - """a version of reload() that supports NetProxies""" - if isinstance(module, NetProxy): - return module.__dict__["____conn"].modules["__builtin__"].reload(module) - else: - return __builtin__.reload(module) - -def obtain(proxy): - """transfers a remote object to this process. 
this is done by pickling it, so it - must be a picklable object, and should be immutable (otherwise the local object - may be different from the remote one, which may cause problems). generally speaking, - you should not obtain remote objects, as NetProxies provide a stronger mechanism. - but there are times when you want to get the real object in your hand, for pickling - it locally (e.g., storing test results in a file), or if the connection is too slow.""" - import cPickle - dumped = proxy.__dict__["____conn"].modules.cPickle.dumps(proxy) - return cPickle.loads(dumped) - -def getconn(obj): - """returns the connection of a NetProxy""" - if "____conn" not in obj.__dict__: - raise TypeError("`obj` is not a NetProxy") - return proxy.__dict__["____conn"] - -# -# working with files -# -def upload(conn, localpath, remotepath, *a, **k): - """uploads a file or a directory recursively (depending on what `localpath` is)""" - if os.path.isdir(localpath): - upload_dir(conn, localpath, remotepath, *a, **k) - elif os.path.isfile(localpath): - upload_file(conn, localpath, remotepath, *a, **k) - else: - raise UtilityError("can only upload files or directories") - -def download(conn, remotepath, localpath, *a, **k): - """downloads a file or a directory recursively (depending on what `remotepath` is)""" - if conn.modules.os.path.isdir(remotepath): - download_dir(conn, remotepath, localpath, *a, **k) - elif conn.modules.os.path.isfile(remotepath): - download_file(conn, remotepath, localpath, *a, **k) - else: - raise UtilityError("can only download files or directories") - -def upload_file(conn, localpath, remotepath): - lf = open(localpath, "rb") - rf = conn.modules.__builtin__.open(remotepath, "wb") - while True: - chunk = lf.read(CHUNK_SIZE) - if not chunk: - break - rf.write(chunk) - lf.close() - rf.close() - -def download_file(conn, remotepath, localpath): - lf = open(localpath, "wb") - rf = conn.modules.__builtin__.open(remotepath, "rb") - while True: - chunk = rf.read(CHUNK_SIZE) - if not chunk: - break - lf.write(chunk) - lf.close() - rf.close() - -def upload_dir(conn, localpath, remotepath, extensions = [""]): - if not conn.modules.os.path.exists(remotepath): - conn.modules.os.makedirs(remotepath) - for fn in os.listdir(localpath): - lfn = os.path.join(localpath, fn) - rfn = conn.modules.os.path.join(remotepath, fn) - if os.path.isdir(lfn): - upload_dir(conn, lfn, rfn, extensions) - elif os.path.isfile(lfn): - for ext in extensions: - if fn.endswith(ext): - upload_file(conn, lfn, rfn) - break - -def download_dir(conn, remotepath, localpath, extensions = [""]): - if not os.path.exists(localpath): - os.makedirs(localpath) - for fn in conn.modules.os.listdir(remotepath): - lfn = os.path.join(localpath, fn) - rfn = conn.modules.os.path.join(remotepath, fn) - if conn.modules.os.path.isdir(lfn): - download_dir(conn, rfn, lfn, extensions) - elif conn.modules.os.path.isfile(lfn): - for ext in extensions: - if fn.endswith(ext): - download_file(conn, rfn, lfn) - break - -# -# distributing modules between hosts -# -def upload_package(conn, module, remotepath = None): - """uploads the given package to the server, storing it in `remotepath`. if - remotepath is None, it defaults to the server's site-packages. if the package - already exists, it is overwritten. 
- usage: - import xml - upload_package(conn, xml)""" - if remotepath is None: - remotepath = conn.modules["distutils.sysconfig"].get_python_lib() - localpath = os.path.dirname(module.__file__) - upload_dir(conn, localpath, remotepath, [".py", ".pyd", ".dll", ".so", ".zip"]) - -def update_module(conn, module): - """updates an existing module on the server. the local module is transfered to the - server, overwriting the old one, and is reloaded. - usage: - import xml.dom.minidom - upload_module(conn, xml.dom.minidom)""" - remote_module = conn.modules[module.__name__] - local_file = inspect.getsourcefile(module) - remote_file = inspect.getsourcefile(remote_module) - upload_file(conn, local_filem, remote_file) - reload(remote_module) - -# -# remote shell and interpreter -# -def _redirect_std(conn): - rsys = conn.modules.sys - orig = (rsys.stdin, rsys.stdout, rsys.stderr) - rsys.stdin = sys.stdin - rsys.stdout = sys.stdout - rsys.stderr = sys.stderr - return orig - -def _restore_std(conn, (stdin, stdout, stderr)): - rsys = conn.modules.sys - rsys.stdin = stdin - rsys.stdout = stdout - rsys.stderr = stderr - -def remote_shell(conn, command = None): - """runs the given command on the server, just like os.system, but takes care - of redirecting the server's stdout/stdin to the client""" - # BUG: on windows, there's a problem with redirecting the output of spawned command. - # it runs fine and all, just the client can't see the output. again, windows sucks. - if command is None: - if sys.platform == "win32": - command = "%COMSPEC%" - else: - command = "/bin/sh" - try: - orig = _redirect_std(conn) - return conn.modules.os.system(command) - finally: - _restore_std(conn, orig) - -def remote_interpreter(conn, namespace = None): - """starts an interactive interpreter on the server""" - if namespace is None: - #namespace = inspect.stack()[1][0].f_globals.copy() - #namespace.update(inspect.stack()[1][0].f_locals) - namespace = {"conn" : conn} - try: - orig = _redirect_std(conn) - return conn.modules["Rpyc.Utils"]._remote_interpreter_server_side(**namespace) - finally: - _restore_std(conn, orig) - -def _remote_interpreter_server_side(**namespace): - import code - namespace.update(globals()) - code.interact(local = namespace) - -def remote_post_mortem(conn): - """a version of pdb.pm() that operates on exceptions at the remote side of the connection""" - import pdb - pdb.post_mortem(c.modules.sys.last_traceback) - - - - - diff --git a/Rpyc/__init__.py b/Rpyc/__init__.py deleted file mode 100644 index 41784bb..0000000 --- a/Rpyc/__init__.py +++ /dev/null @@ -1,10 +0,0 @@ -""" -RPyC -by tomer filiba (tomerfiliba at gmail dot com) -""" -from Factories import * -from Utils import * -from Factories import __all__ as Factories_exports -from Utils import __all__ as Utils_exports - -__all__ = Factories_exports + Utils_exports diff --git a/Rpyc/changelog.txt b/Rpyc/changelog.txt deleted file mode 100644 index 698b93e..0000000 --- a/Rpyc/changelog.txt +++ /dev/null @@ -1,129 +0,0 @@ -1.2: ------ -the first 1.XX release. supports synchronous operations - -1.21: ------ -bugfix release: fixed the problem with objects that don't have a __repr__ or -__str__ - -1.6: ------ -this version never came public. added thread synchronization and events, -which allow the server to inform the client of events, without blocking -for client response. - -2.2: ------ -non backwards-compatible! 
-first 2.XX release, added asynchronous proxies and thread synchronization
-this has brought with it a new kind of server -- the threaded_server,
-which performs well on both linux and windows. the older 'events' mechanism
-was removed as asynchronous proxies are much better, and also allow
-distributed computing.
-also, added the Utils module, which provides many convenience functions.
-in version 1.XX, i just overrode __builtin__.xxx, which was something
-you might not have wanted. so now you do "from Utils import *"
-also: revised demos
-note: the selecting and simple servers are deprecated, and are there only
-for systems that don't support threads (some older flavors of unix).
-
-known bugs:
- * windows bug: the pipe parent/child don't work on windows
-
-2.22:
------
-some bug fixes to the servers, etc.
-the selecting server turned out buggy. don't use it.
-added a new demo
-
-known bugs:
- * the selecting server
- * windows bug: the Utils.remote_shell doesn't redirect the output of the
-spawned command.
-
-2.25:
------
-fixed the selecting server
-fixed a bug in download (the problem with copy-paste). thanks go to steff.
-added two new utils: upload_package and update_module. they allow you to
-upload packages to the server, and update an existing module. i don't think
-they are very useful, but what the heck.
-
-2.26:
------
-fixed a bug when the remote object does not provide __nonzero__
-
-2.30:
------
-fixed several minor bugs (mostly semantic)
-added protection for installing excepthook more than once
-added the `obtain` function, which "brings forth" a remote object
-added remote_post_mortem (debugging the traceback of a remote exception)
-added an optional callback for Async proxies (which is called when they are ready)
-therefore, the AsyncCallback mechanism was removed.
-changed demo-3 and added demo-5.
-fixed a bug: __del__ should not be synchronous
-connection objects now have a `remote_conn` property, letting you mess around with
-the remote side of the connection
-
-2.31:
------
-when you close() a connection, it releases all resources as well (this is done to
-untangle cyclic-dependencies and make the gc happy)
-thanks to that, servers now release resources better and sooner
-
-2.32:
------
-fixed a bug in __all__ of factories.py
-removed all the stuff from __init__.py (just useless)
-cleanups
-semantic corrections
-
-2.35:
------
-fixed a potential bug in Connection.close
-converted FullyDyanmicMetaClass to a function (instead of a class). metaclasses are
-too magical by themselves, so i didn't want to over-do it. this caused trouble with
-__doc__ of netproxies, but i found a way around it.
-added docstrings throughout the code
-updated the servers
-i will add an ssl-enabled server when i have time
-w00t! rpyc is getting very stable (no real bugs)
-
-2.36:
------
-added 'threaded_server' to ServerUtils. it's a mix-in you can use wherever you
-like, instead of writing a server by yourself.
-improved the logging mechanism of ServerUtils
-
-2.40:
------
-added imports to __init__.py, so you can now do "from Rpyc import *". this
-is backwards compatible however, "from Rpyc.Factories import SocketConnection"
-still works.
-cleaned a little the __all__ of Utils.py
-new feature: added 'execute' and 'namespace'. 'execute' lets you execute code
-on the remote side of the connection, and 'namespace' is the namespace in which
-'execute' evaluates.
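  (a minimal sketch of the API this entry describes, assuming a
  threaded_server on the default port; the hostname is illustrative, and
  whether `namespace` supports dict-style access is an assumption:)

      from Rpyc.Factories import SocketConnection

      conn = SocketConnection("localhost")
      conn.execute("x = 40 + 2")        # statement runs on the remote side
      print conn.namespace["x"]         # remote binding read back locally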
-added demo-6.py to show how to use it
-fixed demo-2.py (now that remote_interpreter isn't a part of Utils.__all__)
-
-2.45:
------
-cleanups: improved the unboxing of ImmDicts, some other minor things
-bugfix: PipeStream.write expected write to return the number of bytes written,
-as is the case with sockets. this is not the case with file objects, however,
-which meant the operation blocked indefinitely. thanks to rotem yaari for
-reporting the bug
-this also solves a long-time bug with windows: the pipe demo now works with
-windows. i had to stub stream.wait() and stream.is_available() on windows,
-so it's less efficient and Async wouldn't work properly, but that's life.
-changed a little the semantics of Stream and Channel
-added authentication: this will make several people happy. the auth_server
-was added, which supports authentication, so you can run the server over the
-internet. only authentication is added, not encryption. you are still encouraged
-to use SSL/VPNs. this was added because many people had trouble with python SSL.
-
-
-
diff --git a/automate-default.sh b/automate-default.sh
index c0b4b7b..3354a69 100755
--- a/automate-default.sh
+++ b/automate-default.sh
@@ -26,7 +26,7 @@ if [ -f $MONITOR_PID ] ; then
 	PID=`cat $MONITOR_PID`
 	rm -f $MONITOR_PID
 	if [ -z $PID ] ; then
-		${MONITOR_SCRIPT_ROOT}/kill.cmd.sh $PID
+		${MONITOR_SCRIPT_ROOT}/tools/kill.cmd.sh $PID
 		echo "done."
 	else
 		echo "No PID to be killed."
@@ -62,21 +62,19 @@ fi
 #TODO: should add a call to ssh-add -l to check if the keys are loaded or not.
 source ${MONITOR_SCRIPT_ROOT}/agent.sh
-#${MONITOR_SCRIPT_ROOT}/checksync.py $DATE || :
-${MONITOR_SCRIPT_ROOT}/syncwithplc.py $DATE || :
+${MONITOR_SCRIPT_ROOT}/tools/syncwithplc.py $DATE || :
 service plc restart monitor
 echo "Performing FindAll Nodes"
 #########################
 # 1. FINDBAD NODES
-${MONITOR_SCRIPT_ROOT}/findall.py $DATE || :
+${MONITOR_SCRIPT_ROOT}/commands/findall.py $DATE || :
 ps ax | grep BatchMode | grep -v grep | awk '{print $1}' | xargs -r kill || :
 # clean up stray 'locfg' processes that hang around inappropriately...
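 # (same list/filter/kill pipeline as the BatchMode line above: `grep -v grep`
 # drops the grep process itself, awk keeps only the PID column, and `xargs -r`
 # invokes kill only when at least one PID actually matched)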
ps ax | grep locfg | grep -v grep | awk '{print $1}' | xargs -r kill || : ${MONITOR_SCRIPT_ROOT}/policy.py $DATE || : -${MONITOR_SCRIPT_ROOT}/statistics/add-record.py || : curl -s 'http://summer.cs.princeton.edu/status/tabulator.cgi?table=table_nodeview&formatcsv' > /var/lib/monitor/comon/$DATE.comon.csv || : cp ${MONITOR_SCRIPT_ROOT}/monitor.log ${MONITOR_ARCHIVE_ROOT}/`date +%F-%H:%M`.monitor.log diff --git a/blacklist.py b/commands/blacklist.py similarity index 87% rename from blacklist.py rename to commands/blacklist.py index 34745c8..3ff769f 100755 --- a/blacklist.py +++ b/commands/blacklist.py @@ -43,9 +43,8 @@ def main(): loginbases_q = BlacklistRecord.getLoginbaseBlacklist() hostnames = [ h.hostname for h in hostnames_q ] loginbases = [ h.loginbase for h in loginbases_q ] - hostnames_exp = [ (h.hostname,h.date_created+timedelta(0,h.expires),h.date_created+timedelta(0,h.expires) < datetime.now() and h.expires != 0) for h in hostnames_q ] - #loginbases_exp = [ (h.loginbase,h.date_created+timedelta(0,h.expires)) for h in loginbases_q ] - loginbases_exp = [ (h.loginbase,h.date_created+timedelta(0,h.expires),h.date_created+timedelta(0,h.expires) < datetime.now() and h.expires != 0) for h in loginbases_q ] + hostnames_exp = [ (h.hostname,h.date_created+timedelta(0,h.expires)) for h in hostnames_q ] + loginbases_exp = [ (h.loginbase,h.date_created+timedelta(0,h.expires)) for h in loginbases_q ] if config.add: print "Blacklisting nodes: ", l_nodes @@ -79,9 +78,7 @@ def main(): objlist = hostnames_exp for i in objlist: - if i[2]: - print i[0], i[1], "<-- expired" - elif i[1] > datetime.now(): + if i[1] > datetime.now(): print i[0], i[1] else: print i[0] diff --git a/bootman.py b/commands/bootman.py similarity index 100% rename from bootman.py rename to commands/bootman.py diff --git a/checksync.py b/commands/checksync.py similarity index 100% rename from checksync.py rename to commands/checksync.py diff --git a/comonquery.py b/commands/comonquery.py similarity index 100% rename from comonquery.py rename to commands/comonquery.py diff --git a/findall.py b/commands/findall.py similarity index 100% rename from findall.py rename to commands/findall.py diff --git a/findbad.py b/commands/findbad.py similarity index 100% rename from findbad.py rename to commands/findbad.py diff --git a/findbadpcu.py b/commands/findbadpcu.py similarity index 100% rename from findbadpcu.py rename to commands/findbadpcu.py diff --git a/nodebad.py b/commands/nodebad.py similarity index 100% rename from nodebad.py rename to commands/nodebad.py diff --git a/nodeconfig.py b/commands/nodeconfig.py similarity index 100% rename from nodeconfig.py rename to commands/nodeconfig.py diff --git a/nodegroups.py b/commands/nodegroups.py similarity index 100% rename from nodegroups.py rename to commands/nodegroups.py diff --git a/nodeinfo.py b/commands/nodeinfo.py similarity index 100% rename from nodeinfo.py rename to commands/nodeinfo.py diff --git a/nodequery.py b/commands/nodequery.py similarity index 100% rename from nodequery.py rename to commands/nodequery.py diff --git a/pcubad.py b/commands/pcubad.py similarity index 100% rename from pcubad.py rename to commands/pcubad.py diff --git a/pcuinfo.py b/commands/pcuinfo.py similarity index 100% rename from pcuinfo.py rename to commands/pcuinfo.py diff --git a/plcquery.py b/commands/plcquery.py similarity index 100% rename from plcquery.py rename to commands/plcquery.py diff --git a/policy.py b/commands/policy.py similarity index 100% rename from policy.py rename to 
commands/policy.py diff --git a/commands/reboot.py b/commands/reboot.py new file mode 100755 index 0000000..92e0f69 --- /dev/null +++ b/commands/reboot.py @@ -0,0 +1,37 @@ +#!/usr/bin/python + +from monitor.reboot import * + +def main(): + logger.setLevel(logging.DEBUG) + ch = logging.StreamHandler() + ch.setLevel(logging.DEBUG) + formatter = logging.Formatter('LOGGER - %(message)s') + ch.setFormatter(formatter) + logger.addHandler(ch) + + try: + if "test" in sys.argv: + dryrun = True + else: + dryrun = False + + for node in sys.argv[1:]: + if node == "test": continue + + print "Rebooting %s" % node + if reboot_policy(node, True, dryrun): + print "success" + else: + print "failed" + except Exception, err: + import traceback; traceback.print_exc() + from monitor.common import email_exception + email_exception(node) + print err + +if __name__ == '__main__': + main() + f = open("/tmp/rebootlog", 'a') + f.write("reboot %s\n" % sys.argv) + f.close() diff --git a/shconfig.py b/commands/shconfig.py similarity index 100% rename from shconfig.py rename to commands/shconfig.py diff --git a/sitebad.py b/commands/sitebad.py similarity index 100% rename from sitebad.py rename to commands/sitebad.py diff --git a/siteinfo.py b/commands/siteinfo.py similarity index 100% rename from siteinfo.py rename to commands/siteinfo.py diff --git a/siteleave.py b/commands/siteleave.py similarity index 100% rename from siteleave.py rename to commands/siteleave.py diff --git a/syncwithplc.py b/commands/syncwithplc.py similarity index 100% rename from syncwithplc.py rename to commands/syncwithplc.py diff --git a/model.txt b/docs/model.txt similarity index 100% rename from model.txt rename to docs/model.txt diff --git a/getsshkeys.py b/getsshkeys.py deleted file mode 100755 index 7932ab3..0000000 --- a/getsshkeys.py +++ /dev/null @@ -1,189 +0,0 @@ -#!/usr/bin/python - -import os -import sys -import string -import time -import xml, xmlrpclib -try: - from monitor import config - auth = {'Username' : config.API_AUTH_USER, - 'AuthMethod' : "password", - 'AuthString' : config.API_AUTH_PASSWORD} -except: - import traceback - print traceback.print_exc() - auth = {'AuthMethod' : "anonymous"} - -args = {} -args['known_hosts'] = os.environ['HOME'] + os.sep + ".ssh" + os.sep + "known_hosts" -try: - from monitor import config - args['XMLRPC_SERVER'] = config.API_SERVER -except: - args['XMLRPC_SERVER'] = 'https://boot.planet-lab.org/PLCAPI/' - print "Using default API server %s" % args['XMLRPC_SERVER'] - -class SSHKnownHosts: - def __init__(self, args = args): - self.args = args - self.read_knownhosts() - self.auth = auth - self.api = xmlrpclib.Server(args['XMLRPC_SERVER'], verbose=False, allow_none=True) - self.nodenetworks = {} - - def _split_kh_entry(self, line): - s = line.split(' ') - try: - (host,ip) = s[0].split(',') - except: - ip = s[0] - host = "" - - key = ' '.join(s[1:3]) - comment = ' '.join(s[3:]) - return (host, ip, key, comment) - - def _get_index(self, host, ip): - index = "" - if host is not "": - index = "%s,%s" % (host,ip) - else: - index = ip - return index - - def read_knownhosts(self): - kh_read = open(self.args["known_hosts"], 'r') - self.pl_keys = {} - self.other_keys = {} - for line in kh_read: - (host, ip, key, comment) = self._split_kh_entry(line[:-1]) - rec = { self._get_index(host, ip) : "%s %s" % (key, comment) } - if 'PlanetLab' in comment: - self.pl_keys.update(rec) - else: - self.other_keys.update(rec) - - #for i in self.pl_keys: - # print i - # print self.pl_keys[i] - - return - - def write(self): - 
self.write_knownhosts() - - def write_knownhosts(self): - f = open(self.args['known_hosts'], 'w') - for index in self.pl_keys: - print >>f, "%s %s" % (index, self.pl_keys[index]) - for index in self.other_keys: - print >>f, "%s %s" % (index, self.other_keys[index]) - f.close() - - def updateAll(self): - l_nodes = self.getNodes() - d_nodes = {} - nokey_list = [] - for node in l_nodes: - name = node['hostname'] - d_nodes[name] = node - - for host in d_nodes: - node = d_nodes[host] - (host, ip, key, comment) = self._record_from_node(node, nokey_list) - rec = { "%s,%s" % (host,ip) : "%s %s" % (key, comment) } - self.pl_keys.update(rec) - - return nokey_list - - def delete(self, host): - node = self.getNodes(host) - if len(node) > 0: - (host, ip, _, _) = self._record_from_node(node[0]) - index = "%s,%s" % (host,ip) - if index in self.pl_keys: - del self.pl_keys[index] - if index in self.other_keys: - del self.other_keys[index] - return node - - def updateDirect(self, host): - cmd = os.popen("/usr/bin/ssh-keyscan -t rsa %s 2>/dev/null" % host) - line = cmd.read() - (h, ip, key, comment) = self._split_kh_entry(line[:-1]) - node = self.getNodes(host) - (host2, ip2, x, x) = self._record_from_node(node[0]) - rec = { self._get_index(host2, ip2) : "%s %s" % (key, "DIRECT") } - - self.delete(host) - self.other_keys.update(rec) - - def update(self, host): - node = self.delete(host) - #node = self.getNodes(host) - if node is not []: - ret = self._record_from_node(node[0]) - (host, ip, key, comment) = ret - if ip == None: - self.updateDirect(host) - else: - rec = { "%s,%s" % (host,ip) : "%s %s" % (key, comment) } - self.pl_keys.update(rec) - - def getNodes(self, host=None): - if type(host) == type(""): host = [host] - - # get the node(s) info - nodes = self.api.GetNodes(self.auth,host,["hostname","ssh_rsa_key","interface_ids"]) - - # for each node's node network, update the self.nodenetworks cache - nodenetworks = [] - for node in nodes: - for net in node["interface_ids"]: - nodenetworks.append(net) - - plcnodenetworks = self.api.GetInterfaces(self.auth,nodenetworks,["interface_id","ip"]) - for n in plcnodenetworks: - self.nodenetworks[n["interface_id"]]=n - return nodes - - def _record_from_node(self, node, nokey_list=None): - host = node['hostname'] - key = node['ssh_rsa_key'] - - nodenetworks = node['interface_ids'] - if len(nodenetworks)==0: return (host, None, None, None) - - # the [0] subscript to node['interface_ids'] means - # that this function wont work with multihomed nodes - l_nw = self.nodenetworks.get(nodenetworks[0],None) - if l_nw is None: return (host, None, None, None) - ip = l_nw['ip'] - - if key == None: - if nokey_list is not None: nokey_list += [node] - return (host, ip, None, None) - - key = key.strip() - # TODO: check for '==' at end of key. - if len(key) > 0 and key[-1] != '=': - print "Host with corrupt key! 
for %s %s" % (node['boot_state'], node['hostname']) - - s_date = time.strftime("%Y/%m/%d_%H:%M:%S",time.gmtime(time.time())) - #rec = { "%s,%s" % (host,ip) : "%s %s" % (key, "PlanetLab_%s" % (s_date)) } - #return rec - return (host, ip, key, "PlanetLab_%s" % s_date) - - -def main(hosts): - k = SSHKnownHosts() - if len (hosts) > 0: - for host in hosts: - k.updateDirect(host) - else: - k.updateAll() - k.write() - -if __name__ == '__main__': - main(sys.argv[1:]) diff --git a/monitor-default.conf b/monitor-default.conf index 7414d41..9a5a52f 100644 --- a/monitor-default.conf +++ b/monitor-default.conf @@ -35,6 +35,8 @@ support_email=support@planet-lab.org # mailing list copied on all out-going messages cc_email=monitor-list@lists.planet-lab.org +zabbix_enabled=0 + [monitordatabase] monitor_dburi=postgres://user:passwd@localhost:5432/infovacuum zabbix_dburi=postgres://zabbixuser:<...>@localhost:5432/zabbix diff --git a/monitor-server.init b/monitor-server.init index aed298c..32d1209 100644 --- a/monitor-server.init +++ b/monitor-server.init @@ -75,10 +75,11 @@ function check_gadget_config () done } -function check_monitor_schema_and_data() +function check_monitor_schema_and_data_init() { - # NOTE: call create_all() to setup the database from the info model. - python -c "from monitor.database.info.model import *; from elixir import create_all; create_all()" + # from monitor.functions + check_monitor_schema_and_data + $MONITORPATH/config.d/init-bootman-sequence.py } @@ -247,7 +248,7 @@ case "$1" in dialog "$MESSAGE" fi - check_monitor_schema_and_data + check_monitor_schema_and_data_init # create /etc/httpd/conf.d/monitorweb.conf create_httpd_conf diff --git a/monitor/bootman.py b/monitor/bootman.py index b7ec58c..eac2761 100755 --- a/monitor/bootman.py +++ b/monitor/bootman.py @@ -11,7 +11,7 @@ import traceback import subprocess from sets import Set -from monitor.getsshkeys import SSHKnownHosts +from monitor.util.sshknownhosts import SSHKnownHosts from monitor.Rpyc import SocketConnection, Async from monitor.Rpyc.Utils import * @@ -825,46 +825,5 @@ def restore_basic(sitehist, hostname, config=None, forced_action=None): return bootman_action -# MAIN ------------------------------------------------------------------- - -def main(): - from monitor import parser as parsermodule - parser = parsermodule.getParser() - - parser.set_defaults(child=False, collect=False, nosetup=False, verbose=False, - force=None, quiet=False) - parser.add_option("", "--child", dest="child", action="store_true", - help="This is the child mode of this process.") - parser.add_option("", "--force", dest="force", metavar="boot_state", - help="Force a boot state passed to BootManager.py.") - parser.add_option("", "--quiet", dest="quiet", action="store_true", - help="Extra quiet output messages.") - parser.add_option("", "--verbose", dest="verbose", action="store_true", - help="Extra debug output messages.") - parser.add_option("", "--nonet", dest="nonet", action="store_true", - help="Do not setup the network, use existing log files to re-run a test pass.") - parser.add_option("", "--collect", dest="collect", action="store_true", - help="No action, just collect dmesg, and bm.log") - parser.add_option("", "--nosetup", dest="nosetup", action="store_true", - help="Do not perform the orginary setup phase.") - - parser = parsermodule.getParser(['nodesets', 'defaults'], parser) - config = parsermodule.parse_args(parser) - - if config.nodelist: - nodes = config.getListFromFile(config.nodelist) - elif config.node: - nodes = [ config.node 
] - else: - parser.print_help() - sys.exit(1) - - for node in nodes: - # get sitehist - lb = plccache.plcdb_hn2lb[node] - sitehist = SiteInterface.get_or_make(loginbase=lb) - #reboot(node, config) - restore(sitehist, node, config=None, forced_action=None) - if __name__ == "__main__": - main() + print "ERROR: Can not execute module as a command! Please use commands/%s.py" % os.path.splitext(__file__)[0] diff --git a/monitor/database/info/__init__.py b/monitor/database/info/__init__.py index 03a1b74..bab03f5 100644 --- a/monitor/database/info/__init__.py +++ b/monitor/database/info/__init__.py @@ -45,4 +45,5 @@ from monitor.database.info.action import * from monitor.database.info.findbad import * from monitor.database.info.history import * from monitor.database.info.plc import * +from monitor.database.info.version import * setup_all() diff --git a/monitor/database/info/version.py b/monitor/database/info/version.py new file mode 100644 index 0000000..4f9041b --- /dev/null +++ b/monitor/database/info/version.py @@ -0,0 +1,17 @@ +from elixir import Entity, Field +from elixir import Integer +from elixir import options_defaults, using_options, setup_all + +from monitor.database.dborm import mon_metadata, mon_session +__metadata__ = mon_metadata +__session__ = mon_session + +from monitor.monitor_version import monitor_version + +major_version = int(monitor_version.split('.')[0]) +minor_version = int(monitor_version.split('.')[-1]) + +class DBVersion(Entity): + major = Field(Integer, required=True, default=major_version) + minor = Field(Integer, required=True, default=minor_version) + diff --git a/monitor/monitor_version.py b/monitor/monitor_version.py new file mode 100644 index 0000000..e55af94 --- /dev/null +++ b/monitor/monitor_version.py @@ -0,0 +1,4 @@ + +monitor_version="3.0" + +pcucontrol_version=monitor_version diff --git a/monitor/reboot.py b/monitor/reboot.py index 15d5c52..cd49a59 100755 --- a/monitor/reboot.py +++ b/monitor/reboot.py @@ -108,37 +108,6 @@ def reboot_policy(nodename, continue_probe, dryrun): print "return true" return True -def main(): - logger.setLevel(logging.DEBUG) - ch = logging.StreamHandler() - ch.setLevel(logging.DEBUG) - formatter = logging.Formatter('LOGGER - %(message)s') - ch.setFormatter(formatter) - logger.addHandler(ch) +if __name__ == "__main__": + print "ERROR: Can not execute module as a command! 
Please use commands/%s.py" % os.path.splitext(__file__)[0] - try: - if "test" in sys.argv: - dryrun = True - else: - dryrun = False - - for node in sys.argv[1:]: - if node == "test": continue - - print "Rebooting %s" % node - if reboot_policy(node, True, dryrun): - print "success" - else: - print "failed" - except Exception, err: - import traceback; traceback.print_exc() - from monitor.common import email_exception - email_exception(node) - print err - -if __name__ == '__main__': - logger = logging.getLogger("monitor") - main() - f = open("/tmp/rebootlog", 'a') - f.write("reboot %s\n" % sys.argv) - f.close() diff --git a/monitor/util/__init__.py b/monitor/util/__init__.py index 98802a3..c5a3dfd 100644 --- a/monitor/util/__init__.py +++ b/monitor/util/__init__.py @@ -1,4 +1,3 @@ -__all__=["writepid","removepid","daemonize"] import time import math diff --git a/monitor/getsshkeys.py b/monitor/util/sshknownhosts.py similarity index 96% rename from monitor/getsshkeys.py rename to monitor/util/sshknownhosts.py index 686252b..9fbedaa 100755 --- a/monitor/getsshkeys.py +++ b/monitor/util/sshknownhosts.py @@ -175,15 +175,5 @@ class SSHKnownHosts: #return rec return (host, ip, key, "PlanetLab_%s" % s_date) - -def main(hosts): - k = SSHKnownHosts() - if len (hosts) > 0: - for host in hosts: - k.updateDirect(host) - else: - k.updateAll() - k.write() - -if __name__ == '__main__': - main(sys.argv[1:]) +if __name__ == "__main__": + print "ERROR: Can not execute module as a command! Please use tools/getsshkey.py" diff --git a/rt_db b/rt_db deleted file mode 100644 index eba79ec..0000000 --- a/rt_db +++ /dev/null @@ -1,4 +0,0 @@ -RT_DB_HOST=rt.planet-lab.org -RT_DB_USER=rt_ro -RT_DB_PASSWORD=rtropassword -RT_DB_NAME=rt3 diff --git a/setup.py b/setup.py index 6dd7d31..a9744ee 100644 --- a/setup.py +++ b/setup.py @@ -2,46 +2,44 @@ from distutils.core import setup -packages=[ 'monitor', - 'monitor.database', - 'monitor.Rpyc', - 'monitor.database.zabbixapi', - 'monitor.database.info', - 'monitor.sources', - 'monitor.util', - 'monitor.wrapper' ] +from monitor.monitor_version import * + +packages=['monitor', + 'monitor.database', + 'monitor.Rpyc', + 'monitor.database.zabbixapi', + 'monitor.database.info', + 'monitor.sources', + 'monitor.util', + 'monitor.wrapper'] print packages setup(name='MonitorModule', - version='2.0', + version=monitor_version, description='Monitor Utility Module', author='Stephen Soltesz', author_email='soltesz@cs.princeton.edu', url='http://www.planet-lab.org', - packages=packages, -) + packages=packages) packages=['pcucontrol', - 'pcucontrol.util', - 'pcucontrol.transports', - 'pcucontrol.transports.ssh', - 'pcucontrol.transports.pyssh', - 'pcucontrol.models', - 'pcucontrol.models.hpilo', - 'pcucontrol.models.hpilo.iloxml', - 'pcucontrol.models.intelamt', - 'pcucontrol.models.intelamt', - - ] + 'pcucontrol.util', + 'pcucontrol.transports', + 'pcucontrol.transports.ssh', + 'pcucontrol.transports.pyssh', + 'pcucontrol.models', + 'pcucontrol.models.hpilo', + 'pcucontrol.models.hpilo.iloxml', + 'pcucontrol.models.intelamt', + 'pcucontrol.models.intelamt'] # TODO: add data dir for intelamt and hpilo stuff print packages setup(name='PCUControlModule', - version='2.0', + version=pcucontrol_version, description='PCU Control Module', author='Stephen Soltesz', author_email='soltesz@cs.princeton.edu', url='http://www.planet-lab.org', - packages=packages, -) + packages=packages) diff --git a/automate/automate.py b/tools/automate/automate.py similarity index 100% rename from automate/automate.py 
rename to tools/automate/automate.py diff --git a/automate/fetch.py b/tools/automate/fetch.py similarity index 100% rename from automate/fetch.py rename to tools/automate/fetch.py diff --git a/automate/query.py b/tools/automate/query.py similarity index 100% rename from automate/query.py rename to tools/automate/query.py diff --git a/automate/vxargs.py b/tools/automate/vxargs.py similarity index 100% rename from automate/vxargs.py rename to tools/automate/vxargs.py diff --git a/extra/nodeaction.py b/tools/extra/nodeaction.py similarity index 100% rename from extra/nodeaction.py rename to tools/extra/nodeaction.py diff --git a/extra/nodediff.py b/tools/extra/nodediff.py similarity index 100% rename from extra/nodediff.py rename to tools/extra/nodediff.py diff --git a/extra/nodehistory.py b/tools/extra/nodehistory.py similarity index 100% rename from extra/nodehistory.py rename to tools/extra/nodehistory.py diff --git a/extra/nodesets.py b/tools/extra/nodesets.py similarity index 100% rename from extra/nodesets.py rename to tools/extra/nodesets.py diff --git a/extra/pcutest.py b/tools/extra/pcutest.py similarity index 100% rename from extra/pcutest.py rename to tools/extra/pcutest.py diff --git a/extra/pkl2php.py b/tools/extra/pkl2php.py similarity index 100% rename from extra/pkl2php.py rename to tools/extra/pkl2php.py diff --git a/extra/prep_power_users.py b/tools/extra/prep_power_users.py similarity index 100% rename from extra/prep_power_users.py rename to tools/extra/prep_power_users.py diff --git a/extra/printpdb.py b/tools/extra/printpdb.py similarity index 100% rename from extra/printpdb.py rename to tools/extra/printpdb.py diff --git a/extra/showlatlon.py b/tools/extra/showlatlon.py similarity index 100% rename from extra/showlatlon.py rename to tools/extra/showlatlon.py diff --git a/extra/template.py b/tools/extra/template.py similarity index 100% rename from extra/template.py rename to tools/extra/template.py diff --git a/tools/getsshkeys.py b/tools/getsshkeys.py new file mode 100755 index 0000000..0104deb --- /dev/null +++ b/tools/getsshkeys.py @@ -0,0 +1,19 @@ +#!/usr/bin/python + +import sys +sys.path.append('.') +sys.path.append('..') + +from monitor.util.file import * + +def main(hosts): + k = SSHKnownHosts() + if len (hosts) > 0: + for host in hosts: + k.updateDirect(host) + else: + k.updateAll() + k.write() + +if __name__ == '__main__': + main(sys.argv[1:]) diff --git a/kill.cmd.sh b/tools/kill.cmd.sh similarity index 100% rename from kill.cmd.sh rename to tools/kill.cmd.sh diff --git a/nagios/plc2nagios.py b/tools/plc2nagios.py similarity index 100% rename from nagios/plc2nagios.py rename to tools/plc2nagios.py diff --git a/setup-agent.sh b/tools/setup-agent.sh similarity index 100% rename from setup-agent.sh rename to tools/setup-agent.sh diff --git a/testapi.py b/tools/testapi.py similarity index 100% rename from testapi.py rename to tools/testapi.py diff --git a/timeout.pl b/tools/timeout.pl similarity index 100% rename from timeout.pl rename to tools/timeout.pl diff --git a/web/monitorweb-httpd.conf b/web/monitorweb-httpd.conf index 1ee0e3b..bf28927 100644 --- a/web/monitorweb-httpd.conf +++ b/web/monitorweb-httpd.conf @@ -1,36 +1,20 @@ -NameVirtualHost * - - - ServerName pl-virtual-06.cs.princeton.edu - ServerAdmin soltesz@cs.princeton.edu - UseCanonicalName Off - ServerSignature Off - - #DocumentRoot /usr/share/monitor/web/MonitorWeb/monitorweb - - # - # Options Indexes FollowSymLinks - # AllowOverride None - # Order allow,deny - # Allow from all - # - - Errorlog 
/var/log/httpd/monitorwebapp-error_log
-	Customlog /var/log/httpd/monitorwebapp-access_log common
-
-	AddDefaultCharset utf-8
-
-	#NOTE: This doesn't work as expected.
-	# Load everything out of the DocumentRoot that is static
-	# ProxyPass /monitor/static !
-
-	ProxyPass /tg_js http://127.0.0.1:8080/tg_js
-	ProxyPassReverse /tg_js http://127.0.0.1:8080/tg_js
-
-	ProxyPass /monitor http://127.0.0.1:8080
-	ProxyPassReverse /monitor http://127.0.0.1:8080
-
-	ProxyPreserveHost On
-	ProxyRequests Off
-
-
+# NOTE: I've tried other means of redirection, including mod_rewrite, but did
+# not have any success. The approach below is not ideal, because it does not
+# keep a non-ssl session as non-ssl. But it works.
+
+# NOTE: redirect the path without a trailing '/' to the path with one.
+Redirect /monitor /monitor/
+
+# NOTE: this directive strips '/monitor/' from the requested path and pastes
+# the remaining part onto the end of the ProxyPass url below. All TG urls
+# should be relative to their current position, or use the absolute path
+# that includes /monitor/ at the beginning.
+# TODO: make the location configurable.
+
+<Location /monitor/>
+	#LogLevel debug
+	#Errorlog /var/log/httpd/monitorwebapp-error_log
+	#Customlog /var/log/httpd/monitorwebapp-access_log common
+
+	ProxyPass http://127.0.0.1:8082/
+	ProxyPassReverse http://127.0.0.1:8082/
+</Location>
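To sanity-check the mapping above (the hostname is a placeholder; assumes the
TurboGears app is answering on 127.0.0.1:8082 behind this proxy):

    import urllib2
    # Redirect sends /monitor to /monitor/; the Location block then strips
    # the prefix, so /monitor/query is proxied to http://127.0.0.1:8082/query
    response = urllib2.urlopen("http://plc.example.org/monitor/")
    print response.code, response.geturl()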
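Similarly, the DBVersion entity introduced above in
monitor/database/info/version.py enables a schema-vs-code version check at
startup. A minimal sketch (the query/flush idiom is standard Elixir; whether
monitor wires it up exactly this way is an assumption):

    from monitor.database.dborm import mon_session
    from monitor.database.info.version import DBVersion, major_version, minor_version

    row = DBVersion.query.first()
    if row is None:
        row = DBVersion()    # Field defaults capture the running code's version
        mon_session.flush()
    if (row.major, row.minor) != (major_version, minor_version):
        print "WARNING: db version %d.%d != code version %d.%d" % (
              row.major, row.minor, major_version, minor_version)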