From 9bb5213371c2a3075adaed95d0e1e6ef5a58e7d6 Mon Sep 17 00:00:00 2001 From: Stephen Soltesz Date: Wed, 16 Nov 2011 17:41:53 +0000 Subject: [PATCH] Add bw, dns, and uptime checks. Add support library for DNS checks. Add update.sh script for auto-updates. Fix sar2graphite.py : checks for sysstat rpm Improve other scripts. --- web/collect/client/DNS/.cvsignore | 0 web/collect/client/DNS/Base.py | 444 +++++++++++++++++ web/collect/client/DNS/Base.pyc | Bin 0 -> 10073 bytes web/collect/client/DNS/Class.py | 57 +++ web/collect/client/DNS/Class.pyc | Bin 0 -> 795 bytes web/collect/client/DNS/Lib.py | 725 ++++++++++++++++++++++++++++ web/collect/client/DNS/Lib.pyc | Bin 0 -> 22633 bytes web/collect/client/DNS/Opcode.py | 52 ++ web/collect/client/DNS/Opcode.pyc | Bin 0 -> 841 bytes web/collect/client/DNS/Status.py | 66 +++ web/collect/client/DNS/Status.pyc | Bin 0 -> 1143 bytes web/collect/client/DNS/Type.py | 82 ++++ web/collect/client/DNS/Type.pyc | Bin 0 -> 1218 bytes web/collect/client/DNS/__init__.py | 78 +++ web/collect/client/DNS/__init__.pyc | Bin 0 -> 469 bytes web/collect/client/DNS/lazy.py | 82 ++++ web/collect/client/DNS/lazy.pyc | Bin 0 -> 1926 bytes web/collect/client/DNS/win32dns.py | 144 ++++++ web/collect/client/check_bw.py | 193 ++++++++ web/collect/client/check_dns.py | 192 ++++++++ web/collect/client/check_uptime.py | 177 +++++++ web/collect/client/update.sh | 41 ++ 22 files changed, 2333 insertions(+) create mode 100644 web/collect/client/DNS/.cvsignore create mode 100644 web/collect/client/DNS/Base.py create mode 100644 web/collect/client/DNS/Base.pyc create mode 100644 web/collect/client/DNS/Class.py create mode 100644 web/collect/client/DNS/Class.pyc create mode 100644 web/collect/client/DNS/Lib.py create mode 100644 web/collect/client/DNS/Lib.pyc create mode 100644 web/collect/client/DNS/Opcode.py create mode 100644 web/collect/client/DNS/Opcode.pyc create mode 100644 web/collect/client/DNS/Status.py create mode 100644 web/collect/client/DNS/Status.pyc create mode 100644 web/collect/client/DNS/Type.py create mode 100644 web/collect/client/DNS/Type.pyc create mode 100644 web/collect/client/DNS/__init__.py create mode 100644 web/collect/client/DNS/__init__.pyc create mode 100644 web/collect/client/DNS/lazy.py create mode 100644 web/collect/client/DNS/lazy.pyc create mode 100644 web/collect/client/DNS/win32dns.py create mode 100755 web/collect/client/check_bw.py create mode 100755 web/collect/client/check_dns.py create mode 100755 web/collect/client/check_uptime.py create mode 100644 web/collect/client/update.sh diff --git a/web/collect/client/DNS/.cvsignore b/web/collect/client/DNS/.cvsignore new file mode 100644 index 0000000..e69de29 diff --git a/web/collect/client/DNS/Base.py b/web/collect/client/DNS/Base.py new file mode 100644 index 0000000..22e6bf4 --- /dev/null +++ b/web/collect/client/DNS/Base.py @@ -0,0 +1,444 @@ +""" +$Id: Base.py,v 1.12.2.15 2011/03/19 22:15:01 customdesigned Exp $ + +This file is part of the pydns project. +Homepage: http://pydns.sourceforge.net + +This code is covered by the standard Python License. See LICENSE for details. + + Base functionality. Request and Response classes, that sort of thing. +""" + +import socket, string, types, time, select +import Type,Class,Opcode +import asyncore +# +# This random generator is used for transaction ids and port selection. This +# is important to prevent spurious results from lost packets, and malicious +# cache poisoning. 
This doesn't matter if you are behind a caching nameserver +# or your app is a primary DNS server only. To install your own generator, +# replace DNS.Base.random. SystemRandom uses /dev/urandom or similar source. +# +try: + from random import SystemRandom + random = SystemRandom() +except: + import random + +class DNSError(Exception): pass + +# Lib uses DNSError, so import after defining. +import Lib + +defaults= { 'protocol':'udp', 'port':53, 'opcode':Opcode.QUERY, + 'qtype':Type.A, 'rd':1, 'timing':1, 'timeout': 30, + 'server_rotate': 0 } + +defaults['server']=[] + +def ParseResolvConf(resolv_path="/etc/resolv.conf"): + "parses the /etc/resolv.conf file and sets defaults for name servers" + global defaults + lines=open(resolv_path).readlines() + for line in lines: + line = string.strip(line) + if not line or line[0]==';' or line[0]=='#': + continue + fields=string.split(line) + if len(fields) < 2: + continue + if fields[0]=='domain' and len(fields) > 1: + defaults['domain']=fields[1] + if fields[0]=='search': + pass + if fields[0]=='options': + pass + if fields[0]=='sortlist': + pass + if fields[0]=='nameserver': + defaults['server'].append(fields[1]) + +def DiscoverNameServers(): + import sys + if sys.platform in ('win32', 'nt'): + import win32dns + defaults['server']=win32dns.RegistryResolve() + else: + return ParseResolvConf() + +class DnsRequest: + """ high level Request object """ + def __init__(self,*name,**args): + self.donefunc=None + self.async=None + self.defaults = {} + self.argparse(name,args) + self.defaults = self.args + self.tid = 0 + + def argparse(self,name,args): + if not name and self.defaults.has_key('name'): + args['name'] = self.defaults['name'] + if type(name) is types.StringType: + args['name']=name + else: + if len(name) == 1: + if name[0]: + args['name']=name[0] + if defaults['server_rotate'] and \ + type(defaults['server']) == types.ListType: + defaults['server'] = defaults['server'][1:]+defaults['server'][:1] + for i in defaults.keys(): + if not args.has_key(i): + if self.defaults.has_key(i): + args[i]=self.defaults[i] + else: + args[i]=defaults[i] + if type(args['server']) == types.StringType: + args['server'] = [args['server']] + self.args=args + + def socketInit(self,a,b): + self.s = socket.socket(a,b) + + def processUDPReply(self): + if self.timeout > 0: + r,w,e = select.select([self.s],[],[],self.timeout) + if not len(r): + raise DNSError, 'Timeout' + (self.reply, self.from_address) = self.s.recvfrom(65535) + self.time_finish=time.time() + self.args['server']=self.ns + return self.processReply() + + def _readall(self,f,count): + res = f.read(count) + while len(res) < count: + if self.timeout > 0: + # should we restart timeout everytime we get a dribble of data? 
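# A minimal sketch of what "replace DNS.Base.random" above means in practice,
# assuming the DNS package added by this patch is importable (Python 2, like
# the rest of this code).  The replacement only has to provide randint(),
# which getSource() and req() call for source ports and transaction ids.
import random as stdlib_random
import DNS
import DNS.Base

# Hypothetical: a seeded generator, useful only for reproducing a test run.
# Never do this in production -- predictable ports and ids re-open the
# cache-poisoning window that the SystemRandom default closes.
DNS.Base.random = stdlib_random.Random(12345)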
+ rem = self.time_start + self.timeout - time.time() + if rem <= 0: raise DNSError,'Timeout' + self.s.settimeout(rem) + buf = f.read(count - len(res)) + if not buf: + raise DNSError,'incomplete reply - %d of %d read' % (len(res),count) + res += buf + return res + + def processTCPReply(self): + if self.timeout > 0: + self.s.settimeout(self.timeout) + else: + self.s.settimeout(None) + f = self.s.makefile('r') + header = self._readall(f,2) + count = Lib.unpack16bit(header) + self.reply = self._readall(f,count) + self.time_finish=time.time() + self.args['server']=self.ns + return self.processReply() + + def processReply(self): + self.args['elapsed']=(self.time_finish-self.time_start)*1000 + u = Lib.Munpacker(self.reply) + r=Lib.DnsResult(u,self.args) + r.args=self.args + #self.args=None # mark this DnsRequest object as used. + return r + #### TODO TODO TODO #### +# if protocol == 'tcp' and qtype == Type.AXFR: +# while 1: +# header = f.read(2) +# if len(header) < 2: +# print '========== EOF ==========' +# break +# count = Lib.unpack16bit(header) +# if not count: +# print '========== ZERO COUNT ==========' +# break +# print '========== NEXT ==========' +# reply = f.read(count) +# if len(reply) != count: +# print '*** Incomplete reply ***' +# break +# u = Lib.Munpacker(reply) +# Lib.dumpM(u) + + def getSource(self): + "Pick random source port to avoid DNS cache poisoning attack." + while True: + try: + source_port = random.randint(1024,65535) + self.s.bind(('', source_port)) + break + except socket.error, msg: + # Error 98, 'Address already in use' + if msg[0] != 98: raise + + def conn(self): + self.getSource() + self.s.connect((self.ns,self.port)) + + def req(self,*name,**args): + " needs a refactoring " + self.argparse(name,args) + #if not self.args: + # raise DNSError,'reinitialize request before reuse' + protocol = self.args['protocol'] + self.port = self.args['port'] + self.tid = random.randint(0,65535) + self.timeout = self.args['timeout']; + opcode = self.args['opcode'] + rd = self.args['rd'] + server=self.args['server'] + if type(self.args['qtype']) == types.StringType: + try: + qtype = getattr(Type, string.upper(self.args['qtype'])) + except AttributeError: + raise DNSError,'unknown query type' + else: + qtype=self.args['qtype'] + if not self.args.has_key('name'): + print self.args + raise DNSError,'nothing to lookup' + qname = self.args['name'] + if qtype == Type.AXFR: + print 'Query type AXFR, protocol forced to TCP' + protocol = 'tcp' + #print 'QTYPE %d(%s)' % (qtype, Type.typestr(qtype)) + m = Lib.Mpacker() + # jesus. keywords and default args would be good. TODO. + m.addHeader(self.tid, + 0, opcode, 0, 0, rd, 0, 0, 0, + 1, 0, 0, 0) + m.addQuestion(qname, qtype, Class.IN) + self.request = m.getbuf() + try: + if protocol == 'udp': + self.sendUDPRequest(server) + else: + self.sendTCPRequest(server) + except socket.error, reason: + raise DNSError, reason + if self.async: + return None + else: + if not self.response: + raise DNSError,'no working nameservers found' + return self.response + + def sendUDPRequest(self, server): + "refactor me" + self.response=None + for self.ns in server: + #print "trying udp",self.ns + try: + if self.ns.count(':'): + if hasattr(socket,'has_ipv6') and socket.has_ipv6: + self.socketInit(socket.AF_INET6, socket.SOCK_DGRAM) + else: continue + else: + self.socketInit(socket.AF_INET, socket.SOCK_DGRAM) + try: + # TODO. 
Handle timeouts &c correctly (RFC) + self.time_start=time.time() + self.conn() + if not self.async: + self.s.send(self.request) + r=self.processUDPReply() + # Since we bind to the source port and connect to the + # destination port, we don't need to check that here, + # but do make sure it's actually a DNS request that the + # packet is in reply to. + while r.header['id'] != self.tid \ + or self.from_address[1] != self.port: + r=self.processUDPReply() + self.response = r + # FIXME: check waiting async queries + finally: + if not self.async: + self.s.close() + except socket.error: + continue + break + + def sendTCPRequest(self, server): + " do the work of sending a TCP request " + self.response=None + for self.ns in server: + #print "trying tcp",self.ns + try: + if self.ns.count(':'): + if hasattr(socket,'has_ipv6') and socket.has_ipv6: + self.socketInit(socket.AF_INET6, socket.SOCK_STREAM) + else: continue + else: + self.socketInit(socket.AF_INET, socket.SOCK_STREAM) + try: + # TODO. Handle timeouts &c correctly (RFC) + self.time_start=time.time() + self.conn() + buf = Lib.pack16bit(len(self.request))+self.request + # Keep server from making sendall hang + self.s.setblocking(0) + # FIXME: throws WOULDBLOCK if request too large to fit in + # system buffer + self.s.sendall(buf) + # SHUT_WR breaks blocking IO with google DNS (8.8.8.8) + #self.s.shutdown(socket.SHUT_WR) + r=self.processTCPReply() + if r.header['id'] == self.tid: + self.response = r + break + finally: + self.s.close() + except socket.error: + continue + +#class DnsAsyncRequest(DnsRequest): +class DnsAsyncRequest(DnsRequest,asyncore.dispatcher_with_send): + " an asynchronous request object. out of date, probably broken " + def __init__(self,*name,**args): + DnsRequest.__init__(self, *name, **args) + # XXX todo + if args.has_key('done') and args['done']: + self.donefunc=args['done'] + else: + self.donefunc=self.showResult + #self.realinit(name,args) # XXX todo + self.async=1 + def conn(self): + self.getSource() + self.connect((self.ns,self.port)) + self.time_start=time.time() + if self.args.has_key('start') and self.args['start']: + asyncore.dispatcher.go(self) + def socketInit(self,a,b): + self.create_socket(a,b) + asyncore.dispatcher.__init__(self) + self.s=self + def handle_read(self): + if self.args['protocol'] == 'udp': + self.response=self.processUDPReply() + if self.donefunc: + apply(self.donefunc,(self,)) + def handle_connect(self): + self.send(self.request) + def handle_write(self): + pass + def showResult(self,*s): + self.response.show() + +# +# $Log: Base.py,v $ +# Revision 1.12.2.15 2011/03/19 22:15:01 customdesigned +# Added rotation of name servers - SF Patch ID: 2795929 +# +# Revision 1.12.2.14 2011/03/17 03:46:03 customdesigned +# Simple test for google DNS with tcp +# +# Revision 1.12.2.13 2011/03/17 03:08:03 customdesigned +# Use blocking IO with timeout for TCP replies. +# +# Revision 1.12.2.12 2011/03/16 17:50:00 customdesigned +# Fix non-blocking TCP replies. (untested) +# +# Revision 1.12.2.11 2010/01/02 16:31:23 customdesigned +# Handle large TCP replies (untested). +# +# Revision 1.12.2.10 2008/08/01 03:58:03 customdesigned +# Don't try to close socket when never opened. +# +# Revision 1.12.2.9 2008/08/01 03:48:31 customdesigned +# Fix more breakage from port randomization patch. Support Ipv6 queries. +# +# Revision 1.12.2.8 2008/07/31 18:22:59 customdesigned +# Wait until tcp response at least starts coming in. +# +# Revision 1.12.2.7 2008/07/28 01:27:00 customdesigned +# Check configured port. 
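# A short sketch of the synchronous request path defined above, assuming the
# package from this patch is importable (Python 2).  req() selects UDP or TCP
# from the 'protocol' argument and returns a Lib.DnsResult.
import DNS

DNS.Base.DiscoverNameServers()            # fills defaults['server'] from resolv.conf
r = DNS.Base.DnsRequest(name='example.com', qtype='A').req(timeout=10)
for a in r.answers:
    print a['name'], a['ttl'], a['typename'], a['data']

# The same query over TCP, handled by sendTCPRequest() above:
r_tcp = DNS.Base.DnsRequest(name='example.com', qtype='A').req(protocol='tcp')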
+# +# Revision 1.12.2.6 2008/07/28 00:17:10 customdesigned +# Randomize source ports. +# +# Revision 1.12.2.5 2008/07/24 20:10:55 customdesigned +# Randomize tid in requests, and check in response. +# +# Revision 1.12.2.4 2007/05/22 20:28:31 customdesigned +# Missing import Lib +# +# Revision 1.12.2.3 2007/05/22 20:25:52 customdesigned +# Use socket.inetntoa,inetaton. +# +# Revision 1.12.2.2 2007/05/22 20:21:46 customdesigned +# Trap socket error +# +# Revision 1.12.2.1 2007/05/22 20:19:35 customdesigned +# Skip bogus but non-empty lines in resolv.conf +# +# Revision 1.12 2002/04/23 06:04:27 anthonybaxter +# attempt to refactor the DNSRequest.req method a little. after doing a bit +# of this, I've decided to bite the bullet and just rewrite the puppy. will +# be checkin in some design notes, then unit tests and then writing the sod. +# +# Revision 1.11 2002/03/19 13:05:02 anthonybaxter +# converted to class based exceptions (there goes the python1.4 compatibility :) +# +# removed a quite gross use of 'eval()'. +# +# Revision 1.10 2002/03/19 12:41:33 anthonybaxter +# tabnannied and reindented everything. 4 space indent, no tabs. +# yay. +# +# Revision 1.9 2002/03/19 12:26:13 anthonybaxter +# death to leading tabs. +# +# Revision 1.8 2002/03/19 10:30:33 anthonybaxter +# first round of major bits and pieces. The major stuff here (summarised +# from my local, off-net CVS server :/ this will cause some oddities with +# the +# +# tests/testPackers.py: +# a large slab of unit tests for the packer and unpacker code in DNS.Lib +# +# DNS/Lib.py: +# placeholder for addSRV. +# added 'klass' to addA, make it the same as the other A* records. +# made addTXT check for being passed a string, turn it into a length 1 list. +# explicitly check for adding a string of length > 255 (prohibited). +# a bunch of cleanups from a first pass with pychecker +# new code for pack/unpack. the bitwise stuff uses struct, for a smallish +# (disappointly small, actually) improvement, while addr2bin is much +# much faster now. +# +# DNS/Base.py: +# added DiscoverNameServers. This automatically does the right thing +# on unix/ win32. No idea how MacOS handles this. *sigh* +# Incompatible change: Don't use ParseResolvConf on non-unix, use this +# function, instead! +# a bunch of cleanups from a first pass with pychecker +# +# Revision 1.5 2001/08/09 09:22:28 anthonybaxter +# added what I hope is win32 resolver lookup support. I'll need to try +# and figure out how to get the CVS checkout onto my windows machine to +# make sure it works (wow, doing something other than games on the +# windows machine :) +# +# Code from Wolfgang.Strobl@gmd.de +# win32dns.py from +# http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/66260 +# +# Really, ParseResolvConf() should be renamed "FindNameServers" or +# some such. +# +# Revision 1.4 2001/08/09 09:08:55 anthonybaxter +# added identifying header to top of each file +# +# Revision 1.3 2001/07/19 07:20:12 anthony +# Handle blank resolv.conf lines. 
+# Patch from Bastian Kleineidam +# +# Revision 1.2 2001/07/19 06:57:07 anthony +# cvs keywords added +# +# diff --git a/web/collect/client/DNS/Base.pyc b/web/collect/client/DNS/Base.pyc new file mode 100644 index 0000000000000000000000000000000000000000..98177d339b682414c73cd91ec55ce5d2109312cf GIT binary patch literal 10073 zcmcIqOLH98bv~ziW`G&I25*AYqa{%eq6a8a7E+-Y+Z0KWj75lQ&>@AgGHFa-08I?K z2inxJLkWCiZdmC50%^zUu`)>ljSZv+CbNfEd z`ObG9m;d=c6OI3R=NGryQvNqBB0mz5;wu%2MK(lYmDrPAl@0B;J=yU3ZC^J0e!C(Y zm43S_8`XZhCL6VWdqg%y`t4EK811*~vQd|VSNZD8ZbLd_vN0xV#^hLJqak+mTeI`E%B^eisvMrmv2-yW+Yy~k{h!UFG_qy z;!}ei%t^c?W=>39l6l$kBtAX(Z2|IJUw#E*UQv<8*_-jj;FG8@E8T zjtPvaP`Gn4o(Qr$x}sv-_rPR~k36J=2Z0&Bb8H+U~`3=lJ_dk@fPn*~;>5 zvyvKH&ezUj&ezT!nB2s{<^iWHY?Q`P9tXD%>`s;jpCxUR7G@;~){O~1yLsi>+WNI% zE6an}*eK}~D~(1F1UzA|)l1tp$;)e*F0H+Omp>9T$=HgTk7zG!{jp9%|iZM6;xl&mnrluY-QBABbBE7iF zDZ6=Qvv$@=KEfgu5$R^RwKWmRx)80cipah_=$eG5!5n$a0XEr7((U9md^v(gvS+fM zO|S_&ft{HAfyrBWW+Q9(iR&qvB67~y_PN{?S?9q@J4?6Pm>wIz-z)g~oQhbHLy=>Z zL}4{YDvuWQQ03}ZlX~Qd^(67-(8Gu$4~_Beo1%v_{H55F`<`^ChaEmTvJuh!CD^6n z`%3?NUu;zleasbC(A?KuyaWiQ;*3~GJeti=S*IrV|%0wjVnsBDW#VkZ8SB~1b;urz-n4WyQzQIN%f(^j_=*{v*h z1jV6oTJZFt*-i?ZA2x6$G<7&QA07*hio`w}Rfr2cv`92YU^Qrr!quWtmw1$o z2D~nM#^7|(7>BW;F(L6d8?GSeLB~8Woq>zUHsQhok{Ngwe?P|=uR`I} zbBk&}6lr;~<9d9&f{2t{rnyYg1(w&c)HqwnQbXqjZAZmH z+NQ4fTa<5;6!=AyZx>X)P2y#2lD&oLY_S1b4Od>SQRk78G_kD~;9S7biPrfCs%&Fk zY!CY+>N_ep<9N30PACMuf^oC?epjrr*a{plVH)cxxGh! zjBk?>nBZp#tfQKF?I>0ay~YtS`3>7mb)9{O@=|2@TGCwqP^y8mmri}QOYH&@~2 zATHOv?L^UcIUd0>>cB#32+s=zEkf9Y(32wuaIOOgRJ0QntzC0K0KnlBL}(N=$#v54 zorA6kPXjQ}|7S^I(Zg?c%|YQn1Eq%_;DM#tM-P&`m>|OU`<6ml)iXM;3p(k|>X6o?+KSn3Oz!;N=$W8mfR-WwPIEZH;k&}g)_k~ArHoYQg`;#b`~Yn!47G0hNE)J@LkLq5$*^aST!PVi@el!Xvfw5jjtgih;gI;BmACm;j(C7%8Rs#Dla8DskL=-;pb7bYHh3={v+**HB> zsb7fJvAQRVwgTol7CCiPkwYe*_f<0Jvs&ttfuVqT5ZqOuOBXEm7ay#73ZZUQ4y)3A z)lJ|Je?0V%#w~p9xe0v?`@JJy_Gd)z9{RFQDLO@mKZG0t&_deI_PQNoO+bwW?+0(k z$j#o41B7J5NDXgJ;e-MlH}zN8D7PVe9Fag^I1@3pObf_GLkrs?K-yU^bsmpB}70ASN6Xj_-0-Z##^Sz_|> zfpJbeZJgl^s0&2n2z(MPsKS}z8d~r4$$2l@HORKZJLtjDKT9@gbG@`1wRg|2ZYDNF z(&;pJ4Xsb{faee4(}zYEB8s8y@0chyd3gKf(ZLD3yK}|a-Or(@xzg?yy{69Sj85si zvpwV)PjGoSP?3}PsaRzWO%FD6$%O-Wua;_Xo*Tr^K~58{edr=BFq2bS>|%gg^g8w_&(WibUJPA|^_P%@i+a!KjLz%0 zH#k4|hY&49`C-iMVtj)^@U&K;CdQA{VXMZD?lyZgRWY?X0LgV7LjWyT8? zm&5wZZNC=Tzc0G)i{8WIZd{XfN}{-goF{4T203dMfvdd)C=LWR3!(>E5(ih;)`ND` zMgb>FiY!gi?I5x?YVWQj;83~`3xzZANH!t*Vbppj@3{=jtwG0wW|BJ3LO-#Yq;Yr= zihzqUsF|FQIH2#g)?(cP%H1orUo3v=Zrj*(){_1SpU+@}o-ZTn>x?dFRcF0vp+5SU zq&{F*KoiS%eHL(J(U0Ft1u#y>w`=sP;T8}DU&$l3vowYCq@_XI97<+<$-d!|@a4Z? 
z%XkrOI9`JcP!VOC@jb7<$w2^AZWUp5Nky!cF1ZZEnNjS6b4qGr-r=%Q4q`Nc_nIuh zfw`!CM3&%&+{>se9o2BW^1bdt=}|BoDlVUj9=rvQV8`IulZht>!c<4(?*4D3VkZTd zZK8^*mIY9RJHHjZmm=(yoHsK1!2jUKCX%M?Q8`VD+JjxYO;RarSX8kNHlY$BQF zzNSRu3AuhFoJ7IUylm_(jJ%jf9-A5y7eN%{W-Dsj3{{sPgt-x( z&UP6I($gSS2$?xBCWI@d4fWF9G<%o^`#qB%1jrj0{TIMynpqS^K^>he+wFA=R8YPc zj0`T{`!swH6<}5)QHpAt7{l*ex$W}Qc9-R*5ZEtJ&P=o5VV3V=+F@x4RjghbuPviG z!JNW5GuT=9CwN47oWp~s`94}Geuk@k1Kl<@vNos5G2L10(@(G4HMxb;y=-lsZ1${S z71sqlA7U7`hr>AM0a@LGuQfTDCyL`6bf09dD2~5CVL8cCvfCAulobO z<%rSMpt*Qq(wM<4%hH1``dN07Fhf*M!uRkPgamn6y zt>q>vvh*c{1uD+X{=Y(9pOIpl{)9GlUYk0>Kd-8?}j+q$b&EM=~S$F&X8vjv>1LbJ2%QNy|vl4QQMg@Uvf$S$GwMg~v%9 zjN!Qcq;yl)ngVITVy9{GXw8swJbCm(gel1^_yp{m%*lOUy5*wttXZIY0o|aQa-qU0a!Z4>yWbRY%I*E8U;(A{BU1N#oz>HNT1UJE^}Q)=XhUapyz0xMF6y*S`wd;t zX&rUw#Xb=M61`WRK<|eRz3)P`KyQsY{(ndBFJFvaJBhnUdjhk^F8G|5MO?cOzYeoV zT+8=4Ai4iOPkj6Ed&$B8#f$WSaCi)%S}M2Ug!`zS5$84a-rfDC!|BqKINfg|c6|b- zs2>G!#;XrF2qdyV1fx^GUt-1~j(>qy(EEw} zjLvu+KYR;%AA&bkq=0LX7ozudujx1aaa}|`*Tap*GIF|*$tSp%8*H`WtlesbumtCB zVaCqxkaoJABb9VfFs=0vuP`F+WoT$@J#&5eyCQTfaQyqXr5S@m{)@Y%ZqD8${a{-4|(gzK!06gYGcr2$LsJ9(C7ynoogzXA6khaq%!kO@0Yyv!0>|_r^CcGru6O1`1 z@eURFj1B>h_klT=^v-%`blNME-Y1f*-RC4(I7L6tDl@D>MGa?jSqLjGrFX{_FpFtO zda^lDmx;LIXTP*IyiK7uUar{A!cEX%l9AT^nXGM)?xuKBar07Pry<>8E z!tLz4q;|B4)uC`2x};P3#&fFMD3Rk_tVx1fJy%F}m*9iy6l79yY91z`ypV6_CbFjG z>ek`k;DmoW&^Jivgj!Jn2ys34614*Mm#}k$*Qlm8T}t|dN)i0IT|*n?4bAtRhIpxu z{r2^XI-GSmCOyKIy4dT6i&v;KV%J5hFcz0%Q!8}$3^s^cq%ctzwPS4P+cIqM?@pA) z9mC4vk8mz%SaeR|&Ic6%Fd*GFYFVW|Xs1BdiQJz|`3+VE$$zIkAhjs>P?iaQQv79+yIr!Jj@eou0OR1*25ya)1Q7kBMs58bJm zSrAa%6CTfS0*YWahYK%*8iw8_{7byjtvmu?ht5Y+A0xeU%c0K1Y|mwcbm91iW+7-PM4_3v&*%J?G0uLNQ{sHg@2)*+ z8~*zTml4wG1BGAwL2v4_y?4`TKeX|oIArkbMBct^Yhn^|6%w0`xO616TmwFb^|~I5ugW@-f%dScTez&lJcHs6xDECtbEj~4J%+j- z?QopVMfB#I6X|hbL4T|ln4Gn^WU>FQ&*dGjf{ZVglK*RpHe=r`s zp>%DXS!uS}JZDWOHwEqcK7CqbmS)+KDYH~0r6V&Vw_qx^I=57m=8{tv_;hMkTqJWI z)55u8-0M|@z?yZL^30TT4sv$)imB0+Vrn*AvZim_O4Pa}*GZ}Aex?9qHG#T7L*Tc7 zJeZSgoC?Ym+C-Z7Tyj9Ba0_X)=|DNiW-nB?z)q-vuq$V#T{-3208(l6Dk)qGz#_5n z6>lq!lV5Qq*{)Qrz&-|WJPmvI&+{N*T}k4?6S8`Oj+%CbUFpWLHfbC?sd_f8!X*3~ zPOESlI=Si5^h*_WRyq-i^JK;LLM&Hys=bqT@FrPa$l}4=e;gjJj9xGKZS~1^0eE#! Uc{Q)*HN0j=d8!3ZJ;iSA5AJ)U-~a#s literal 0 HcmV?d00001 diff --git a/web/collect/client/DNS/Lib.py b/web/collect/client/DNS/Lib.py new file mode 100644 index 0000000..41d4363 --- /dev/null +++ b/web/collect/client/DNS/Lib.py @@ -0,0 +1,725 @@ +# -*- encoding: utf-8 -*- +""" + $Id: Lib.py,v 1.11.2.8 2011/03/16 20:06:39 customdesigned Exp $ + + This file is part of the pydns project. + Homepage: http://pydns.sourceforge.net + + This code is covered by the standard Python License. See LICENSE for details. + + Library code. Largely this is packers and unpackers for various types. +""" + +# +# +# See RFC 1035: +# ------------------------------------------------------------------------ +# Network Working Group P. 
Mockapetris +# Request for Comments: 1035 ISI +# November 1987 +# Obsoletes: RFCs 882, 883, 973 +# +# DOMAIN NAMES - IMPLEMENTATION AND SPECIFICATION +# ------------------------------------------------------------------------ + + +import string, types + +import Type +import Class +import Opcode +import Status +import DNS + +from Base import DNSError + +LABEL_UTF8 = False +LABEL_ENCODING = 'idna' + +class UnpackError(DNSError): pass +class PackError(DNSError): pass + +# Low-level 16 and 32 bit integer packing and unpacking + +from struct import pack as struct_pack +from struct import unpack as struct_unpack +from socket import inet_ntoa, inet_aton + +def pack16bit(n): + return struct_pack('!H', n) + +def pack32bit(n): + return struct_pack('!L', n) + +def unpack16bit(s): + return struct_unpack('!H', s)[0] + +def unpack32bit(s): + return struct_unpack('!L', s)[0] + +def addr2bin(addr): + return struct_unpack('!l', inet_aton(addr))[0] + +def bin2addr(n): + return inet_ntoa(struct_pack('!L', n)) + +# Packing class + +class Packer: + " packer base class. supports basic byte/16bit/32bit/addr/string/name " + def __init__(self): + self.buf = '' + self.index = {} + def getbuf(self): + return self.buf + def addbyte(self, c): + if len(c) != 1: raise TypeError, 'one character expected' + self.buf = self.buf + c + def addbytes(self, bytes): + self.buf = self.buf + bytes + def add16bit(self, n): + self.buf = self.buf + pack16bit(n) + def add32bit(self, n): + self.buf = self.buf + pack32bit(n) + def addaddr(self, addr): + n = addr2bin(addr) + self.buf = self.buf + pack32bit(n) + def addstring(self, s): + if len(s) > 255: + raise ValueError, "Can't encode string of length "+ \ + "%s (> 255)"%(len(s)) + self.addbyte(chr(len(s))) + self.addbytes(s) + def addname(self, name): + # Domain name packing (section 4.1.4) + # Add a domain name to the buffer, possibly using pointers. + # The case of the first occurrence of a name is preserved. + # Redundant dots are ignored. 
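# A quick sketch of the fixed-width helpers defined above (assumes this module
# is importable as DNS.Lib; Python 2).  Everything is packed big-endian --
# "network order", the '!' in the struct format strings.
from DNS import Lib

assert Lib.pack16bit(0x1234) == '\x12\x34'
assert Lib.unpack16bit('\x12\x34') == 0x1234
assert Lib.bin2addr(Lib.addr2bin('10.0.0.1')) == '10.0.0.1'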
+ list = [] + for label in string.splitfields(name, '.'): + if not label: + raise PackError, 'empty label' + list.append(label) + keys = [] + for i in range(len(list)): + key = string.upper(string.joinfields(list[i:], '.')) + keys.append(key) + if self.index.has_key(key): + pointer = self.index[key] + break + else: + i = len(list) + pointer = None + # Do it into temporaries first so exceptions don't + # mess up self.index and self.buf + buf = '' + offset = len(self.buf) + index = [] + if DNS.LABEL_UTF8: + enc = 'utf8' + else: + enc = DNS.LABEL_ENCODING + for j in range(i): + label = list[j] + try: + label = label.encode(enc) + except UnicodeEncodeError: + if not DNS.LABEL_UTF8: raise + if not label.startswith('\ufeff'): + label = '\ufeff'+label + label = label.encode(enc) + n = len(label) + if n > 63: + raise PackError, 'label too long' + if offset + len(buf) < 0x3FFF: + index.append((keys[j], offset + len(buf))) + else: + print 'DNS.Lib.Packer.addname:', + print 'warning: pointer too big' + buf = buf + (chr(n) + label) + if pointer: + buf = buf + pack16bit(pointer | 0xC000) + else: + buf = buf + '\0' + self.buf = self.buf + buf + for key, value in index: + self.index[key] = value + def dump(self): + keys = self.index.keys() + keys.sort() + print '-'*40 + for key in keys: + print '%20s %3d' % (key, self.index[key]) + print '-'*40 + space = 1 + for i in range(0, len(self.buf)+1, 2): + if self.buf[i:i+2] == '**': + if not space: print + space = 1 + continue + space = 0 + print '%4d' % i, + for c in self.buf[i:i+2]: + if ' ' < c < '\177': + print ' %c' % c, + else: + print '%2d' % ord(c), + print + print '-'*40 + + +# Unpacking class + + +class Unpacker: + def __init__(self, buf): + self.buf = buf + self.offset = 0 + def getbyte(self): + if self.offset >= len(self.buf): + raise UnpackError, "Ran off end of data" + c = self.buf[self.offset] + self.offset = self.offset + 1 + return c + def getbytes(self, n): + s = self.buf[self.offset : self.offset + n] + if len(s) != n: raise UnpackError, 'not enough data left' + self.offset = self.offset + n + return s + def get16bit(self): + return unpack16bit(self.getbytes(2)) + def get32bit(self): + return unpack32bit(self.getbytes(4)) + def getaddr(self): + return bin2addr(self.get32bit()) + def getstring(self): + return self.getbytes(ord(self.getbyte())) + def getname(self): + # Domain name unpacking (section 4.1.4) + c = self.getbyte() + i = ord(c) + if i & 0xC0 == 0xC0: + d = self.getbyte() + j = ord(d) + pointer = ((i<<8) | j) & ~0xC000 + save_offset = self.offset + try: + self.offset = pointer + domain = self.getname() + finally: + self.offset = save_offset + return domain + if i == 0: + return '' + domain = self.getbytes(i) + remains = self.getname() + if not remains: + return domain + else: + return domain + '.' 
+ remains + + +# Test program for packin/unpacking (section 4.1.4) + +def testpacker(): + N = 2500 + R = range(N) + import timing + # See section 4.1.4 of RFC 1035 + timing.start() + for i in R: + p = Packer() + p.addaddr('192.168.0.1') + p.addbytes('*' * 20) + p.addname('f.ISI.ARPA') + p.addbytes('*' * 8) + p.addname('Foo.F.isi.arpa') + p.addbytes('*' * 18) + p.addname('arpa') + p.addbytes('*' * 26) + p.addname('') + timing.finish() + print timing.milli(), "ms total for packing" + print round(timing.milli() / i, 4), 'ms per packing' + #p.dump() + u = Unpacker(p.buf) + u.getaddr() + u.getbytes(20) + u.getname() + u.getbytes(8) + u.getname() + u.getbytes(18) + u.getname() + u.getbytes(26) + u.getname() + timing.start() + for i in R: + u = Unpacker(p.buf) + + res = (u.getaddr(), + u.getbytes(20), + u.getname(), + u.getbytes(8), + u.getname(), + u.getbytes(18), + u.getname(), + u.getbytes(26), + u.getname()) + timing.finish() + print timing.milli(), "ms total for unpacking" + print round(timing.milli() / i, 4), 'ms per unpacking' + #for item in res: print item + + +# Pack/unpack RR toplevel format (section 3.2.1) + +class RRpacker(Packer): + def __init__(self): + Packer.__init__(self) + self.rdstart = None + def addRRheader(self, name, type, klass, ttl, *rest): + self.addname(name) + self.add16bit(type) + self.add16bit(klass) + self.add32bit(ttl) + if rest: + if rest[1:]: raise TypeError, 'too many args' + rdlength = rest[0] + else: + rdlength = 0 + self.add16bit(rdlength) + self.rdstart = len(self.buf) + def patchrdlength(self): + rdlength = unpack16bit(self.buf[self.rdstart-2:self.rdstart]) + if rdlength == len(self.buf) - self.rdstart: + return + rdata = self.buf[self.rdstart:] + save_buf = self.buf + ok = 0 + try: + self.buf = self.buf[:self.rdstart-2] + self.add16bit(len(rdata)) + self.buf = self.buf + rdata + ok = 1 + finally: + if not ok: self.buf = save_buf + def endRR(self): + if self.rdstart is not None: + self.patchrdlength() + self.rdstart = None + def getbuf(self): + if self.rdstart is not None: self.patchrdlength() + return Packer.getbuf(self) + # Standard RRs (section 3.3) + def addCNAME(self, name, klass, ttl, cname): + self.addRRheader(name, Type.CNAME, klass, ttl) + self.addname(cname) + self.endRR() + def addHINFO(self, name, klass, ttl, cpu, os): + self.addRRheader(name, Type.HINFO, klass, ttl) + self.addstring(cpu) + self.addstring(os) + self.endRR() + def addMX(self, name, klass, ttl, preference, exchange): + self.addRRheader(name, Type.MX, klass, ttl) + self.add16bit(preference) + self.addname(exchange) + self.endRR() + def addNS(self, name, klass, ttl, nsdname): + self.addRRheader(name, Type.NS, klass, ttl) + self.addname(nsdname) + self.endRR() + def addPTR(self, name, klass, ttl, ptrdname): + self.addRRheader(name, Type.PTR, klass, ttl) + self.addname(ptrdname) + self.endRR() + def addSOA(self, name, klass, ttl, + mname, rname, serial, refresh, retry, expire, minimum): + self.addRRheader(name, Type.SOA, klass, ttl) + self.addname(mname) + self.addname(rname) + self.add32bit(serial) + self.add32bit(refresh) + self.add32bit(retry) + self.add32bit(expire) + self.add32bit(minimum) + self.endRR() + def addTXT(self, name, klass, ttl, list): + self.addRRheader(name, Type.TXT, klass, ttl) + if type(list) is types.StringType: + list = [list] + for txtdata in list: + self.addstring(txtdata) + self.endRR() + # Internet specific RRs (section 3.4) -- class = IN + def addA(self, name, klass, ttl, address): + self.addRRheader(name, Type.A, klass, ttl) + self.addaddr(address) + 
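# A small sketch of the resource-record layer defined here, assuming DNS.Lib
# and DNS.Class from this patch are importable (Python 2): pack one A record,
# then walk it back with RRunpacker.  getbuf()/endRR() rely on patchrdlength()
# to keep RDLENGTH consistent with what was actually written.
from DNS import Lib, Class

p = Lib.RRpacker()
p.addA('host.example.com', Class.IN, 300, '10.0.0.1')
wire = p.getbuf()

u = Lib.RRunpacker(wire)
name, rrtype, klass, ttl, rdlength = u.getRRheader()
address = u.getAdata()            # '10.0.0.1'
u.endRR()                         # raises UnpackError if RDLENGTH was wrong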
self.endRR() + def addWKS(self, name, ttl, address, protocol, bitmap): + self.addRRheader(name, Type.WKS, Class.IN, ttl) + self.addaddr(address) + self.addbyte(chr(protocol)) + self.addbytes(bitmap) + self.endRR() + def addSRV(self): + raise NotImplementedError + +def prettyTime(seconds): + if seconds<60: + return seconds,"%d seconds"%(seconds) + if seconds<3600: + return seconds,"%d minutes"%(seconds/60) + if seconds<86400: + return seconds,"%d hours"%(seconds/3600) + if seconds<604800: + return seconds,"%d days"%(seconds/86400) + else: + return seconds,"%d weeks"%(seconds/604800) + + +class RRunpacker(Unpacker): + def __init__(self, buf): + Unpacker.__init__(self, buf) + self.rdend = None + def getRRheader(self): + name = self.getname() + rrtype = self.get16bit() + klass = self.get16bit() + ttl = self.get32bit() + rdlength = self.get16bit() + self.rdend = self.offset + rdlength + return (name, rrtype, klass, ttl, rdlength) + def endRR(self): + if self.offset != self.rdend: + raise UnpackError, 'end of RR not reached' + def getCNAMEdata(self): + return self.getname() + def getHINFOdata(self): + return self.getstring(), self.getstring() + def getMXdata(self): + return self.get16bit(), self.getname() + def getNSdata(self): + return self.getname() + def getPTRdata(self): + return self.getname() + def getSOAdata(self): + return self.getname(), \ + self.getname(), \ + ('serial',)+(self.get32bit(),), \ + ('refresh ',)+prettyTime(self.get32bit()), \ + ('retry',)+prettyTime(self.get32bit()), \ + ('expire',)+prettyTime(self.get32bit()), \ + ('minimum',)+prettyTime(self.get32bit()) + def getTXTdata(self): + list = [] + while self.offset != self.rdend: + list.append(self.getstring()) + return list + getSPFdata = getTXTdata + def getAdata(self): + return self.getaddr() + def getWKSdata(self): + address = self.getaddr() + protocol = ord(self.getbyte()) + bitmap = self.getbytes(self.rdend - self.offset) + return address, protocol, bitmap + def getSRVdata(self): + """ + _Service._Proto.Name TTL Class SRV Priority Weight Port Target + """ + priority = self.get16bit() + weight = self.get16bit() + port = self.get16bit() + target = self.getname() + #print '***priority, weight, port, target', priority, weight, port, target + return priority, weight, port, target + + +# Pack/unpack Message Header (section 4.1) + +class Hpacker(Packer): + def addHeader(self, id, qr, opcode, aa, tc, rd, ra, z, rcode, + qdcount, ancount, nscount, arcount): + self.add16bit(id) + self.add16bit((qr&1)<<15 | (opcode&0xF)<<11 | (aa&1)<<10 + | (tc&1)<<9 | (rd&1)<<8 | (ra&1)<<7 + | (z&7)<<4 | (rcode&0xF)) + self.add16bit(qdcount) + self.add16bit(ancount) + self.add16bit(nscount) + self.add16bit(arcount) + +class Hunpacker(Unpacker): + def getHeader(self): + id = self.get16bit() + flags = self.get16bit() + qr, opcode, aa, tc, rd, ra, z, rcode = ( + (flags>>15)&1, + (flags>>11)&0xF, + (flags>>10)&1, + (flags>>9)&1, + (flags>>8)&1, + (flags>>7)&1, + (flags>>4)&7, + (flags>>0)&0xF) + qdcount = self.get16bit() + ancount = self.get16bit() + nscount = self.get16bit() + arcount = self.get16bit() + return (id, qr, opcode, aa, tc, rd, ra, z, rcode, + qdcount, ancount, nscount, arcount) + + +# Pack/unpack Question (section 4.1.2) + +class Qpacker(Packer): + def addQuestion(self, qname, qtype, qclass): + self.addname(qname) + self.add16bit(qtype) + self.add16bit(qclass) + +class Qunpacker(Unpacker): + def getQuestion(self): + return self.getname(), self.get16bit(), self.get16bit() + + +# Pack/unpack Message(section 4) +# NB the order of the base 
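# A sketch of assembling a complete query message with the header and question
# packers above; Mpacker (defined just below) mixes them together exactly as
# DnsRequest.req() does.  Assumes DNS.Lib/Type/Class/Opcode are importable
# (Python 2).
from DNS import Lib, Type, Class, Opcode

m = Lib.Mpacker()
#            id  qr  opcode        aa tc rd ra z  rcode  qd an ns ar
m.addHeader(1234, 0, Opcode.QUERY, 0, 0, 1, 0, 0, 0,     1, 0, 0, 0)
m.addQuestion('example.com', Type.A, Class.IN)
wire = m.getbuf()

u = Lib.Munpacker(wire)
header = u.getHeader()            # (id, qr, opcode, ..., qdcount, ancount, ...)
qname, qtype, qclass = u.getQuestion()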
classes is important for __init__()! + +class Mpacker(RRpacker, Qpacker, Hpacker): + pass + +class Munpacker(RRunpacker, Qunpacker, Hunpacker): + pass + + +# Routines to print an unpacker to stdout, for debugging. +# These affect the unpacker's current position! + +def dumpM(u): + print 'HEADER:', + (id, qr, opcode, aa, tc, rd, ra, z, rcode, + qdcount, ancount, nscount, arcount) = u.getHeader() + print 'id=%d,' % id, + print 'qr=%d, opcode=%d, aa=%d, tc=%d, rd=%d, ra=%d, z=%d, rcode=%d,' \ + % (qr, opcode, aa, tc, rd, ra, z, rcode) + if tc: print '*** response truncated! ***' + if rcode: print '*** nonzero error code! (%d) ***' % rcode + print ' qdcount=%d, ancount=%d, nscount=%d, arcount=%d' \ + % (qdcount, ancount, nscount, arcount) + for i in range(qdcount): + print 'QUESTION %d:' % i, + dumpQ(u) + for i in range(ancount): + print 'ANSWER %d:' % i, + dumpRR(u) + for i in range(nscount): + print 'AUTHORITY RECORD %d:' % i, + dumpRR(u) + for i in range(arcount): + print 'ADDITIONAL RECORD %d:' % i, + dumpRR(u) + +class DnsResult: + + def __init__(self,u,args): + self.header={} + self.questions=[] + self.answers=[] + self.authority=[] + self.additional=[] + self.args=args + self.storeM(u) + + def show(self): + import time + print '; <<>> PDG.py 1.0 <<>> %s %s'%(self.args['name'], + self.args['qtype']) + opt="" + if self.args['rd']: + opt=opt+'recurs ' + h=self.header + print ';; options: '+opt + print ';; got answer:' + print ';; ->>HEADER<<- opcode %s, status %s, id %d'%( + h['opcode'],h['status'],h['id']) + flags=filter(lambda x,h=h:h[x],('qr','aa','rd','ra','tc')) + print ';; flags: %s; Ques: %d, Ans: %d, Auth: %d, Addit: %d'%( + string.join(flags),h['qdcount'],h['ancount'],h['nscount'], + h['arcount']) + print ';; QUESTIONS:' + for q in self.questions: + print ';; %s, type = %s, class = %s'%(q['qname'],q['qtypestr'], + q['qclassstr']) + print + print ';; ANSWERS:' + for a in self.answers: + print '%-20s %-6s %-6s %s'%(a['name'],`a['ttl']`,a['typename'], + a['data']) + print + print ';; AUTHORITY RECORDS:' + for a in self.authority: + print '%-20s %-6s %-6s %s'%(a['name'],`a['ttl']`,a['typename'], + a['data']) + print + print ';; ADDITIONAL RECORDS:' + for a in self.additional: + print '%-20s %-6s %-6s %s'%(a['name'],`a['ttl']`,a['typename'], + a['data']) + print + if self.args.has_key('elapsed'): + print ';; Total query time: %d msec'%self.args['elapsed'] + print ';; To SERVER: %s'%(self.args['server']) + print ';; WHEN: %s'%time.ctime(time.time()) + + def storeM(self,u): + (self.header['id'], self.header['qr'], self.header['opcode'], + self.header['aa'], self.header['tc'], self.header['rd'], + self.header['ra'], self.header['z'], self.header['rcode'], + self.header['qdcount'], self.header['ancount'], + self.header['nscount'], self.header['arcount']) = u.getHeader() + self.header['opcodestr']=Opcode.opcodestr(self.header['opcode']) + self.header['status']=Status.statusstr(self.header['rcode']) + for i in range(self.header['qdcount']): + #print 'QUESTION %d:' % i, + self.questions.append(self.storeQ(u)) + for i in range(self.header['ancount']): + #print 'ANSWER %d:' % i, + self.answers.append(self.storeRR(u)) + for i in range(self.header['nscount']): + #print 'AUTHORITY RECORD %d:' % i, + self.authority.append(self.storeRR(u)) + for i in range(self.header['arcount']): + #print 'ADDITIONAL RECORD %d:' % i, + self.additional.append(self.storeRR(u)) + + def storeQ(self,u): + q={} + q['qname'], q['qtype'], q['qclass'] = u.getQuestion() + q['qtypestr']=Type.typestr(q['qtype']) + 
q['qclassstr']=Class.classstr(q['qclass']) + return q + + def storeRR(self,u): + r={} + r['name'],r['type'],r['class'],r['ttl'],r['rdlength'] = u.getRRheader() + r['typename'] = Type.typestr(r['type']) + r['classstr'] = Class.classstr(r['class']) + #print 'name=%s, type=%d(%s), class=%d(%s), ttl=%d' \ + # % (name, + # type, typename, + # klass, Class.classstr(class), + # ttl) + mname = 'get%sdata' % r['typename'] + if hasattr(u, mname): + r['data']=getattr(u, mname)() + else: + r['data']=u.getbytes(r['rdlength']) + return r + +def dumpQ(u): + qname, qtype, qclass = u.getQuestion() + print 'qname=%s, qtype=%d(%s), qclass=%d(%s)' \ + % (qname, + qtype, Type.typestr(qtype), + qclass, Class.classstr(qclass)) + +def dumpRR(u): + name, type, klass, ttl, rdlength = u.getRRheader() + typename = Type.typestr(type) + print 'name=%s, type=%d(%s), class=%d(%s), ttl=%d' \ + % (name, + type, typename, + klass, Class.classstr(klass), + ttl) + mname = 'get%sdata' % typename + if hasattr(u, mname): + print ' formatted rdata:', getattr(u, mname)() + else: + print ' binary rdata:', u.getbytes(rdlength) + +if __name__ == "__main__": + testpacker() +# +# $Log: Lib.py,v $ +# Revision 1.11.2.8 2011/03/16 20:06:39 customdesigned +# Refer to explicit LICENSE file. +# +# Revision 1.11.2.7 2009/06/09 18:39:06 customdesigned +# Built-in SPF support +# +# Revision 1.11.2.6 2008/10/15 22:34:06 customdesigned +# Default to idna encoding. +# +# Revision 1.11.2.5 2008/09/17 17:35:14 customdesigned +# Use 7-bit ascii encoding, because case folding needs to be disabled +# before utf8 is safe to use, even experimentally. +# +# Revision 1.11.2.4 2008/09/17 16:09:53 customdesigned +# Encode unicode labels as UTF-8 +# +# Revision 1.11.2.3 2007/05/22 20:27:40 customdesigned +# Fix unpacker underflow. +# +# Revision 1.11.2.2 2007/05/22 20:25:53 customdesigned +# Use socket.inetntoa,inetaton. +# +# Revision 1.11.2.1 2007/05/22 20:20:39 customdesigned +# Mark utf-8 encoding +# +# Revision 1.11 2002/03/19 13:05:02 anthonybaxter +# converted to class based exceptions (there goes the python1.4 compatibility :) +# +# removed a quite gross use of 'eval()'. +# +# Revision 1.10 2002/03/19 12:41:33 anthonybaxter +# tabnannied and reindented everything. 4 space indent, no tabs. +# yay. +# +# Revision 1.9 2002/03/19 10:30:33 anthonybaxter +# first round of major bits and pieces. The major stuff here (summarised +# from my local, off-net CVS server :/ this will cause some oddities with +# the +# +# tests/testPackers.py: +# a large slab of unit tests for the packer and unpacker code in DNS.Lib +# +# DNS/Lib.py: +# placeholder for addSRV. +# added 'klass' to addA, make it the same as the other A* records. +# made addTXT check for being passed a string, turn it into a length 1 list. +# explicitly check for adding a string of length > 255 (prohibited). +# a bunch of cleanups from a first pass with pychecker +# new code for pack/unpack. the bitwise stuff uses struct, for a smallish +# (disappointly small, actually) improvement, while addr2bin is much +# much faster now. +# +# DNS/Base.py: +# added DiscoverNameServers. This automatically does the right thing +# on unix/ win32. No idea how MacOS handles this. *sigh* +# Incompatible change: Don't use ParseResolvConf on non-unix, use this +# function, instead! +# a bunch of cleanups from a first pass with pychecker +# +# Revision 1.8 2001/08/09 09:08:55 anthonybaxter +# added identifying header to top of each file +# +# Revision 1.7 2001/07/19 07:50:44 anthony +# Added SRV (RFC 2782) support. 
Code from Michael Ströder. +# +# Revision 1.6 2001/07/19 07:39:18 anthony +# 'type' -> 'rrtype' in getRRheader(). Fix from Michael Ströder. +# +# Revision 1.5 2001/07/19 07:34:19 anthony +# oops. glitch in storeRR (fixed now). +# Reported by Bastian Kleineidam and by greg lin. +# +# Revision 1.4 2001/07/19 07:16:42 anthony +# Changed (opcode&0xF)<<11 to (opcode*0xF)<<11. +# Patch from Timothy J. Miller. +# +# Revision 1.3 2001/07/19 06:57:07 anthony +# cvs keywords added +# +# diff --git a/web/collect/client/DNS/Lib.pyc b/web/collect/client/DNS/Lib.pyc new file mode 100644 index 0000000000000000000000000000000000000000..92a3b0775314422fad4817dc59de6251597a146f GIT binary patch literal 22633 zcmcJ1dvF{_df(TxFYE#ffCLE;A|(zbLINofeCSTmffPkbfB^bPker1C9wG)AZqEWk z0=tWufdr@)$=%VZ*p72?Y?qy+QjRNeE>2vPr1G}&vJ=~R$8nNNQmJ#2a{frgc_*$^ z{$uL*>zUcb;z8276NTBHo}TXhzW(0*_5AApE|+c}``WFl#QzP5$TblOZ(1T+WKl#b z(Uv??vZ#`>C5u*4&d6dWDQ9Ido0N01m`loeSy_$U1s5!=zbPPMGvs>g6KgOj*C9T!U@rbMNfzx!YwEDqUa-{ z&x<~qR=p&8n9Wa$9ueOV{<5Qwv5bq^EKAt{)S@enIr5DrMS46**v^|;D zo)&E~+y^u<(X>=R}{Ahn6&YMR_NIB~@3Vv?Z1Ci=e7=N~C0; zzM&`WoBo4|=GGgZ*cT@*UYxiz@wR>G!o`c{FT8pF;#>Bm3zHY#ntbz1c6B3+8f)4M z{pGr+?di>?eY#Y#=U4pDuKBB;?T2>L4I;Zyv!fNyZf@y%Xg7n#L$4Z5l&$Gcq*w_fFUab)h?{)>eWMU!mfCpee=fq({q(+yVeM7?M1G?8cvi-055QZEp|C!-*kgzZxy}yp_y{^ zkr#xvTi5nRy&2!3i%;CZZ)}8iwAJ*&iIV^1$&)8@<4Q!L9ub+Et4s$$BZ#sh;_JE_ z^@_-c?1B$cPR1+m0q1zl6L7Cn83&t>TS;$AOLce6TUv@rBC@o!*3cWP-qMn3#iqD~ zcsey#Id7KC!RxP~9)-L_MG?8OVJ%w1kY3>FP& zn2=xhqNRG&aNBoo)Tmd17W`rO_W?v1$e@&Ki980S0<1X>cWTxQ033%=uu+Yc+M}YE zdVBI+SmLl~eDSRZesm#0m#F8Q!A;JK5P{S^)teR3=YwrHoq})*KE_9-5%dm7k@wE%f#eOj zx9mj^HflH8i&+J*$sb4gf+41s0u5;;4>Qszivn?3@^wp~qFW-qg(f3FiDghiVc?U= zfw)H9v#Tp^;8r6qu)WQuSB*T)R>an-SEtmQ-)ef)MFtSE3IL(FsP2zAV8hiK>-uHD z1Mox*NqG;Lj-;4N=1g;SHyYccVB*<7!*Cc?26cev@64$l`j`Ed6mb5I88?h2|n4xgI z=|H|2)Nod;1Bp0?aXDDU#bg3*pU2D$d-b)61Pe{Vzt=f85Pnt~#}mTW8K`};cEck( z*dXe71T_%zM>cM)C@(PVoc{zfcnJf#_17ZXtJ8!uLI^hLs#jl*R%{vvW1&5M)xPw_ zFP<646rB^89u)gW?&?O&F(*+6bW~RY=hV&>J6P9254l>Kp%*YMbS-K~<<(I&YS4o& zfN`Sdp(U~`ShUT{z=cmz*3^4b_g|CkjI=V+gs7k*OmJk0&nLjk`lu|RkF0ENOO%x; z7ccx{B~$koWIM;6^Sl=Dtd*0KW{k&5w)4`;vb%hW<{t5jV(uGjcoVG`hZ$!6{K-Y6dn&rt01~p-orTCJyP#w zz3$_kL2s+auy(I(o|0BcrtXi*#}?LeyI)%UY{gZ(vVOx_Aj5#}=BNWELU6dZ){M67 zRri6n>Oj5Fe%gp?Z&UDpyixONHJ?@r%ssZVqejDCZPb_jC+~{TWSg3+Ou#QO=F5bu zHB|p(cuYh-b%VNJU!Js^4ZjYZh1VbW%f9@4tc_$GiiC>PFpe?xo2!0Q^SxCa(*59b zx7qaSnpz}q>&u>DUoi4Uv*`t70h2DXLc7aOsh^Md;=lb&q0?EIu_?;afMa zzc+nz>BIRC-i`pNxt*SS|Mt|4xfzaa#7YF=_+j0Lh?%CKF|Hu^JdE5R3P1Iu6$cX8 zXdH06(Yz2=zzYkjei(7$#3`#Dd0Qcu#^=m1Ux?WlZ)VC+$BkMo^dbWsq?A{$^0kMQ z+3fTa2(zauc}(U$=KVV%q;h#RBzaX(WpzlI@<~-zL#nKbR-eje@@hckRY{esqACkY z88xK(RY?kJQ1z=Ls!x?wS&gVME30zqm@24aR!I%2g7g`~3)6>;r06@N62r?^2vclR z^!XM;7AD|zWM+2rG$j7i{S%OpIzzc2u-00ZjMgj}o|#=gBMWJJNSBOk{sNX)=RnYC zPC1~nGNSYHI3rW{e-h%-JOS5c!S5rdbb*nRQBe@OIZ?9=ov_RGO^ObkL;t!5?+nju z&hPGI>i$(q#3BS*HUkL(<}X23)Ge`8qfeCX#na)L^=~m07Ug5<$C`!N_03L@V4Ai$ z_neQFWk_M{(uL3-dsEXsq*LU>!lB=M_G}3182f@Ib=V%`1&Sk7EUmm4N2XRlEcK3!84$bKXH6H1Ynl2QJVXYt0|QOK{0W z4XBbTs)FRz3#zE{%9gx3t_qS zjtg`+b+_{}O0VE@8W#jjoGDzsXU9A_P5hBb}z4o+6`FlUq~w)&bSQ#0cVt^R^Xve*wT5Yi(4O)u0+Q)*7Y( zmQ8GREL|5Mu)?&*Cb_ zv!GM;H!%wUvR5$f;L))fW^{zlo_BP5Q-6Dp(J>h=GGKyN!WDijfIV?((Z<1AIpFJO zfjOoyI7+{;$Dk>~;dmL!YBLXq?id}739d7X(yeEl0u=7D7a7~??{-ZB4GacS%W|k3jrR(pBkUb6OQGZa9vH$<*5*Y9yhdOfSEx8ab#wmD zBZ7n??M{k98oUmqo&V(BCr^w&2&5YHLOdqR2TREi8t`TGA0!JE9@VRM5a)MbDjW0F z{lr@`mKu#V-B@$|I{OPe6!(vSI2953C9D&yNU{1-);VoG3&mM^6Nky~)E! 
z>v%Gh-sEABbv!we-sBP5?0AxeQ*|V%$itz+&H8y+fKOeZ@rhCoN;A4FGqX_ z(YViCI;WBj8CP;5-Ibh1cO@s0R@RE}GuJ;+d!ude0JI&FnOSlstgAWy7mHxPCr_Tx zA-(veOA{C0dVAu+#6|MeSs&t-in%s%qjF>7x^w3`8JQmG2aU$W2NQniPq;zT_2Hb+ z!sH#TL0CmpuWO+lH6nMFJA8<2`}Jk&5_Yr}+D$J=>S;^uc)@I;`1NJ-LfpDtiAkQ_ z<3C#INqvavB{%K=jEe|+o01~(*ZlgjF{ea*ZpvX4cGkDRiCAx zu~FA#Tw^tm-zftiRcP%KuO1thA-hA zh}uAtB%Zb_jff+G_75jly-J1w3Rjh)nqkJ?LF4cOnHIYr^Q{z?2 zBdz2lLuA~EO04@F{PfZ7ywnS#5E7(nN6$ulclzW2J#Rn<(D6B`icWKAVc)ZVzC`4u?R!qIWKF$12CJmtq7rYYeh1 z!_FV3q|-Wu9_#;So>(qy+HehVA7s&Vqv}esQm3&1U=dNadQs)nAOi>)HKK;ppek8~Jvi|7l^Dv5|Nr=)Y@P zMsOes@(dGXySF;#CjCZIpkw^eF~II6h=&1&NP%v#i;$7WS%bR_te}LRvP2|F#fURb zay4N^Y)uj`@6TPoHEsBLeJ7ECZ5?cSJD@N&uEGZ2AI~uDU9H*WWE&i~@>O6ReKvUJ`hN1iY9fAZn_Ink;0a_t&pC7zr z--R+h*S&O{JC@FX2M)b;&xl_J(GCU>WQC4?YVN$MD>sn^rr$cSDEc1$OHuZeiy(1S>2g;o&^pNQZovk z0tkq})%wo7bF&K)#Gde%Lh|*`!&)61kXh`1wVBRrxFs=w*iE7~YsK!J3Gz$4F>K^c z7x4lwmD|@1UKkN3^)h}&-L!|;S>uikzzbMHm!TKzz4_k`Tw$XQQgB$6>2DnRE^00#AV5~vjW2ev#sWBq`FY_1 zVjXsrtMzr*0J9?yYc#lMd-#DgbKR!4{8^FhL_~R2!=#_6L5n<~E4}PbeMf!^o<@iw&o!bOYt2<}&8tVA=2q=97)2>lIv=fJ4gg-% zIK$0A4O3=~Yet;G7$h3Z-qQwdw{Y3U<*T?n#^ncb`GdIpC@y~lmp=;22mQA%U0#Rd ziO3Pm10hF-;rfdG#Vr5jiaGxF7JG`>Vm`$WXp8vyVM9BJ+ln*>NFSOhLc=x7I1@U< zXG7*0EljQCX#07t^EsV4o#}jD=zQJ-Qq{T6=S5Z(;wp+1EZdyV{7V$av>kfYMqP*g zV1K;oW7=Nx>l=|5`d_`P;tfx2uQWD-(EsXWQJA_nrfuzRg=RZc+n;*gqnPDL1YpFt z=Y0{Jm?=@THSe!^O`w``Se4a4OnBG^AF|leaZKE2ukAi8^LKW;Z+9HR>*Wazld)M& zq7nT`G-3d;2Y5>94k1XEv;bc@jXl8Uw+;wDAo$x6exM4}UR`%=u04Ir@OJ^1phb8+ zB+eaLY$+9N@&13Z=aPE0s5jm)^8(^cdy_9<6c^2N?x-<0%=jhP)6YpQx6@BT2&^L( zjtIZ=lVCkes%-+0{euAdtZl5*aowMUU_>l?Q4$&W8sIki34wd_uVbSP5`{~GJ?U7q z6ZRx8l!1XbphAYh8*?E-rtvnY8xXDOwth0vw|LPzTLWq!GCCv!GXS1k)?3&fb}`YnyX-}AO7x+1);s{ zMR(>M4ECdY43-AH;1{QWr3Ir$VhkgfuE72CL;%F;b;x1eQB~YsRoYcmD~L&BpL2dM zR&HMhMt=+#H3FoF9X~b1e`FU>VahIVVspJ3S-V==%zhWa{(x<}vl3z|=t>*bT_YkUG}p z@@JrS>{xJOPr(-!b=ixEt)J?mKWLm(>mg17_|0auLi`H1)F%kJ8WJ-~Cdmmqh6fqw zNTW|`nUp1|E4Zza?!uc57HATp(IAv~g9+^XAzc3OvtV%H%jDf}0vy06-1Hqa_yY;1 zW*wHR6R39CfRpUZquyxCo!YUMjR9hYQpt}0TdH`$Cw|qNSh@q7ePRwrIOgYX+VleK zit~|uC-57AA8pwSp1-^j*>@U2WY6PlU6gjyEhsT{A|!SPOb^tvpRyPFLO58$x`@3! 
zmEJ1ON1(T#N$D*cYg%iZf#@z%{f}c3a8{f@fy+ z2N=TnGkEYbxcvDY>&Q8F+v|T!$d3|YTY~m_Pnt7^ys@5QF0YDKF-PlkcE3F>xEo!z zq+|3#6@meODLG(%h6Fmca+o8jm0V*!2Db;U>5N=`59yAm&Z5i%!q;>TWgZZ|rt>KC zfbcb4K$&NUuIV0>i*O&VAupM(rgTPNB+k)Jo|w(`cw$UZG6+w=zUgY7o%1DB*m8BAUnI_9B0&gSfy9cYh1A2r>wNsYTM>$m)Fmma7%%l zjvfuC1K;au6n32(xzNGuJdxN2vKx_*DX!LcH@qpo8Cp4M6nR2wh9?hN zMMmy%c&UizJdKdwA>wK{hA_MND$QE%g-qQ)h66yl0P#0GV_YcO{oj;@xFM6dl7>BT zLz=*B_$z5cYVf3CaaY6tmo}t!Pa2kXH9V!v0VHtc9Nnea>FZO|&Lnx#*YAw!H$vng ztp_ODMw_$fy8MZ%{0Z394ou}|rViR1r4YN`XV0Fs121eg>Y-;x!A8C6MxH)ppFMk) zvJ}tjjrwO^(6Bvx$ARC6IAxEI=`(DEQwg>m8wMO8HVjyf4Fgke41;&Zbch(o-4CZL z^EYnK*<*T=;^_KZWntRkEi%Nee>gvT+qp6S72BD9|F$#52MFn0pPIUX7q8#!cw%HA zB)LfrgCNAoiMtekC^?Q%P7lxq%IWi{BxZrNS+ZN4CZpf_O)v>QBOoeky{URsM!lp8 zf&(07l~IRzHmFDS@AFGGo~nnA7jCRZyED1Im=wwtOQt_8+BV;IEU6xEs+5{Uy2- zuGT)Zk!w72qGG7v>2wl+mR_ zCN~$ExO7g;7J$!$pHZV|bT&Cqg)>e`;6>+Ulo>J>B+AHk=TY_eAe1E|GqX6{I19$l zAR;}A9~_0uAUAHbmVuChfXOgLPT^ZNkD0BchvHpIvt5M-N6dRi&Cjs;8IjQ%4ouDJ zV=^K~3V-If`8kmcy#6g&nAI=hz2C<73Nm&74H%&MCD@Kg zSHsUq87<@2Hn*o_)JWI$vRatcFSpfzO-?lsowahPF#FR!%lA43(H4BWHy!T>+KhXvTd&Bm%J*H_~$NYgOlyT{{23Q=H5H-I!_%s@S zTnEyLrV?GgOx1cVWz}7KpxvvlfTQrfk)^ckY0tyFXCl<)%Qo%*Nqa23Y{Raa zw4t)D18NpFqLsJ=c1Lk9AAnhomoM9i)~HN|*iXHD*~DUi1sJ>bJ1o-LW!b^m0M41l za2UlRh%Le>U>^15%eGNIl}R4a9Xp4kg0^jsoqH=xuc67W0{j??RyHhe#5|~&&`p=p zs!TFOk0;$)t1?OP=dHTU(9?#bF5C0Wx?bP#f-O7p*F50EUJJb{Q>od&u1q^0O*_D- zS#B@*#7hBLn4O-(BT9Jg8>|QXab5ld+WeTis5%Z^Y_dN6HIGu%`PD>d*ZkGU3miBp zY=Yy$&OgU`{|j9HB`)8>1>ee|)~liylY2_h{OfoC9n)w=&c8*|zk?c>;_JK~kih`t zhAWLvM?hsCQHkQv?4Zi4e$`Leol_ZgTpd*zb&5wkN|I;hv{E^iN7bPN%0uc%=gBp= zyp;bxjl;Go4uXDcJZXN26_dCUKP+}W9FQxUUw}}EAC~xV>i)?@Wfc%V!owl}jz1l-{%u*< z@mg>9Yo+8hO3LK5A+__hzV6rh_I&Mox?U@HzgFJ!wXelSGNPArpWC3L`C;tnFbbT1 z1DVJG8coWSA#O8n=KKa|82)jEqLwckd5o8N8kk&y-GR2ra+h2~uBQp(IBgET1_@Sj%_!W^h{QL;WZunz( zWBB&MMt*N>6yyp#MT-{UFb@6YFdCA8EorkF8q<{;t&VK?{X+GBad?b4Usx z7og1<<08!@BeqNq5z@t-e}~Keg6KpHVA+etLPq8PJ+KG#n%F)z8lC@%3-HbtxEd>N z=tfb%^0F79Y!36KK_;5pl-+^zdk$QZ{~>~JY$ORHd37F3lJ8f8>IG{=88rdg?8p&9 ztd;c51!Wg)K47Waw8ufrzGUI^Ro} zH4@V~Pj$rhbo$|~a4ZS6y#wQe*2OzxdVDNA^M-BAi}<#S=DUz?@JbH`rb?S>+E8}` z{7Evt@S%@Ui_&gs2`5~4eZz8`S_GdJqxt2010^yo z`Srix_P^qSZzj@Ide03#>_i(w>;rc-^c-)CH)a?>XHb|?QHCoS zMRa}zjlsesYhsNep-ujKF+DMN#eDwrD#BzThR+Wi;{jmqY+8d>UgfP}HJruw35M}0 qisWBz=;6?%;unTLE|!O{4EGI>4CjY?PGn9LipPfBVxibyR{tOI1@U_T literal 0 HcmV?d00001 diff --git a/web/collect/client/DNS/Opcode.py b/web/collect/client/DNS/Opcode.py new file mode 100644 index 0000000..a51f2ba --- /dev/null +++ b/web/collect/client/DNS/Opcode.py @@ -0,0 +1,52 @@ +""" + $Id: Opcode.py,v 1.6.2.1 2011/03/16 20:06:39 customdesigned Exp $ + + This file is part of the pydns project. + Homepage: http://pydns.sourceforge.net + + This code is covered by the standard Python License. See LICENSE for details. + + Opcode values in message header. RFC 1035, 1996, 2136. +""" + + + +QUERY = 0 +IQUERY = 1 +STATUS = 2 +NOTIFY = 4 +UPDATE = 5 + +# Construct reverse mapping dictionary + +_names = dir() +opcodemap = {} +for _name in _names: + if _name[0] != '_': opcodemap[eval(_name)] = _name + +def opcodestr(opcode): + if opcodemap.has_key(opcode): return opcodemap[opcode] + else: return `opcode` + +# +# $Log: Opcode.py,v $ +# Revision 1.6.2.1 2011/03/16 20:06:39 customdesigned +# Refer to explicit LICENSE file. +# +# Revision 1.6 2002/04/23 10:51:43 anthonybaxter +# Added UPDATE, NOTIFY. +# +# Revision 1.5 2002/03/19 12:41:33 anthonybaxter +# tabnannied and reindented everything. 4 space indent, no tabs. +# yay. +# +# Revision 1.4 2002/03/19 12:26:13 anthonybaxter +# death to leading tabs. 
+# +# Revision 1.3 2001/08/09 09:08:55 anthonybaxter +# added identifying header to top of each file +# +# Revision 1.2 2001/07/19 06:57:07 anthony +# cvs keywords added +# +# diff --git a/web/collect/client/DNS/Opcode.pyc b/web/collect/client/DNS/Opcode.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1efb3a6972d0b2727ea1d2134d33504bf87eca37 GIT binary patch literal 841 zcmZ`%?`zXg6g_FX{)i3~{IKB%FR~8>N7}SlSQ+BjIxNgin+`D`CFb2WnQfBgy_OaB zMdm-~zv5pYH_pv3CgkwmJNKSYui5G`Tjn{-LnPYEdad+pa36G7JLh;1iu7T zhF^xNz^_1U!QTQ`aSQl0M1Wt%br)R2$(L8~8{mc{@2V;;%SfNW;s&@5wuQO1aCp^+ zb=AiwRl^U!e*{&B*KElBFD$^Oy`k#Uc^0OMoouteCfDgX9ml0k+jU#*Zp-aar`>P& z`rQKxSK6dY#X6oRtY~nZ(VlJ7B#Jf7;ssNzDGPE#>5NRolxU+TK8l7M@5_bxj2zTSWnQfM$N}^ zkw{OcDh;PoBPD#i7>qv``oo8%H#wSIc*W`Hd@?-!6L9h2pFnLWvW&Seq)R29RSrz)w+8t4XbL^P_>?7r}P{1__Hqn literal 0 HcmV?d00001 diff --git a/web/collect/client/DNS/Status.py b/web/collect/client/DNS/Status.py new file mode 100644 index 0000000..becd51b --- /dev/null +++ b/web/collect/client/DNS/Status.py @@ -0,0 +1,66 @@ +""" + $Id: Status.py,v 1.7.2.1 2011/03/16 20:06:39 customdesigned Exp $ + + This file is part of the pydns project. + Homepage: http://pydns.sourceforge.net + + This code is covered by the standard Python License. See LICENSE for details. + + Status values in message header +""" + +NOERROR = 0 # No Error [RFC 1035] +FORMERR = 1 # Format Error [RFC 1035] +SERVFAIL = 2 # Server Failure [RFC 1035] +NXDOMAIN = 3 # Non-Existent Domain [RFC 1035] +NOTIMP = 4 # Not Implemented [RFC 1035] +REFUSED = 5 # Query Refused [RFC 1035] +YXDOMAIN = 6 # Name Exists when it should not [RFC 2136] +YXRRSET = 7 # RR Set Exists when it should not [RFC 2136] +NXRRSET = 8 # RR Set that should exist does not [RFC 2136] +NOTAUTH = 9 # Server Not Authoritative for zone [RFC 2136] +NOTZONE = 10 # Name not contained in zone [RFC 2136] +BADVERS = 16 # Bad OPT Version [RFC 2671] +BADSIG = 16 # TSIG Signature Failure [RFC 2845] +BADKEY = 17 # Key not recognized [RFC 2845] +BADTIME = 18 # Signature out of time window [RFC 2845] +BADMODE = 19 # Bad TKEY Mode [RFC 2930] +BADNAME = 20 # Duplicate key name [RFC 2930] +BADALG = 21 # Algorithm not supported [RFC 2930] + +# Construct reverse mapping dictionary + +_names = dir() +statusmap = {} +for _name in _names: + if _name[0] != '_': statusmap[eval(_name)] = _name + +def statusstr(status): + if statusmap.has_key(status): return statusmap[status] + else: return `status` + +# +# $Log: Status.py,v $ +# Revision 1.7.2.1 2011/03/16 20:06:39 customdesigned +# Refer to explicit LICENSE file. +# +# Revision 1.7 2002/04/23 12:52:19 anthonybaxter +# cleanup whitespace. +# +# Revision 1.6 2002/04/23 10:57:57 anthonybaxter +# update to complete the list of response codes. +# +# Revision 1.5 2002/03/19 12:41:33 anthonybaxter +# tabnannied and reindented everything. 4 space indent, no tabs. +# yay. +# +# Revision 1.4 2002/03/19 12:26:13 anthonybaxter +# death to leading tabs. 
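# A tiny sketch of the reverse maps built above with dir()/eval(): numeric
# codes from a reply header map back to their RFC names.  Assumes DNS.Opcode
# and DNS.Status from this patch are importable (Python 2).
from DNS import Opcode, Status

print Opcode.opcodestr(Opcode.QUERY)       # 'QUERY'
print Status.statusstr(Status.NXDOMAIN)    # 'NXDOMAIN'
print Status.statusstr(99)                 # unknown codes come back as '99'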
+# +# Revision 1.3 2001/08/09 09:08:55 anthonybaxter +# added identifying header to top of each file +# +# Revision 1.2 2001/07/19 06:57:07 anthony +# cvs keywords added +# +# diff --git a/web/collect/client/DNS/Status.pyc b/web/collect/client/DNS/Status.pyc new file mode 100644 index 0000000000000000000000000000000000000000..83c236dc187b27170ce67e55c0467b30d7c30072 GIT binary patch literal 1143 zcmaiy-EPxJ6vzKg+q5wR`jM9QZiEnu#L8(}sc0j#A`llM8XMVxr6Zxp_)Hs@Bo3ZQ zMI>$zT<~D~0{bfa1gkS8g&Tw||8!>Ne9ig$-~Xy^xcj-6;`_4#;4uLG4*&urKn|n> zDnXV&OOQ*TWyodF3gil?3|R)PLau_=AlE<@$O_1XbU`;DZ-8z>-n6WOs*txpw;*qW zZbPnv)*&}Q8<3k&TlfOV8is(}#>Ydb8ai(uK<+^8a4UPXiHjxVH8^SqY8&bfpL}uf z=)*TSAHLyl+QL^r?t*HF>)U*rH3x9Ty)LyxY!Wlm-gN%QrD%E&y#uc)4)&YP#{OZW zc_a?@Tl+_?!#_nj(aUYsj$qRb*=nv>0#=9j8aB;L(!Qf-n6 zB?XC>b2H2*;v`GeL@Q6kN{N&1Q$L7(ah4Z?lu5FY_FVV-1;u4Dnkg-^i5M%blXE48 zDj`+48B3ixj1pssvCLrcvod3qvBsz{T*f+MgR#ljV$>KsVZ-?bDU&}={FDcOm(Ogu zPhgSq3LFDo?8VL}pc)ep*EuGrbCltN+G;dTriL3CCVKE*&8?H+_O6cWd=Cp<1<(oN z#?+TFnR5rqA2@1+fh{Nq=#-?2WY3m>j<`X-8EzkgNXXFKe-U-h3~?T39C2EMhQ zv^%f;D7N{Zv^#P4*}{C`pDsN5-QKl#FYH{~LA&QK+}kJ53`6E3lsv&SzAmb zk|Ebd1mvbjpD1tA7b%Kf_k>v{?M*LAg5T_7jypTx&%f2|v+W=IAwE8f0KNv0{{nyj z36KS8fl81i&;sNFs0>*KEkZ7WDv%Y>666wS8FCp^g{*>XNE@^QxdK{+Tm{u2YoImA zHPAZbI%orO1GEXb3AzD!19TJeCTI(C3v>(e7U(wQZO|RaJD_dIZO{(n4%A(|2jng$ zfZW5|7St|IzW*F@A8H?T7dy0zR|`nCV5xnmJ*a!Ed2iv#n-8$we83CZ!%u*`588)c zzsKh;S%57bb*U{z^He$M{EKUG=o~psrzM&Phlh=WR^#wUG!NPbNA1>^BD~6VG9#5o zm$4$zd7Fwywk<}JC>IyeREa1TX^?4=TnIf;BArv5i!@7KsZcw%c$Umm8eFQjm}s50 z8x3RN?6QKtFC<2ab9e8_z@mS%6lsOw-l4PwON^8f$< literal 0 HcmV?d00001 diff --git a/web/collect/client/DNS/__init__.py b/web/collect/client/DNS/__init__.py new file mode 100644 index 0000000..e9744e7 --- /dev/null +++ b/web/collect/client/DNS/__init__.py @@ -0,0 +1,78 @@ +# -*- encoding: utf-8 -*- +# $Id: __init__.py,v 1.8.2.9 2011/03/16 20:06:39 customdesigned Exp $ +# +# This file is part of the pydns project. +# Homepage: http://pydns.sourceforge.net +# +# This code is covered by the standard Python License. See LICENSE for details. +# + +# __init__.py for DNS class. + +__version__ = '2.3.5' + +import Type,Opcode,Status,Class +from Base import DnsRequest, DNSError +from Lib import DnsResult +from Base import * +from Lib import * +Error=DNSError +from lazy import * +Request = DnsRequest +Result = DnsResult + +# +# $Log: __init__.py,v $ +# Revision 1.8.2.9 2011/03/16 20:06:39 customdesigned +# Refer to explicit LICENSE file. +# +# Revision 1.8.2.8 2011/03/03 21:57:15 customdesigned +# Release 2.3.5 +# +# Revision 1.8.2.7 2009/06/09 18:05:29 customdesigned +# Release 2.3.4 +# +# Revision 1.8.2.6 2008/08/01 04:01:25 customdesigned +# Release 2.3.3 +# +# Revision 1.8.2.5 2008/07/28 02:11:07 customdesigned +# Bump version. +# +# Revision 1.8.2.4 2008/07/28 00:17:10 customdesigned +# Randomize source ports. +# +# Revision 1.8.2.3 2008/07/24 20:10:55 customdesigned +# Randomize tid in requests, and check in response. +# +# Revision 1.8.2.2 2007/05/22 21:06:52 customdesigned +# utf-8 in __init__.py +# +# Revision 1.8.2.1 2007/05/22 20:39:20 customdesigned +# Release 2.3.1 +# +# Revision 1.8 2002/05/06 06:17:49 anthonybaxter +# found that the old README file called itself release 2.2. So make +# this one 2.3... +# +# Revision 1.7 2002/05/06 06:16:15 anthonybaxter +# make some sort of reasonable version string. releasewards ho! +# +# Revision 1.6 2002/03/19 13:05:02 anthonybaxter +# converted to class based exceptions (there goes the python1.4 compatibility :) +# +# removed a quite gross use of 'eval()'. 
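# A short sketch using the package-level aliases exported above (Request,
# Result, Error), with the error handling they make possible.  Assumes the
# package is importable; Python 2.
import DNS

DNS.DiscoverNameServers()
try:
    r = DNS.Request(name='example.com', qtype='MX').req()
    r.show()                                # dig-style dump from DnsResult.show()
except DNS.Error, err:
    print 'lookup failed:', err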
+# +# Revision 1.5 2002/03/19 12:41:33 anthonybaxter +# tabnannied and reindented everything. 4 space indent, no tabs. +# yay. +# +# Revision 1.4 2001/11/26 17:57:51 stroeder +# Added __version__ +# +# Revision 1.3 2001/08/09 09:08:55 anthonybaxter +# added identifying header to top of each file +# +# Revision 1.2 2001/07/19 06:57:07 anthony +# cvs keywords added +# +# diff --git a/web/collect/client/DNS/__init__.pyc b/web/collect/client/DNS/__init__.pyc new file mode 100644 index 0000000000000000000000000000000000000000..92618d228c30d5e7f12df36fd8fd49b0cea37a20 GIT binary patch literal 469 zcmYL_O-{ow5QU%f+oUZZ7KkH6ED8d#LkRJ+gH}qi^2Q2{7g0=8Vh5@0xB*w=DqJ9F zoJLr(^~Uo&^NoLg=ket&7oiUHgK(BA+`fCD8%>7ZudY}5|8Gin#y9kmDUS?!|s zp#ty#Dg+O~sEUVr0u_NrL+q0)r;JaTT!!GWeea{5B9{U93^D?r6GD~*MCJpyTwE=# ztG@61W#RxZ#QnI@seJFGHfBQMGJ9&!uuJW(w13LkJJ5(ApaB3oTRnztnbe0X$c)spTT4rLay_eC|w0&OIlU wPX{C_@PFvTVS;lZs!e5tSZsH8s++f_?3BEn(N`P;2%HOc!j4$XViwr_2c+*-Q~&?~ literal 0 HcmV?d00001 diff --git a/web/collect/client/DNS/lazy.py b/web/collect/client/DNS/lazy.py new file mode 100644 index 0000000..4d55fc8 --- /dev/null +++ b/web/collect/client/DNS/lazy.py @@ -0,0 +1,82 @@ +# $Id: lazy.py,v 1.5.2.4 2011/03/19 22:15:01 customdesigned Exp $ +# +# This file is part of the pydns project. +# Homepage: http://pydns.sourceforge.net +# +# This code is covered by the standard Python License. See LICENSE for details. +# + +# routines for lazy people. +import Base +import string + +from Base import DNSError + +def revlookup(name): + "convenience routine for doing a reverse lookup of an address" + names = revlookupall(name) + if not names: return None + return names[0] # return shortest name + +def revlookupall(name): + "convenience routine for doing a reverse lookup of an address" + # FIXME: check for IPv6 + a = string.split(name, '.') + a.reverse() + b = string.join(a, '.')+'.in-addr.arpa' + names = dnslookup(b, qtype = 'ptr') + # this will return all records. + names.sort(key=str.__len__) + return names + +def dnslookup(name,qtype): + "convenience routine to return just answer data for any query type" + if Base.defaults['server'] == []: Base.DiscoverNameServers() + result = Base.DnsRequest(name=name, qtype=qtype).req() + if result.header['status'] != 'NOERROR': + raise DNSError("DNS query status: %s" % result.header['status']) + elif len(result.answers) == 0 and Base.defaults['server_rotate']: + # check with next DNS server + result = Base.DnsRequest(name=name, qtype=qtype).req() + if result.header['status'] != 'NOERROR': + raise DNSError("DNS query status: %s" % result.header['status']) + return map(lambda x: x['data'],result.answers) + +def mxlookup(name): + """ + convenience routine for doing an MX lookup of a name. returns a + sorted list of (preference, mail exchanger) records + """ + l = dnslookup(name, qtype = 'mx') + l.sort() + return l + +# +# $Log: lazy.py,v $ +# Revision 1.5.2.4 2011/03/19 22:15:01 customdesigned +# Added rotation of name servers - SF Patch ID: 2795929 +# +# Revision 1.5.2.3 2011/03/16 20:06:24 customdesigned +# Expand convenience methods. +# +# Revision 1.5.2.2 2011/03/08 21:06:42 customdesigned +# Address sourceforge patch requests 2981978, 2795932 to add revlookupall +# and raise DNSError instead of IndexError on server fail. +# +# Revision 1.5.2.1 2007/05/22 20:23:38 customdesigned +# Lazy call to DiscoverNameServers +# +# Revision 1.5 2002/05/06 06:14:38 anthonybaxter +# reformat, move import to top of file. 
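# The convenience helpers above in one short sketch; dnslookup() calls
# DiscoverNameServers() lazily, so no setup is needed.  Assumes the package
# from this patch is importable (Python 2).
import DNS

print DNS.dnslookup('example.com', qtype='a')   # list of address strings
print DNS.mxlookup('example.com')               # sorted [(preference, exchanger), ...]
print DNS.revlookup('8.8.8.8')                  # shortest PTR name, or None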
+# +# Revision 1.4 2002/03/19 12:41:33 anthonybaxter +# tabnannied and reindented everything. 4 space indent, no tabs. +# yay. +# +# Revision 1.3 2001/08/09 09:08:55 anthonybaxter +# added identifying header to top of each file +# +# Revision 1.2 2001/07/19 06:57:07 anthony +# cvs keywords added +# +# diff --git a/web/collect/client/DNS/lazy.pyc b/web/collect/client/DNS/lazy.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e0fd781c7fd338f4d4d6973ffa20ceec1d297670 GIT binary patch literal 1926 zcmb_d%Wfk@6g|~`#hE0anJ_4dgi0e2<}raaibgzU#A-aKy+L7N)wn8|PTJK@RoTSI zS%BdK`3Qc14a=Z&+v5x%D;q4iyIhZZ>z;G&@$Y}`ZvXM|Z_h_qeOds%1Q5RiU;(Re z0i3MI3MWeDGtQS8PBJLlz^}kb1GXuHCTvRvE!egU+6*-4bpRAMH#ax^o&u1z06gsv zo`q0_5ofa1_Zwb8a|^HlmI~Jj3x!Dw%4Qg-g<`~mL9fA`#{fo^pE+MRKXN)$(^Pn; zPphD9RroV)bZ|2lqSIwnT}&ssI@QK&V{LFT7OcMCWA?NIAh=n*&6H(oa;@^NUgj@v z#x4;l4q}gon+}t6(ieG9ki5G%y|4t#Xr;)7m7t&g=My? zyU4HvR04_yaj4g9hIg+sq$U;_tiZoi>bikN14WA@q&A8UmQZTZz@XRuKfp>P>L~8= zS@_3<@W_OT5kxN2eBu&GnWP}^3pekv>LkfiDEyi1jFYlRiHcY;Bha2#g%{YYk2M7{ z9jlNeT1-LC42Pxj!(orm^dh?yGI}P~FcSs1X&wXe3t{ z`D@Jx<#q;~XQ=jSCTdQ+dd-W?gf<~)iL7Xnv+IoHkc}n|YzyZNREJQh&DOYGdAL;g zMZtdyl{zxNMb+DmB>cQip>7v)(G2klI#{L2Qfh!^*Uai)IO!0BGStDXi$E6v+ zwC2etj4EADNF>_VCOX+;-Km+DDW(U!b`eL_%!R%gyMdG&6OTOgF?W|!7i$p*cPYic za>lxlGqQ-gbY76C!HhCDHWTrQ)+&mbULvkrC~={E!Yq5{0Ln&L9R-XZ!FUP9u5#^DjU>9*-+KE!`98Dca7C+R5I| Ky6x`0?(W~}7kfSc literal 0 HcmV?d00001 diff --git a/web/collect/client/DNS/win32dns.py b/web/collect/client/DNS/win32dns.py new file mode 100644 index 0000000..1552ed7 --- /dev/null +++ b/web/collect/client/DNS/win32dns.py @@ -0,0 +1,144 @@ +""" + $Id: win32dns.py,v 1.3.2.1 2007/05/22 20:26:49 customdesigned Exp $ + + Extract a list of TCP/IP name servers from the registry 0.1 + 0.1 Strobl 2001-07-19 + Usage: + RegistryResolve() returns a list of ip numbers (dotted quads), by + scouring the registry for addresses of name servers + + Tested on Windows NT4 Server SP6a, Windows 2000 Pro SP2 and + Whistler Pro (XP) Build 2462 and Windows ME + ... all having a different registry layout wrt name servers :-/ + + Todo: + + Program doesn't check whether an interface is up or down + + (c) 2001 Copyright by Wolfgang Strobl ws@mystrobl.de, + License analog to the current Python license +""" + +import string, re +import _winreg + +def binipdisplay(s): + "convert a binary array of ip adresses to a python list" + if len(s)%4!= 0: + raise EnvironmentError # well ... + ol=[] + for i in range(len(s)/4): + s1=s[:4] + s=s[4:] + ip=[] + for j in s1: + ip.append(str(ord(j))) + ol.append(string.join(ip,'.')) + return ol + +def stringdisplay(s): + '''convert "d.d.d.d,d.d.d.d" to ["d.d.d.d","d.d.d.d"]. + also handle u'd.d.d.d d.d.d.d', as reporting on SF + ''' + import re + return map(str, re.split("[ ,]",s)) + +def RegistryResolve(): + nameservers=[] + x=_winreg.ConnectRegistry(None,_winreg.HKEY_LOCAL_MACHINE) + try: + y= _winreg.OpenKey(x, + r"SYSTEM\CurrentControlSet\Services\Tcpip\Parameters") + except EnvironmentError: # so it isn't NT/2000/XP + # windows ME, perhaps? 
+ try: # for Windows ME + y= _winreg.OpenKey(x, + r"SYSTEM\CurrentControlSet\Services\VxD\MSTCP") + nameserver,dummytype=_winreg.QueryValueEx(y,'NameServer') + if nameserver and not (nameserver in nameservers): + nameservers.extend(stringdisplay(nameserver)) + except EnvironmentError: + pass + return nameservers # no idea + try: + nameserver = _winreg.QueryValueEx(y, "DhcpNameServer")[0].split() + except: + nameserver = _winreg.QueryValueEx(y, "NameServer")[0].split() + if nameserver: + nameservers=nameserver + nameserver = _winreg.QueryValueEx(y,"NameServer")[0] + _winreg.CloseKey(y) + try: # for win2000 + y= _winreg.OpenKey(x, + r"SYSTEM\CurrentControlSet\Services\Tcpip\Parameters\DNSRegisteredAdapters") + for i in range(1000): + try: + n=_winreg.EnumKey(y,i) + z=_winreg.OpenKey(y,n) + dnscount,dnscounttype=_winreg.QueryValueEx(z, + 'DNSServerAddressCount') + dnsvalues,dnsvaluestype=_winreg.QueryValueEx(z, + 'DNSServerAddresses') + nameservers.extend(binipdisplay(dnsvalues)) + _winreg.CloseKey(z) + except EnvironmentError: + break + _winreg.CloseKey(y) + except EnvironmentError: + pass +# + try: # for whistler + y= _winreg.OpenKey(x, + r"SYSTEM\CurrentControlSet\Services\Tcpip\Parameters\Interfaces") + for i in range(1000): + try: + n=_winreg.EnumKey(y,i) + z=_winreg.OpenKey(y,n) + try: + nameserver,dummytype=_winreg.QueryValueEx(z,'NameServer') + if nameserver and not (nameserver in nameservers): + nameservers.extend(stringdisplay(nameserver)) + except EnvironmentError: + pass + _winreg.CloseKey(z) + except EnvironmentError: + break + _winreg.CloseKey(y) + except EnvironmentError: + #print "Key Interfaces not found, just do nothing" + pass +# + _winreg.CloseKey(x) + return nameservers + +if __name__=="__main__": + print "Name servers:",RegistryResolve() + +# +# $Log: win32dns.py,v $ +# Revision 1.3.2.1 2007/05/22 20:26:49 customdesigned +# Fix win32 nameserver discovery. +# +# Revision 1.3 2002/05/06 06:15:31 anthonybaxter +# apparently some versions of windows return servers as unicode +# string with space sep, rather than strings with comma sep. +# *sigh* +# +# Revision 1.2 2002/03/19 12:41:33 anthonybaxter +# tabnannied and reindented everything. 4 space indent, no tabs. +# yay. +# +# Revision 1.1 2001/08/09 09:22:28 anthonybaxter +# added what I hope is win32 resolver lookup support. I'll need to try +# and figure out how to get the CVS checkout onto my windows machine to +# make sure it works (wow, doing something other than games on the +# windows machine :) +# +# Code from Wolfgang.Strobl@gmd.de +# win32dns.py from +# http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/66260 +# +# Really, ParseResolvConf() should be renamed "FindNameServers" or +# some such. +# +# diff --git a/web/collect/client/check_bw.py b/web/collect/client/check_bw.py new file mode 100755 index 0000000..0f8db34 --- /dev/null +++ b/web/collect/client/check_bw.py @@ -0,0 +1,193 @@ +#!/usr/bin/python + +import commands +import os +import sys +import re +import socket +import struct +import time + +#import ctypes +# TODO: maybe when there's more time; for better readability. +#class History(Structure): +# _fields_ = [ ("version", c_int), +# ("index", c_int), +# ("history", c_float * HISTORY_LENGTH), ] + +# allocate fixed space on disk to save persistent state. +# what to store in this file? +# slice_history : x,x,x,x,x,... +# root_history : y,y,y,y,y,y... 
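# Illustrative sketch, not part of the original patch: the fixed-size on-disk
# record that the constants below describe. A header packed as 'iif'
# (version, index, last_value) is followed by HISTORY_LENGTH floats; every
# write seeks back to offset 0 and rewrites exactly this many bytes, so the
# file never grows and the float slots act as a ring buffer indexed by
# 'index'. Names here are illustrative only.
import struct

SLOTS = 24*30                                    # mirrors HISTORY_LENGTH below
hdr_fmt, body_fmt = 'iif', 'f'*SLOTS
record_size = struct.calcsize(hdr_fmt) + struct.calcsize(body_fmt)

blob = struct.pack(hdr_fmt, 1, 0, 0.0) + struct.pack(body_fmt, *([0.0]*SLOTS))
assert len(blob) == record_size                  # same byte count on every write
version, index, last_value = struct.unpack_from(hdr_fmt, blob, 0)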
+ +HISTORY_LENGTH = 24*30 # 30 days, if checked once an hour +HISTORY_fmt = ('iif', 'f'*HISTORY_LENGTH ) +HISTORY_version = 1 + + +def get_network_bytes(interface): + for line in open('/proc/net/dev', 'r'): + if interface in line: + data = line.split('%s:' % interface)[1].split() + rx_bytes, tx_bytes = (data[0], data[8]) + return (float(rx_bytes), float(tx_bytes)) + return None + +def read_safe_history(filename): + """ + This function guarantees that space is preserved. + If one of the file operations fail, it will throw an exception. + """ + if os.path.exists(filename): + # read existing data + fd = os.open(filename, os.O_RDONLY) + a = os.read(fd, os.path.getsize(filename)) + try: + (version, i, last_value) = struct.unpack_from(HISTORY_fmt[0], a, 0) + assert version == HISTORY_version + history = struct.unpack_from(HISTORY_fmt[1], a, struct.calcsize(HISTORY_fmt[0])) + history = [ h for h in history ] + except: + # TODO: in the future a more clever version migration might be nice. + os.remove(filename) # just nuke the old version + # create for the first time, with empty data + (i, last_value, history) = (0, 0.0, [0]*HISTORY_LENGTH) + write_safe_history(filename, (i, last_value, history), False) + + os.close(fd) + + else: + # create for the first time, with empty data + (i, last_value, history) = (0, 0.0, [0]*HISTORY_LENGTH) + write_safe_history(filename, (i, last_value, history), False) + + return (i, last_value, history) + +def write_safe_history(filename, (i, last_value, history), check_for_file=True): + # length should match, and the file should already exist + assert len(history) == HISTORY_LENGTH + if check_for_file: + assert os.path.exists(filename) + + # open without TRUNC nor APPEND, then seek to beginning to preserve space on disk + fd = os.open(filename, os.O_WRONLY|os.O_CREAT) + os.lseek(fd, 0, 0) + ret = os.write(fd, struct.pack(HISTORY_fmt[0], HISTORY_version, i, last_value)) + ret += os.write(fd, struct.pack(HISTORY_fmt[1], *history)) + os.close(fd) + return ret + +def add_to_history((i, last_value, history), data): + try: + # note, this won't be the case when reboot occurs, or on first run. + assert last_value > 0.0 + assert data > last_value + #print "Recording: %s"% (data-last_value) + history[i] = data-last_value + i += 1 + i = i % HISTORY_LENGTH + except: + # on init when last_value is 0, or reboot when counter resets. 
+ # do not record data except for last_value, do not increment index + pass + + last_value = data + return (i, last_value, history) + +def record_data(filename, data): + rh = read_safe_history(filename) + return write_safe_history(filename, add_to_history(rh, data)) + +def get_percentile(filename, percentile): + (idx,last_version, history) = read_safe_history(filename) + summary = history[idx:] + history[:idx] + measured = filter(lambda x: x != 0, summary) + if len(measured) == 0: + return 0 + + # convert bytes to bw + bw = map(lambda x: x/(60*60*24.0), measured) + bw.sort() + l = len(bw) + pct = bw[int(l*percentile)] + #print bw + + return pct + +def timed(method): + + def timeit(*args, **kw): + ts = time.time() + result = method(*args, **kw) + te = time.time() + + #print '%r (%r, %r) %2.2f sec' % \ + # (method.__name__, args, kw, te-ts) + return (result, te-ts) + + return timeit + +@timed +def check_dns(ip, protocol='udp'): + try: + #ip = ip[:-1] + "0" + ro = DNS.Request(name="www.yahoo.com", qtype="A", server=ip) + r = ro.req(protocol=protocol) + r = "OK" + except DNS.Base.DNSError, e: + r = "Error: %s" % e + return r + +def get_nameserver_ips(filename): + ip_re = re.compile("\d+\.\d+\.\d+\.\d+") + ret = {} + if not os.path.exists(filename): + return ret + + f = open(filename, 'r') + + if 'resolv' in filename: + for l in f: + for field in l.strip().split(): + if ip_re.match(field) and field not in ret: + ret[field] = 0 + + if 'ifcfg' in filename: + for l in f: + if 'DNS' not in l: + continue + for field in l.strip().split('='): + field = field.replace('"', '') + field = field.replace("'", '') + if ip_re.match(field) and field not in ret: + ret[field] = 0 + return ret + +def main(): + + for interface in ['eth0', 'eth1', 'eth2', 'eth3']: + t_bytes = get_network_bytes(interface) + if t_bytes != None: + break + if t_bytes == None: + # massive fail. cannot continue. + sys.exit(1) + + # take diff b/t sum(t_bytes) and last_value + record_data("bw_history.dat", sum(t_bytes)) + record_data("bw_history_rx.dat", t_bytes[0]) + record_data("bw_history_tx.dat", t_bytes[1]) + + print get_percentile("bw_history.dat", 0.90), + print get_percentile("bw_history_rx.dat", 0.90), + print get_percentile("bw_history_tx.dat", 0.90), + + print "" + + +if __name__ == "__main__": + main() + + +# TODO: comon? +#url = """http://comon.cs.princeton.edu/status/tabulator.cgi?table=table_nodeviewshort&select='dns1udp>80 && dns2udp>80&&name=="%s"'&format=formatcsv&dumpcols='dns1udp,dns1tcp,dns2udp,dns2tcp'""" % os.popen("hostname").read().strip() diff --git a/web/collect/client/check_dns.py b/web/collect/client/check_dns.py new file mode 100755 index 0000000..ffd7359 --- /dev/null +++ b/web/collect/client/check_dns.py @@ -0,0 +1,192 @@ +#!/usr/bin/python + +# can't probe comon directly from node. +# http://comon.cs.princeton.edu/status/tabulator.cgi?table=table_nodeviewshort&select='dns1udp>80 && dns2udp>80&&name=="planetlab-01.cs.princeton.edu"'&format=formatcsv&dumpcols='dns1udp,dns1tcp,dns2udp,dns2tcp' + +import commands +import os +import re +import socket +import struct +import DNS +import time +#import ctypes +# TODO: maybe when there's more time; for better readability. +#class History(Structure): +# _fields_ = [ ("version", c_int), +# ("index", c_int), +# ("history", c_float * HISTORY_LENGTH), ] + +# allocate fixed space on disk to save persistent state. +# what to store in this file? +# slice_history : x,x,x,x,x,... +# root_history : y,y,y,y,y,y... 
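# Illustrative sketch, not part of the original patch: in the DNS history
# files below, each slot stores the query's elapsed time on success, -1 on
# failure, and 0 for slots never written, so the success ratio is the count
# of positive entries over the count of non-zero entries. Sample values are
# made up.
sample = [0.02, -1, 0.05, 0, 0, 0.03]
measured = filter(lambda x: x != 0, sample)      # [0.02, -1, 0.05, 0.03]
ratio = float(len(filter(lambda x: x > 0, measured))) / len(measured)
print ratio                                      # 0.75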
+ +HISTORY_LENGTH = 24*30 # 30 days, if checked once an hour +HISTORY_fmt = ('ii', 'f'*HISTORY_LENGTH ) +HISTORY_version = 1 + +def read_safe_history(filename): + """ + This function guarantees that space is preserved. + If one of the file operations fail, it will throw an exception. + """ + if os.path.exists(filename): + # read existing data + fd = os.open(filename, os.O_RDONLY) + a = os.read(fd, os.path.getsize(filename)) + try: + (version, i) = struct.unpack_from(HISTORY_fmt[0], a, 0) + assert version == HISTORY_version + history = struct.unpack_from(HISTORY_fmt[1], a, struct.calcsize(HISTORY_fmt[0])) + history = [ h for h in history ] + except: + # TODO: in the future a more clever version migration might be nice. + os.remove(filename) # just nuke the old version + # create for the first time, with empty data + (i, history) = (0, [0]*HISTORY_LENGTH) + write_safe_history(filename, (i, history), False) + + os.close(fd) + + else: + # create for the first time, with empty data + (i, history) = (0, [0]*HISTORY_LENGTH) + write_safe_history(filename, (i, history), False) + + return (i, history) + +def write_safe_history(filename, (i, history), check_for_file=True): + # length should match, and the file should already exist + assert len(history) == HISTORY_LENGTH + if check_for_file: + assert os.path.exists(filename) + + # open without TRUNC nor APPEND, then seek to beginning to preserve space on disk + fd = os.open(filename, os.O_WRONLY|os.O_CREAT) + os.lseek(fd, 0, 0) + ret = os.write(fd, struct.pack(HISTORY_fmt[0], HISTORY_version, i)) + ret += os.write(fd, struct.pack(HISTORY_fmt[1], *history)) + os.close(fd) + return ret + +def add_to_history((i, history), data): + history[i] = data + i += 1 + i = i % HISTORY_LENGTH + return (i, history) + +def record_status_record(filename, status): + rh = read_safe_history(filename) + return write_safe_history(filename, add_to_history(rh, status)) + +def get_success_ratio(filename): + rh = read_safe_history(filename) + idx = rh[0] + summary = rh[1][idx:] + rh[1][:idx] + measured = filter(lambda x: x != 0, summary) + if len(measured) == 0: + return 0 + + return float(len(filter(lambda x: x > 0, measured)))/float(len(measured)) + +def timed(method): + + def timeit(*args, **kw): + ts = time.time() + result = method(*args, **kw) + te = time.time() + + #print '%r (%r, %r) %2.2f sec' % \ + # (method.__name__, args, kw, te-ts) + return (result, te-ts) + + return timeit + +@timed +def check_dns(ip, protocol='udp'): + try: + #ip = ip[:-1] + "0" + ro = DNS.Request(name="www.yahoo.com", qtype="A", server=ip) + r = ro.req(protocol=protocol) + r = "OK" + except DNS.Base.DNSError, e: + r = "Error: %s" % e + return r + +def get_nameserver_ips(filename): + ip_re = re.compile("\d+\.\d+\.\d+\.\d+") + ret = {} + if not os.path.exists(filename): + return ret + + f = open(filename, 'r') + + if 'resolv' in filename: + for l in f: + for field in l.strip().split(): + if ip_re.match(field) and field not in ret: + ret[field] = 0 + + if 'ifcfg' in filename: + for l in f: + if 'DNS' not in l: + continue + for field in l.strip().split('='): + field = field.replace('"', '') + field = field.replace("'", '') + if ip_re.match(field) and field not in ret: + ret[field] = 0 + return ret + +def main(): + + root_ips = get_nameserver_ips('/etc/resolv.conf') + slice_ips = get_nameserver_ips( '/vservers/princeton_comon/etc/resolv.conf') + + for i,ip in enumerate(root_ips.keys()): + (s,t) = check_dns(ip, 'udp') + if "Error" in s: t = -1 + record_status_record("dns_history_root_udp%s.dat" % i, 
t) + + (s,t) = check_dns(ip, 'tcp') + if "Error" in s: t = -1 + record_status_record("dns_history_root_tcp%s.dat" % i, t) + + for i,ip in enumerate(slice_ips.keys()): + (s,t) = check_dns(ip, 'udp') + if "Error" in s: t = -1 + record_status_record("dns_history_slice_udp%s.dat" % i, t) + + (s,t) = check_dns(ip, 'tcp') + if "Error" in s: t = -1 + record_status_record("dns_history_slice_tcp%s.dat" % i, t) + + if set(root_ips.keys()) == set(slice_ips.keys()): + print "CONF-ROOT_SLICE-MATCH", + else: + print "CONF-ROOT_SLICE-MISMATCH", + #if set(root_ips.keys()) != set(slice_ips.keys()): + #if set(root_ips.keys()) != set(ifcfg_ips.keys()) and len(set(ifcfg_ips.keys())) > 0: + # print "CONF-IFCFG_ROOT-MISMATCH", + + print get_success_ratio('dns_history_root_udp0.dat'), + print get_success_ratio('dns_history_root_udp1.dat'), + print get_success_ratio('dns_history_slice_udp0.dat'), + print get_success_ratio('dns_history_slice_udp1.dat'), + c_dns = os.popen("curl -s http://localhost:3121 | grep -a DNSFail").read().strip() + if len(c_dns) > 9 and "DNS" in c_dns: + c_dns = "cm " + c_dns[9:] + else: + c_dns = "" + print c_dns, + + print "" + + +if __name__ == "__main__": + main() + + +# TODO: comon? +#url = """http://comon.cs.princeton.edu/status/tabulator.cgi?table=table_nodeviewshort&select='dns1udp>80 && dns2udp>80&&name=="%s"'&format=formatcsv&dumpcols='dns1udp,dns1tcp,dns2udp,dns2tcp'""" % os.popen("hostname").read().strip() diff --git a/web/collect/client/check_uptime.py b/web/collect/client/check_uptime.py new file mode 100755 index 0000000..160aeb4 --- /dev/null +++ b/web/collect/client/check_uptime.py @@ -0,0 +1,177 @@ +#!/usr/bin/python + +import commands +import os +import sys +import re +import socket +import struct +import time + +#import ctypes +# TODO: maybe when there's more time; for better readability. +#class History(Structure): +# _fields_ = [ ("version", c_int), +# ("index", c_int), +# ("history", c_float * HISTORY_LENGTH), ] + +# allocate fixed space on disk to save persistent state. +# what to store in this file? +# slice_history : x,x,x,x,x,... +# root_history : y,y,y,y,y,y... + +HISTORY_LENGTH = 24*30 # 30 days, if checked once an hour +HISTORY_fmt = ('ii', 'f'*HISTORY_LENGTH ) +HISTORY_version = 1 + + +def get_network_bytes(interface): + for line in open('/proc/net/dev', 'r'): + if interface in line: + data = line.split('%s:' % interface)[1].split() + rx_bytes, tx_bytes = (data[0], data[8]) + return (float(rx_bytes), float(tx_bytes)) + return None + +def get_uptime(): + for line in open('/proc/uptime', 'r'): + data = line.split()[0] + return float(data) + return None + +def read_safe_history(filename): + """ + This function guarantees that space is preserved. + If one of the file operations fail, it will throw an exception. + """ + if os.path.exists(filename): + # read existing data + fd = os.open(filename, os.O_RDONLY) + a = os.read(fd, os.path.getsize(filename)) + try: + (version, i) = struct.unpack_from(HISTORY_fmt[0], a, 0) + assert version == HISTORY_version + history = struct.unpack_from(HISTORY_fmt[1], a, struct.calcsize(HISTORY_fmt[0])) + history = [ h for h in history ] + except: + # TODO: in the future a more clever version migration might be nice. 
+ os.remove(filename) # just nuke the old version + # create for the first time, with empty data + (i, history) = (0, [0]*HISTORY_LENGTH) + write_safe_history(filename, (i, history), False) + + os.close(fd) + + else: + # create for the first time, with empty data + (i, history) = (0, [0]*HISTORY_LENGTH) + write_safe_history(filename, (i, history), False) + + return (i, history) + +def write_safe_history(filename, (i, history), check_for_file=True): + # length should match, and the file should already exist + assert len(history) == HISTORY_LENGTH + if check_for_file: + assert os.path.exists(filename) + + # open without TRUNC nor APPEND, then seek to beginning to preserve space on disk + fd = os.open(filename, os.O_WRONLY|os.O_CREAT) + os.lseek(fd, 0, 0) + ret = os.write(fd, struct.pack(HISTORY_fmt[0], HISTORY_version, i )) + ret += os.write(fd, struct.pack(HISTORY_fmt[1], *history)) + os.close(fd) + return ret + +def add_to_history((i, history), data): + try: + assert data > 0.0 + history[i] = data + i += 1 + i = i % HISTORY_LENGTH + except: + # do not record data if data <= 0 + pass + return (i, history) + +def record_data(filename, data): + rh = read_safe_history(filename) + return write_safe_history(filename, add_to_history(rh, data)) + +def get_avg_uptime(filename): + (idx, history) = read_safe_history(filename) + summary = history[idx:] + history[:idx] + measured = filter(lambda x: x != 0, summary) + if len(measured) == 0: + return 0 + return float(sum(measured))/float(len(measured)) + +def timed(method): + + def timeit(*args, **kw): + ts = time.time() + result = method(*args, **kw) + te = time.time() + + #print '%r (%r, %r) %2.2f sec' % \ + # (method.__name__, args, kw, te-ts) + return (result, te-ts) + + return timeit + +@timed +def check_dns(ip, protocol='udp'): + try: + #ip = ip[:-1] + "0" + ro = DNS.Request(name="www.yahoo.com", qtype="A", server=ip) + r = ro.req(protocol=protocol) + r = "OK" + except DNS.Base.DNSError, e: + r = "Error: %s" % e + return r + +def get_nameserver_ips(filename): + ip_re = re.compile("\d+\.\d+\.\d+\.\d+") + ret = {} + if not os.path.exists(filename): + return ret + + f = open(filename, 'r') + + if 'resolv' in filename: + for l in f: + for field in l.strip().split(): + if ip_re.match(field) and field not in ret: + ret[field] = 0 + + if 'ifcfg' in filename: + for l in f: + if 'DNS' not in l: + continue + for field in l.strip().split('='): + field = field.replace('"', '') + field = field.replace("'", '') + if ip_re.match(field) and field not in ret: + ret[field] = 0 + return ret + +def main(): + + ut = get_uptime() + if ut == None: + # massive fail. cannot continue. + sys.exit(1) + + record_data("uptime_history.dat", ut) + + print get_avg_uptime("uptime_history.dat"), + + print "" + + +if __name__ == "__main__": + main() + + +# TODO: comon? 
+#url = """http://comon.cs.princeton.edu/status/tabulator.cgi?table=table_nodeviewshort&select='dns1udp>80 && dns2udp>80&&name=="%s"'&format=formatcsv&dumpcols='dns1udp,dns1tcp,dns2udp,dns2tcp'""" % os.popen("hostname").read().strip() diff --git a/web/collect/client/update.sh b/web/collect/client/update.sh new file mode 100644 index 0000000..47016ff --- /dev/null +++ b/web/collect/client/update.sh @@ -0,0 +1,41 @@ +#!/bin/bash + +if [ -f /etc/planetlab/plc_config ]; then + source /etc/planetlab/plc_config +else + PLC_SLICE_PREFIX='pl' +fi + +IP=IPADDR +DIR=multiops +FILE=bootstrap.tar.gz +HDIR=/home/${PLC_SLICE_PREFIX}_myops + +mkdir -p $HDIR +cd $HDIR + +# before update +if [ -f $FILE ] ; then + mod_time_before=`stat -c %Y $FILE` + CURL_ARGS="-z $FILE" +else + mod_time_before=0 + CURL_ARGS="" +fi + +# if bootstrap file has been updated +curl $CURL_ARGS -s -O --insecure https://$IP/$DIR/$FILE + +if [ -f $FILE ] ; then + mod_time_after=`stat -c %Y $FILE` +else + mod_time_after=0 +fi + +if [[ $mod_time_after -gt $mod_time_before ]] ; then + # then an update occurred, and we need to unpack it. + tar -xzf $FILE + chmod 755 ./*.sh ./*.py + ./bootstrap.sh +fi + -- 2.47.0
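The self-update above hinges on curl's time-conditional fetch: -z FILE sends an If-Modified-Since header built from the local tarball's mtime, so the server only returns a body when it has something newer, and comparing mtimes before and after the call tells the script whether to unpack and re-run bootstrap.sh. A minimal sketch of that flow, assuming a copy from an earlier run is already present (the script itself only adds -z in that case) and using a placeholder host:

    before=$(stat -c %Y bootstrap.tar.gz 2>/dev/null || echo 0)
    curl -s -O --insecure -z bootstrap.tar.gz https://myops.example.org/multiops/bootstrap.tar.gz
    after=$(stat -c %Y bootstrap.tar.gz 2>/dev/null || echo 0)
    if [ "$after" -gt "$before" ]; then
        tar -xzf bootstrap.tar.gz && ./bootstrap.sh   # new bundle arrived; unpack and re-bootstrap
    fi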