From 9bb5213371c2a3075adaed95d0e1e6ef5a58e7d6 Mon Sep 17 00:00:00 2001 From: Stephen Soltesz Date: Wed, 16 Nov 2011 17:41:53 +0000 Subject: [PATCH 1/1] Add bw, dns, and uptime checks. Add support library for DNS checks. Add update.sh script for auto-updates. Fix sar2graphite.py : checks for sysstat rpm Improve other scripts. --- web/collect/client/DNS/.cvsignore | 0 web/collect/client/DNS/Base.py | 444 +++++++++++++++++ web/collect/client/DNS/Base.pyc | Bin 0 -> 10073 bytes web/collect/client/DNS/Class.py | 57 +++ web/collect/client/DNS/Class.pyc | Bin 0 -> 795 bytes web/collect/client/DNS/Lib.py | 725 ++++++++++++++++++++++++++++ web/collect/client/DNS/Lib.pyc | Bin 0 -> 22633 bytes web/collect/client/DNS/Opcode.py | 52 ++ web/collect/client/DNS/Opcode.pyc | Bin 0 -> 841 bytes web/collect/client/DNS/Status.py | 66 +++ web/collect/client/DNS/Status.pyc | Bin 0 -> 1143 bytes web/collect/client/DNS/Type.py | 82 ++++ web/collect/client/DNS/Type.pyc | Bin 0 -> 1218 bytes web/collect/client/DNS/__init__.py | 78 +++ web/collect/client/DNS/__init__.pyc | Bin 0 -> 469 bytes web/collect/client/DNS/lazy.py | 82 ++++ web/collect/client/DNS/lazy.pyc | Bin 0 -> 1926 bytes web/collect/client/DNS/win32dns.py | 144 ++++++ web/collect/client/check_bw.py | 193 ++++++++ web/collect/client/check_dns.py | 192 ++++++++ web/collect/client/check_uptime.py | 177 +++++++ web/collect/client/update.sh | 41 ++ 22 files changed, 2333 insertions(+) create mode 100644 web/collect/client/DNS/.cvsignore create mode 100644 web/collect/client/DNS/Base.py create mode 100644 web/collect/client/DNS/Base.pyc create mode 100644 web/collect/client/DNS/Class.py create mode 100644 web/collect/client/DNS/Class.pyc create mode 100644 web/collect/client/DNS/Lib.py create mode 100644 web/collect/client/DNS/Lib.pyc create mode 100644 web/collect/client/DNS/Opcode.py create mode 100644 web/collect/client/DNS/Opcode.pyc create mode 100644 web/collect/client/DNS/Status.py create mode 100644 web/collect/client/DNS/Status.pyc create mode 100644 web/collect/client/DNS/Type.py create mode 100644 web/collect/client/DNS/Type.pyc create mode 100644 web/collect/client/DNS/__init__.py create mode 100644 web/collect/client/DNS/__init__.pyc create mode 100644 web/collect/client/DNS/lazy.py create mode 100644 web/collect/client/DNS/lazy.pyc create mode 100644 web/collect/client/DNS/win32dns.py create mode 100755 web/collect/client/check_bw.py create mode 100755 web/collect/client/check_dns.py create mode 100755 web/collect/client/check_uptime.py create mode 100644 web/collect/client/update.sh diff --git a/web/collect/client/DNS/.cvsignore b/web/collect/client/DNS/.cvsignore new file mode 100644 index 0000000..e69de29 diff --git a/web/collect/client/DNS/Base.py b/web/collect/client/DNS/Base.py new file mode 100644 index 0000000..22e6bf4 --- /dev/null +++ b/web/collect/client/DNS/Base.py @@ -0,0 +1,444 @@ +""" +$Id: Base.py,v 1.12.2.15 2011/03/19 22:15:01 customdesigned Exp $ + +This file is part of the pydns project. +Homepage: http://pydns.sourceforge.net + +This code is covered by the standard Python License. See LICENSE for details. + + Base functionality. Request and Response classes, that sort of thing. +""" + +import socket, string, types, time, select +import Type,Class,Opcode +import asyncore +# +# This random generator is used for transaction ids and port selection. This +# is important to prevent spurious results from lost packets, and malicious +# cache poisoning. 
This doesn't matter if you are behind a caching nameserver +# or your app is a primary DNS server only. To install your own generator, +# replace DNS.Base.random. SystemRandom uses /dev/urandom or similar source. +# +try: + from random import SystemRandom + random = SystemRandom() +except: + import random + +class DNSError(Exception): pass + +# Lib uses DNSError, so import after defining. +import Lib + +defaults= { 'protocol':'udp', 'port':53, 'opcode':Opcode.QUERY, + 'qtype':Type.A, 'rd':1, 'timing':1, 'timeout': 30, + 'server_rotate': 0 } + +defaults['server']=[] + +def ParseResolvConf(resolv_path="/etc/resolv.conf"): + "parses the /etc/resolv.conf file and sets defaults for name servers" + global defaults + lines=open(resolv_path).readlines() + for line in lines: + line = string.strip(line) + if not line or line[0]==';' or line[0]=='#': + continue + fields=string.split(line) + if len(fields) < 2: + continue + if fields[0]=='domain' and len(fields) > 1: + defaults['domain']=fields[1] + if fields[0]=='search': + pass + if fields[0]=='options': + pass + if fields[0]=='sortlist': + pass + if fields[0]=='nameserver': + defaults['server'].append(fields[1]) + +def DiscoverNameServers(): + import sys + if sys.platform in ('win32', 'nt'): + import win32dns + defaults['server']=win32dns.RegistryResolve() + else: + return ParseResolvConf() + +class DnsRequest: + """ high level Request object """ + def __init__(self,*name,**args): + self.donefunc=None + self.async=None + self.defaults = {} + self.argparse(name,args) + self.defaults = self.args + self.tid = 0 + + def argparse(self,name,args): + if not name and self.defaults.has_key('name'): + args['name'] = self.defaults['name'] + if type(name) is types.StringType: + args['name']=name + else: + if len(name) == 1: + if name[0]: + args['name']=name[0] + if defaults['server_rotate'] and \ + type(defaults['server']) == types.ListType: + defaults['server'] = defaults['server'][1:]+defaults['server'][:1] + for i in defaults.keys(): + if not args.has_key(i): + if self.defaults.has_key(i): + args[i]=self.defaults[i] + else: + args[i]=defaults[i] + if type(args['server']) == types.StringType: + args['server'] = [args['server']] + self.args=args + + def socketInit(self,a,b): + self.s = socket.socket(a,b) + + def processUDPReply(self): + if self.timeout > 0: + r,w,e = select.select([self.s],[],[],self.timeout) + if not len(r): + raise DNSError, 'Timeout' + (self.reply, self.from_address) = self.s.recvfrom(65535) + self.time_finish=time.time() + self.args['server']=self.ns + return self.processReply() + + def _readall(self,f,count): + res = f.read(count) + while len(res) < count: + if self.timeout > 0: + # should we restart timeout everytime we get a dribble of data? 
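+              # (as written, no: the deadline stays fixed at time_start + timeout,
+              # so partial reads do not extend the overall timeout)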
+ rem = self.time_start + self.timeout - time.time() + if rem <= 0: raise DNSError,'Timeout' + self.s.settimeout(rem) + buf = f.read(count - len(res)) + if not buf: + raise DNSError,'incomplete reply - %d of %d read' % (len(res),count) + res += buf + return res + + def processTCPReply(self): + if self.timeout > 0: + self.s.settimeout(self.timeout) + else: + self.s.settimeout(None) + f = self.s.makefile('r') + header = self._readall(f,2) + count = Lib.unpack16bit(header) + self.reply = self._readall(f,count) + self.time_finish=time.time() + self.args['server']=self.ns + return self.processReply() + + def processReply(self): + self.args['elapsed']=(self.time_finish-self.time_start)*1000 + u = Lib.Munpacker(self.reply) + r=Lib.DnsResult(u,self.args) + r.args=self.args + #self.args=None # mark this DnsRequest object as used. + return r + #### TODO TODO TODO #### +# if protocol == 'tcp' and qtype == Type.AXFR: +# while 1: +# header = f.read(2) +# if len(header) < 2: +# print '========== EOF ==========' +# break +# count = Lib.unpack16bit(header) +# if not count: +# print '========== ZERO COUNT ==========' +# break +# print '========== NEXT ==========' +# reply = f.read(count) +# if len(reply) != count: +# print '*** Incomplete reply ***' +# break +# u = Lib.Munpacker(reply) +# Lib.dumpM(u) + + def getSource(self): + "Pick random source port to avoid DNS cache poisoning attack." + while True: + try: + source_port = random.randint(1024,65535) + self.s.bind(('', source_port)) + break + except socket.error, msg: + # Error 98, 'Address already in use' + if msg[0] != 98: raise + + def conn(self): + self.getSource() + self.s.connect((self.ns,self.port)) + + def req(self,*name,**args): + " needs a refactoring " + self.argparse(name,args) + #if not self.args: + # raise DNSError,'reinitialize request before reuse' + protocol = self.args['protocol'] + self.port = self.args['port'] + self.tid = random.randint(0,65535) + self.timeout = self.args['timeout']; + opcode = self.args['opcode'] + rd = self.args['rd'] + server=self.args['server'] + if type(self.args['qtype']) == types.StringType: + try: + qtype = getattr(Type, string.upper(self.args['qtype'])) + except AttributeError: + raise DNSError,'unknown query type' + else: + qtype=self.args['qtype'] + if not self.args.has_key('name'): + print self.args + raise DNSError,'nothing to lookup' + qname = self.args['name'] + if qtype == Type.AXFR: + print 'Query type AXFR, protocol forced to TCP' + protocol = 'tcp' + #print 'QTYPE %d(%s)' % (qtype, Type.typestr(qtype)) + m = Lib.Mpacker() + # jesus. keywords and default args would be good. TODO. + m.addHeader(self.tid, + 0, opcode, 0, 0, rd, 0, 0, 0, + 1, 0, 0, 0) + m.addQuestion(qname, qtype, Class.IN) + self.request = m.getbuf() + try: + if protocol == 'udp': + self.sendUDPRequest(server) + else: + self.sendTCPRequest(server) + except socket.error, reason: + raise DNSError, reason + if self.async: + return None + else: + if not self.response: + raise DNSError,'no working nameservers found' + return self.response + + def sendUDPRequest(self, server): + "refactor me" + self.response=None + for self.ns in server: + #print "trying udp",self.ns + try: + if self.ns.count(':'): + if hasattr(socket,'has_ipv6') and socket.has_ipv6: + self.socketInit(socket.AF_INET6, socket.SOCK_DGRAM) + else: continue + else: + self.socketInit(socket.AF_INET, socket.SOCK_DGRAM) + try: + # TODO. 
Handle timeouts &c correctly (RFC) + self.time_start=time.time() + self.conn() + if not self.async: + self.s.send(self.request) + r=self.processUDPReply() + # Since we bind to the source port and connect to the + # destination port, we don't need to check that here, + # but do make sure it's actually a DNS request that the + # packet is in reply to. + while r.header['id'] != self.tid \ + or self.from_address[1] != self.port: + r=self.processUDPReply() + self.response = r + # FIXME: check waiting async queries + finally: + if not self.async: + self.s.close() + except socket.error: + continue + break + + def sendTCPRequest(self, server): + " do the work of sending a TCP request " + self.response=None + for self.ns in server: + #print "trying tcp",self.ns + try: + if self.ns.count(':'): + if hasattr(socket,'has_ipv6') and socket.has_ipv6: + self.socketInit(socket.AF_INET6, socket.SOCK_STREAM) + else: continue + else: + self.socketInit(socket.AF_INET, socket.SOCK_STREAM) + try: + # TODO. Handle timeouts &c correctly (RFC) + self.time_start=time.time() + self.conn() + buf = Lib.pack16bit(len(self.request))+self.request + # Keep server from making sendall hang + self.s.setblocking(0) + # FIXME: throws WOULDBLOCK if request too large to fit in + # system buffer + self.s.sendall(buf) + # SHUT_WR breaks blocking IO with google DNS (8.8.8.8) + #self.s.shutdown(socket.SHUT_WR) + r=self.processTCPReply() + if r.header['id'] == self.tid: + self.response = r + break + finally: + self.s.close() + except socket.error: + continue + +#class DnsAsyncRequest(DnsRequest): +class DnsAsyncRequest(DnsRequest,asyncore.dispatcher_with_send): + " an asynchronous request object. out of date, probably broken " + def __init__(self,*name,**args): + DnsRequest.__init__(self, *name, **args) + # XXX todo + if args.has_key('done') and args['done']: + self.donefunc=args['done'] + else: + self.donefunc=self.showResult + #self.realinit(name,args) # XXX todo + self.async=1 + def conn(self): + self.getSource() + self.connect((self.ns,self.port)) + self.time_start=time.time() + if self.args.has_key('start') and self.args['start']: + asyncore.dispatcher.go(self) + def socketInit(self,a,b): + self.create_socket(a,b) + asyncore.dispatcher.__init__(self) + self.s=self + def handle_read(self): + if self.args['protocol'] == 'udp': + self.response=self.processUDPReply() + if self.donefunc: + apply(self.donefunc,(self,)) + def handle_connect(self): + self.send(self.request) + def handle_write(self): + pass + def showResult(self,*s): + self.response.show() + +# +# $Log: Base.py,v $ +# Revision 1.12.2.15 2011/03/19 22:15:01 customdesigned +# Added rotation of name servers - SF Patch ID: 2795929 +# +# Revision 1.12.2.14 2011/03/17 03:46:03 customdesigned +# Simple test for google DNS with tcp +# +# Revision 1.12.2.13 2011/03/17 03:08:03 customdesigned +# Use blocking IO with timeout for TCP replies. +# +# Revision 1.12.2.12 2011/03/16 17:50:00 customdesigned +# Fix non-blocking TCP replies. (untested) +# +# Revision 1.12.2.11 2010/01/02 16:31:23 customdesigned +# Handle large TCP replies (untested). +# +# Revision 1.12.2.10 2008/08/01 03:58:03 customdesigned +# Don't try to close socket when never opened. +# +# Revision 1.12.2.9 2008/08/01 03:48:31 customdesigned +# Fix more breakage from port randomization patch. Support Ipv6 queries. +# +# Revision 1.12.2.8 2008/07/31 18:22:59 customdesigned +# Wait until tcp response at least starts coming in. +# +# Revision 1.12.2.7 2008/07/28 01:27:00 customdesigned +# Check configured port. 
+# +# Revision 1.12.2.6 2008/07/28 00:17:10 customdesigned +# Randomize source ports. +# +# Revision 1.12.2.5 2008/07/24 20:10:55 customdesigned +# Randomize tid in requests, and check in response. +# +# Revision 1.12.2.4 2007/05/22 20:28:31 customdesigned +# Missing import Lib +# +# Revision 1.12.2.3 2007/05/22 20:25:52 customdesigned +# Use socket.inetntoa,inetaton. +# +# Revision 1.12.2.2 2007/05/22 20:21:46 customdesigned +# Trap socket error +# +# Revision 1.12.2.1 2007/05/22 20:19:35 customdesigned +# Skip bogus but non-empty lines in resolv.conf +# +# Revision 1.12 2002/04/23 06:04:27 anthonybaxter +# attempt to refactor the DNSRequest.req method a little. after doing a bit +# of this, I've decided to bite the bullet and just rewrite the puppy. will +# be checkin in some design notes, then unit tests and then writing the sod. +# +# Revision 1.11 2002/03/19 13:05:02 anthonybaxter +# converted to class based exceptions (there goes the python1.4 compatibility :) +# +# removed a quite gross use of 'eval()'. +# +# Revision 1.10 2002/03/19 12:41:33 anthonybaxter +# tabnannied and reindented everything. 4 space indent, no tabs. +# yay. +# +# Revision 1.9 2002/03/19 12:26:13 anthonybaxter +# death to leading tabs. +# +# Revision 1.8 2002/03/19 10:30:33 anthonybaxter +# first round of major bits and pieces. The major stuff here (summarised +# from my local, off-net CVS server :/ this will cause some oddities with +# the +# +# tests/testPackers.py: +# a large slab of unit tests for the packer and unpacker code in DNS.Lib +# +# DNS/Lib.py: +# placeholder for addSRV. +# added 'klass' to addA, make it the same as the other A* records. +# made addTXT check for being passed a string, turn it into a length 1 list. +# explicitly check for adding a string of length > 255 (prohibited). +# a bunch of cleanups from a first pass with pychecker +# new code for pack/unpack. the bitwise stuff uses struct, for a smallish +# (disappointly small, actually) improvement, while addr2bin is much +# much faster now. +# +# DNS/Base.py: +# added DiscoverNameServers. This automatically does the right thing +# on unix/ win32. No idea how MacOS handles this. *sigh* +# Incompatible change: Don't use ParseResolvConf on non-unix, use this +# function, instead! +# a bunch of cleanups from a first pass with pychecker +# +# Revision 1.5 2001/08/09 09:22:28 anthonybaxter +# added what I hope is win32 resolver lookup support. I'll need to try +# and figure out how to get the CVS checkout onto my windows machine to +# make sure it works (wow, doing something other than games on the +# windows machine :) +# +# Code from Wolfgang.Strobl@gmd.de +# win32dns.py from +# http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/66260 +# +# Really, ParseResolvConf() should be renamed "FindNameServers" or +# some such. +# +# Revision 1.4 2001/08/09 09:08:55 anthonybaxter +# added identifying header to top of each file +# +# Revision 1.3 2001/07/19 07:20:12 anthony +# Handle blank resolv.conf lines. 
+# Patch from Bastian Kleineidam +# +# Revision 1.2 2001/07/19 06:57:07 anthony +# cvs keywords added +# +# diff --git a/web/collect/client/DNS/Base.pyc b/web/collect/client/DNS/Base.pyc new file mode 100644 index 0000000000000000000000000000000000000000..98177d339b682414c73cd91ec55ce5d2109312cf GIT binary patch literal 10073 zcmcIqOLH98dA;3(*9;!Sg8&FVq^3mM6Nv-}%48}OVv7VJk}(O{mL z@5GB~oOj2oXOYIMXOH4G=Gr>YkT$QSQLP;X_YU$#mIil{dYoeMK~Ra~;Lh#Uwe`wc zfWd+&&TC22E|y9G{&9!Fb|xgzA!99nwhKtm=k;HML{G$dr6 zXZ5U^Tt;O+fTV?;C8sy0yHk-VIntYf^vKMEaJVDmv z<4A1WhEA%OqL#<%;%e+9UM|IXeW@MWtodlMo~7G$4$lSfS;b?QP?H<;6!zj7%nqx0 z=9wpR-c!%q_+r9)=9}C%Nx?kz*#bpm##-+g55KU$1t9pLZ(39RiUKRLVdL#D&@Mgp zAL#Wj7;N@wfn!Aj<{>1|Z-{D*IH&=>gPw;?hs?tPlTBk1v>7rB*eI-gg~RuH4Xf9H z@s3uGysonNy2_F&7ceP?9yJTeFqg@>uvBh^&j!e$QkK{$J(w5JDN}e=(#`}4W*g@g z_}{K|n#j-?2ryJrZ`LW2YWX;4`hFrL5*vVVElC|BShjJkU2g~lS&Noqg)Uk}GqJe< z%c0%KAy>IxCej7DtQDtu5v6uqi<(IaThpL<8~73cEwU|_Yr!T3;U+q9h~E0ur`Ez) zQJEOZX$KBq)mANUsJk1TubESq?IdnSwr@nQ+E0EAt?yBsuzjF)6=3}ys_b`>n8LI- zY=*pJ-Ur?=(ix=ZylHRB8#waR*^tY;j@o|!VbP2#VCe0;j5s2l~dS%@!b(0`1$cch% zHETIA;0Q`h!OGfw81F!f?E@!(I6Q^c;aMaaIy}cBrSTRa&fxWp#EPZYfrN^~*dLIt}w(orW3uIssKHV3+dqEkv_5o#Nk$eTG zKBOA>*kNUg2L6_>O7yzSjMC{c17|y`3_SIiG85!vnHVJyG?JZ0(2O6&%|jQFZPA4V zb=pgz`+SC9djm-%Jy)f6uv*nO4OasSGa@1=8(ijtIZy_}ppl4&8Ek^uruNolnq`sh z0LvM$Och4R$azuQHG7cOr7OLwwRc1cGC35il%FS2nG02|jhoxb&{o4mRKG=|oUd91 z3*^-*;cV&Xj91iuRArsxa(P_mDjwT}Sb*lStn7A(O+px8cF$z9y(;poBXDsjkc=*9 zf#rg$AC8!80!<|;G+jwAJ2F-q$N1S;{l!5BSRBRWs&||ydM*dep%>u$kQ%~e$R5)p!DB+| z$q@rN*8zkoYSe6XH$D&m=Kw9raFn?|I`O+DTCCtUrhHtOFQ9#zz2KK!)lN z%pf0_5~U%Ft0){>ci|K{(`NPEIM>3gY(Ps^xu$gM&C8WU+yJ*>0iYuzUNxRfp8}u> zYEl1y3|4_f0k4b|ZUI<|XjtnTC~(K{f7j^wfca^GypMneXaXJ`{#)bd25$4sFMPBA zz@gZU0IC4O0k@yJ{SIl@!_-kSLS&*@wW#q+O2Kh(BsUoj{C+o-`}%9B zkAx@I3+YjvYmS?sx$t9@1qQ{KLPR^RKiY01%H@ddxYayR0L-dtEsEfetpgZlkcVc} zSltGV*oJdj4nyjN=(LUwA%~Gq?`xGnf>H3UQ;7Vd6hJ8&}ZT)h|WsSlu`C*f5w2 zaz614!KMzM55aiUCln`rDlkMJ9Q6>~p)mqG)%7Mnobv=;J%ei)G_5mk0AS?Ef^dwR z13|K(vqWbOURyNQzF;;SVW?07;V8v%OX`&f`ED3oZoTiFh_c%8;xdsj=h~ zE-27(OMf_Rwe#=>ib8^b;YRFQWwnu81hH$cu&7uo$a@1bp$h2 zxtLnB*+$-ykqnerFf-nye-czN?j83Q5RIKdZJp#tr55l=vHy$=b#!Qx^G=5lw@e5x zqynBD&`i~ltLv6!iPQyTj~38~+zc3p=MtyK9%c?35H>x6M#P&Gd&b!_OSVMZ(KXKT zSB*2gg>@?ocUC!pqO8)H;yQ~TAdxBV)plc^w!{0Z;nwdYTe7)M+N#xem#=IkdB~8; zX>N_VPnpp3hc}V*jV>fN%k~=(d)yB1@iiWoyf$d=+THzCXLnzsu|3W%u(f%A5@GT* zlr!sW?`Xs%6@_R>ak$hoC%~S^Sphrq%?dWeqZ`VDR29^~8Ze*Jhq#|dLUn+lBHBV) z5z((F{|#}7>EdRsW#dS(R457D#k@`ee~g^YC3l^`uxm<7RSHlo&P}g)j*e9HX2D3T zUtyGP)O!jtnDxedL6kZo|7iafnI}!Uvn(>+!3A15_YRo3&AE+A43V4!OrM~=x3OO^ z1F$c~F(t!jh-YQR3y64l$SCM+`wN`xA0o;q81Dg-4Fty(Ny4sR@_R{rH)!k50{0ud zdkJ_s5ad}NzqM9~X2xae@3iFP!M z94|wXL{B%=ngdlfBsUU+`;LFkZ>=8fCy6Q5QV1?z|9g|mwQ=sp)|q|ccJkq z0Q|mzM*#i6vnS&(Hza8en~nY70=5&v>?ChdJsdu97Gt>cTR=2r+^X54$i?t^VM#Yk=3+7;n0R%2@8Oe8D z%t`Zf$oZV^Hc#~`Hqx_@%W0FGk!aw`y=Nt?^q6@n;iRzb2b9G-CL6?trcFh13XDQV zEO6?mBm!I;ph=l0ayyG8NaHxNK@GlsyH?LL-ns;#kV|+v+f^h;sD;FESY*P$FdRW5 z8|tLHY4$h`a97&K9W2iYjQ%aLnPz!|qk=k`S+?70+3%tD$A>M0YY#pR&+`UYw~@Sx zs^cyf&EY)cy7GDp5X5}MzCEomgU4BWm&5k2mUxTRNu%}hS+>2vM9z#G34g|<#$<_! 
zcJqDac=;J#K_U(UJ%Gj9q8}Z1=J|&5=-_s#4TKtV4Gog5P9EzP*9AQvu^E@g&4h9u zl+|7OT6Ckx5CPwk`;;3-2_mq(FVpa=yi!&We0yE47Arv<!tgwk!;k<`mTs@h+WX5VG^g>`Xjg07r;Oo&z^_^wf-f-sTL`OwrtNtD zU-4t_ATj=AVH}Qs7SFi;I|IUf-<$BJ{c#-XMpQQm`Yrim{+M?H?dBNKF+lgH&|(6N zH|Up8u7l+i%TcDQcr32V$yYQlToM0~yCS|$GXTdOoR&T4t{5~3s>nWu{R~j4&X;EA z6iUNkW+xrVfFn#t%AZ0v4jbU~Zc105ARIhoocq`b3j%_-ZgnS})tv>a=j6melSe!6|2xBc z7hrm#vkfPd1ao(J26Kax;6BK94>r;T7b=_(w}4jgZy&oPr`$t^1GpxW|@CkGBcZ?c^XLO;FIi&t6-~WKG$R^i5t=?W=yMM*q zI()YJqw0;D;kCQ!)_vy!Rz+$J_7%zxV+Pts*kS#Tm^@%2izie*EVVjZ5>$F|`ky!j0tdx>rQB5r_-egOzQiT{RiIZ^PB0jnj{j13mdyf=>{=2W2s zJWm1VLN9rO$0shyuD*ocj~seGq-u%YA?f)49lc+@8NHwwe2dJFU>2|?@TSasH^!FR z1ZMkqrt5U^5L?a)-|(G?SMhBTaVTr&knP7Ys$! zMR*PW97Se)d38q3WDH$JRwwkz>!deA)t{X?( z@NG!VPR{uj$s?X_z9?KWx`r$789G7#&T<=$v#Y1XlaFmvAo76W`6B^D!m3{|MJRoiMF1Y7NF zH%|Ls45-o1I40GN!xP99@^aDqU(;ED2()fkT6S7xhV}|0Rj-&G!7PJObXUO6uL#a9 zz~k|BLItiXSm7rF5w^Rap|qVg8P0?sauMVy+sGc{^A8TZBHJU3$skWbR$r07qeH^u z0x`#-3=UYQ;M2Q@Uh0O+S;b>_k#N;V4_Pd`gM6WP!otekjT+un-6=#jE?w{bp4&RU zm{2mu;oEf*Np#CXh9b#G4n*_1yn(K;TwEli?%+-TjCRD~D4tZTN7YN?SsQ6a=%e|Bdjw&x7LiItu zj;JkGC$OruX z!nix;g1+g5J&HCBTtDDXDtNQ_3HL<}8(bvPogeOum=RcyBja|)RDtEuv%i?~1==VU z`BrP-(W1MC?lr+0m%))te*xT$7QpQv(KB0pt7*p2ushA;Hojimd}R{O$4SWBI={JH za*KFzWJYa~0vDB#Bit8Zoh$?>yn90w^%dUS?oV8%0rkC0_?IjSm<{ISR{oB;UorW6 zCLE!UPUU`wdHxD*KIANu$jEyXi$m}jgGImSyUsk`x#{HGRlLl)8%#c7vdg56L}#DR z^p`QtetyO`c-*bv+LL;$zkhHiLOFe+@W(${3qRNC>01BedC2MI-f|+TPaf6BN#3X` ziskaZVT+2HSKJ>1{eVKW=a3BfEBHf%v5|s~TS)!6!Wd}xT^z4)z?v(L%>3qm0ToZR Ar~m)} literal 0 HcmV?d00001 diff --git a/web/collect/client/DNS/Class.py b/web/collect/client/DNS/Class.py new file mode 100644 index 0000000..d67fc78 --- /dev/null +++ b/web/collect/client/DNS/Class.py @@ -0,0 +1,57 @@ +""" +$Id: Class.py,v 1.6.2.1 2011/03/16 20:06:39 customdesigned Exp $ + + This file is part of the pydns project. + Homepage: http://pydns.sourceforge.net + + This code is covered by the standard Python License. See LICENSE for details. + + CLASS values (section 3.2.4) +""" + + +IN = 1 # the Internet +CS = 2 # the CSNET class (Obsolete - used only for examples in + # some obsolete RFCs) +CH = 3 # the CHAOS class. When someone shows me python running on + # a Symbolics Lisp machine, I'll look at implementing this. +HS = 4 # Hesiod [Dyer 87] + +# QCLASS values (section 3.2.5) + +ANY = 255 # any class + + +# Construct reverse mapping dictionary + +_names = dir() +classmap = {} +for _name in _names: + if _name[0] != '_': classmap[eval(_name)] = _name + +def classstr(klass): + if classmap.has_key(klass): return classmap[klass] + else: return `klass` + +# +# $Log: Class.py,v $ +# Revision 1.6.2.1 2011/03/16 20:06:39 customdesigned +# Refer to explicit LICENSE file. +# +# Revision 1.6 2002/04/23 12:52:19 anthonybaxter +# cleanup whitespace. +# +# Revision 1.5 2002/03/19 12:41:33 anthonybaxter +# tabnannied and reindented everything. 4 space indent, no tabs. +# yay. +# +# Revision 1.4 2002/03/19 12:26:13 anthonybaxter +# death to leading tabs. 
+# +# Revision 1.3 2001/08/09 09:08:55 anthonybaxter +# added identifying header to top of each file +# +# Revision 1.2 2001/07/19 06:57:07 anthony +# cvs keywords added +# +# diff --git a/web/collect/client/DNS/Class.pyc b/web/collect/client/DNS/Class.pyc new file mode 100644 index 0000000000000000000000000000000000000000..019262fba3bad31f1d8f10fcf70e0120a94a9f79 GIT binary patch literal 795 zcmaJy}}Z3h3o4(T@}YEK{lVnPNn1JQ!G1;T>Z z0?~%J4Wa{a2hfIFO6x*S*n^ufU>7bvKZn=>bU@f}EV^)IfqVjt7O)5Cn#e7Kv#$X# zl8>SXKdAfx2nQn1HTzo(z&%-rfKNB6Qhs%KvF9Uy;-C5>emWeD2E*}SG$At>PJ;0( zp6!&DTY;)rmnit`rs5~A%Rl6W;;UkVoOG4en#&c}Ir8ctN?NJq6=vFZ`CM+XO4k_h zT 255: + raise ValueError, "Can't encode string of length "+ \ + "%s (> 255)"%(len(s)) + self.addbyte(chr(len(s))) + self.addbytes(s) + def addname(self, name): + # Domain name packing (section 4.1.4) + # Add a domain name to the buffer, possibly using pointers. + # The case of the first occurrence of a name is preserved. + # Redundant dots are ignored. + list = [] + for label in string.splitfields(name, '.'): + if not label: + raise PackError, 'empty label' + list.append(label) + keys = [] + for i in range(len(list)): + key = string.upper(string.joinfields(list[i:], '.')) + keys.append(key) + if self.index.has_key(key): + pointer = self.index[key] + break + else: + i = len(list) + pointer = None + # Do it into temporaries first so exceptions don't + # mess up self.index and self.buf + buf = '' + offset = len(self.buf) + index = [] + if DNS.LABEL_UTF8: + enc = 'utf8' + else: + enc = DNS.LABEL_ENCODING + for j in range(i): + label = list[j] + try: + label = label.encode(enc) + except UnicodeEncodeError: + if not DNS.LABEL_UTF8: raise + if not label.startswith('\ufeff'): + label = '\ufeff'+label + label = label.encode(enc) + n = len(label) + if n > 63: + raise PackError, 'label too long' + if offset + len(buf) < 0x3FFF: + index.append((keys[j], offset + len(buf))) + else: + print 'DNS.Lib.Packer.addname:', + print 'warning: pointer too big' + buf = buf + (chr(n) + label) + if pointer: + buf = buf + pack16bit(pointer | 0xC000) + else: + buf = buf + '\0' + self.buf = self.buf + buf + for key, value in index: + self.index[key] = value + def dump(self): + keys = self.index.keys() + keys.sort() + print '-'*40 + for key in keys: + print '%20s %3d' % (key, self.index[key]) + print '-'*40 + space = 1 + for i in range(0, len(self.buf)+1, 2): + if self.buf[i:i+2] == '**': + if not space: print + space = 1 + continue + space = 0 + print '%4d' % i, + for c in self.buf[i:i+2]: + if ' ' < c < '\177': + print ' %c' % c, + else: + print '%2d' % ord(c), + print + print '-'*40 + + +# Unpacking class + + +class Unpacker: + def __init__(self, buf): + self.buf = buf + self.offset = 0 + def getbyte(self): + if self.offset >= len(self.buf): + raise UnpackError, "Ran off end of data" + c = self.buf[self.offset] + self.offset = self.offset + 1 + return c + def getbytes(self, n): + s = self.buf[self.offset : self.offset + n] + if len(s) != n: raise UnpackError, 'not enough data left' + self.offset = self.offset + n + return s + def get16bit(self): + return unpack16bit(self.getbytes(2)) + def get32bit(self): + return unpack32bit(self.getbytes(4)) + def getaddr(self): + return bin2addr(self.get32bit()) + def getstring(self): + return self.getbytes(ord(self.getbyte())) + def getname(self): + # Domain name unpacking (section 4.1.4) + c = self.getbyte() + i = ord(c) + if i & 0xC0 == 0xC0: + d = self.getbyte() + j = ord(d) + pointer = ((i<<8) | j) & ~0xC000 + save_offset = self.offset + try: + 
self.offset = pointer + domain = self.getname() + finally: + self.offset = save_offset + return domain + if i == 0: + return '' + domain = self.getbytes(i) + remains = self.getname() + if not remains: + return domain + else: + return domain + '.' + remains + + +# Test program for packin/unpacking (section 4.1.4) + +def testpacker(): + N = 2500 + R = range(N) + import timing + # See section 4.1.4 of RFC 1035 + timing.start() + for i in R: + p = Packer() + p.addaddr('192.168.0.1') + p.addbytes('*' * 20) + p.addname('f.ISI.ARPA') + p.addbytes('*' * 8) + p.addname('Foo.F.isi.arpa') + p.addbytes('*' * 18) + p.addname('arpa') + p.addbytes('*' * 26) + p.addname('') + timing.finish() + print timing.milli(), "ms total for packing" + print round(timing.milli() / i, 4), 'ms per packing' + #p.dump() + u = Unpacker(p.buf) + u.getaddr() + u.getbytes(20) + u.getname() + u.getbytes(8) + u.getname() + u.getbytes(18) + u.getname() + u.getbytes(26) + u.getname() + timing.start() + for i in R: + u = Unpacker(p.buf) + + res = (u.getaddr(), + u.getbytes(20), + u.getname(), + u.getbytes(8), + u.getname(), + u.getbytes(18), + u.getname(), + u.getbytes(26), + u.getname()) + timing.finish() + print timing.milli(), "ms total for unpacking" + print round(timing.milli() / i, 4), 'ms per unpacking' + #for item in res: print item + + +# Pack/unpack RR toplevel format (section 3.2.1) + +class RRpacker(Packer): + def __init__(self): + Packer.__init__(self) + self.rdstart = None + def addRRheader(self, name, type, klass, ttl, *rest): + self.addname(name) + self.add16bit(type) + self.add16bit(klass) + self.add32bit(ttl) + if rest: + if rest[1:]: raise TypeError, 'too many args' + rdlength = rest[0] + else: + rdlength = 0 + self.add16bit(rdlength) + self.rdstart = len(self.buf) + def patchrdlength(self): + rdlength = unpack16bit(self.buf[self.rdstart-2:self.rdstart]) + if rdlength == len(self.buf) - self.rdstart: + return + rdata = self.buf[self.rdstart:] + save_buf = self.buf + ok = 0 + try: + self.buf = self.buf[:self.rdstart-2] + self.add16bit(len(rdata)) + self.buf = self.buf + rdata + ok = 1 + finally: + if not ok: self.buf = save_buf + def endRR(self): + if self.rdstart is not None: + self.patchrdlength() + self.rdstart = None + def getbuf(self): + if self.rdstart is not None: self.patchrdlength() + return Packer.getbuf(self) + # Standard RRs (section 3.3) + def addCNAME(self, name, klass, ttl, cname): + self.addRRheader(name, Type.CNAME, klass, ttl) + self.addname(cname) + self.endRR() + def addHINFO(self, name, klass, ttl, cpu, os): + self.addRRheader(name, Type.HINFO, klass, ttl) + self.addstring(cpu) + self.addstring(os) + self.endRR() + def addMX(self, name, klass, ttl, preference, exchange): + self.addRRheader(name, Type.MX, klass, ttl) + self.add16bit(preference) + self.addname(exchange) + self.endRR() + def addNS(self, name, klass, ttl, nsdname): + self.addRRheader(name, Type.NS, klass, ttl) + self.addname(nsdname) + self.endRR() + def addPTR(self, name, klass, ttl, ptrdname): + self.addRRheader(name, Type.PTR, klass, ttl) + self.addname(ptrdname) + self.endRR() + def addSOA(self, name, klass, ttl, + mname, rname, serial, refresh, retry, expire, minimum): + self.addRRheader(name, Type.SOA, klass, ttl) + self.addname(mname) + self.addname(rname) + self.add32bit(serial) + self.add32bit(refresh) + self.add32bit(retry) + self.add32bit(expire) + self.add32bit(minimum) + self.endRR() + def addTXT(self, name, klass, ttl, list): + self.addRRheader(name, Type.TXT, klass, ttl) + if type(list) is types.StringType: + list 
= [list] + for txtdata in list: + self.addstring(txtdata) + self.endRR() + # Internet specific RRs (section 3.4) -- class = IN + def addA(self, name, klass, ttl, address): + self.addRRheader(name, Type.A, klass, ttl) + self.addaddr(address) + self.endRR() + def addWKS(self, name, ttl, address, protocol, bitmap): + self.addRRheader(name, Type.WKS, Class.IN, ttl) + self.addaddr(address) + self.addbyte(chr(protocol)) + self.addbytes(bitmap) + self.endRR() + def addSRV(self): + raise NotImplementedError + +def prettyTime(seconds): + if seconds<60: + return seconds,"%d seconds"%(seconds) + if seconds<3600: + return seconds,"%d minutes"%(seconds/60) + if seconds<86400: + return seconds,"%d hours"%(seconds/3600) + if seconds<604800: + return seconds,"%d days"%(seconds/86400) + else: + return seconds,"%d weeks"%(seconds/604800) + + +class RRunpacker(Unpacker): + def __init__(self, buf): + Unpacker.__init__(self, buf) + self.rdend = None + def getRRheader(self): + name = self.getname() + rrtype = self.get16bit() + klass = self.get16bit() + ttl = self.get32bit() + rdlength = self.get16bit() + self.rdend = self.offset + rdlength + return (name, rrtype, klass, ttl, rdlength) + def endRR(self): + if self.offset != self.rdend: + raise UnpackError, 'end of RR not reached' + def getCNAMEdata(self): + return self.getname() + def getHINFOdata(self): + return self.getstring(), self.getstring() + def getMXdata(self): + return self.get16bit(), self.getname() + def getNSdata(self): + return self.getname() + def getPTRdata(self): + return self.getname() + def getSOAdata(self): + return self.getname(), \ + self.getname(), \ + ('serial',)+(self.get32bit(),), \ + ('refresh ',)+prettyTime(self.get32bit()), \ + ('retry',)+prettyTime(self.get32bit()), \ + ('expire',)+prettyTime(self.get32bit()), \ + ('minimum',)+prettyTime(self.get32bit()) + def getTXTdata(self): + list = [] + while self.offset != self.rdend: + list.append(self.getstring()) + return list + getSPFdata = getTXTdata + def getAdata(self): + return self.getaddr() + def getWKSdata(self): + address = self.getaddr() + protocol = ord(self.getbyte()) + bitmap = self.getbytes(self.rdend - self.offset) + return address, protocol, bitmap + def getSRVdata(self): + """ + _Service._Proto.Name TTL Class SRV Priority Weight Port Target + """ + priority = self.get16bit() + weight = self.get16bit() + port = self.get16bit() + target = self.getname() + #print '***priority, weight, port, target', priority, weight, port, target + return priority, weight, port, target + + +# Pack/unpack Message Header (section 4.1) + +class Hpacker(Packer): + def addHeader(self, id, qr, opcode, aa, tc, rd, ra, z, rcode, + qdcount, ancount, nscount, arcount): + self.add16bit(id) + self.add16bit((qr&1)<<15 | (opcode&0xF)<<11 | (aa&1)<<10 + | (tc&1)<<9 | (rd&1)<<8 | (ra&1)<<7 + | (z&7)<<4 | (rcode&0xF)) + self.add16bit(qdcount) + self.add16bit(ancount) + self.add16bit(nscount) + self.add16bit(arcount) + +class Hunpacker(Unpacker): + def getHeader(self): + id = self.get16bit() + flags = self.get16bit() + qr, opcode, aa, tc, rd, ra, z, rcode = ( + (flags>>15)&1, + (flags>>11)&0xF, + (flags>>10)&1, + (flags>>9)&1, + (flags>>8)&1, + (flags>>7)&1, + (flags>>4)&7, + (flags>>0)&0xF) + qdcount = self.get16bit() + ancount = self.get16bit() + nscount = self.get16bit() + arcount = self.get16bit() + return (id, qr, opcode, aa, tc, rd, ra, z, rcode, + qdcount, ancount, nscount, arcount) + + +# Pack/unpack Question (section 4.1.2) + +class Qpacker(Packer): + def addQuestion(self, qname, qtype, qclass): + 
self.addname(qname) + self.add16bit(qtype) + self.add16bit(qclass) + +class Qunpacker(Unpacker): + def getQuestion(self): + return self.getname(), self.get16bit(), self.get16bit() + + +# Pack/unpack Message(section 4) +# NB the order of the base classes is important for __init__()! + +class Mpacker(RRpacker, Qpacker, Hpacker): + pass + +class Munpacker(RRunpacker, Qunpacker, Hunpacker): + pass + + +# Routines to print an unpacker to stdout, for debugging. +# These affect the unpacker's current position! + +def dumpM(u): + print 'HEADER:', + (id, qr, opcode, aa, tc, rd, ra, z, rcode, + qdcount, ancount, nscount, arcount) = u.getHeader() + print 'id=%d,' % id, + print 'qr=%d, opcode=%d, aa=%d, tc=%d, rd=%d, ra=%d, z=%d, rcode=%d,' \ + % (qr, opcode, aa, tc, rd, ra, z, rcode) + if tc: print '*** response truncated! ***' + if rcode: print '*** nonzero error code! (%d) ***' % rcode + print ' qdcount=%d, ancount=%d, nscount=%d, arcount=%d' \ + % (qdcount, ancount, nscount, arcount) + for i in range(qdcount): + print 'QUESTION %d:' % i, + dumpQ(u) + for i in range(ancount): + print 'ANSWER %d:' % i, + dumpRR(u) + for i in range(nscount): + print 'AUTHORITY RECORD %d:' % i, + dumpRR(u) + for i in range(arcount): + print 'ADDITIONAL RECORD %d:' % i, + dumpRR(u) + +class DnsResult: + + def __init__(self,u,args): + self.header={} + self.questions=[] + self.answers=[] + self.authority=[] + self.additional=[] + self.args=args + self.storeM(u) + + def show(self): + import time + print '; <<>> PDG.py 1.0 <<>> %s %s'%(self.args['name'], + self.args['qtype']) + opt="" + if self.args['rd']: + opt=opt+'recurs ' + h=self.header + print ';; options: '+opt + print ';; got answer:' + print ';; ->>HEADER<<- opcode %s, status %s, id %d'%( + h['opcode'],h['status'],h['id']) + flags=filter(lambda x,h=h:h[x],('qr','aa','rd','ra','tc')) + print ';; flags: %s; Ques: %d, Ans: %d, Auth: %d, Addit: %d'%( + string.join(flags),h['qdcount'],h['ancount'],h['nscount'], + h['arcount']) + print ';; QUESTIONS:' + for q in self.questions: + print ';; %s, type = %s, class = %s'%(q['qname'],q['qtypestr'], + q['qclassstr']) + print + print ';; ANSWERS:' + for a in self.answers: + print '%-20s %-6s %-6s %s'%(a['name'],`a['ttl']`,a['typename'], + a['data']) + print + print ';; AUTHORITY RECORDS:' + for a in self.authority: + print '%-20s %-6s %-6s %s'%(a['name'],`a['ttl']`,a['typename'], + a['data']) + print + print ';; ADDITIONAL RECORDS:' + for a in self.additional: + print '%-20s %-6s %-6s %s'%(a['name'],`a['ttl']`,a['typename'], + a['data']) + print + if self.args.has_key('elapsed'): + print ';; Total query time: %d msec'%self.args['elapsed'] + print ';; To SERVER: %s'%(self.args['server']) + print ';; WHEN: %s'%time.ctime(time.time()) + + def storeM(self,u): + (self.header['id'], self.header['qr'], self.header['opcode'], + self.header['aa'], self.header['tc'], self.header['rd'], + self.header['ra'], self.header['z'], self.header['rcode'], + self.header['qdcount'], self.header['ancount'], + self.header['nscount'], self.header['arcount']) = u.getHeader() + self.header['opcodestr']=Opcode.opcodestr(self.header['opcode']) + self.header['status']=Status.statusstr(self.header['rcode']) + for i in range(self.header['qdcount']): + #print 'QUESTION %d:' % i, + self.questions.append(self.storeQ(u)) + for i in range(self.header['ancount']): + #print 'ANSWER %d:' % i, + self.answers.append(self.storeRR(u)) + for i in range(self.header['nscount']): + #print 'AUTHORITY RECORD %d:' % i, + self.authority.append(self.storeRR(u)) + for i in 
range(self.header['arcount']): + #print 'ADDITIONAL RECORD %d:' % i, + self.additional.append(self.storeRR(u)) + + def storeQ(self,u): + q={} + q['qname'], q['qtype'], q['qclass'] = u.getQuestion() + q['qtypestr']=Type.typestr(q['qtype']) + q['qclassstr']=Class.classstr(q['qclass']) + return q + + def storeRR(self,u): + r={} + r['name'],r['type'],r['class'],r['ttl'],r['rdlength'] = u.getRRheader() + r['typename'] = Type.typestr(r['type']) + r['classstr'] = Class.classstr(r['class']) + #print 'name=%s, type=%d(%s), class=%d(%s), ttl=%d' \ + # % (name, + # type, typename, + # klass, Class.classstr(class), + # ttl) + mname = 'get%sdata' % r['typename'] + if hasattr(u, mname): + r['data']=getattr(u, mname)() + else: + r['data']=u.getbytes(r['rdlength']) + return r + +def dumpQ(u): + qname, qtype, qclass = u.getQuestion() + print 'qname=%s, qtype=%d(%s), qclass=%d(%s)' \ + % (qname, + qtype, Type.typestr(qtype), + qclass, Class.classstr(qclass)) + +def dumpRR(u): + name, type, klass, ttl, rdlength = u.getRRheader() + typename = Type.typestr(type) + print 'name=%s, type=%d(%s), class=%d(%s), ttl=%d' \ + % (name, + type, typename, + klass, Class.classstr(klass), + ttl) + mname = 'get%sdata' % typename + if hasattr(u, mname): + print ' formatted rdata:', getattr(u, mname)() + else: + print ' binary rdata:', u.getbytes(rdlength) + +if __name__ == "__main__": + testpacker() +# +# $Log: Lib.py,v $ +# Revision 1.11.2.8 2011/03/16 20:06:39 customdesigned +# Refer to explicit LICENSE file. +# +# Revision 1.11.2.7 2009/06/09 18:39:06 customdesigned +# Built-in SPF support +# +# Revision 1.11.2.6 2008/10/15 22:34:06 customdesigned +# Default to idna encoding. +# +# Revision 1.11.2.5 2008/09/17 17:35:14 customdesigned +# Use 7-bit ascii encoding, because case folding needs to be disabled +# before utf8 is safe to use, even experimentally. +# +# Revision 1.11.2.4 2008/09/17 16:09:53 customdesigned +# Encode unicode labels as UTF-8 +# +# Revision 1.11.2.3 2007/05/22 20:27:40 customdesigned +# Fix unpacker underflow. +# +# Revision 1.11.2.2 2007/05/22 20:25:53 customdesigned +# Use socket.inetntoa,inetaton. +# +# Revision 1.11.2.1 2007/05/22 20:20:39 customdesigned +# Mark utf-8 encoding +# +# Revision 1.11 2002/03/19 13:05:02 anthonybaxter +# converted to class based exceptions (there goes the python1.4 compatibility :) +# +# removed a quite gross use of 'eval()'. +# +# Revision 1.10 2002/03/19 12:41:33 anthonybaxter +# tabnannied and reindented everything. 4 space indent, no tabs. +# yay. +# +# Revision 1.9 2002/03/19 10:30:33 anthonybaxter +# first round of major bits and pieces. The major stuff here (summarised +# from my local, off-net CVS server :/ this will cause some oddities with +# the +# +# tests/testPackers.py: +# a large slab of unit tests for the packer and unpacker code in DNS.Lib +# +# DNS/Lib.py: +# placeholder for addSRV. +# added 'klass' to addA, make it the same as the other A* records. +# made addTXT check for being passed a string, turn it into a length 1 list. +# explicitly check for adding a string of length > 255 (prohibited). +# a bunch of cleanups from a first pass with pychecker +# new code for pack/unpack. the bitwise stuff uses struct, for a smallish +# (disappointly small, actually) improvement, while addr2bin is much +# much faster now. +# +# DNS/Base.py: +# added DiscoverNameServers. This automatically does the right thing +# on unix/ win32. No idea how MacOS handles this. *sigh* +# Incompatible change: Don't use ParseResolvConf on non-unix, use this +# function, instead! 
+# a bunch of cleanups from a first pass with pychecker +# +# Revision 1.8 2001/08/09 09:08:55 anthonybaxter +# added identifying header to top of each file +# +# Revision 1.7 2001/07/19 07:50:44 anthony +# Added SRV (RFC 2782) support. Code from Michael Ströder. +# +# Revision 1.6 2001/07/19 07:39:18 anthony +# 'type' -> 'rrtype' in getRRheader(). Fix from Michael Ströder. +# +# Revision 1.5 2001/07/19 07:34:19 anthony +# oops. glitch in storeRR (fixed now). +# Reported by Bastian Kleineidam and by greg lin. +# +# Revision 1.4 2001/07/19 07:16:42 anthony +# Changed (opcode&0xF)<<11 to (opcode*0xF)<<11. +# Patch from Timothy J. Miller. +# +# Revision 1.3 2001/07/19 06:57:07 anthony +# cvs keywords added +# +# diff --git a/web/collect/client/DNS/Lib.pyc b/web/collect/client/DNS/Lib.pyc new file mode 100644 index 0000000000000000000000000000000000000000..92a3b0775314422fad4817dc59de6251597a146f GIT binary patch literal 22633 zcmcJ1dvIJ?e%86&t(IDrnj-tr)2K!HcNZV>|Wz=eBFxgGqe3R zzRTR(Z1ulM9D$U4k%)8NI;I7>F>qu!f?grIp ztlOPzEC(xU078`X8y&BZRV9cEg0{7b0OP9-2mt8dG`ax8wErnxf57TQ_n)h8cIp$ug zpr<9ahVg31)q6oRbkVN6R$r;u7@K&n(yTSsLN~g<5}={l$M{zs1=>v1gz~d5Fh+}ztudkd&udkr!mc8ypX6wAc zq(f7qL`yRF8erRLL?p!sh!M@%M-0NP2g9M-;zGh1mfi{pj~&jnY&c#YTC&)nzS_wE zgpb!Z82~9sbTA0O;setnDLABtN3gPFhrwxR7#@kuQVE-?_ito7^#VurRtl%~5_|)_+6>A_oV}Dj@yi(q6<9E+Vkv z!-o)&+W#T|D3P4RJ`{dl0JMEU091X40O)zC@Yyb^Z`LXqU+R0d+xR7=Ka4n=nByJ39ar4&gulHv{qj@LAmCp ztj5*SA`bwt7@ac)GXrYpfMb)O7C6UhvUo&JxU)7NQU28Gez2Y`au_Vlk0RMpUOVFx z$#ZkHdM%opdkb(_U{R-(aXxXY;dBDP5I|B>YTXKYkECeHdx#L8&3odqEd&upc@f4?#j8B*#PmawIol zCFU>^Qd#8UQND>fJdKL2iA218gto^|OeW;^X_SuHt0yB8F0@wZ9RTUr!1i|wrg11C zd`PV{Z(wi(8-SYAEX)D_#UF)ur5zC(xw>5Ym?LWLbT{g zMvR2+=sEZFr#^LTltc6mBar3wnaa{y%rQrpMs!pco8B;fTUKm)T}M3xoY<~s5K*7t zAE)RXaE2^;*aR^y^uSOz3&vp4*5BZSv2Gw`;0bzf;`R&XVctB*n-z)*DZ&ItIa5;( z@v=H(X4po-tUoj`N+B-o!XtCy_KbPhp_XAKqWw)Lbz|bC;== z%mc+6`z+?W7$17rX>P9GMxVFMOxh1Rz;Yl9>-n%t{iCN{Z0%u5)9W@-kE!*VMmIc> zf_b>h)B!QAkCF9)gK=Y28u9-%4|_Dq9`u(t_xp*0+HUi}wwO0F^;@P8?J;+|P2;k0 z@Xxls(rKJ8z#t^8pvGS6K(x5hhfaZ$yctAuNp4Gf^_+3u;cW66a<6#+U$-byjS=6O z%IK1&*TsT^(4%)Dl1I>8w-AUqV9XC%%gl=f9Znb8_6ij3mB|Od(LQ#9MmVZLAZ>NG zGxMO+G!CLMWMj@i=Pq-R{UEyzZw~43ni(U5?=pyswSm83gAq*J9y0IdxSkLD%me&K zDXrSs)vLJ~F%0RhEFCZwlEZ`LmFT{^RJju@c~mcQY1X3o*ChDgU7HW)=WDW3Xzr<; z8#Nm4Qlq|5`xqG^Wh?xWG5RIed>I1=Q1y?82a)%FrCEnDFz&82YIW)?R=-nQsF|;$ zJBlMSx*vh;AIms$r^1z`S~On^mi*8orz*d)vJ%vNnFGyAeIc;y%WRk>K~s!;7yZQb z*ndP(wO9$~?gjTXyE0U$#GY6jLszY97cO4DHuv`QTdzf&sf}JPU%D}IwR}Z=Td{(5 zqpMe=h`B65V_iY|25?bf8NFYN7ClO2t8vKfR`XI=5ic6WQZ0-$58+fv7+@%b)HE`V zmt&9dW`|65+?byagUHk5D5WqkRl6&htw!IYFng?$=U{H5f$)zaFdapx`J&V5^g4SX zt_U9i+>fV}>v4+tqO%+6-A;F|x1MAL0)3L!u8B5MXU0<;?yolj}z*5GQUQri_Gqq4ZKE z07GD|fh&`%$IJ|*84M|ur*z4i^^YjQ!Sui^i%Sk5YY=SxVIIwWh2qlYK=|Q5G6siX zA#>+sNb+N)!#I=LI#H9W>k=Jk6YYcZQD@-F`t+lXOx!+4kqd5zKr7@aA;|pQlogOP zIj7NsDA>0H4Xpk);2u)9m3~T@nM7wTQ=(0kaqjUNS5{jJBd1S>?#L@X5Gb4=pur<+ z<;536Qpd=vzNo_uS>}%`%^j)Qywm+-N)o+W7H}T&z#*yum~3LZgqG;~XPc z1w=iUI#{vQN1`Z$(GpWe2J!hcDubA589LfALCpSjJR2P;wo4+r#2n%ioueBN906&* zyB*fi0na$85>=)xJ4CiuE%0w_zhf53E{KfON*!k0JPbRZ=A2)_1|Nxew1B`n&)_fv z&5A>6vDdaIMiM`rH+Upp0~lLtLl9zn@Fx^=dhAD2q(5Q@%g_!SAyo7*q*H2eBO&qc z+NW(xqc&V#NU&r#ff}&hpzYdNTUgW-VYbdkc8ck8*?DS;9G7K8>Tz#-yOabI&olhH zfa8fiCC3gEB{uo6QB5@7`>?a5rKtb$XeDDTj`yH~N386`RgOm?r|OqcCvmEtuy?M8 z?VbBoPujb@sbATocVbm@=bT_E+qr+JO{ipNfMqThW=D!KR4g%8#Uy_ft@boDq zxXyzXs3HFdG^9q$g7DN((Av(>rSenGK|NlCIz10f$dWQkKNzBxLlm1EOhgq_?}VyD zztDincn5~>l*3jE>IzyIIu|LZpw>Z)sFT%)D(*5=*0!QHyas6-PoY^WEX%d8V`?yl z5g33#GZ09D>5U?QCHwKm)*mENEqc!bl5zp=Xen7jBfc!#QL;$kVe*i--bKb?5gTj1 
zR^AKdtfeOF(QhnQVE>XFngPMBcR;);6#5pP@OcCVwpR}NLVE`8S})u+ zY^bFvAoBw_sDyWZ1;>=g>z4CuJyXzjhpC}OwgCuUqzq3*P$^4BKb#@TY)CF`o~+he zOJjU78b4>zxGpqqF_zY$q+P~|8c2JhhS8p=L8OWECHk4G@4;!^3~hV1LEB#Bi8E|u$at&K7<+51 z7S_fp&6P@x&Y4VD#bgbV$wRiWjPtxkR9Vs?A$PXnm4eui%Z88(2U0uUNSD30p@Ka^ zVHSBIE}bQ^gBLgXCo4Tk4>`OflKqZScgxA~Ih4g;=0*gFinlRFH1c#gtlt#X8XXgOTM z6Ot$+P&_oI^gA~)srXq`S1K+mzkj{SR*2OG)b zxq>JBAp}&T2&LtvVr7;}IN3qQvKSHQ>_Q@+8qZ65LE0bDF$PAT9b-Vu=CN~EGIqCSMgDQplM9Y=>eHX{}p%k|DRVR?w;?zCO=wSx3lIH>0 za&l^3M^plVNBLrqdpySyfk)AjvOwsO$gAnwo#IR@ws}F)>@ej#Z!xI&LGvK-O95k2 zI`4FL;jE-DS44^ypIE2SpvC_P02V*ShHwfL9%MXchJkgv2ZG_*B1o}$2WfQC5(1)< zWm2Uh)T%-*cO_tD70d?{k96Ro2ptX-#P+Z@QP4F0CvC;97qm5&t6&YXg0`KZ@N8x7 zguK(TP^4%ri}l4gPmi)OTL-%=&mzs*R$7sJh+0WSKTdcLX)E!Vk4-l2ZH<1C#Y!c@ zwtT!&N4WrG5#zYd)3As3Ky(ygBMo9_Y7cg*Iyd3Kmr$ITfU#oC-I8fwdzf*nQMKWt z0`#IYmL%7Bq%M!#=&c~w0UK(j@LN6aG>2M5ErWseWiD4L7#=??EF3N14IYWmzHwMq zaBvr72Lse@$(fe18H|mMCXeX$RU}xHK~g|RP!`}nWJAH?4nz{m29el@m}Lv@G{_~i zA%5f0r0{a7eBt_KyBD*{(W+R=w%#JrJW`knS80Q9O&VP&2hLQJ2rWb}|nV(n{2P%tS~8SP`^&t!*aUd*YMMDM!k}5JEXBV3Jj@4cgg1kz zd0&<}u4U8^Bay(CuPv`FZ%rgIZZWQ##4TL}le3(iSq+dPJMxvA9C7f0($U-$p4o04Yd+WAFD+b6Ej_Ud;vT zi*2v5I2~{dXk#0(Ua*BORNI(}3%y~nXOPgg*s(1DnxSQ2yK zuGn&A<*|#5?m?}|PW53dF;60iD*03Vy@W>=I!x*>#3vn0kFV--54+rmt}d@E15EfJXkhX-M9|Hi| zftzs`#`SKQYw#10G=S4nITNt?nTMT#Sdip5FuHk3)U;{UEp(vm8l5awTp#!*UyfC9`0rMA7}}8m_7n)sn*?xZN?~k@)&^ zD(QK4-)FPqJ}xtsi*IRsmyY`$-G>Nv=?x8gV-uSYb~gf>5Oz0WH}@XsDWxZbc|=4D z^p(LYcJF~cKkmI?oLHPqwjZvcu-!*K&1zDCK3tiW!!>%`TVhpp{}^#eTBO$_;@lwE z;9{|NNDCk?J_y9H04H88VR@j|J&OsxRO z%**z|&olZJ1TCl9w9v)=%_j9(W~_VnSP+j5+DV8;1P%(4oq^AD-c~GhK)v(G){OLizz!sxq}C5+ zi`2h`giRMI!6&N3M+k!GwQY@@9^?W!a?YsXe}o-cp_&S;6eHYENxMq{%BpxN)uG1@ zciy#JCwBd%C+b>s*3vZxX$eTXsXOSuWT$@p9d`bsn{>`LL2x>D^$GhbPqD8b z``-4&(PByA60ox$-=s6nToY@yPAr`vj&cZF4D&8sr_Rrlqz~=&Iv%NSNW#^+_9Wab zY)hKQ38JpCm%TrU&ZQgRh0;=Yy&pu1_d^VR7=dU=`!d$5xA89IBo%Cm#2*d(tb(=j zZJe4YG4=CoTcYMBp`S#cLh>=TW|A1Aa0H0apCCr904edr57rO=#4e=5lwDp!WI@7n z{iT?GEXH^PVGt_a?$!W_Fc0mH^D18Un?ZU)WR%Y>qT@KD;xCaqSz9*xV}}mAoJ0Rf z4xO{%X+Yle<04qf<;Z#*wckRKo#2aW1PTt}G%3#Y2R-@WUS#7ydJI)v4356umZa4ln zHx)GB!<%(ub2n+TkCn0M=1xyvb1%^gaA6R;H}P&-I>wwOE3ck80G z8EYd*(@J=8=pz5v9k4A(&%Uo#v@ysROO%E)4z%`IY+*FS*3T1LqPIe+4{*dZxE zZ?{=9=BF8@v*P^>gP&#aml%AN!Cz+Za}53p0vUQ3!_>{U*oF63nep=s{sw|_!1axM?!vJKjccpj6cy!)X!d*-JcE~!N-0Dn=?nbcDmV{+TN~lsW@JY#mjFT9`MZ&m8LAaB!-W@s* zQxq^>A3{64mz;Ex?ZBYN_^cWXhKM~}j|Zmf&URavy1^dx;956|(t4_K332NjK^Fsx zRGrUSVel-Xqnxv)BZ{V)ubtD?riiAo!aKQ2sZ{h6RrS-f#hMizZhl}bS(RlMt9}(n zrjeGSf(yL~Tn{t2(iEI6d)tcZjT`ujlR6dpB`!TjYI40!F*gU#fq(X;i;O0}jsjVW zn-bF3P~;cc8fRN{)Xn79&c`+cL#iRB=@&?T{2qWzX4s=4D>EQJa$NfF<1!P>0|X@y zEM%!28CcA~&J64Vi2FaRRsx6?=DW7@Z2zqk6oYuc%v0KrD%d+jn^^FaEY)V`op~$z`eoS0shFjl(ETRnJ z!)8QUF9EO%)gp_eNE4Z%m1D%u&evGo?#w<5C{v-r_iDNeyGpN2{JyF;61PSqWeRxm z)*6m?YmNHvaH`bZ_N=xYVgHz&GB*p_R>u1#a=+bzFp8>M-!V6$q0B_#j#^6F1f;T4 zpvC$Snq*v3yY9qJA|60c2_F7aG6QUANf;^M0($?1E#hej+Hvq}$o<`<1xO?k&GoH_ zrVE)?fkAHpj&E(I9X_V7Uq_AKZ|RHpzP>fSKVfhG6s`ZathXw_jS}+$QXs{yr0eu< zFSR_($E_q9UTngPOb@X|1DWdJvOh#A#Nnn=3$hV!GC&zOV4OqFUeTKe5NEtz0p$z` z`8u42tZNZ*FyxT2{=DvBN)u1qKFAkUZD(T>31M1ZZ_I`(5q)V1E zzuu^S5HuTZ!0$Nd`w+wK=!kzzg-)Uo*NqJWbr2f{f@8zLrdz`RaL7H5TW?>Un!b9Y z?2h>3;+zZRshP{3qWh75;qB?k8{XCF&$`~_OEUFkbVHPY z?sF0-a?i5^HLDaZd?+U`;R&xGSd?_dX5)edoCL@vKt2KBC5d0=(o->OPy3msqRi70 z*h@f~&}yt@!q|=s-^Z6ltXa5*&y~<$l1l-FRHHV+@nHeY@M@Eq!UtBG!F4N;Wol?i z|22wixgv6E`Y_LyZ31o;@K4f<^ z8KOUP$o?EoI>t*@Gn4+)toMghuM_oNrU8nt`q1ei4)Alt4E6HYwb5bJmvmk2bx{A= ztbW%M*1yjB&t>&XPgws?p}t)tcuDw<5QKDNKlH*#vWQ2`P@*25Ptsmc8onL`-NUy< 
z5z4+sWulP;{UXQ%?@!~OeAPGHvuDqpb8k*u!B>IsO`wxDj(4U_!<1hVQBAHasKQ|RyO4l_dG%>CN|Tiw9p_4fI012YieBLFIcLqgn@5K>J4{V zx2_=}@g*i)ZpLSzaAvoRLpfcgMmy6Dcj~hD85jeYsaFG#K`h0QK4e;zU^7c*D9hY! z&aS`8rHl)AD|6!Zkwj&6>ZNzogtaQ&jk?b2w_&KZR)dN{dP_CDzzPI>jn(i4gH5V^e_J)Ez>U>+iOc>A0IH=v4p>@Bd4EFvFa?g`1 zc*((ZdS}=LeatDgkjUSLUIMm)= zMViP8$)NM}J`HUFtQAX>=y~i&u~C-P^`!s)5#rvr8T<|cd$dg7+1?S7c?e-jwEYB9 zC8-nH+>Vp~%m(n-piM7%{~Z}`6DQp)0vR!ki_L@fy;MpPcBT3M0)Y1U!=l0*9+^KA z$=7y%n_&YSz8N;a?$+3&MK}P{55kdJH%~*Z@f5!10iI0aA41xo2Z)0@KWG7*!y8qX z334eZh+IysY>kTrfL7x2utBOxr|#arM~3&m5QN+Vz-Goss9pL0fS6u}_I{crnhgFY z1L9p3M49;dLnVTiB)U+Uuzi`2IJ7ta?f{bPZhC(L;MPm>Kal$_mt-FRF3AZlNwE(? z_o>{VW7Py{v*qC7_ZTI8Ye79Wk#!)bec^u-MS7BjFXl?6(y`KkQfHMqX2ZgB^(nMu z*n#BJ4GAnf7XvRPC-hFD#-iCfWpG72_F#ME_J20)YrFoxV zfg8jdCl7aHzNq;_XaK-gGzl85$g@-5hlUVx+(fGBm z(fNb|20Y2v;hy(3LLV{sCWBvN@aqh?i|G9Z1G2D3vi68IokeEkFQ|s%y#TC}&D_E@J;6QPp T>u~;XXX#*nrPNvK>vjG=@dfdF literal 0 HcmV?d00001 diff --git a/web/collect/client/DNS/Opcode.py b/web/collect/client/DNS/Opcode.py new file mode 100644 index 0000000..a51f2ba --- /dev/null +++ b/web/collect/client/DNS/Opcode.py @@ -0,0 +1,52 @@ +""" + $Id: Opcode.py,v 1.6.2.1 2011/03/16 20:06:39 customdesigned Exp $ + + This file is part of the pydns project. + Homepage: http://pydns.sourceforge.net + + This code is covered by the standard Python License. See LICENSE for details. + + Opcode values in message header. RFC 1035, 1996, 2136. +""" + + + +QUERY = 0 +IQUERY = 1 +STATUS = 2 +NOTIFY = 4 +UPDATE = 5 + +# Construct reverse mapping dictionary + +_names = dir() +opcodemap = {} +for _name in _names: + if _name[0] != '_': opcodemap[eval(_name)] = _name + +def opcodestr(opcode): + if opcodemap.has_key(opcode): return opcodemap[opcode] + else: return `opcode` + +# +# $Log: Opcode.py,v $ +# Revision 1.6.2.1 2011/03/16 20:06:39 customdesigned +# Refer to explicit LICENSE file. +# +# Revision 1.6 2002/04/23 10:51:43 anthonybaxter +# Added UPDATE, NOTIFY. +# +# Revision 1.5 2002/03/19 12:41:33 anthonybaxter +# tabnannied and reindented everything. 4 space indent, no tabs. +# yay. +# +# Revision 1.4 2002/03/19 12:26:13 anthonybaxter +# death to leading tabs. 
+# +# Revision 1.3 2001/08/09 09:08:55 anthonybaxter +# added identifying header to top of each file +# +# Revision 1.2 2001/07/19 06:57:07 anthony +# cvs keywords added +# +# diff --git a/web/collect/client/DNS/Opcode.pyc b/web/collect/client/DNS/Opcode.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1efb3a6972d0b2727ea1d2134d33504bf87eca37 GIT binary patch literal 841 zcmZ`$UuzRV5TDEcoE8*(Xz{^8`VcT&atQ_+idfnd12wI=6!Acg^LCQEn%thZo6vwS zQoo>I#ZTbuwYI)^$NYBw{&sfe*Y6$seD}xu2=2ZrpKn>zhCu-Mj28G7h!XfE5M}Vo zAS&QjKvco60@dJ#)9Mg2HsHDsRELw#ufT5tHBIrJsKHekPcDB1q5)p3Y3_ev0ob@d7DGJGA}Pqpw(SkN&cNw8F7~>v+v)Z@?ts-{cQEW9 zV6;|Rt^}!Mk&?jCb%y)4jk7pWI8T;@d}d*;QO;4vgxOZ4?8)U7McT3Pom^2CE@+6c z*4ePrDH0qd*Lg&9nJ>smN&jUuJsHMGZYbyLzHAFprNdN&xxkC9j%A8xNkpk42R$M@ z8y}A*-Uyit1?ezZDu?-QABdZ9xh924iYroz8^M@DLAis|(_?hI{nu@D4-N)x?797c zV<$W(NO(?=lnhi1m<4L%3nGI?Oz@aRy=2fu$#*dGmrbzPW@FE@7LMovk=j!B^TANxF6)Zm|Xhs2$c$CSK=$ZU3++oj>QWGKFA0fu=QSJ!Z9_ z4gw*gAkgLsA1+7JPX+(@LG@-wvrDh=PR?iJ)7ym0i<6_-NSC=vBzX}H(vU|uH4iD+ u3R|1`5D$53l!u1@*L8!l4p&mFm-NPFUF|Z|DmClwuQXY!L(O^$yQM$#__Hqn literal 0 HcmV?d00001 diff --git a/web/collect/client/DNS/Status.py b/web/collect/client/DNS/Status.py new file mode 100644 index 0000000..becd51b --- /dev/null +++ b/web/collect/client/DNS/Status.py @@ -0,0 +1,66 @@ +""" + $Id: Status.py,v 1.7.2.1 2011/03/16 20:06:39 customdesigned Exp $ + + This file is part of the pydns project. + Homepage: http://pydns.sourceforge.net + + This code is covered by the standard Python License. See LICENSE for details. + + Status values in message header +""" + +NOERROR = 0 # No Error [RFC 1035] +FORMERR = 1 # Format Error [RFC 1035] +SERVFAIL = 2 # Server Failure [RFC 1035] +NXDOMAIN = 3 # Non-Existent Domain [RFC 1035] +NOTIMP = 4 # Not Implemented [RFC 1035] +REFUSED = 5 # Query Refused [RFC 1035] +YXDOMAIN = 6 # Name Exists when it should not [RFC 2136] +YXRRSET = 7 # RR Set Exists when it should not [RFC 2136] +NXRRSET = 8 # RR Set that should exist does not [RFC 2136] +NOTAUTH = 9 # Server Not Authoritative for zone [RFC 2136] +NOTZONE = 10 # Name not contained in zone [RFC 2136] +BADVERS = 16 # Bad OPT Version [RFC 2671] +BADSIG = 16 # TSIG Signature Failure [RFC 2845] +BADKEY = 17 # Key not recognized [RFC 2845] +BADTIME = 18 # Signature out of time window [RFC 2845] +BADMODE = 19 # Bad TKEY Mode [RFC 2930] +BADNAME = 20 # Duplicate key name [RFC 2930] +BADALG = 21 # Algorithm not supported [RFC 2930] + +# Construct reverse mapping dictionary + +_names = dir() +statusmap = {} +for _name in _names: + if _name[0] != '_': statusmap[eval(_name)] = _name + +def statusstr(status): + if statusmap.has_key(status): return statusmap[status] + else: return `status` + +# +# $Log: Status.py,v $ +# Revision 1.7.2.1 2011/03/16 20:06:39 customdesigned +# Refer to explicit LICENSE file. +# +# Revision 1.7 2002/04/23 12:52:19 anthonybaxter +# cleanup whitespace. +# +# Revision 1.6 2002/04/23 10:57:57 anthonybaxter +# update to complete the list of response codes. +# +# Revision 1.5 2002/03/19 12:41:33 anthonybaxter +# tabnannied and reindented everything. 4 space indent, no tabs. +# yay. +# +# Revision 1.4 2002/03/19 12:26:13 anthonybaxter +# death to leading tabs. 
+# +# Revision 1.3 2001/08/09 09:08:55 anthonybaxter +# added identifying header to top of each file +# +# Revision 1.2 2001/07/19 06:57:07 anthony +# cvs keywords added +# +# diff --git a/web/collect/client/DNS/Status.pyc b/web/collect/client/DNS/Status.pyc new file mode 100644 index 0000000000000000000000000000000000000000..83c236dc187b27170ce67e55c0467b30d7c30072 GIT binary patch literal 1143 zcmaiy-EP}96vzK^5+_#cHXlxx+z~@D80a9!3D9CjG0dx#)>teRlG`-|2165>SZ!>1 zA|2GgZien+54IQBtLzDO4%2kK876^0{Yjk<@*Mv4chip#ejDZZ_-xSkI~wLsf&wJb zgY=+kkTs|^$Tg^S$aSa<$PK7EWF2Y~auaF`ato>f*?{sPeW*K-ccAV<-gRt3H6iao z-GjUjbsusYY8!F~Y6o%`dJi8cX$w>0KHi@|w=nqW5#$5t2fUR-wTriFC|mGoLGMF< z!GAt@c>2p9@M!#~_V7EoA3(K`w)gooTOQzxheOp9sm<)t1dG+z*Pri|kVO#MIhFuhVe`fhm_&t}n{!QU`NwZzJZVauM_LT33|r=ama; zY&KU}rNqmXotATPR^)nabRbf##o6##7^R`0U_xn|6;~#pcsBSZ8qai#lVIvBhXGe8x6ohq24pW3(7tVaxF*Ig{?-FP%dYp1w-MfwMWkwc*X5ze$ob9N%R4T*u@7tMQAE zna47YLZ`p)4_=2!>f)15htF^258?T(1*QFDJ&FgPbkwJHJ}7zi&z@Uuttu+FG?`~~ z5fZMnZ_+PPRekR5oY}N}=|gvQe&_7Yxy_mR>DQ8fw)uTK!iQ&`^;4GgX9fY9 z=|Ov-9CQve2R#SML+3&B(DR@IbOE#gy#QK-UIZ1Piy$A`2Q5J_ftI0{K_%!CXa#x& zv~gj4^O`R5gyAc+QAPT{t;*wLHU8~ zJLdtudUQ&4)t^mFWioqwqxLI@m4ix69qjGzSNCew{X;hEdx!Pf=PJ5RZ89R0#+PG5 zs`+lB9{IlN593r_#8*bKo`h4Yk_%;rMonfk=0DTqjfrfeZ@q zsHDmDG%^>-^wLzu#{Qi#O2}o5k{dJSa^K8c($t1y3a6yHGdoPisuf3OoSKRXj8U!A zqh=>)Do&=z*f73IE0XVZx2bNzt80_0a>~ub99ZLh9(>}*JO#u#!JHs3kh>8V1Pg*i zK~dlfmITX!l3+!!Dp(Wj2tF3bRg1@hUj)Aiei!^9_*1|FD;*3Xp@_6{#F9Q_SoZPo z9po&NB0T=)kFCICmIA)DL0MutWr?!VtmUH|g%f8F!*uZ0%$$prjmct}H5^je$Z;pA zemMSr;!`{QjF(cG0K7F6y-r!N=^O?FN}|EQb=|O1w-Y$kKF-vMmGK>Q8ttZ)kp^d( zTl-nI{$-|muA|P2R?B(4`YLd`+wZyf=ck>MGiSA(C577O&ZU3ecd-qAvI@PIneJw_ zU%1*XqMLc1pJZ9vjnme%EGt`CbffdyMN>Srvg5%x9GSEyb9J8X+odslhk7!{e`omL ogIr{vC!>U}ugn)RiuJ4ofmAMk7Z3O@k5 zPJ<&)W_D-o@%PUnTAzMCZ(+Y8{NLf$UkC*NM`TbqP%+?02JQfNM(zT4NA3alEO(*u zK?T4AP!r%2U^MALH3by{4+rg&E29pwDP$6@k)209LyZT_LE^xIB5dM-lKBXi$yIV) z{^DB24r)WWA8MV-w_a*vrX*LJ$F6I-n2{kI4Z2rmC{g_^KA`RlL5O$R>9VPXFdph` z*GfY-uG?)>40L01(`!Q!?p3a}H7;{4Q>;L0Z*q}6l`pByMNXT-I#vF$GZS*|bJFpt ww2_cO{15sNDZb!bh_Wt?5J|hUuey0{ieAavIrjB2Lf~Am6L!QR`~tiG0Hp6$Q~&?~ literal 0 HcmV?d00001 diff --git a/web/collect/client/DNS/lazy.py b/web/collect/client/DNS/lazy.py new file mode 100644 index 0000000..4d55fc8 --- /dev/null +++ b/web/collect/client/DNS/lazy.py @@ -0,0 +1,82 @@ +# $Id: lazy.py,v 1.5.2.4 2011/03/19 22:15:01 customdesigned Exp $ +# +# This file is part of the pydns project. +# Homepage: http://pydns.sourceforge.net +# +# This code is covered by the standard Python License. See LICENSE for details. +# + +# routines for lazy people. +import Base +import string + +from Base import DNSError + +def revlookup(name): + "convenience routine for doing a reverse lookup of an address" + names = revlookupall(name) + if not names: return None + return names[0] # return shortest name + +def revlookupall(name): + "convenience routine for doing a reverse lookup of an address" + # FIXME: check for IPv6 + a = string.split(name, '.') + a.reverse() + b = string.join(a, '.')+'.in-addr.arpa' + names = dnslookup(b, qtype = 'ptr') + # this will return all records. 
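+    # sorted by length so that revlookup() above can take names[0] as the
+    # shortest name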
+ names.sort(key=str.__len__) + return names + +def dnslookup(name,qtype): + "convenience routine to return just answer data for any query type" + if Base.defaults['server'] == []: Base.DiscoverNameServers() + result = Base.DnsRequest(name=name, qtype=qtype).req() + if result.header['status'] != 'NOERROR': + raise DNSError("DNS query status: %s" % result.header['status']) + elif len(result.answers) == 0 and Base.defaults['server_rotate']: + # check with next DNS server + result = Base.DnsRequest(name=name, qtype=qtype).req() + if result.header['status'] != 'NOERROR': + raise DNSError("DNS query status: %s" % result.header['status']) + return map(lambda x: x['data'],result.answers) + +def mxlookup(name): + """ + convenience routine for doing an MX lookup of a name. returns a + sorted list of (preference, mail exchanger) records + """ + l = dnslookup(name, qtype = 'mx') + l.sort() + return l + +# +# $Log: lazy.py,v $ +# Revision 1.5.2.4 2011/03/19 22:15:01 customdesigned +# Added rotation of name servers - SF Patch ID: 2795929 +# +# Revision 1.5.2.3 2011/03/16 20:06:24 customdesigned +# Expand convenience methods. +# +# Revision 1.5.2.2 2011/03/08 21:06:42 customdesigned +# Address sourceforge patch requests 2981978, 2795932 to add revlookupall +# and raise DNSError instead of IndexError on server fail. +# +# Revision 1.5.2.1 2007/05/22 20:23:38 customdesigned +# Lazy call to DiscoverNameServers +# +# Revision 1.5 2002/05/06 06:14:38 anthonybaxter +# reformat, move import to top of file. +# +# Revision 1.4 2002/03/19 12:41:33 anthonybaxter +# tabnannied and reindented everything. 4 space indent, no tabs. +# yay. +# +# Revision 1.3 2001/08/09 09:08:55 anthonybaxter +# added identifying header to top of each file +# +# Revision 1.2 2001/07/19 06:57:07 anthony +# cvs keywords added +# +# diff --git a/web/collect/client/DNS/lazy.pyc b/web/collect/client/DNS/lazy.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e0fd781c7fd338f4d4d6973ffa20ceec1d297670 GIT binary patch literal 1926 zcmbtVO>Z1U5Urm5@;bJO5)))uLahkI0EyuoSy3R4IAw!oFUTBLot?Hb_H6gKyEpM7 z`w$j>AU}dXz=b2cs@Vh!Vj;xJ-P0e{)m`;oJ^%9C=K8On{rq%Ft6vY_M`-a|bS3^2 zoe~|s9V^&3!OGOuonjUV;Xsj@X$DMKBVat7VQ}13{S6Mf;I{102 zoYhA@=+am2SQ{PeOBXAo(KAZ#NT6ph%3Y@ zsb6NRxT}noM2h!1oCm~?J!Im8%Z%{C3rr;aOQkMh(V+^t?@{VgHNa7&ur$v5@c+M) zl^CeKxXFFx?sE$FOlXW`3n;4WP1^J%ZMb1&M~})2jDOoUD^R6Cmr&; zIDv}jU)i|>ivOs{eS@umtgkWa%Yj_@WqSqYYqaC<{@aE8r2ld$M=+#v$fe5Xa5ZWy zU*`mTh3;LYl{_jc&4be?^O#Ue@s&lfnPjBuj9cilxeW^~gb?g)Km`g|1;f4~gh@Eh z^U<>>Me(eN+ZdyWR>xyvg9rNKwn!7xE{dl~@Yu6bMQ^lY%)87s27*{0ctQP{o2f)m*(N;>=lh9CF1RC6Rp^k_Q=fZ6w__&A5?Mb5#$KEFqTKg#92IW zv9L%-Y)KsKnFRmBni3gd&q8icS~AflGs@b`G@@~T%NtY+w_gALVhc_+nxvDmZQ4>F zs%^EaKFaROSMtVu=VB4AvpKllN)O1)S|X|oJpz+ib60x%8kXxCzjra?is(B1^t-Eq zt64DjR>X 0.0 + assert data > last_value + #print "Recording: %s"% (data-last_value) + history[i] = data-last_value + i += 1 + i = i % HISTORY_LENGTH + except: + # on init when last_value is 0, or reboot when counter resets. 
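+        # (an AssertionError from the checks above is swallowed here)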
+ # do not record data except for last_value, do not increment index + pass + + last_value = data + return (i, last_value, history) + +def record_data(filename, data): + rh = read_safe_history(filename) + return write_safe_history(filename, add_to_history(rh, data)) + +def get_percentile(filename, percentile): + (idx,last_version, history) = read_safe_history(filename) + summary = history[idx:] + history[:idx] + measured = filter(lambda x: x != 0, summary) + if len(measured) == 0: + return 0 + + # convert bytes to bw + bw = map(lambda x: x/(60*60*24.0), measured) + bw.sort() + l = len(bw) + pct = bw[int(l*percentile)] + #print bw + + return pct + +def timed(method): + + def timeit(*args, **kw): + ts = time.time() + result = method(*args, **kw) + te = time.time() + + #print '%r (%r, %r) %2.2f sec' % \ + # (method.__name__, args, kw, te-ts) + return (result, te-ts) + + return timeit + +@timed +def check_dns(ip, protocol='udp'): + try: + #ip = ip[:-1] + "0" + ro = DNS.Request(name="www.yahoo.com", qtype="A", server=ip) + r = ro.req(protocol=protocol) + r = "OK" + except DNS.Base.DNSError, e: + r = "Error: %s" % e + return r + +def get_nameserver_ips(filename): + ip_re = re.compile("\d+\.\d+\.\d+\.\d+") + ret = {} + if not os.path.exists(filename): + return ret + + f = open(filename, 'r') + + if 'resolv' in filename: + for l in f: + for field in l.strip().split(): + if ip_re.match(field) and field not in ret: + ret[field] = 0 + + if 'ifcfg' in filename: + for l in f: + if 'DNS' not in l: + continue + for field in l.strip().split('='): + field = field.replace('"', '') + field = field.replace("'", '') + if ip_re.match(field) and field not in ret: + ret[field] = 0 + return ret + +def main(): + + for interface in ['eth0', 'eth1', 'eth2', 'eth3']: + t_bytes = get_network_bytes(interface) + if t_bytes != None: + break + if t_bytes == None: + # massive fail. cannot continue. + sys.exit(1) + + # take diff b/t sum(t_bytes) and last_value + record_data("bw_history.dat", sum(t_bytes)) + record_data("bw_history_rx.dat", t_bytes[0]) + record_data("bw_history_tx.dat", t_bytes[1]) + + print get_percentile("bw_history.dat", 0.90), + print get_percentile("bw_history_rx.dat", 0.90), + print get_percentile("bw_history_tx.dat", 0.90), + + print "" + + +if __name__ == "__main__": + main() + + +# TODO: comon? +#url = """http://comon.cs.princeton.edu/status/tabulator.cgi?table=table_nodeviewshort&select='dns1udp>80 && dns2udp>80&&name=="%s"'&format=formatcsv&dumpcols='dns1udp,dns1tcp,dns2udp,dns2tcp'""" % os.popen("hostname").read().strip() diff --git a/web/collect/client/check_dns.py b/web/collect/client/check_dns.py new file mode 100755 index 0000000..ffd7359 --- /dev/null +++ b/web/collect/client/check_dns.py @@ -0,0 +1,192 @@ +#!/usr/bin/python + +# can't probe comon directly from node. +# http://comon.cs.princeton.edu/status/tabulator.cgi?table=table_nodeviewshort&select='dns1udp>80 && dns2udp>80&&name=="planetlab-01.cs.princeton.edu"'&format=formatcsv&dumpcols='dns1udp,dns1tcp,dns2udp,dns2tcp' + +import commands +import os +import re +import socket +import struct +import DNS +import time +#import ctypes +# TODO: maybe when there's more time; for better readability. +#class History(Structure): +# _fields_ = [ ("version", c_int), +# ("index", c_int), +# ("history", c_float * HISTORY_LENGTH), ] + +# allocate fixed space on disk to save persistent state. +# what to store in this file? +# slice_history : x,x,x,x,x,... +# root_history : y,y,y,y,y,y... 
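# Editor's note (not part of the patch): a minimal, standalone sketch of the
# fixed-size record layout that read_safe_history()/write_safe_history() below
# rely on -- a packed (version, index) header followed by a fixed-length float
# array, so the file size never changes and updates can rewrite it in place.
# The helper names pack_history/unpack_history are illustrative only; the
# layout itself ('ii' header, 'f' * HISTORY_LENGTH body, version 1) matches
# the HISTORY_fmt/HISTORY_version constants defined in the scripts.
import struct

HISTORY_LENGTH = 24 * 30          # one slot per hour for ~30 days
HEADER_FMT = 'ii'                 # (format version, ring-buffer index)
BODY_FMT = 'f' * HISTORY_LENGTH   # the circular history itself

def pack_history(version, index, history):
    assert len(history) == HISTORY_LENGTH
    return struct.pack(HEADER_FMT, version, index) + struct.pack(BODY_FMT, *history)

def unpack_history(blob):
    version, index = struct.unpack_from(HEADER_FMT, blob, 0)
    body = struct.unpack_from(BODY_FMT, blob, struct.calcsize(HEADER_FMT))
    return version, index, list(body)

# Round-trip check: the blob size is constant regardless of contents.
blob = pack_history(1, 5, [0.0] * HISTORY_LENGTH)
assert unpack_history(blob) == (1, 5, [0.0] * HISTORY_LENGTH)
assert len(blob) == struct.calcsize(HEADER_FMT) + struct.calcsize(BODY_FMT)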
+ +HISTORY_LENGTH = 24*30 # 30 days, if checked once an hour +HISTORY_fmt = ('ii', 'f'*HISTORY_LENGTH ) +HISTORY_version = 1 + +def read_safe_history(filename): + """ + This function guarantees that space is preserved. + If one of the file operations fail, it will throw an exception. + """ + if os.path.exists(filename): + # read existing data + fd = os.open(filename, os.O_RDONLY) + a = os.read(fd, os.path.getsize(filename)) + try: + (version, i) = struct.unpack_from(HISTORY_fmt[0], a, 0) + assert version == HISTORY_version + history = struct.unpack_from(HISTORY_fmt[1], a, struct.calcsize(HISTORY_fmt[0])) + history = [ h for h in history ] + except: + # TODO: in the future a more clever version migration might be nice. + os.remove(filename) # just nuke the old version + # create for the first time, with empty data + (i, history) = (0, [0]*HISTORY_LENGTH) + write_safe_history(filename, (i, history), False) + + os.close(fd) + + else: + # create for the first time, with empty data + (i, history) = (0, [0]*HISTORY_LENGTH) + write_safe_history(filename, (i, history), False) + + return (i, history) + +def write_safe_history(filename, (i, history), check_for_file=True): + # length should match, and the file should already exist + assert len(history) == HISTORY_LENGTH + if check_for_file: + assert os.path.exists(filename) + + # open without TRUNC nor APPEND, then seek to beginning to preserve space on disk + fd = os.open(filename, os.O_WRONLY|os.O_CREAT) + os.lseek(fd, 0, 0) + ret = os.write(fd, struct.pack(HISTORY_fmt[0], HISTORY_version, i)) + ret += os.write(fd, struct.pack(HISTORY_fmt[1], *history)) + os.close(fd) + return ret + +def add_to_history((i, history), data): + history[i] = data + i += 1 + i = i % HISTORY_LENGTH + return (i, history) + +def record_status_record(filename, status): + rh = read_safe_history(filename) + return write_safe_history(filename, add_to_history(rh, status)) + +def get_success_ratio(filename): + rh = read_safe_history(filename) + idx = rh[0] + summary = rh[1][idx:] + rh[1][:idx] + measured = filter(lambda x: x != 0, summary) + if len(measured) == 0: + return 0 + + return float(len(filter(lambda x: x > 0, measured)))/float(len(measured)) + +def timed(method): + + def timeit(*args, **kw): + ts = time.time() + result = method(*args, **kw) + te = time.time() + + #print '%r (%r, %r) %2.2f sec' % \ + # (method.__name__, args, kw, te-ts) + return (result, te-ts) + + return timeit + +@timed +def check_dns(ip, protocol='udp'): + try: + #ip = ip[:-1] + "0" + ro = DNS.Request(name="www.yahoo.com", qtype="A", server=ip) + r = ro.req(protocol=protocol) + r = "OK" + except DNS.Base.DNSError, e: + r = "Error: %s" % e + return r + +def get_nameserver_ips(filename): + ip_re = re.compile("\d+\.\d+\.\d+\.\d+") + ret = {} + if not os.path.exists(filename): + return ret + + f = open(filename, 'r') + + if 'resolv' in filename: + for l in f: + for field in l.strip().split(): + if ip_re.match(field) and field not in ret: + ret[field] = 0 + + if 'ifcfg' in filename: + for l in f: + if 'DNS' not in l: + continue + for field in l.strip().split('='): + field = field.replace('"', '') + field = field.replace("'", '') + if ip_re.match(field) and field not in ret: + ret[field] = 0 + return ret + +def main(): + + root_ips = get_nameserver_ips('/etc/resolv.conf') + slice_ips = get_nameserver_ips( '/vservers/princeton_comon/etc/resolv.conf') + + for i,ip in enumerate(root_ips.keys()): + (s,t) = check_dns(ip, 'udp') + if "Error" in s: t = -1 + record_status_record("dns_history_root_udp%s.dat" % i, 
t) + + (s,t) = check_dns(ip, 'tcp') + if "Error" in s: t = -1 + record_status_record("dns_history_root_tcp%s.dat" % i, t) + + for i,ip in enumerate(slice_ips.keys()): + (s,t) = check_dns(ip, 'udp') + if "Error" in s: t = -1 + record_status_record("dns_history_slice_udp%s.dat" % i, t) + + (s,t) = check_dns(ip, 'tcp') + if "Error" in s: t = -1 + record_status_record("dns_history_slice_tcp%s.dat" % i, t) + + if set(root_ips.keys()) == set(slice_ips.keys()): + print "CONF-ROOT_SLICE-MATCH", + else: + print "CONF-ROOT_SLICE-MISMATCH", + #if set(root_ips.keys()) != set(slice_ips.keys()): + #if set(root_ips.keys()) != set(ifcfg_ips.keys()) and len(set(ifcfg_ips.keys())) > 0: + # print "CONF-IFCFG_ROOT-MISMATCH", + + print get_success_ratio('dns_history_root_udp0.dat'), + print get_success_ratio('dns_history_root_udp1.dat'), + print get_success_ratio('dns_history_slice_udp0.dat'), + print get_success_ratio('dns_history_slice_udp1.dat'), + c_dns = os.popen("curl -s http://localhost:3121 | grep -a DNSFail").read().strip() + if len(c_dns) > 9 and "DNS" in c_dns: + c_dns = "cm " + c_dns[9:] + else: + c_dns = "" + print c_dns, + + print "" + + +if __name__ == "__main__": + main() + + +# TODO: comon? +#url = """http://comon.cs.princeton.edu/status/tabulator.cgi?table=table_nodeviewshort&select='dns1udp>80 && dns2udp>80&&name=="%s"'&format=formatcsv&dumpcols='dns1udp,dns1tcp,dns2udp,dns2tcp'""" % os.popen("hostname").read().strip() diff --git a/web/collect/client/check_uptime.py b/web/collect/client/check_uptime.py new file mode 100755 index 0000000..160aeb4 --- /dev/null +++ b/web/collect/client/check_uptime.py @@ -0,0 +1,177 @@ +#!/usr/bin/python + +import commands +import os +import sys +import re +import socket +import struct +import time + +#import ctypes +# TODO: maybe when there's more time; for better readability. +#class History(Structure): +# _fields_ = [ ("version", c_int), +# ("index", c_int), +# ("history", c_float * HISTORY_LENGTH), ] + +# allocate fixed space on disk to save persistent state. +# what to store in this file? +# slice_history : x,x,x,x,x,... +# root_history : y,y,y,y,y,y... + +HISTORY_LENGTH = 24*30 # 30 days, if checked once an hour +HISTORY_fmt = ('ii', 'f'*HISTORY_LENGTH ) +HISTORY_version = 1 + + +def get_network_bytes(interface): + for line in open('/proc/net/dev', 'r'): + if interface in line: + data = line.split('%s:' % interface)[1].split() + rx_bytes, tx_bytes = (data[0], data[8]) + return (float(rx_bytes), float(tx_bytes)) + return None + +def get_uptime(): + for line in open('/proc/uptime', 'r'): + data = line.split()[0] + return float(data) + return None + +def read_safe_history(filename): + """ + This function guarantees that space is preserved. + If one of the file operations fail, it will throw an exception. + """ + if os.path.exists(filename): + # read existing data + fd = os.open(filename, os.O_RDONLY) + a = os.read(fd, os.path.getsize(filename)) + try: + (version, i) = struct.unpack_from(HISTORY_fmt[0], a, 0) + assert version == HISTORY_version + history = struct.unpack_from(HISTORY_fmt[1], a, struct.calcsize(HISTORY_fmt[0])) + history = [ h for h in history ] + except: + # TODO: in the future a more clever version migration might be nice. 
+ os.remove(filename) # just nuke the old version + # create for the first time, with empty data + (i, history) = (0, [0]*HISTORY_LENGTH) + write_safe_history(filename, (i, history), False) + + os.close(fd) + + else: + # create for the first time, with empty data + (i, history) = (0, [0]*HISTORY_LENGTH) + write_safe_history(filename, (i, history), False) + + return (i, history) + +def write_safe_history(filename, (i, history), check_for_file=True): + # length should match, and the file should already exist + assert len(history) == HISTORY_LENGTH + if check_for_file: + assert os.path.exists(filename) + + # open without TRUNC nor APPEND, then seek to beginning to preserve space on disk + fd = os.open(filename, os.O_WRONLY|os.O_CREAT) + os.lseek(fd, 0, 0) + ret = os.write(fd, struct.pack(HISTORY_fmt[0], HISTORY_version, i )) + ret += os.write(fd, struct.pack(HISTORY_fmt[1], *history)) + os.close(fd) + return ret + +def add_to_history((i, history), data): + try: + assert data > 0.0 + history[i] = data + i += 1 + i = i % HISTORY_LENGTH + except: + # do not record data if data <= 0 + pass + return (i, history) + +def record_data(filename, data): + rh = read_safe_history(filename) + return write_safe_history(filename, add_to_history(rh, data)) + +def get_avg_uptime(filename): + (idx, history) = read_safe_history(filename) + summary = history[idx:] + history[:idx] + measured = filter(lambda x: x != 0, summary) + if len(measured) == 0: + return 0 + return float(sum(measured))/float(len(measured)) + +def timed(method): + + def timeit(*args, **kw): + ts = time.time() + result = method(*args, **kw) + te = time.time() + + #print '%r (%r, %r) %2.2f sec' % \ + # (method.__name__, args, kw, te-ts) + return (result, te-ts) + + return timeit + +@timed +def check_dns(ip, protocol='udp'): + try: + #ip = ip[:-1] + "0" + ro = DNS.Request(name="www.yahoo.com", qtype="A", server=ip) + r = ro.req(protocol=protocol) + r = "OK" + except DNS.Base.DNSError, e: + r = "Error: %s" % e + return r + +def get_nameserver_ips(filename): + ip_re = re.compile("\d+\.\d+\.\d+\.\d+") + ret = {} + if not os.path.exists(filename): + return ret + + f = open(filename, 'r') + + if 'resolv' in filename: + for l in f: + for field in l.strip().split(): + if ip_re.match(field) and field not in ret: + ret[field] = 0 + + if 'ifcfg' in filename: + for l in f: + if 'DNS' not in l: + continue + for field in l.strip().split('='): + field = field.replace('"', '') + field = field.replace("'", '') + if ip_re.match(field) and field not in ret: + ret[field] = 0 + return ret + +def main(): + + ut = get_uptime() + if ut == None: + # massive fail. cannot continue. + sys.exit(1) + + record_data("uptime_history.dat", ut) + + print get_avg_uptime("uptime_history.dat"), + + print "" + + +if __name__ == "__main__": + main() + + +# TODO: comon? 
+#url = """http://comon.cs.princeton.edu/status/tabulator.cgi?table=table_nodeviewshort&select='dns1udp>80 && dns2udp>80&&name=="%s"'&format=formatcsv&dumpcols='dns1udp,dns1tcp,dns2udp,dns2tcp'""" % os.popen("hostname").read().strip() diff --git a/web/collect/client/update.sh b/web/collect/client/update.sh new file mode 100644 index 0000000..47016ff --- /dev/null +++ b/web/collect/client/update.sh @@ -0,0 +1,41 @@ +#!/bin/bash + +if [ -f /etc/planetlab/plc_config ]; then + source /etc/planetlab/plc_config +else + PLC_SLICE_PREFIX='pl' +fi + +IP=IPADDR +DIR=multiops +FILE=bootstrap.tar.gz +HDIR=/home/${PLC_SLICE_PREFIX}_myops + +mkdir -p $HDIR +cd $HDIR + +# before update +if [ -f $FILE ] ; then + mod_time_before=`stat -c %Y $FILE` + CURL_ARGS="-z $FILE" +else + mod_time_before=0 + CURL_ARGS="" +fi + +# if bootstrap file has been updated +curl $CURL_ARGS -s -O --insecure https://$IP/$DIR/$FILE + +if [ -f $FILE ] ; then + mod_time_after=`stat -c %Y $FILE` +else + mod_time_after=0 +fi + +if [[ $mod_time_after -gt $mod_time_before ]] ; then + # then an update occurred, and we need to unpack it. + tar -xzf $FILE + chmod 755 ./*.sh ./*.py + ./bootstrap.sh +fi + -- 2.43.0
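Editor's note, for readers skimming the checks above: check_dns.py records each probe into its ring buffer using the convention that 0 means "no sample yet", a negative value marks a failed lookup, and a positive value is the measured query time in seconds. The sketch below is a standalone illustration of the summarization that get_success_ratio() performs under that convention; the function name success_ratio is an editorial stand-in, not code from the patch.

# Unroll the ring buffer from the current index, drop empty slots, and report
# the fraction of measured samples that succeeded (positive probe times).
def success_ratio(history, index):
    ordered = history[index:] + history[:index]      # oldest-to-newest order
    measured = [x for x in ordered if x != 0]        # ignore unused slots
    if not measured:
        return 0.0
    return float(len([x for x in measured if x > 0])) / len(measured)

# Example: two successful probes and one failure recorded so far.
assert success_ratio([0.02, -1, 0.03, 0, 0], 3) == 2.0 / 3.0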