Add support library for DNS checks.
Add update.sh script for auto-updates.
Fix sar2graphite.py: checks for sysstat rpm
Improve other scripts.
--- /dev/null
+"""
+$Id: Base.py,v 1.12.2.15 2011/03/19 22:15:01 customdesigned Exp $
+
+This file is part of the pydns project.
+Homepage: http://pydns.sourceforge.net
+
+This code is covered by the standard Python License. See LICENSE for details.
+
+ Base functionality. Request and Response classes, that sort of thing.
+"""
+
+import socket, string, types, time, select
+import Type,Class,Opcode
+import asyncore
+#
+# This random generator is used for transaction ids and port selection. This
+# is important to prevent spurious results from lost packets, and malicious
+# cache poisoning. This doesn't matter if you are behind a caching nameserver
+# or your app is a primary DNS server only. To install your own generator,
+# replace DNS.Base.random. SystemRandom uses /dev/urandom or similar source.
+#
+try:
+ from random import SystemRandom
+ random = SystemRandom()
+except:
+ import random
+
+# Root exception type for every error raised by this package.
+class DNSError(Exception): pass
+
+# Lib uses DNSError, so import after defining.
+import Lib
+
+# Module-wide default query parameters; per-request keyword arguments
+# override these (see DnsRequest.argparse).
+defaults= { 'protocol':'udp', 'port':53, 'opcode':Opcode.QUERY,
+            'qtype':Type.A, 'rd':1, 'timing':1, 'timeout': 30,
+            'server_rotate': 0 }
+
+# Start with no nameservers; filled in by ParseResolvConf or
+# DiscoverNameServers below.
+defaults['server']=[]
+
+def ParseResolvConf(resolv_path="/etc/resolv.conf"):
+ "parses the /etc/resolv.conf file and sets defaults for name servers"
+ global defaults
+ lines=open(resolv_path).readlines()
+ for line in lines:
+ line = string.strip(line)
+ if not line or line[0]==';' or line[0]=='#':
+ continue
+ fields=string.split(line)
+ if len(fields) < 2:
+ continue
+ if fields[0]=='domain' and len(fields) > 1:
+ defaults['domain']=fields[1]
+ if fields[0]=='search':
+ pass
+ if fields[0]=='options':
+ pass
+ if fields[0]=='sortlist':
+ pass
+ if fields[0]=='nameserver':
+ defaults['server'].append(fields[1])
+
+def DiscoverNameServers():
+    # Populate defaults['server'] from the platform's resolver
+    # configuration: the Windows registry (via win32dns) on win32/nt,
+    # /etc/resolv.conf everywhere else.
+    import sys
+    if sys.platform in ('win32', 'nt'):
+        import win32dns
+        defaults['server']=win32dns.RegistryResolve()
+    else:
+        return ParseResolvConf()
+
+class DnsRequest:
+    """ high level Request object """
+    # Builds a DNS query packet, sends it to each configured server in
+    # turn over UDP or TCP, and returns a Lib.DnsResult.  Source ports and
+    # transaction ids are randomized to resist cache poisoning.
+    def __init__(self,*name,**args):
+        self.donefunc=None
+        # 'async' is set to 1 by the DnsAsyncRequest subclass; None means
+        # this request blocks until a reply arrives.
+        self.async=None
+        self.defaults = {}
+        self.argparse(name,args)
+        # Remember constructor arguments as per-instance defaults for req().
+        self.defaults = self.args
+        self.tid = 0
+
+    def argparse(self,name,args):
+        # Merge the positional name, keyword args, instance defaults and
+        # module defaults (in that priority order) into self.args.
+        if not name and self.defaults.has_key('name'):
+            args['name'] = self.defaults['name']
+        if type(name) is types.StringType:
+            args['name']=name
+        else:
+            if len(name) == 1:
+                if name[0]:
+                    args['name']=name[0]
+        # Optionally rotate the shared server list (crude load balancing).
+        if defaults['server_rotate'] and \
+                type(defaults['server']) == types.ListType:
+            defaults['server'] = defaults['server'][1:]+defaults['server'][:1]
+        for i in defaults.keys():
+            if not args.has_key(i):
+                if self.defaults.has_key(i):
+                    args[i]=self.defaults[i]
+                else:
+                    args[i]=defaults[i]
+        # Normalize a single server string into a one-element list.
+        if type(args['server']) == types.StringType:
+            args['server'] = [args['server']]
+        self.args=args
+
+    def socketInit(self,a,b):
+        # Create the socket; overridden by DnsAsyncRequest.
+        self.s = socket.socket(a,b)
+
+    def processUDPReply(self):
+        # Wait for a datagram (honoring the select() timeout if set),
+        # record timing, and unpack the reply.
+        if self.timeout > 0:
+            r,w,e = select.select([self.s],[],[],self.timeout)
+            if not len(r):
+                raise DNSError, 'Timeout'
+        (self.reply, self.from_address) = self.s.recvfrom(65535)
+        self.time_finish=time.time()
+        self.args['server']=self.ns
+        return self.processReply()
+
+    def _readall(self,f,count):
+        # Read exactly 'count' bytes from file object f, honoring the
+        # overall request timeout; raises DNSError on timeout or short read.
+        res = f.read(count)
+        while len(res) < count:
+            if self.timeout > 0:
+                # should we restart timeout everytime we get a dribble of data?
+                rem = self.time_start + self.timeout - time.time()
+                if rem <= 0: raise DNSError,'Timeout'
+                self.s.settimeout(rem)
+            buf = f.read(count - len(res))
+            if not buf:
+                raise DNSError,'incomplete reply - %d of %d read' % (len(res),count)
+            res += buf
+        return res
+
+    def processTCPReply(self):
+        # TCP replies are length-prefixed (RFC 1035 section 4.2.2): read
+        # the 2-byte length, then that many bytes of message.
+        if self.timeout > 0:
+            self.s.settimeout(self.timeout)
+        else:
+            self.s.settimeout(None)
+        f = self.s.makefile('r')
+        header = self._readall(f,2)
+        count = Lib.unpack16bit(header)
+        self.reply = self._readall(f,count)
+        self.time_finish=time.time()
+        self.args['server']=self.ns
+        return self.processReply()
+
+    def processReply(self):
+        # Unpack self.reply into a DnsResult, recording elapsed ms.
+        self.args['elapsed']=(self.time_finish-self.time_start)*1000
+        u = Lib.Munpacker(self.reply)
+        r=Lib.DnsResult(u,self.args)
+        r.args=self.args
+        #self.args=None  # mark this DnsRequest object as used.
+        return r
+        #### TODO TODO TODO ####
+#        if protocol == 'tcp' and qtype == Type.AXFR:
+#            while 1:
+#                header = f.read(2)
+#                if len(header) < 2:
+#                    print '========== EOF =========='
+#                    break
+#                count = Lib.unpack16bit(header)
+#                if not count:
+#                    print '========== ZERO COUNT =========='
+#                    break
+#                print '========== NEXT =========='
+#                reply = f.read(count)
+#                if len(reply) != count:
+#                    print '*** Incomplete reply ***'
+#                    break
+#                u = Lib.Munpacker(reply)
+#                Lib.dumpM(u)
+
+    def getSource(self):
+        "Pick random source port to avoid DNS cache poisoning attack."
+        while True:
+            try:
+                source_port = random.randint(1024,65535)
+                self.s.bind(('', source_port))
+                break
+            except socket.error, msg:
+                # Error 98, 'Address already in use'
+                # NOTE(review): errno 98 (EADDRINUSE) is Linux-specific;
+                # other platforms would re-raise here -- confirm.
+                if msg[0] != 98: raise
+
+    def conn(self):
+        # Bind a random source port, then connect to the current server.
+        self.getSource()
+        self.s.connect((self.ns,self.port))
+
+    def req(self,*name,**args):
+        " needs a refactoring "
+        # Build and send the query; when synchronous, returns the
+        # DnsResult.  Accepts the same keyword args as the constructor.
+        self.argparse(name,args)
+        #if not self.args:
+        #    raise DNSError,'reinitialize request before reuse'
+        protocol = self.args['protocol']
+        self.port = self.args['port']
+        # Random transaction id, matched against the reply in send*Request.
+        self.tid = random.randint(0,65535)
+        self.timeout = self.args['timeout'];
+        opcode = self.args['opcode']
+        rd = self.args['rd']
+        server=self.args['server']
+        # qtype may be given symbolically ('MX') or numerically (Type.MX).
+        if type(self.args['qtype']) == types.StringType:
+            try:
+                qtype = getattr(Type, string.upper(self.args['qtype']))
+            except AttributeError:
+                raise DNSError,'unknown query type'
+        else:
+            qtype=self.args['qtype']
+        if not self.args.has_key('name'):
+            print self.args
+            raise DNSError,'nothing to lookup'
+        qname = self.args['name']
+        # Zone transfers only make sense over TCP.
+        if qtype == Type.AXFR:
+            print 'Query type AXFR, protocol forced to TCP'
+            protocol = 'tcp'
+        #print 'QTYPE %d(%s)' % (qtype, Type.typestr(qtype))
+        m = Lib.Mpacker()
+        # jesus. keywords and default args would be good. TODO.
+        m.addHeader(self.tid,
+              0, opcode, 0, 0, rd, 0, 0, 0,
+              1, 0, 0, 0)
+        m.addQuestion(qname, qtype, Class.IN)
+        self.request = m.getbuf()
+        try:
+            if protocol == 'udp':
+                self.sendUDPRequest(server)
+            else:
+                self.sendTCPRequest(server)
+        except socket.error, reason:
+            raise DNSError, reason
+        if self.async:
+            return None
+        else:
+            if not self.response:
+                raise DNSError,'no working nameservers found'
+            return self.response
+
+    def sendUDPRequest(self, server):
+        "refactor me"
+        # Try each nameserver until one produces a usable reply.
+        self.response=None
+        for self.ns in server:
+            #print "trying udp",self.ns
+            try:
+                # A ':' in the address means IPv6; skip if unsupported.
+                if self.ns.count(':'):
+                    if hasattr(socket,'has_ipv6') and socket.has_ipv6:
+                        self.socketInit(socket.AF_INET6, socket.SOCK_DGRAM)
+                    else: continue
+                else:
+                    self.socketInit(socket.AF_INET, socket.SOCK_DGRAM)
+                try:
+                    # TODO. Handle timeouts &c correctly (RFC)
+                    self.time_start=time.time()
+                    self.conn()
+                    if not self.async:
+                        self.s.send(self.request)
+                        r=self.processUDPReply()
+                        # Since we bind to the source port and connect to the
+                        # destination port, we don't need to check that here,
+                        # but do make sure it's actually a DNS request that the
+                        # packet is in reply to.
+                        while r.header['id'] != self.tid        \
+                                or self.from_address[1] != self.port:
+                            r=self.processUDPReply()
+                        self.response = r
+                    # FIXME: check waiting async queries
+                finally:
+                    if not self.async:
+                        self.s.close()
+            except socket.error:
+                # Try the next configured nameserver.
+                continue
+            break
+
+    def sendTCPRequest(self, server):
+        " do the work of sending a TCP request "
+        # Try each nameserver until one answers with a matching tid.
+        self.response=None
+        for self.ns in server:
+            #print "trying tcp",self.ns
+            try:
+                # A ':' in the address means IPv6; skip if unsupported.
+                if self.ns.count(':'):
+                    if hasattr(socket,'has_ipv6') and socket.has_ipv6:
+                        self.socketInit(socket.AF_INET6, socket.SOCK_STREAM)
+                    else: continue
+                else:
+                    self.socketInit(socket.AF_INET, socket.SOCK_STREAM)
+                try:
+                    # TODO. Handle timeouts &c correctly (RFC)
+                    self.time_start=time.time()
+                    self.conn()
+                    # TCP messages are prefixed with their 16-bit length.
+                    buf = Lib.pack16bit(len(self.request))+self.request
+                    # Keep server from making sendall hang
+                    self.s.setblocking(0)
+                    # FIXME: throws WOULDBLOCK if request too large to fit in
+                    # system buffer
+                    self.s.sendall(buf)
+                    # SHUT_WR breaks blocking IO with google DNS (8.8.8.8)
+                    #self.s.shutdown(socket.SHUT_WR)
+                    r=self.processTCPReply()
+                    if r.header['id'] == self.tid:
+                        self.response = r
+                        break
+                finally:
+                    self.s.close()
+            except socket.error:
+                continue
+
+#class DnsAsyncRequest(DnsRequest):
+class DnsAsyncRequest(DnsRequest,asyncore.dispatcher_with_send):
+    " an asynchronous request object. out of date, probably broken "
+    # Non-blocking variant driven by the asyncore event loop; invokes
+    # 'done' callback (or showResult) when a UDP reply is processed.
+    def __init__(self,*name,**args):
+        DnsRequest.__init__(self, *name, **args)
+        # XXX todo
+        if args.has_key('done') and args['done']:
+            self.donefunc=args['done']
+        else:
+            self.donefunc=self.showResult
+        #self.realinit(name,args) # XXX todo
+        self.async=1
+    def conn(self):
+        self.getSource()
+        self.connect((self.ns,self.port))
+        self.time_start=time.time()
+        if self.args.has_key('start') and self.args['start']:
+            # NOTE(review): asyncore.dispatcher has no 'go' method; this
+            # branch raises AttributeError if ever taken.
+            asyncore.dispatcher.go(self)
+    def socketInit(self,a,b):
+        # Route socket creation through asyncore; self.s aliases self so
+        # the DnsRequest code paths keep working.
+        self.create_socket(a,b)
+        asyncore.dispatcher.__init__(self)
+        self.s=self
+    def handle_read(self):
+        if self.args['protocol'] == 'udp':
+            self.response=self.processUDPReply()
+            if self.donefunc:
+                apply(self.donefunc,(self,))
+    def handle_connect(self):
+        self.send(self.request)
+    def handle_write(self):
+        pass
+    def showResult(self,*s):
+        # Default completion callback: dump the reply to stdout.
+        self.response.show()
+
+#
+# $Log: Base.py,v $
+# Revision 1.12.2.15 2011/03/19 22:15:01 customdesigned
+# Added rotation of name servers - SF Patch ID: 2795929
+#
+# Revision 1.12.2.14 2011/03/17 03:46:03 customdesigned
+# Simple test for google DNS with tcp
+#
+# Revision 1.12.2.13 2011/03/17 03:08:03 customdesigned
+# Use blocking IO with timeout for TCP replies.
+#
+# Revision 1.12.2.12 2011/03/16 17:50:00 customdesigned
+# Fix non-blocking TCP replies. (untested)
+#
+# Revision 1.12.2.11 2010/01/02 16:31:23 customdesigned
+# Handle large TCP replies (untested).
+#
+# Revision 1.12.2.10 2008/08/01 03:58:03 customdesigned
+# Don't try to close socket when never opened.
+#
+# Revision 1.12.2.9 2008/08/01 03:48:31 customdesigned
+# Fix more breakage from port randomization patch. Support Ipv6 queries.
+#
+# Revision 1.12.2.8 2008/07/31 18:22:59 customdesigned
+# Wait until tcp response at least starts coming in.
+#
+# Revision 1.12.2.7 2008/07/28 01:27:00 customdesigned
+# Check configured port.
+#
+# Revision 1.12.2.6 2008/07/28 00:17:10 customdesigned
+# Randomize source ports.
+#
+# Revision 1.12.2.5 2008/07/24 20:10:55 customdesigned
+# Randomize tid in requests, and check in response.
+#
+# Revision 1.12.2.4 2007/05/22 20:28:31 customdesigned
+# Missing import Lib
+#
+# Revision 1.12.2.3 2007/05/22 20:25:52 customdesigned
+# Use socket.inetntoa,inetaton.
+#
+# Revision 1.12.2.2 2007/05/22 20:21:46 customdesigned
+# Trap socket error
+#
+# Revision 1.12.2.1 2007/05/22 20:19:35 customdesigned
+# Skip bogus but non-empty lines in resolv.conf
+#
+# Revision 1.12 2002/04/23 06:04:27 anthonybaxter
+# attempt to refactor the DNSRequest.req method a little. after doing a bit
+# of this, I've decided to bite the bullet and just rewrite the puppy. will
+# be checkin in some design notes, then unit tests and then writing the sod.
+#
+# Revision 1.11 2002/03/19 13:05:02 anthonybaxter
+# converted to class based exceptions (there goes the python1.4 compatibility :)
+#
+# removed a quite gross use of 'eval()'.
+#
+# Revision 1.10 2002/03/19 12:41:33 anthonybaxter
+# tabnannied and reindented everything. 4 space indent, no tabs.
+# yay.
+#
+# Revision 1.9 2002/03/19 12:26:13 anthonybaxter
+# death to leading tabs.
+#
+# Revision 1.8 2002/03/19 10:30:33 anthonybaxter
+# first round of major bits and pieces. The major stuff here (summarised
+# from my local, off-net CVS server :/ this will cause some oddities with
+# the
+#
+# tests/testPackers.py:
+# a large slab of unit tests for the packer and unpacker code in DNS.Lib
+#
+# DNS/Lib.py:
+# placeholder for addSRV.
+# added 'klass' to addA, make it the same as the other A* records.
+# made addTXT check for being passed a string, turn it into a length 1 list.
+# explicitly check for adding a string of length > 255 (prohibited).
+# a bunch of cleanups from a first pass with pychecker
+# new code for pack/unpack. the bitwise stuff uses struct, for a smallish
+# (disappointly small, actually) improvement, while addr2bin is much
+# much faster now.
+#
+# DNS/Base.py:
+# added DiscoverNameServers. This automatically does the right thing
+# on unix/ win32. No idea how MacOS handles this. *sigh*
+# Incompatible change: Don't use ParseResolvConf on non-unix, use this
+# function, instead!
+# a bunch of cleanups from a first pass with pychecker
+#
+# Revision 1.5 2001/08/09 09:22:28 anthonybaxter
+# added what I hope is win32 resolver lookup support. I'll need to try
+# and figure out how to get the CVS checkout onto my windows machine to
+# make sure it works (wow, doing something other than games on the
+# windows machine :)
+#
+# Code from Wolfgang.Strobl@gmd.de
+# win32dns.py from
+# http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/66260
+#
+# Really, ParseResolvConf() should be renamed "FindNameServers" or
+# some such.
+#
+# Revision 1.4 2001/08/09 09:08:55 anthonybaxter
+# added identifying header to top of each file
+#
+# Revision 1.3 2001/07/19 07:20:12 anthony
+# Handle blank resolv.conf lines.
+# Patch from Bastian Kleineidam
+#
+# Revision 1.2 2001/07/19 06:57:07 anthony
+# cvs keywords added
+#
+#
--- /dev/null
+"""
+$Id: Class.py,v 1.6.2.1 2011/03/16 20:06:39 customdesigned Exp $
+
+ This file is part of the pydns project.
+ Homepage: http://pydns.sourceforge.net
+
+ This code is covered by the standard Python License. See LICENSE for details.
+
+ CLASS values (section 3.2.4)
+"""
+
+
+IN = 1          # the Internet
+CS = 2          # the CSNET class (Obsolete - used only for examples in
+                # some obsolete RFCs)
+CH = 3          # the CHAOS class. When someone shows me python running on
+                # a Symbolics Lisp machine, I'll look at implementing this.
+HS = 4          # Hesiod [Dyer 87]
+
+# QCLASS values (section 3.2.5)
+
+ANY = 255       # any class
+
+
+# Construct reverse mapping dictionary
+
+_names = dir()
+classmap = {}
+for _name in _names:
+    # Map numeric value -> symbolic name for every public constant above.
+    if _name[0] != '_': classmap[eval(_name)] = _name
+
+def classstr(klass):
+ if classmap.has_key(klass): return classmap[klass]
+ else: return `klass`
+
+#
+# $Log: Class.py,v $
+# Revision 1.6.2.1 2011/03/16 20:06:39 customdesigned
+# Refer to explicit LICENSE file.
+#
+# Revision 1.6 2002/04/23 12:52:19 anthonybaxter
+# cleanup whitespace.
+#
+# Revision 1.5 2002/03/19 12:41:33 anthonybaxter
+# tabnannied and reindented everything. 4 space indent, no tabs.
+# yay.
+#
+# Revision 1.4 2002/03/19 12:26:13 anthonybaxter
+# death to leading tabs.
+#
+# Revision 1.3 2001/08/09 09:08:55 anthonybaxter
+# added identifying header to top of each file
+#
+# Revision 1.2 2001/07/19 06:57:07 anthony
+# cvs keywords added
+#
+#
--- /dev/null
+# -*- encoding: utf-8 -*-
+"""
+ $Id: Lib.py,v 1.11.2.8 2011/03/16 20:06:39 customdesigned Exp $
+
+ This file is part of the pydns project.
+ Homepage: http://pydns.sourceforge.net
+
+ This code is covered by the standard Python License. See LICENSE for details.
+
+ Library code. Largely this is packers and unpackers for various types.
+"""
+
+#
+#
+# See RFC 1035:
+# ------------------------------------------------------------------------
+# Network Working Group P. Mockapetris
+# Request for Comments: 1035 ISI
+# November 1987
+# Obsoletes: RFCs 882, 883, 973
+#
+# DOMAIN NAMES - IMPLEMENTATION AND SPECIFICATION
+# ------------------------------------------------------------------------
+
+
+import string, types
+
+import Type
+import Class
+import Opcode
+import Status
+import DNS
+
+from Base import DNSError
+
+# Label-encoding knobs; DNS/__init__ re-exports these (see addname).
+LABEL_UTF8 = False
+LABEL_ENCODING = 'idna'
+
+# Raised for malformed wire data / unpackable names respectively.
+class UnpackError(DNSError): pass
+class PackError(DNSError): pass
+
+# Low-level 16 and 32 bit integer packing and unpacking
+
+from struct import pack as struct_pack
+from struct import unpack as struct_unpack
+from socket import inet_ntoa, inet_aton
+
+def pack16bit(n):
+    # Network byte order (big-endian) unsigned 16-bit.
+    return struct_pack('!H', n)
+
+def pack32bit(n):
+    # Network byte order unsigned 32-bit.
+    return struct_pack('!L', n)
+
+def unpack16bit(s):
+    return struct_unpack('!H', s)[0]
+
+def unpack32bit(s):
+    return struct_unpack('!L', s)[0]
+
+def addr2bin(addr):
+    # NOTE(review): '!l' is signed, so addresses >= 128.0.0.0 unpack as
+    # negative ints, while bin2addr below packs unsigned '!L'.  Kept
+    # as-is for compatibility with existing callers.
+    return struct_unpack('!l', inet_aton(addr))[0]
+
+def bin2addr(n):
+    return inet_ntoa(struct_pack('!L', n))
+
+# Packing class
+
+class Packer:
+    " packer base class. supports basic byte/16bit/32bit/addr/string/name "
+    def __init__(self):
+        self.buf = ''
+        # Maps already-packed (upper-cased) domain suffixes to their buffer
+        # offsets, for RFC 1035 section 4.1.4 name compression.
+        self.index = {}
+    def getbuf(self):
+        return self.buf
+    def addbyte(self, c):
+        if len(c) != 1: raise TypeError, 'one character expected'
+        self.buf = self.buf + c
+    def addbytes(self, bytes):
+        self.buf = self.buf + bytes
+    def add16bit(self, n):
+        self.buf = self.buf + pack16bit(n)
+    def add32bit(self, n):
+        self.buf = self.buf + pack32bit(n)
+    def addaddr(self, addr):
+        # Dotted-quad IPv4 address.
+        n = addr2bin(addr)
+        self.buf = self.buf + pack32bit(n)
+    def addstring(self, s):
+        # Character-strings are length-prefixed and limited to 255 bytes.
+        if len(s) > 255:
+            raise ValueError, "Can't encode string of length "+ \
+                 "%s (> 255)"%(len(s))
+        self.addbyte(chr(len(s)))
+        self.addbytes(s)
+    def addname(self, name):
+        # Domain name packing (section 4.1.4)
+        # Add a domain name to the buffer, possibly using pointers.
+        # The case of the first occurrence of a name is preserved.
+        # Redundant dots are ignored.
+        list = []
+        for label in string.splitfields(name, '.'):
+            if not label:
+                raise PackError, 'empty label'
+            list.append(label)
+        keys = []
+        # Find the longest suffix of this name already in the buffer
+        # (comparison is case-folded via upper()).
+        for i in range(len(list)):
+            key = string.upper(string.joinfields(list[i:], '.'))
+            keys.append(key)
+            if self.index.has_key(key):
+                pointer = self.index[key]
+                break
+        else:
+            i = len(list)
+            pointer = None
+        # Do it into temporaries first so exceptions don't
+        # mess up self.index and self.buf
+        buf = ''
+        offset = len(self.buf)
+        index = []
+        if DNS.LABEL_UTF8:
+            enc = 'utf8'
+        else:
+            enc = DNS.LABEL_ENCODING
+        for j in range(i):
+            label = list[j]
+            try:
+                label = label.encode(enc)
+            except UnicodeEncodeError:
+                if not DNS.LABEL_UTF8: raise
+                # NOTE(review): in a Python 2 byte-string literal '\ufeff'
+                # is not a BOM escape -- confirm the intended prefix here.
+                if not label.startswith('\ufeff'):
+                    label = '\ufeff'+label
+                label = label.encode(enc)
+            n = len(label)
+            if n > 63:
+                raise PackError, 'label too long'
+            # Compression pointers are 14 bits wide, so only offsets below
+            # 0x3FFF may be recorded as pointer targets.
+            if offset + len(buf) < 0x3FFF:
+                index.append((keys[j], offset + len(buf)))
+            else:
+                print 'DNS.Lib.Packer.addname:',
+                print 'warning: pointer too big'
+            buf = buf + (chr(n) + label)
+        if pointer:
+            # Terminate with a pointer to the shared suffix.
+            buf = buf + pack16bit(pointer | 0xC000)
+        else:
+            # Terminate with the root (zero-length) label.
+            buf = buf + '\0'
+        self.buf = self.buf + buf
+        for key, value in index:
+            self.index[key] = value
+    def dump(self):
+        # Debug helper: print the compression index and a hex-ish dump of
+        # the buffer to stdout.
+        keys = self.index.keys()
+        keys.sort()
+        print '-'*40
+        for key in keys:
+            print '%20s %3d' % (key, self.index[key])
+        print '-'*40
+        space = 1
+        for i in range(0, len(self.buf)+1, 2):
+            if self.buf[i:i+2] == '**':
+                if not space: print
+                space = 1
+                continue
+            space = 0
+            print '%4d' % i,
+            for c in self.buf[i:i+2]:
+                if ' ' < c < '\177':
+                    print ' %c' % c,
+                else:
+                    print '%2d' % ord(c),
+            print
+        print '-'*40
+
+
+# Unpacking class
+
+
+class Unpacker:
+    # Sequential reader over a DNS wire-format buffer; mirrors Packer.
+    def __init__(self, buf):
+        self.buf = buf
+        self.offset = 0
+    def getbyte(self):
+        if self.offset >= len(self.buf):
+            raise UnpackError, "Ran off end of data"
+        c = self.buf[self.offset]
+        self.offset = self.offset + 1
+        return c
+    def getbytes(self, n):
+        s = self.buf[self.offset : self.offset + n]
+        if len(s) != n: raise UnpackError, 'not enough data left'
+        self.offset = self.offset + n
+        return s
+    def get16bit(self):
+        return unpack16bit(self.getbytes(2))
+    def get32bit(self):
+        return unpack32bit(self.getbytes(4))
+    def getaddr(self):
+        # Dotted-quad IPv4 address.
+        return bin2addr(self.get32bit())
+    def getstring(self):
+        # Length-prefixed character-string.
+        return self.getbytes(ord(self.getbyte()))
+    def getname(self):
+        # Domain name unpacking (section 4.1.4)
+        c = self.getbyte()
+        i = ord(c)
+        if i & 0xC0 == 0xC0:
+            # Compression pointer: top two bits set, remaining 14 bits
+            # are the offset where the rest of the name lives.
+            d = self.getbyte()
+            j = ord(d)
+            pointer = ((i<<8) | j) & ~0xC000
+            save_offset = self.offset
+            try:
+                self.offset = pointer
+                # NOTE(review): a malicious pointer loop would recurse
+                # without bound here.
+                domain = self.getname()
+            finally:
+                self.offset = save_offset
+            return domain
+        if i == 0:
+            # Root label terminates the name.
+            return ''
+        domain = self.getbytes(i)
+        remains = self.getname()
+        if not remains:
+            return domain
+        else:
+            return domain + '.' + remains
+
+
+# Test program for packin/unpacking (section 4.1.4)
+
+def testpacker():
+    # Smoke/benchmark test of Packer/Unpacker round-tripping.
+    # NOTE(review): depends on the ancient 'timing' module, which is not
+    # part of the modern standard library.
+    N = 2500
+    R = range(N)
+    import timing
+    # See section 4.1.4 of RFC 1035
+    timing.start()
+    for i in R:
+        p = Packer()
+        p.addaddr('192.168.0.1')
+        p.addbytes('*' * 20)
+        p.addname('f.ISI.ARPA')
+        p.addbytes('*' * 8)
+        p.addname('Foo.F.isi.arpa')
+        p.addbytes('*' * 18)
+        p.addname('arpa')
+        p.addbytes('*' * 26)
+        p.addname('')
+    timing.finish()
+    print timing.milli(), "ms total for packing"
+    print round(timing.milli() / i, 4), 'ms per packing'
+    #p.dump()
+    u = Unpacker(p.buf)
+    u.getaddr()
+    u.getbytes(20)
+    u.getname()
+    u.getbytes(8)
+    u.getname()
+    u.getbytes(18)
+    u.getname()
+    u.getbytes(26)
+    u.getname()
+    timing.start()
+    for i in R:
+        u = Unpacker(p.buf)
+
+        res = (u.getaddr(),
+               u.getbytes(20),
+               u.getname(),
+               u.getbytes(8),
+               u.getname(),
+               u.getbytes(18),
+               u.getname(),
+               u.getbytes(26),
+               u.getname())
+    timing.finish()
+    print timing.milli(), "ms total for unpacking"
+    print round(timing.milli() / i, 4), 'ms per unpacking'
+    #for item in res: print item
+
+
+# Pack/unpack RR toplevel format (section 3.2.1)
+
+class RRpacker(Packer):
+    # Packs resource records (RFC 1035 section 3.2.1): a common header
+    # followed by type-specific RDATA whose length is patched afterwards.
+    def __init__(self):
+        Packer.__init__(self)
+        # Buffer offset where the current record's RDATA starts.
+        self.rdstart = None
+    def addRRheader(self, name, type, klass, ttl, *rest):
+        self.addname(name)
+        self.add16bit(type)
+        self.add16bit(klass)
+        self.add32bit(ttl)
+        if rest:
+            if rest[1:]: raise TypeError, 'too many args'
+            rdlength = rest[0]
+        else:
+            # Placeholder; fixed up by patchrdlength() once RDATA is known.
+            rdlength = 0
+        self.add16bit(rdlength)
+        self.rdstart = len(self.buf)
+    def patchrdlength(self):
+        # Rewrite the RDLENGTH field to match what was actually appended.
+        rdlength = unpack16bit(self.buf[self.rdstart-2:self.rdstart])
+        if rdlength == len(self.buf) - self.rdstart:
+            return
+        rdata = self.buf[self.rdstart:]
+        save_buf = self.buf
+        ok = 0
+        try:
+            self.buf = self.buf[:self.rdstart-2]
+            self.add16bit(len(rdata))
+            self.buf = self.buf + rdata
+            ok = 1
+        finally:
+            # Restore the original buffer if the splice failed part way.
+            if not ok: self.buf = save_buf
+    def endRR(self):
+        if self.rdstart is not None:
+            self.patchrdlength()
+        self.rdstart = None
+    def getbuf(self):
+        if self.rdstart is not None: self.patchrdlength()
+        return Packer.getbuf(self)
+    # Standard RRs (section 3.3)
+    def addCNAME(self, name, klass, ttl, cname):
+        self.addRRheader(name, Type.CNAME, klass, ttl)
+        self.addname(cname)
+        self.endRR()
+    def addHINFO(self, name, klass, ttl, cpu, os):
+        self.addRRheader(name, Type.HINFO, klass, ttl)
+        self.addstring(cpu)
+        self.addstring(os)
+        self.endRR()
+    def addMX(self, name, klass, ttl, preference, exchange):
+        self.addRRheader(name, Type.MX, klass, ttl)
+        self.add16bit(preference)
+        self.addname(exchange)
+        self.endRR()
+    def addNS(self, name, klass, ttl, nsdname):
+        self.addRRheader(name, Type.NS, klass, ttl)
+        self.addname(nsdname)
+        self.endRR()
+    def addPTR(self, name, klass, ttl, ptrdname):
+        self.addRRheader(name, Type.PTR, klass, ttl)
+        self.addname(ptrdname)
+        self.endRR()
+    def addSOA(self, name, klass, ttl,
+               mname, rname, serial, refresh, retry, expire, minimum):
+        self.addRRheader(name, Type.SOA, klass, ttl)
+        self.addname(mname)
+        self.addname(rname)
+        self.add32bit(serial)
+        self.add32bit(refresh)
+        self.add32bit(retry)
+        self.add32bit(expire)
+        self.add32bit(minimum)
+        self.endRR()
+    def addTXT(self, name, klass, ttl, list):
+        self.addRRheader(name, Type.TXT, klass, ttl)
+        # Accept a bare string as a one-element list.
+        if type(list) is types.StringType:
+            list = [list]
+        for txtdata in list:
+            self.addstring(txtdata)
+        self.endRR()
+    # Internet specific RRs (section 3.4) -- class = IN
+    def addA(self, name, klass, ttl, address):
+        self.addRRheader(name, Type.A, klass, ttl)
+        self.addaddr(address)
+        self.endRR()
+    def addWKS(self, name, ttl, address, protocol, bitmap):
+        self.addRRheader(name, Type.WKS, Class.IN, ttl)
+        self.addaddr(address)
+        self.addbyte(chr(protocol))
+        self.addbytes(bitmap)
+        self.endRR()
+    def addSRV(self):
+        raise NotImplementedError
+
+def prettyTime(seconds):
+ if seconds<60:
+ return seconds,"%d seconds"%(seconds)
+ if seconds<3600:
+ return seconds,"%d minutes"%(seconds/60)
+ if seconds<86400:
+ return seconds,"%d hours"%(seconds/3600)
+ if seconds<604800:
+ return seconds,"%d days"%(seconds/86400)
+ else:
+ return seconds,"%d weeks"%(seconds/604800)
+
+
+class RRunpacker(Unpacker):
+    # Unpacks resource records; getRRheader() must be called first so the
+    # get*data() methods know where the current RDATA ends.
+    def __init__(self, buf):
+        Unpacker.__init__(self, buf)
+        # Buffer offset just past the current record's RDATA.
+        self.rdend = None
+    def getRRheader(self):
+        name = self.getname()
+        rrtype = self.get16bit()
+        klass = self.get16bit()
+        ttl = self.get32bit()
+        rdlength = self.get16bit()
+        self.rdend = self.offset + rdlength
+        return (name, rrtype, klass, ttl, rdlength)
+    def endRR(self):
+        # Sanity check that RDATA was consumed exactly.
+        if self.offset != self.rdend:
+            raise UnpackError, 'end of RR not reached'
+    def getCNAMEdata(self):
+        return self.getname()
+    def getHINFOdata(self):
+        return self.getstring(), self.getstring()
+    def getMXdata(self):
+        # (preference, exchange)
+        return self.get16bit(), self.getname()
+    def getNSdata(self):
+        return self.getname()
+    def getPTRdata(self):
+        return self.getname()
+    def getSOAdata(self):
+        # mname, rname, then labeled timers (prettyTime adds a readable
+        # string alongside each raw value).
+        return self.getname(), \
+               self.getname(), \
+               ('serial',)+(self.get32bit(),), \
+               ('refresh ',)+prettyTime(self.get32bit()), \
+               ('retry',)+prettyTime(self.get32bit()), \
+               ('expire',)+prettyTime(self.get32bit()), \
+               ('minimum',)+prettyTime(self.get32bit())
+    def getTXTdata(self):
+        # TXT RDATA is a sequence of character-strings.
+        list = []
+        while self.offset != self.rdend:
+            list.append(self.getstring())
+        return list
+    # SPF (type 99) shares the TXT wire format.
+    getSPFdata = getTXTdata
+    def getAdata(self):
+        return self.getaddr()
+    def getWKSdata(self):
+        address = self.getaddr()
+        protocol = ord(self.getbyte())
+        bitmap = self.getbytes(self.rdend - self.offset)
+        return address, protocol, bitmap
+    def getSRVdata(self):
+        """
+        _Service._Proto.Name TTL Class SRV Priority Weight Port Target
+        """
+        priority = self.get16bit()
+        weight = self.get16bit()
+        port = self.get16bit()
+        target = self.getname()
+        #print '***priority, weight, port, target', priority, weight, port, target
+        return priority, weight, port, target
+
+
+# Pack/unpack Message Header (section 4.1)
+
+class Hpacker(Packer):
+    # Packs the 12-byte DNS message header (RFC 1035 section 4.1.1).
+    def addHeader(self, id, qr, opcode, aa, tc, rd, ra, z, rcode,
+          qdcount, ancount, nscount, arcount):
+        self.add16bit(id)
+        # Fold the flags into one 16-bit word:
+        # QR(1) OPCODE(4) AA(1) TC(1) RD(1) RA(1) Z(3) RCODE(4).
+        self.add16bit((qr&1)<<15 | (opcode&0xF)<<11 | (aa&1)<<10
+                | (tc&1)<<9 | (rd&1)<<8 | (ra&1)<<7
+                | (z&7)<<4 | (rcode&0xF))
+        self.add16bit(qdcount)
+        self.add16bit(ancount)
+        self.add16bit(nscount)
+        self.add16bit(arcount)
+
+class Hunpacker(Unpacker):
+    # Unpacks the DNS message header; inverse of Hpacker.addHeader.
+    def getHeader(self):
+        id = self.get16bit()
+        flags = self.get16bit()
+        # Split the flags word back into its bit fields.
+        qr, opcode, aa, tc, rd, ra, z, rcode = (
+            (flags>>15)&1,
+            (flags>>11)&0xF,
+            (flags>>10)&1,
+            (flags>>9)&1,
+            (flags>>8)&1,
+            (flags>>7)&1,
+            (flags>>4)&7,
+            (flags>>0)&0xF)
+        qdcount = self.get16bit()
+        ancount = self.get16bit()
+        nscount = self.get16bit()
+        arcount = self.get16bit()
+        return (id, qr, opcode, aa, tc, rd, ra, z, rcode,
+                qdcount, ancount, nscount, arcount)
+
+
+# Pack/unpack Question (section 4.1.2)
+
+class Qpacker(Packer):
+    # Packs the question section (RFC 1035 section 4.1.2).
+    def addQuestion(self, qname, qtype, qclass):
+        self.addname(qname)
+        self.add16bit(qtype)
+        self.add16bit(qclass)
+
+class Qunpacker(Unpacker):
+    # Unpacks one question entry: (qname, qtype, qclass).
+    def getQuestion(self):
+        return self.getname(), self.get16bit(), self.get16bit()
+
+
+# Pack/unpack Message(section 4)
+# NB the order of the base classes is important for __init__()!
+
+class Mpacker(RRpacker, Qpacker, Hpacker):
+    # Full DNS message packer: header + question + resource records.
+    pass
+
+class Munpacker(RRunpacker, Qunpacker, Hunpacker):
+    # Full DNS message unpacker; counterpart of Mpacker.
+    pass
+
+
+# Routines to print an unpacker to stdout, for debugging.
+# These affect the unpacker's current position!
+
+def dumpM(u):
+    # Print an entire DNS message from unpacker u to stdout (debugging);
+    # advances u's offset past the whole message.
+    print 'HEADER:',
+    (id, qr, opcode, aa, tc, rd, ra, z, rcode,
+     qdcount, ancount, nscount, arcount) = u.getHeader()
+    print 'id=%d,' % id,
+    print 'qr=%d, opcode=%d, aa=%d, tc=%d, rd=%d, ra=%d, z=%d, rcode=%d,' \
+          % (qr, opcode, aa, tc, rd, ra, z, rcode)
+    if tc: print '*** response truncated! ***'
+    if rcode: print '*** nonzero error code! (%d) ***' % rcode
+    print '  qdcount=%d, ancount=%d, nscount=%d, arcount=%d' \
+          % (qdcount, ancount, nscount, arcount)
+    for i in range(qdcount):
+        print 'QUESTION %d:' % i,
+        dumpQ(u)
+    for i in range(ancount):
+        print 'ANSWER %d:' % i,
+        dumpRR(u)
+    for i in range(nscount):
+        print 'AUTHORITY RECORD %d:' % i,
+        dumpRR(u)
+    for i in range(arcount):
+        print 'ADDITIONAL RECORD %d:' % i,
+        dumpRR(u)
+
+class DnsResult:
+    # Parsed DNS reply: header fields plus question/answer/authority/
+    # additional sections, each stored as a list of dicts.
+
+    def __init__(self,u,args):
+        self.header={}
+        self.questions=[]
+        self.answers=[]
+        self.authority=[]
+        self.additional=[]
+        self.args=args
+        self.storeM(u)
+
+    def show(self):
+        # Pretty-print the reply to stdout in a dig-like format.
+        import time
+        print '; <<>> PDG.py 1.0 <<>> %s %s'%(self.args['name'],
+                                              self.args['qtype'])
+        opt=""
+        if self.args['rd']:
+            opt=opt+'recurs '
+        h=self.header
+        print ';; options: '+opt
+        print ';; got answer:'
+        print ';; ->>HEADER<<- opcode %s, status %s, id %d'%(
+            h['opcode'],h['status'],h['id'])
+        flags=filter(lambda x,h=h:h[x],('qr','aa','rd','ra','tc'))
+        print ';; flags: %s; Ques: %d, Ans: %d, Auth: %d, Addit: %d'%(
+            string.join(flags),h['qdcount'],h['ancount'],h['nscount'],
+            h['arcount'])
+        print ';; QUESTIONS:'
+        for q in self.questions:
+            print ';;      %s, type = %s, class = %s'%(q['qname'],q['qtypestr'],
+                                                       q['qclassstr'])
+        print
+        print ';; ANSWERS:'
+        for a in self.answers:
+            print '%-20s    %-6s  %-6s  %s'%(a['name'],`a['ttl']`,a['typename'],
+                                             a['data'])
+        print
+        print ';; AUTHORITY RECORDS:'
+        for a in self.authority:
+            print '%-20s    %-6s  %-6s  %s'%(a['name'],`a['ttl']`,a['typename'],
+                                             a['data'])
+        print
+        print ';; ADDITIONAL RECORDS:'
+        for a in self.additional:
+            print '%-20s    %-6s  %-6s  %s'%(a['name'],`a['ttl']`,a['typename'],
+                                             a['data'])
+        print
+        if self.args.has_key('elapsed'):
+            print ';; Total query time: %d msec'%self.args['elapsed']
+        print ';; To SERVER: %s'%(self.args['server'])
+        print ';; WHEN: %s'%time.ctime(time.time())
+
+    def storeM(self,u):
+        # Consume the whole message from unpacker u into this object.
+        (self.header['id'], self.header['qr'], self.header['opcode'],
+         self.header['aa'], self.header['tc'], self.header['rd'],
+         self.header['ra'], self.header['z'], self.header['rcode'],
+         self.header['qdcount'], self.header['ancount'],
+         self.header['nscount'], self.header['arcount']) = u.getHeader()
+        self.header['opcodestr']=Opcode.opcodestr(self.header['opcode'])
+        self.header['status']=Status.statusstr(self.header['rcode'])
+        for i in range(self.header['qdcount']):
+            #print 'QUESTION %d:' % i,
+            self.questions.append(self.storeQ(u))
+        for i in range(self.header['ancount']):
+            #print 'ANSWER %d:' % i,
+            self.answers.append(self.storeRR(u))
+        for i in range(self.header['nscount']):
+            #print 'AUTHORITY RECORD %d:' % i,
+            self.authority.append(self.storeRR(u))
+        for i in range(self.header['arcount']):
+            #print 'ADDITIONAL RECORD %d:' % i,
+            self.additional.append(self.storeRR(u))
+
+    def storeQ(self,u):
+        # One question entry, with symbolic type/class names added.
+        q={}
+        q['qname'], q['qtype'], q['qclass'] = u.getQuestion()
+        q['qtypestr']=Type.typestr(q['qtype'])
+        q['qclassstr']=Class.classstr(q['qclass'])
+        return q
+
+    def storeRR(self,u):
+        # One resource record; RDATA is decoded by the type-specific
+        # get<TYPE>data method when the unpacker provides one, otherwise
+        # kept as raw bytes.
+        r={}
+        r['name'],r['type'],r['class'],r['ttl'],r['rdlength'] = u.getRRheader()
+        r['typename'] = Type.typestr(r['type'])
+        r['classstr'] = Class.classstr(r['class'])
+        #print 'name=%s, type=%d(%s), class=%d(%s), ttl=%d' \
+        #      % (name,
+        #        type, typename,
+        #        klass, Class.classstr(class),
+        #        ttl)
+        mname = 'get%sdata' % r['typename']
+        if hasattr(u, mname):
+            r['data']=getattr(u, mname)()
+        else:
+            r['data']=u.getbytes(r['rdlength'])
+        return r
+
+def dumpQ(u):
+    # Print one question entry from unpacker u (debug helper).
+    qname, qtype, qclass = u.getQuestion()
+    print 'qname=%s, qtype=%d(%s), qclass=%d(%s)' \
+          % (qname,
+             qtype, Type.typestr(qtype),
+             qclass, Class.classstr(qclass))
+
+def dumpRR(u):
+    # Print one resource record from unpacker u (debug helper); decodes
+    # RDATA via get<TYPE>data when available, else dumps raw bytes.
+    name, type, klass, ttl, rdlength = u.getRRheader()
+    typename = Type.typestr(type)
+    print 'name=%s, type=%d(%s), class=%d(%s), ttl=%d' \
+          % (name,
+             type, typename,
+             klass, Class.classstr(klass),
+             ttl)
+    mname = 'get%sdata' % typename
+    if hasattr(u, mname):
+        print '  formatted rdata:', getattr(u, mname)()
+    else:
+        print '  binary rdata:', u.getbytes(rdlength)
+
+if __name__ == "__main__":
+    # Self-test entry point; testpacker is defined earlier in this module.
+    testpacker()
+#
+# $Log: Lib.py,v $
+# Revision 1.11.2.8 2011/03/16 20:06:39 customdesigned
+# Refer to explicit LICENSE file.
+#
+# Revision 1.11.2.7 2009/06/09 18:39:06 customdesigned
+# Built-in SPF support
+#
+# Revision 1.11.2.6 2008/10/15 22:34:06 customdesigned
+# Default to idna encoding.
+#
+# Revision 1.11.2.5 2008/09/17 17:35:14 customdesigned
+# Use 7-bit ascii encoding, because case folding needs to be disabled
+# before utf8 is safe to use, even experimentally.
+#
+# Revision 1.11.2.4 2008/09/17 16:09:53 customdesigned
+# Encode unicode labels as UTF-8
+#
+# Revision 1.11.2.3 2007/05/22 20:27:40 customdesigned
+# Fix unpacker underflow.
+#
+# Revision 1.11.2.2 2007/05/22 20:25:53 customdesigned
+# Use socket.inetntoa,inetaton.
+#
+# Revision 1.11.2.1 2007/05/22 20:20:39 customdesigned
+# Mark utf-8 encoding
+#
+# Revision 1.11 2002/03/19 13:05:02 anthonybaxter
+# converted to class based exceptions (there goes the python1.4 compatibility :)
+#
+# removed a quite gross use of 'eval()'.
+#
+# Revision 1.10 2002/03/19 12:41:33 anthonybaxter
+# tabnannied and reindented everything. 4 space indent, no tabs.
+# yay.
+#
+# Revision 1.9 2002/03/19 10:30:33 anthonybaxter
+# first round of major bits and pieces. The major stuff here (summarised
+# from my local, off-net CVS server :/ this will cause some oddities with
+# the
+#
+# tests/testPackers.py:
+# a large slab of unit tests for the packer and unpacker code in DNS.Lib
+#
+# DNS/Lib.py:
+# placeholder for addSRV.
+# added 'klass' to addA, make it the same as the other A* records.
+# made addTXT check for being passed a string, turn it into a length 1 list.
+# explicitly check for adding a string of length > 255 (prohibited).
+# a bunch of cleanups from a first pass with pychecker
+# new code for pack/unpack. the bitwise stuff uses struct, for a smallish
+# (disappointly small, actually) improvement, while addr2bin is much
+# much faster now.
+#
+# DNS/Base.py:
+# added DiscoverNameServers. This automatically does the right thing
+# on unix/ win32. No idea how MacOS handles this. *sigh*
+# Incompatible change: Don't use ParseResolvConf on non-unix, use this
+# function, instead!
+# a bunch of cleanups from a first pass with pychecker
+#
+# Revision 1.8 2001/08/09 09:08:55 anthonybaxter
+# added identifying header to top of each file
+#
+# Revision 1.7 2001/07/19 07:50:44 anthony
+# Added SRV (RFC 2782) support. Code from Michael Ströder.
+#
+# Revision 1.6 2001/07/19 07:39:18 anthony
+# 'type' -> 'rrtype' in getRRheader(). Fix from Michael Ströder.
+#
+# Revision 1.5 2001/07/19 07:34:19 anthony
+# oops. glitch in storeRR (fixed now).
+# Reported by Bastian Kleineidam and by greg lin.
+#
+# Revision 1.4 2001/07/19 07:16:42 anthony
+# Changed (opcode&0xF)<<11 to (opcode*0xF)<<11.
+# Patch from Timothy J. Miller.
+#
+# Revision 1.3 2001/07/19 06:57:07 anthony
+# cvs keywords added
+#
+#
--- /dev/null
+"""
+ $Id: Opcode.py,v 1.6.2.1 2011/03/16 20:06:39 customdesigned Exp $
+
+ This file is part of the pydns project.
+ Homepage: http://pydns.sourceforge.net
+
+ This code is covered by the standard Python License. See LICENSE for details.
+
+ Opcode values in message header. RFC 1035, 1996, 2136.
+"""
+
+
+
+# DNS opcodes: QUERY/IQUERY/STATUS from RFC 1035, NOTIFY from RFC 1996,
+# UPDATE from RFC 2136.  Value 3 is unassigned.
+QUERY = 0
+IQUERY = 1
+STATUS = 2
+NOTIFY = 4
+UPDATE = 5
+
+# Construct reverse mapping dictionary
+
+# dir() lists the names defined above; eval() turns each name back into
+# its numeric value, so opcodemap maps value -> symbolic name.
+_names = dir()
+opcodemap = {}
+for _name in _names:
+    if _name[0] != '_': opcodemap[eval(_name)] = _name
+
+def opcodestr(opcode):
+    """Return the symbolic name for `opcode`, or its repr when unknown."""
+    if opcodemap.has_key(opcode): return opcodemap[opcode]
+    else: return `opcode`
+
+#
+# $Log: Opcode.py,v $
+# Revision 1.6.2.1 2011/03/16 20:06:39 customdesigned
+# Refer to explicit LICENSE file.
+#
+# Revision 1.6 2002/04/23 10:51:43 anthonybaxter
+# Added UPDATE, NOTIFY.
+#
+# Revision 1.5 2002/03/19 12:41:33 anthonybaxter
+# tabnannied and reindented everything. 4 space indent, no tabs.
+# yay.
+#
+# Revision 1.4 2002/03/19 12:26:13 anthonybaxter
+# death to leading tabs.
+#
+# Revision 1.3 2001/08/09 09:08:55 anthonybaxter
+# added identifying header to top of each file
+#
+# Revision 1.2 2001/07/19 06:57:07 anthony
+# cvs keywords added
+#
+#
--- /dev/null
+"""
+ $Id: Status.py,v 1.7.2.1 2011/03/16 20:06:39 customdesigned Exp $
+
+ This file is part of the pydns project.
+ Homepage: http://pydns.sourceforge.net
+
+ This code is covered by the standard Python License. See LICENSE for details.
+
+ Status values in message header
+"""
+
+NOERROR = 0 # No Error [RFC 1035]
+FORMERR = 1 # Format Error [RFC 1035]
+SERVFAIL = 2 # Server Failure [RFC 1035]
+NXDOMAIN = 3 # Non-Existent Domain [RFC 1035]
+NOTIMP = 4 # Not Implemented [RFC 1035]
+REFUSED = 5 # Query Refused [RFC 1035]
+YXDOMAIN = 6 # Name Exists when it should not [RFC 2136]
+YXRRSET = 7 # RR Set Exists when it should not [RFC 2136]
+NXRRSET = 8 # RR Set that should exist does not [RFC 2136]
+NOTAUTH = 9 # Server Not Authoritative for zone [RFC 2136]
+NOTZONE = 10 # Name not contained in zone [RFC 2136]
+# NOTE(review): BADVERS and BADSIG intentionally share value 16 (the IANA
+# registry assigns 16 to both, disambiguated by context); in statusmap
+# below, whichever assignment runs last in the dir() loop wins for 16.
+BADVERS = 16 # Bad OPT Version [RFC 2671]
+BADSIG = 16 # TSIG Signature Failure [RFC 2845]
+BADKEY = 17 # Key not recognized [RFC 2845]
+BADTIME = 18 # Signature out of time window [RFC 2845]
+BADMODE = 19 # Bad TKEY Mode [RFC 2930]
+BADNAME = 20 # Duplicate key name [RFC 2930]
+BADALG = 21 # Algorithm not supported [RFC 2930]
+
+# Construct reverse mapping dictionary
+
+# dir() lists the names defined above; eval() recovers each numeric value,
+# so statusmap maps value -> symbolic name.
+_names = dir()
+statusmap = {}
+for _name in _names:
+    if _name[0] != '_': statusmap[eval(_name)] = _name
+
+def statusstr(status):
+    """Return the symbolic name for `status`, or its repr when unknown."""
+    if statusmap.has_key(status): return statusmap[status]
+    else: return `status`
+
+#
+# $Log: Status.py,v $
+# Revision 1.7.2.1 2011/03/16 20:06:39 customdesigned
+# Refer to explicit LICENSE file.
+#
+# Revision 1.7 2002/04/23 12:52:19 anthonybaxter
+# cleanup whitespace.
+#
+# Revision 1.6 2002/04/23 10:57:57 anthonybaxter
+# update to complete the list of response codes.
+#
+# Revision 1.5 2002/03/19 12:41:33 anthonybaxter
+# tabnannied and reindented everything. 4 space indent, no tabs.
+# yay.
+#
+# Revision 1.4 2002/03/19 12:26:13 anthonybaxter
+# death to leading tabs.
+#
+# Revision 1.3 2001/08/09 09:08:55 anthonybaxter
+# added identifying header to top of each file
+#
+# Revision 1.2 2001/07/19 06:57:07 anthony
+# cvs keywords added
+#
+#
--- /dev/null
+# -*- encoding: utf-8 -*-
+"""
+ $Id: Type.py,v 1.6.2.3 2011/03/16 20:06:39 customdesigned Exp $
+
+ This file is part of the pydns project.
+ Homepage: http://pydns.sourceforge.net
+
+ This code is covered by the standard Python License. See LICENSE for details.
+
+ TYPE values (section 3.2.2)
+"""
+
+A = 1 # a host address
+NS = 2 # an authoritative name server
+MD = 3 # a mail destination (Obsolete - use MX)
+MF = 4 # a mail forwarder (Obsolete - use MX)
+CNAME = 5 # the canonical name for an alias
+SOA = 6 # marks the start of a zone of authority
+MB = 7 # a mailbox domain name (EXPERIMENTAL)
+MG = 8 # a mail group member (EXPERIMENTAL)
+MR = 9 # a mail rename domain name (EXPERIMENTAL)
+NULL = 10 # a null RR (EXPERIMENTAL)
+WKS = 11 # a well known service description
+PTR = 12 # a domain name pointer
+HINFO = 13 # host information
+MINFO = 14 # mailbox or mail list information
+MX = 15 # mail exchange
+TXT = 16 # text strings
+AAAA = 28 # IPv6 AAAA records (RFC 1886)
+SRV = 33 # DNS RR for specifying the location of services (RFC 2782)
+SPF = 99 # TXT RR for Sender Policy Framework
+
+# Additional TYPE values from host.c source
+
+UNAME = 110
+MP = 240
+
+# QTYPE values (section 3.2.3)
+
+AXFR = 252 # A request for a transfer of an entire zone
+MAILB = 253 # A request for mailbox-related records (MB, MG or MR)
+MAILA = 254 # A request for mail agent RRs (Obsolete - see MX)
+ANY = 255 # A request for all records
+
+# Construct reverse mapping dictionary
+
+# dir() lists the names defined above; eval() recovers each numeric value,
+# so typemap maps value -> symbolic name.
+_names = dir()
+typemap = {}
+for _name in _names:
+    if _name[0] != '_': typemap[eval(_name)] = _name
+
+def typestr(type):
+    """Return the symbolic name for record type `type`, or its repr."""
+    if typemap.has_key(type): return typemap[type]
+    else: return `type`
+#
+# $Log: Type.py,v $
+# Revision 1.6.2.3 2011/03/16 20:06:39 customdesigned
+# Refer to explicit LICENSE file.
+#
+# Revision 1.6.2.2 2009/06/09 18:39:06 customdesigned
+# Built-in SPF support
+#
+# Revision 1.6.2.1 2007/05/22 20:20:39 customdesigned
+# Mark utf-8 encoding
+#
+# Revision 1.6 2002/03/19 12:41:33 anthonybaxter
+# tabnannied and reindented everything. 4 space indent, no tabs.
+# yay.
+#
+# Revision 1.5 2002/03/19 12:26:13 anthonybaxter
+# death to leading tabs.
+#
+# Revision 1.4 2001/08/09 09:08:55 anthonybaxter
+# added identifying header to top of each file
+#
+# Revision 1.3 2001/07/19 07:38:28 anthony
+# added type code for SRV. From Michael Ströder.
+#
+# Revision 1.2 2001/07/19 06:57:07 anthony
+# cvs keywords added
+#
+#
--- /dev/null
+# -*- encoding: utf-8 -*-
+# $Id: __init__.py,v 1.8.2.9 2011/03/16 20:06:39 customdesigned Exp $
+#
+# This file is part of the pydns project.
+# Homepage: http://pydns.sourceforge.net
+#
+# This code is covered by the standard Python License. See LICENSE for details.
+#
+
+# __init__.py for DNS class.
+
+__version__ = '2.3.5'
+
+import Type,Opcode,Status,Class
+from Base import DnsRequest, DNSError
+from Lib import DnsResult
+from Base import *
+from Lib import *
+# Backward-compatible public aliases for the main API classes.
+Error=DNSError
+from lazy import *
+Request = DnsRequest
+Result = DnsResult
+
+#
+# $Log: __init__.py,v $
+# Revision 1.8.2.9 2011/03/16 20:06:39 customdesigned
+# Refer to explicit LICENSE file.
+#
+# Revision 1.8.2.8 2011/03/03 21:57:15 customdesigned
+# Release 2.3.5
+#
+# Revision 1.8.2.7 2009/06/09 18:05:29 customdesigned
+# Release 2.3.4
+#
+# Revision 1.8.2.6 2008/08/01 04:01:25 customdesigned
+# Release 2.3.3
+#
+# Revision 1.8.2.5 2008/07/28 02:11:07 customdesigned
+# Bump version.
+#
+# Revision 1.8.2.4 2008/07/28 00:17:10 customdesigned
+# Randomize source ports.
+#
+# Revision 1.8.2.3 2008/07/24 20:10:55 customdesigned
+# Randomize tid in requests, and check in response.
+#
+# Revision 1.8.2.2 2007/05/22 21:06:52 customdesigned
+# utf-8 in __init__.py
+#
+# Revision 1.8.2.1 2007/05/22 20:39:20 customdesigned
+# Release 2.3.1
+#
+# Revision 1.8 2002/05/06 06:17:49 anthonybaxter
+# found that the old README file called itself release 2.2. So make
+# this one 2.3...
+#
+# Revision 1.7 2002/05/06 06:16:15 anthonybaxter
+# make some sort of reasonable version string. releasewards ho!
+#
+# Revision 1.6 2002/03/19 13:05:02 anthonybaxter
+# converted to class based exceptions (there goes the python1.4 compatibility :)
+#
+# removed a quite gross use of 'eval()'.
+#
+# Revision 1.5 2002/03/19 12:41:33 anthonybaxter
+# tabnannied and reindented everything. 4 space indent, no tabs.
+# yay.
+#
+# Revision 1.4 2001/11/26 17:57:51 stroeder
+# Added __version__
+#
+# Revision 1.3 2001/08/09 09:08:55 anthonybaxter
+# added identifying header to top of each file
+#
+# Revision 1.2 2001/07/19 06:57:07 anthony
+# cvs keywords added
+#
+#
--- /dev/null
+# $Id: lazy.py,v 1.5.2.4 2011/03/19 22:15:01 customdesigned Exp $
+#
+# This file is part of the pydns project.
+# Homepage: http://pydns.sourceforge.net
+#
+# This code is covered by the standard Python License. See LICENSE for details.
+#
+
+# routines for lazy people.
+import Base
+import string
+
+from Base import DNSError
+
+def revlookup(name):
+    "convenience routine for doing a reverse lookup of an address"
+    # revlookupall returns PTR names sorted by length, so [0] is shortest.
+    names = revlookupall(name)
+    if not names: return None
+    return names[0] # return shortest name
+
+def revlookupall(name):
+    "convenience routine for doing a reverse lookup of an address"
+    # FIXME: check for IPv6
+    # Build the in-addr.arpa name by reversing the dotted-quad octets;
+    # IPv4 only (no ip6.arpa handling, see FIXME above).
+    a = string.split(name, '.')
+    a.reverse()
+    b = string.join(a, '.')+'.in-addr.arpa'
+    names = dnslookup(b, qtype = 'ptr')
+    # this will return all records.
+    names.sort(key=str.__len__)
+    return names
+
+def dnslookup(name,qtype):
+    "convenience routine to return just answer data for any query type"
+    # Lazily discover name servers on first use.
+    if Base.defaults['server'] == []: Base.DiscoverNameServers()
+    result = Base.DnsRequest(name=name, qtype=qtype).req()
+    if result.header['status'] != 'NOERROR':
+        raise DNSError("DNS query status: %s" % result.header['status'])
+    elif len(result.answers) == 0 and Base.defaults['server_rotate']:
+        # check with next DNS server
+        result = Base.DnsRequest(name=name, qtype=qtype).req()
+        if result.header['status'] != 'NOERROR':
+            raise DNSError("DNS query status: %s" % result.header['status'])
+    return map(lambda x: x['data'],result.answers)
+
+def mxlookup(name):
+    """
+    convenience routine for doing an MX lookup of a name. returns a
+    sorted list of (preference, mail exchanger) records
+    """
+    # Tuples sort by preference first, which is the desired MX ordering.
+    l = dnslookup(name, qtype = 'mx')
+    l.sort()
+    return l
+
+#
+# $Log: lazy.py,v $
+# Revision 1.5.2.4 2011/03/19 22:15:01 customdesigned
+# Added rotation of name servers - SF Patch ID: 2795929
+#
+# Revision 1.5.2.3 2011/03/16 20:06:24 customdesigned
+# Expand convenience methods.
+#
+# Revision 1.5.2.2 2011/03/08 21:06:42 customdesigned
+# Address sourceforge patch requests 2981978, 2795932 to add revlookupall
+# and raise DNSError instead of IndexError on server fail.
+#
+# Revision 1.5.2.1 2007/05/22 20:23:38 customdesigned
+# Lazy call to DiscoverNameServers
+#
+# Revision 1.5 2002/05/06 06:14:38 anthonybaxter
+# reformat, move import to top of file.
+#
+# Revision 1.4 2002/03/19 12:41:33 anthonybaxter
+# tabnannied and reindented everything. 4 space indent, no tabs.
+# yay.
+#
+# Revision 1.3 2001/08/09 09:08:55 anthonybaxter
+# added identifying header to top of each file
+#
+# Revision 1.2 2001/07/19 06:57:07 anthony
+# cvs keywords added
+#
+#
--- /dev/null
+"""
+ $Id: win32dns.py,v 1.3.2.1 2007/05/22 20:26:49 customdesigned Exp $
+
+ Extract a list of TCP/IP name servers from the registry 0.1
+ 0.1 Strobl 2001-07-19
+ Usage:
+ RegistryResolve() returns a list of ip numbers (dotted quads), by
+ scouring the registry for addresses of name servers
+
+ Tested on Windows NT4 Server SP6a, Windows 2000 Pro SP2 and
+ Whistler Pro (XP) Build 2462 and Windows ME
+ ... all having a different registry layout wrt name servers :-/
+
+ Todo:
+
+ Program doesn't check whether an interface is up or down
+
+ (c) 2001 Copyright by Wolfgang Strobl ws@mystrobl.de,
+ License analog to the current Python license
+"""
+
+import string, re
+import _winreg
+
+def binipdisplay(s):
+    "convert a binary array of ip addresses to a python list"
+    # Registry values store IPv4 addresses as packed 4-byte groups; the
+    # input length must therefore be a multiple of 4.
+    if len(s)%4!= 0:
+        raise EnvironmentError # well ...
+    ol=[]
+    for i in range(len(s)/4):
+        s1=s[:4]
+        s=s[4:]
+        ip=[]
+        for j in s1:
+            ip.append(str(ord(j)))
+        ol.append(string.join(ip,'.'))
+    return ol
+
+def stringdisplay(s):
+    '''convert "d.d.d.d,d.d.d.d" to ["d.d.d.d","d.d.d.d"].
+    also handle u'd.d.d.d d.d.d.d', as reporting on SF
+    '''
+    # NOTE(review): `re` is already imported at module level; this local
+    # import is redundant but harmless.
+    import re
+    return map(str, re.split("[ ,]",s))
+
+def RegistryResolve():
+    """Return a list of name-server IPs scraped from the Windows registry.
+
+    Tries, in order: the NT/2000/XP Tcpip\Parameters key (DHCP-assigned,
+    then static), the Windows ME VxD\MSTCP key, the Win2000 per-adapter
+    DNSRegisteredAdapters keys, and the XP ("whistler") per-interface keys.
+    Registry keys that are absent are silently skipped.
+    """
+    nameservers=[]
+    x=_winreg.ConnectRegistry(None,_winreg.HKEY_LOCAL_MACHINE)
+    try:
+        y= _winreg.OpenKey(x,
+         r"SYSTEM\CurrentControlSet\Services\Tcpip\Parameters")
+    except EnvironmentError: # so it isn't NT/2000/XP
+        # windows ME, perhaps?
+        try: # for Windows ME
+            y= _winreg.OpenKey(x,
+              r"SYSTEM\CurrentControlSet\Services\VxD\MSTCP")
+            nameserver,dummytype=_winreg.QueryValueEx(y,'NameServer')
+            if nameserver and not (nameserver in nameservers):
+                nameservers.extend(stringdisplay(nameserver))
+        except EnvironmentError:
+            pass
+        return nameservers # no idea
+    try:
+        # Prefer the DHCP-assigned servers, fall back to static ones.
+        nameserver = _winreg.QueryValueEx(y, "DhcpNameServer")[0].split()
+    except:
+        nameserver = _winreg.QueryValueEx(y, "NameServer")[0].split()
+    if nameserver:
+        nameservers=nameserver
+    # NOTE(review): the value below is re-read but never used; looks like
+    # leftover code -- confirm before removing.
+    nameserver = _winreg.QueryValueEx(y,"NameServer")[0]
+    _winreg.CloseKey(y)
+    try: # for win2000
+        y= _winreg.OpenKey(x,
+         r"SYSTEM\CurrentControlSet\Services\Tcpip\Parameters\DNSRegisteredAdapters")
+        # Enumerate adapter subkeys until EnumKey runs out (raises).
+        for i in range(1000):
+            try:
+                n=_winreg.EnumKey(y,i)
+                z=_winreg.OpenKey(y,n)
+                dnscount,dnscounttype=_winreg.QueryValueEx(z,
+                    'DNSServerAddressCount')
+                dnsvalues,dnsvaluestype=_winreg.QueryValueEx(z,
+                    'DNSServerAddresses')
+                nameservers.extend(binipdisplay(dnsvalues))
+                _winreg.CloseKey(z)
+            except EnvironmentError:
+                break
+        _winreg.CloseKey(y)
+    except EnvironmentError:
+        pass
+#
+    try: # for whistler
+        y= _winreg.OpenKey(x,
+         r"SYSTEM\CurrentControlSet\Services\Tcpip\Parameters\Interfaces")
+        for i in range(1000):
+            try:
+                n=_winreg.EnumKey(y,i)
+                z=_winreg.OpenKey(y,n)
+                try:
+                    nameserver,dummytype=_winreg.QueryValueEx(z,'NameServer')
+                    if nameserver and not (nameserver in nameservers):
+                        nameservers.extend(stringdisplay(nameserver))
+                except EnvironmentError:
+                    pass
+                _winreg.CloseKey(z)
+            except EnvironmentError:
+                break
+        _winreg.CloseKey(y)
+    except EnvironmentError:
+        #print "Key Interfaces not found, just do nothing"
+        pass
+#
+    _winreg.CloseKey(x)
+    return nameservers
+
+if __name__=="__main__":
+    # Manual test: print the discovered registry name servers.
+    print "Name servers:",RegistryResolve()
+
+#
+# $Log: win32dns.py,v $
+# Revision 1.3.2.1 2007/05/22 20:26:49 customdesigned
+# Fix win32 nameserver discovery.
+#
+# Revision 1.3 2002/05/06 06:15:31 anthonybaxter
+# apparently some versions of windows return servers as unicode
+# string with space sep, rather than strings with comma sep.
+# *sigh*
+#
+# Revision 1.2 2002/03/19 12:41:33 anthonybaxter
+# tabnannied and reindented everything. 4 space indent, no tabs.
+# yay.
+#
+# Revision 1.1 2001/08/09 09:22:28 anthonybaxter
+# added what I hope is win32 resolver lookup support. I'll need to try
+# and figure out how to get the CVS checkout onto my windows machine to
+# make sure it works (wow, doing something other than games on the
+# windows machine :)
+#
+# Code from Wolfgang.Strobl@gmd.de
+# win32dns.py from
+# http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/66260
+#
+# Really, ParseResolvConf() should be renamed "FindNameServers" or
+# some such.
+#
+#
--- /dev/null
+#!/usr/bin/python
+
+import commands
+import os
+import sys
+import re
+import socket
+import struct
+import time
+
+#import ctypes
+# TODO: maybe when there's more time; for better readability.
+#class History(Structure):
+# _fields_ = [ ("version", c_int),
+# ("index", c_int),
+# ("history", c_float * HISTORY_LENGTH), ]
+
+# allocate fixed space on disk to save persistent state.
+# what to store in this file?
+# slice_history : x,x,x,x,x,...
+# root_history : y,y,y,y,y,y...
+
+HISTORY_LENGTH = 24*30 # 30 days, if checked once an hour
+HISTORY_fmt = ('iif', 'f'*HISTORY_LENGTH )
+HISTORY_version = 1
+
+
+def get_network_bytes(interface):
+    """Return (rx_bytes, tx_bytes) as floats for `interface`, read from
+    /proc/net/dev, or None when the interface is not listed.
+    """
+    for line in open('/proc/net/dev', 'r'):
+        if interface in line:
+            # NOTE(review): substring match -- 'eth1' also matches an
+            # 'eth10' line; first match wins.
+            data = line.split('%s:' % interface)[1].split()
+            # Column 0 is rx bytes, column 8 is tx bytes in /proc/net/dev.
+            rx_bytes, tx_bytes = (data[0], data[8])
+            return (float(rx_bytes), float(tx_bytes))
+    return None
+
+def read_safe_history(filename):
+    """
+    This function guarantees that space is preserved.
+    If one of the file operations fail, it will throw an exception.
+
+    Returns (index, last_value, history): the circular-buffer write index,
+    the previous raw counter value, and the HISTORY_LENGTH-float history.
+    A missing, corrupt, or wrong-version file is (re)created empty.
+    """
+    if os.path.exists(filename):
+        # read existing data
+        fd = os.open(filename, os.O_RDONLY)
+        a = os.read(fd, os.path.getsize(filename))
+        try:
+            # Header is (version, index, last_value); reject other versions.
+            (version, i, last_value) = struct.unpack_from(HISTORY_fmt[0], a, 0)
+            assert version == HISTORY_version
+            history = struct.unpack_from(HISTORY_fmt[1], a, struct.calcsize(HISTORY_fmt[0]))
+            history = [ h for h in history ]
+        except:
+            # TODO: in the future a more clever version migration might be nice.
+            os.remove(filename) # just nuke the old version
+            # create for the first time, with empty data
+            (i, last_value, history) = (0, 0.0, [0]*HISTORY_LENGTH)
+            write_safe_history(filename, (i, last_value, history), False)
+
+        os.close(fd)
+
+    else:
+        # create for the first time, with empty data
+        (i, last_value, history) = (0, 0.0, [0]*HISTORY_LENGTH)
+        write_safe_history(filename, (i, last_value, history), False)
+
+    return (i, last_value, history)
+
+def write_safe_history(filename, (i, last_value, history), check_for_file=True):
+    """Overwrite the fixed-size history file in place; returns bytes written.
+
+    Writing without O_TRUNC over a file of constant size keeps the disk
+    space reserved between runs.
+    """
+    # length should match, and the file should already exist
+    assert len(history) == HISTORY_LENGTH
+    if check_for_file:
+        assert os.path.exists(filename)
+
+    # open without TRUNC nor APPEND, then seek to beginning to preserve space on disk
+    fd = os.open(filename, os.O_WRONLY|os.O_CREAT)
+    os.lseek(fd, 0, 0)
+    ret = os.write(fd, struct.pack(HISTORY_fmt[0], HISTORY_version, i, last_value))
+    ret += os.write(fd, struct.pack(HISTORY_fmt[1], *history))
+    os.close(fd)
+    return ret
+
+def add_to_history((i, last_value, history), data):
+    """Record the delta between `data` and the previous counter sample into
+    the circular buffer and advance the index; always updates last_value.
+
+    NOTE(review): control flow via assert -- under `python -O` the asserts
+    are stripped and a sample would be recorded even after a counter reset.
+    """
+    try:
+        # note, this won't be the case when reboot occurs, or on first run.
+        assert last_value > 0.0
+        assert data > last_value
+        #print "Recording: %s"% (data-last_value)
+        history[i] = data-last_value
+        i += 1
+        i = i % HISTORY_LENGTH
+    except:
+        # on init when last_value is 0, or reboot when counter resets.
+        # do not record data except for last_value, do not increment index
+        pass
+
+    last_value = data
+    return (i, last_value, history)
+
+def record_data(filename, data):
+    """Read the history file, fold in the new counter sample, write it back."""
+    rh = read_safe_history(filename)
+    return write_safe_history(filename, add_to_history(rh, data))
+
+def get_percentile(filename, percentile):
+    """Return the `percentile` (0..1) bandwidth in bytes/sec computed from
+    the non-zero samples of the history file, or 0 when no samples exist.
+    """
+    # NOTE(review): the middle element is last_value, not a version --
+    # the name last_version is misleading.
+    (idx,last_version, history) = read_safe_history(filename)
+    # Rotate the circular buffer so samples are in chronological order.
+    summary = history[idx:] + history[:idx]
+    measured = filter(lambda x: x != 0, summary)
+    if len(measured) == 0:
+        return 0
+
+    # convert bytes to bw
+    # Each sample is a per-day byte count; divide by 86400 s for bytes/sec.
+    bw = map(lambda x: x/(60*60*24.0), measured)
+    bw.sort()
+    l = len(bw)
+    # NOTE(review): percentile == 1.0 would index bw[l] -> IndexError.
+    pct = bw[int(l*percentile)]
+    #print bw
+
+    return pct
+
+def timed(method):
+    """Decorator: wrap `method` so it returns (result, elapsed_seconds)."""
+
+    def timeit(*args, **kw):
+        ts = time.time()
+        result = method(*args, **kw)
+        te = time.time()
+
+        #print '%r (%r, %r) %2.2f sec' % \
+        #      (method.__name__, args, kw, te-ts)
+        return (result, te-ts)
+
+    return timeit
+
+@timed
+def check_dns(ip, protocol='udp'):
+ try:
+ #ip = ip[:-1] + "0"
+ ro = DNS.Request(name="www.yahoo.com", qtype="A", server=ip)
+ r = ro.req(protocol=protocol)
+ r = "OK"
+ except DNS.Base.DNSError, e:
+ r = "Error: %s" % e
+ return r
+
+def get_nameserver_ips(filename):
+    """Extract IPv4 addresses from a resolv.conf-style or ifcfg-style file.
+
+    Returns a dict mapping each distinct address string to 0 (used as an
+    ordered-ish set); empty when the file does not exist.
+    """
+    ip_re = re.compile("\d+\.\d+\.\d+\.\d+")
+    ret = {}
+    if not os.path.exists(filename):
+        return ret
+
+    f = open(filename, 'r')
+
+    # resolv.conf: any whitespace-separated field that looks like an IP.
+    if 'resolv' in filename:
+        for l in f:
+            for field in l.strip().split():
+                if ip_re.match(field) and field not in ret:
+                    ret[field] = 0
+
+    # ifcfg files: DNS1="d.d.d.d" style assignments; strip quotes first.
+    if 'ifcfg' in filename:
+        for l in f:
+            if 'DNS' not in l:
+                continue
+            for field in l.strip().split('='):
+                field = field.replace('"', '')
+                field = field.replace("'", '')
+                if ip_re.match(field) and field not in ret:
+                    ret[field] = 0
+    return ret
+
+def main():
+    """Sample byte counters from the first present ethN interface, record
+    them in the history files, and print 90th-percentile bandwidth figures
+    (total, rx, tx) on one line.
+    """
+
+    for interface in ['eth0', 'eth1', 'eth2', 'eth3']:
+        t_bytes = get_network_bytes(interface)
+        if t_bytes != None:
+            break
+    if t_bytes == None:
+        # massive fail. cannot continue.
+        sys.exit(1)
+
+    # take diff b/t sum(t_bytes) and last_value
+    record_data("bw_history.dat", sum(t_bytes))
+    record_data("bw_history_rx.dat", t_bytes[0])
+    record_data("bw_history_tx.dat", t_bytes[1])
+
+    print get_percentile("bw_history.dat", 0.90),
+    print get_percentile("bw_history_rx.dat", 0.90),
+    print get_percentile("bw_history_tx.dat", 0.90),
+
+    print ""
+
+
+if __name__ == "__main__":
+    main()
+
+
+# TODO: comon?
+#url = """http://comon.cs.princeton.edu/status/tabulator.cgi?table=table_nodeviewshort&select='dns1udp>80 && dns2udp>80&&name=="%s"'&format=formatcsv&dumpcols='dns1udp,dns1tcp,dns2udp,dns2tcp'""" % os.popen("hostname").read().strip()
--- /dev/null
+#!/usr/bin/python
+
+# can't probe comon directly from node.
+# http://comon.cs.princeton.edu/status/tabulator.cgi?table=table_nodeviewshort&select='dns1udp>80 && dns2udp>80&&name=="planetlab-01.cs.princeton.edu"'&format=formatcsv&dumpcols='dns1udp,dns1tcp,dns2udp,dns2tcp'
+
+import commands
+import os
+import re
+import socket
+import struct
+import DNS
+import time
+#import ctypes
+# TODO: maybe when there's more time; for better readability.
+#class History(Structure):
+# _fields_ = [ ("version", c_int),
+# ("index", c_int),
+# ("history", c_float * HISTORY_LENGTH), ]
+
+# allocate fixed space on disk to save persistent state.
+# what to store in this file?
+# slice_history : x,x,x,x,x,...
+# root_history : y,y,y,y,y,y...
+
+HISTORY_LENGTH = 24*30 # 30 days, if checked once an hour
+HISTORY_fmt = ('ii', 'f'*HISTORY_LENGTH )
+HISTORY_version = 1
+
+def read_safe_history(filename):
+    """
+    This function guarantees that space is preserved.
+    If one of the file operations fail, it will throw an exception.
+
+    Returns (index, history): the circular-buffer write index and the
+    HISTORY_LENGTH-float history.  A missing, corrupt, or wrong-version
+    file is (re)created empty.
+    """
+    if os.path.exists(filename):
+        # read existing data
+        fd = os.open(filename, os.O_RDONLY)
+        a = os.read(fd, os.path.getsize(filename))
+        try:
+            # Header is (version, index); reject other versions.
+            (version, i) = struct.unpack_from(HISTORY_fmt[0], a, 0)
+            assert version == HISTORY_version
+            history = struct.unpack_from(HISTORY_fmt[1], a, struct.calcsize(HISTORY_fmt[0]))
+            history = [ h for h in history ]
+        except:
+            # TODO: in the future a more clever version migration might be nice.
+            os.remove(filename) # just nuke the old version
+            # create for the first time, with empty data
+            (i, history) = (0, [0]*HISTORY_LENGTH)
+            write_safe_history(filename, (i, history), False)
+
+        os.close(fd)
+
+    else:
+        # create for the first time, with empty data
+        (i, history) = (0, [0]*HISTORY_LENGTH)
+        write_safe_history(filename, (i, history), False)
+
+    return (i, history)
+
+def write_safe_history(filename, (i, history), check_for_file=True):
+    """Overwrite the fixed-size history file in place; returns bytes written.
+
+    Writing without O_TRUNC over a file of constant size keeps the disk
+    space reserved between runs.
+    """
+    # length should match, and the file should already exist
+    assert len(history) == HISTORY_LENGTH
+    if check_for_file:
+        assert os.path.exists(filename)
+
+    # open without TRUNC nor APPEND, then seek to beginning to preserve space on disk
+    fd = os.open(filename, os.O_WRONLY|os.O_CREAT)
+    os.lseek(fd, 0, 0)
+    ret = os.write(fd, struct.pack(HISTORY_fmt[0], HISTORY_version, i))
+    ret += os.write(fd, struct.pack(HISTORY_fmt[1], *history))
+    os.close(fd)
+    return ret
+
+def add_to_history((i, history), data):
+    """Store `data` at the current circular-buffer slot and advance the
+    index (wrapping at HISTORY_LENGTH)."""
+    history[i] = data
+    i += 1
+    i = i % HISTORY_LENGTH
+    return (i, history)
+
+def record_status_record(filename, status):
+    """Append one status sample to the history file and write it back."""
+    rh = read_safe_history(filename)
+    return write_safe_history(filename, add_to_history(rh, status))
+
+def get_success_ratio(filename):
+    """Return the fraction of non-zero samples that are positive.
+
+    Samples are query durations; failures are recorded as -1 and unused
+    slots as 0, so this is successes / attempts.  Returns 0 with no data.
+    """
+    rh = read_safe_history(filename)
+    idx = rh[0]
+    # Rotate the circular buffer so samples are in chronological order.
+    summary = rh[1][idx:] + rh[1][:idx]
+    measured = filter(lambda x: x != 0, summary)
+    if len(measured) == 0:
+        return 0
+
+    return float(len(filter(lambda x: x > 0, measured)))/float(len(measured))
+
+def timed(method):
+    """Decorator: wrap `method` so it returns (result, elapsed_seconds)."""
+
+    def timeit(*args, **kw):
+        ts = time.time()
+        result = method(*args, **kw)
+        te = time.time()
+
+        #print '%r (%r, %r) %2.2f sec' % \
+        #      (method.__name__, args, kw, te-ts)
+        return (result, te-ts)
+
+    return timeit
+
+@timed
+def check_dns(ip, protocol='udp'):
+    """Query the A record of www.yahoo.com against nameserver `ip`.
+
+    Returns "OK" on success or "Error: <reason>" on DNS failure; the
+    @timed decorator turns that into (result, elapsed_seconds).
+    """
+    try:
+        #ip = ip[:-1] + "0"
+        ro = DNS.Request(name="www.yahoo.com", qtype="A", server=ip)
+        r = ro.req(protocol=protocol)
+        r = "OK"
+    except DNS.Base.DNSError, e:
+        r = "Error: %s" % e
+    return r
+
+def get_nameserver_ips(filename):
+    """Extract IPv4 addresses from a resolv.conf-style or ifcfg-style file.
+
+    Returns a dict mapping each distinct address string to 0 (used as an
+    ordered-ish set); empty when the file does not exist.
+    """
+    ip_re = re.compile("\d+\.\d+\.\d+\.\d+")
+    ret = {}
+    if not os.path.exists(filename):
+        return ret
+
+    f = open(filename, 'r')
+
+    # resolv.conf: any whitespace-separated field that looks like an IP.
+    if 'resolv' in filename:
+        for l in f:
+            for field in l.strip().split():
+                if ip_re.match(field) and field not in ret:
+                    ret[field] = 0
+
+    # ifcfg files: DNS1="d.d.d.d" style assignments; strip quotes first.
+    if 'ifcfg' in filename:
+        for l in f:
+            if 'DNS' not in l:
+                continue
+            for field in l.strip().split('='):
+                field = field.replace('"', '')
+                field = field.replace("'", '')
+                if ip_re.match(field) and field not in ret:
+                    ret[field] = 0
+    return ret
+
+def main():
+    """Probe every nameserver from the node's and the slice's resolv.conf
+    over UDP and TCP, record per-server timing/failure history, and print a
+    one-line summary (config match flag, success ratios, CoMon DNS status).
+    """
+
+    root_ips = get_nameserver_ips('/etc/resolv.conf')
+    slice_ips = get_nameserver_ips( '/vservers/princeton_comon/etc/resolv.conf')
+
+    # A failed probe is recorded as -1; a success records its duration.
+    for i,ip in enumerate(root_ips.keys()):
+        (s,t) = check_dns(ip, 'udp')
+        if "Error" in s: t = -1
+        record_status_record("dns_history_root_udp%s.dat" % i, t)
+
+        (s,t) = check_dns(ip, 'tcp')
+        if "Error" in s: t = -1
+        record_status_record("dns_history_root_tcp%s.dat" % i, t)
+
+    for i,ip in enumerate(slice_ips.keys()):
+        (s,t) = check_dns(ip, 'udp')
+        if "Error" in s: t = -1
+        record_status_record("dns_history_slice_udp%s.dat" % i, t)
+
+        (s,t) = check_dns(ip, 'tcp')
+        if "Error" in s: t = -1
+        record_status_record("dns_history_slice_tcp%s.dat" % i, t)
+
+    if set(root_ips.keys()) == set(slice_ips.keys()):
+        print "CONF-ROOT_SLICE-MATCH",
+    else:
+        print "CONF-ROOT_SLICE-MISMATCH",
+    #if set(root_ips.keys()) != set(slice_ips.keys()):
+    #if set(root_ips.keys()) != set(ifcfg_ips.keys()) and len(set(ifcfg_ips.keys())) > 0:
+    #    print "CONF-IFCFG_ROOT-MISMATCH",
+
+    # NOTE(review): hard-codes exactly two servers per file set; with fewer
+    # servers the missing files are auto-created and report 0.
+    print get_success_ratio('dns_history_root_udp0.dat'),
+    print get_success_ratio('dns_history_root_udp1.dat'),
+    print get_success_ratio('dns_history_slice_udp0.dat'),
+    print get_success_ratio('dns_history_slice_udp1.dat'),
+    # Ask the local CoMon daemon (port 3121) for its DNSFail line, if any.
+    c_dns = os.popen("curl -s http://localhost:3121 | grep -a DNSFail").read().strip()
+    if len(c_dns) > 9 and "DNS" in c_dns:
+        c_dns = "cm " + c_dns[9:]
+    else:
+        c_dns = ""
+    print c_dns,
+
+    print ""
+
+
+if __name__ == "__main__":
+    main()
+
+
+# TODO: comon?
+#url = """http://comon.cs.princeton.edu/status/tabulator.cgi?table=table_nodeviewshort&select='dns1udp>80 && dns2udp>80&&name=="%s"'&format=formatcsv&dumpcols='dns1udp,dns1tcp,dns2udp,dns2tcp'""" % os.popen("hostname").read().strip()
--- /dev/null
+#!/usr/bin/python
+
+import commands
+import os
+import sys
+import re
+import socket
+import struct
+import time
+
+#import ctypes
+# TODO: maybe when there's more time; for better readability.
+#class History(Structure):
+# _fields_ = [ ("version", c_int),
+# ("index", c_int),
+# ("history", c_float * HISTORY_LENGTH), ]
+
+# allocate fixed space on disk to save persistent state.
+# what to store in this file?
+# slice_history : x,x,x,x,x,...
+# root_history : y,y,y,y,y,y...
+
+HISTORY_LENGTH = 24*30 # 30 days, if checked once an hour
+HISTORY_fmt = ('ii', 'f'*HISTORY_LENGTH )
+HISTORY_version = 1
+
+
+def get_network_bytes(interface):
+    """Return (rx_bytes, tx_bytes) as floats for `interface`, read from
+    /proc/net/dev, or None when the interface is not listed.
+    """
+    for line in open('/proc/net/dev', 'r'):
+        if interface in line:
+            data = line.split('%s:' % interface)[1].split()
+            # Column 0 is rx bytes, column 8 is tx bytes in /proc/net/dev.
+            rx_bytes, tx_bytes = (data[0], data[8])
+            return (float(rx_bytes), float(tx_bytes))
+    return None
+
+def get_uptime():
+    """Return the system uptime in seconds (first field of /proc/uptime),
+    or None if the file yields no lines."""
+    for line in open('/proc/uptime', 'r'):
+        data = line.split()[0]
+        return float(data)
+    return None
+
+def read_safe_history(filename):
+    """
+    This function guarantees that space is preserved.
+    If one of the file operations fail, it will throw an exception.
+
+    Returns (index, history): the circular-buffer write index and the
+    HISTORY_LENGTH-float history.  A missing, corrupt, or wrong-version
+    file is (re)created empty.
+    """
+    if os.path.exists(filename):
+        # read existing data
+        fd = os.open(filename, os.O_RDONLY)
+        a = os.read(fd, os.path.getsize(filename))
+        try:
+            # Header is (version, index); reject other versions.
+            (version, i) = struct.unpack_from(HISTORY_fmt[0], a, 0)
+            assert version == HISTORY_version
+            history = struct.unpack_from(HISTORY_fmt[1], a, struct.calcsize(HISTORY_fmt[0]))
+            history = [ h for h in history ]
+        except:
+            # TODO: in the future a more clever version migration might be nice.
+            os.remove(filename) # just nuke the old version
+            # create for the first time, with empty data
+            (i, history) = (0, [0]*HISTORY_LENGTH)
+            write_safe_history(filename, (i, history), False)
+
+        os.close(fd)
+
+    else:
+        # create for the first time, with empty data
+        (i, history) = (0, [0]*HISTORY_LENGTH)
+        write_safe_history(filename, (i, history), False)
+
+    return (i, history)
+
+def write_safe_history(filename, (i, history), check_for_file=True):
+    """Overwrite the fixed-size history file in place; returns bytes written.
+
+    Writing without O_TRUNC over a file of constant size keeps the disk
+    space reserved between runs.
+    """
+    # length should match, and the file should already exist
+    assert len(history) == HISTORY_LENGTH
+    if check_for_file:
+        assert os.path.exists(filename)
+
+    # open without TRUNC nor APPEND, then seek to beginning to preserve space on disk
+    fd = os.open(filename, os.O_WRONLY|os.O_CREAT)
+    os.lseek(fd, 0, 0)
+    ret = os.write(fd, struct.pack(HISTORY_fmt[0], HISTORY_version, i ))
+    ret += os.write(fd, struct.pack(HISTORY_fmt[1], *history))
+    os.close(fd)
+    return ret
+
+def add_to_history((i, history), data):
+    """Store `data` at the current slot and advance the index, but only
+    when data > 0.
+
+    NOTE(review): gate implemented via assert -- stripped under `python -O`,
+    in which case non-positive samples would be recorded too.
+    """
+    try:
+        assert data > 0.0
+        history[i] = data
+        i += 1
+        i = i % HISTORY_LENGTH
+    except:
+        # do not record data if data <= 0
+        pass
+    return (i, history)
+
+def record_data(filename, data):
+    """Read the history file, fold in the new sample, write it back."""
+    rh = read_safe_history(filename)
+    return write_safe_history(filename, add_to_history(rh, data))
+
+def get_avg_uptime(filename):
+    """Return the mean of the non-zero samples in the history file, or 0
+    when there are none."""
+    (idx, history) = read_safe_history(filename)
+    # Rotate the circular buffer so samples are in chronological order.
+    summary = history[idx:] + history[:idx]
+    measured = filter(lambda x: x != 0, summary)
+    if len(measured) == 0:
+        return 0
+    return float(sum(measured))/float(len(measured))
+
+def timed(method):
+    """Decorator: wrap `method` so it returns (result, elapsed_seconds)."""
+
+    def timeit(*args, **kw):
+        ts = time.time()
+        result = method(*args, **kw)
+        te = time.time()
+
+        #print '%r (%r, %r) %2.2f sec' % \
+        #      (method.__name__, args, kw, te-ts)
+        return (result, te-ts)
+
+    return timeit
+
@timed
def check_dns(ip, protocol='udp'):
    """Query nameserver ip for www.yahoo.com/A over the given protocol.

    Returns "OK" on success or "Error: <detail>" on DNS failure; the @timed
    decorator means callers actually receive (status, elapsed_seconds).
    """
    try:
        request = DNS.Request(name="www.yahoo.com", qtype="A", server=ip)
        # The response itself is discarded; only success/failure matters.
        request.req(protocol=protocol)
        status = "OK"
    except DNS.Base.DNSError as e:  # 'as' form: valid on Python 2.6+ and 3.x
        status = "Error: %s" % e
    return status
+
def get_nameserver_ips(filename):
    """Collect dotted-quad IP strings from a resolv.conf or ifcfg-style file.

    Returns a dict mapping each IP found to 0; an empty dict when the
    file does not exist.
    """
    # Raw string for the regex (avoids invalid "\d" string escapes).
    ip_re = re.compile(r"\d+\.\d+\.\d+\.\d+")
    ret = {}
    if not os.path.exists(filename):
        return ret

    # 'with' guarantees the handle is closed; the original leaked it.
    with open(filename, 'r') as f:
        if 'resolv' in filename:
            # resolv.conf style: any whitespace-separated token that looks
            # like an IP (e.g. "nameserver 8.8.8.8").
            for line in f:
                for field in line.strip().split():
                    if ip_re.match(field) and field not in ret:
                        ret[field] = 0

        if 'ifcfg' in filename:
            # ifcfg-* style: DNS1=/DNS2= assignments, values may be quoted.
            for line in f:
                if 'DNS' not in line:
                    continue
                for field in line.strip().split('='):
                    field = field.replace('"', '').replace("'", '')
                    if ip_re.match(field) and field not in ret:
                        ret[field] = 0
    return ret
+
def main():
    """Record the current uptime sample and print the running average."""
    ut = get_uptime()
    if ut is None:  # identity test for None, not '== None'
        # Could not measure uptime at all; nothing sensible to record.
        sys.exit(1)

    record_data("uptime_history.dat", ut)

    # Single-argument parenthesized print is valid under Python 2 and 3.
    print(get_avg_uptime("uptime_history.dat"))
+
+if __name__ == "__main__":
+ main()
+
+
+# TODO: comon?
+#url = """http://comon.cs.princeton.edu/status/tabulator.cgi?table=table_nodeviewshort&select='dns1udp>80 && dns2udp>80&&name=="%s"'&format=formatcsv&dumpcols='dns1udp,dns1tcp,dns2udp,dns2tcp'""" % os.popen("hostname").read().strip()
--- /dev/null
#!/bin/bash
#
# Fetch the myops bootstrap tarball from the PLC server and, when the
# server copy is newer than the local one, unpack and run it.

if [ -f /etc/planetlab/plc_config ]; then
    source /etc/planetlab/plc_config
else
    PLC_SLICE_PREFIX='pl'
fi

# NOTE: IPADDR is a template token, presumably substituted at packaging
# time -- do not change it here.
IP=IPADDR
DIR=multiops
FILE=bootstrap.tar.gz
HDIR=/home/${PLC_SLICE_PREFIX}_myops

mkdir -p "$HDIR"
# Abort rather than curl/untar into the wrong directory if cd fails.
cd "$HDIR" || exit 1

# Remember the tarball's mtime so we can tell whether curl fetched a new
# copy; curl -z downloads only when the remote file is newer than $FILE.
if [ -f "$FILE" ] ; then
    mod_time_before=$(stat -c %Y "$FILE")
    CURL_ARGS="-z $FILE"
else
    mod_time_before=0
    CURL_ARGS=""
fi

# $CURL_ARGS stays unquoted on purpose: it must word-split into
# separate arguments (or into nothing when empty).
curl $CURL_ARGS -s -O --insecure "https://$IP/$DIR/$FILE"

if [ -f "$FILE" ] ; then
    mod_time_after=$(stat -c %Y "$FILE")
else
    mod_time_after=0
fi

if [[ $mod_time_after -gt $mod_time_before ]] ; then
    # The mtime advanced, so an update was downloaded: unpack and run it.
    tar -xzf "$FILE"
    chmod 755 ./*.sh ./*.py
    ./bootstrap.sh
fi