X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=utilities%2Fbugtool%2Fovs-bugtool.in;h=f91a3b32c5d41a00c9964e36e419b0e6ec6be86e;hb=5ca1ba484bd9ade5116a49cf241cb98219d7d696;hp=422b2dd44653864e3ed4fa3484caf9c2b4fd76eb;hpb=cdc5f488517d7f012df6ff01e70a58af3db332a6;p=sliver-openvswitch.git diff --git a/utilities/bugtool/ovs-bugtool.in b/utilities/bugtool/ovs-bugtool.in index 422b2dd44..f91a3b32c 100755 --- a/utilities/bugtool/ovs-bugtool.in +++ b/utilities/bugtool/ovs-bugtool.in @@ -14,7 +14,7 @@ # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA # # Copyright (c) 2005, 2007 XenSource Ltd. -# Copyright (c) 2010, 2011 Nicira Networks. +# Copyright (c) 2010, 2011, 2012 Nicira, Inc. # # To add new entries to the bugtool, you need to: @@ -67,7 +67,7 @@ OS_RELEASE = platform.release() APT_SOURCES_LIST = "/etc/apt/sources.list" APT_SOURCES_LIST_D = "/etc/apt/sources.list.d" BUG_DIR = "/var/log/ovs-bugtool" -PLUGIN_DIR = "@sysconfdir@/openvswitch/bugtool-plugins" +PLUGIN_DIR = "@pkgdatadir@/bugtool-plugins" GRUB_CONFIG = '/boot/grub/menu.lst' BOOT_KERNEL = '/boot/vmlinuz-' + OS_RELEASE BOOT_INITRD = '/boot/initrd-' + OS_RELEASE + '.img' @@ -111,11 +111,11 @@ HOSTS = '/etc/hosts' HOSTS_ALLOW = '/etc/hosts.allow' HOSTS_DENY = '/etc/hosts.deny' DHCP_LEASE_DIR = ['/var/lib/dhclient', '/var/lib/dhcp3'] -OPENVSWITCH_LOG_DIR = '@LOGDIR@' +OPENVSWITCH_LOG_DIR = '@LOGDIR@/' OPENVSWITCH_DEFAULT_SWITCH = '/etc/default/openvswitch-switch' # Debian OPENVSWITCH_SYSCONFIG_SWITCH = '/etc/sysconfig/openvswitch' # RHEL OPENVSWITCH_DEFAULT_CONTROLLER = '/etc/default/openvswitch-controller' -OPENVSWITCH_CONF_DB = '@sysconfdir@/openvswitch/conf.db' +OPENVSWITCH_CONF_DB = '@DBDIR@/conf.db' OPENVSWITCH_VSWITCHD_PID = '@RUNDIR@/ovs-vswitchd.pid' COLLECTD_LOGS_DIR = '/var/lib/collectd/rrd' VAR_LOG_DIR = '/var/log/' @@ -135,7 +135,6 @@ KRB5_CONF = '/etc/krb5.conf' os.environ['PATH'] = '/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:@pkgdatadir@/scripts' ARP = 'arp' -BRCTL = 'brctl' CAT = 'cat' CHKCONFIG = 'chkconfig' DF = 'df' @@ -308,7 +307,7 @@ def output(x): def output_ts(x): output("[%s] %s" % (time.strftime("%x %X %Z"), x)) -def cmd_output(cap, args, label = None, filter = None): +def cmd_output(cap, args, label=None, filter=None): if cap in entries: if not label: if isinstance(args, list): @@ -319,30 +318,42 @@ def cmd_output(cap, args, label = None, filter = None): label = args data[label] = {'cap': cap, 'cmd_args': args, 'filter': filter} -def file_output(cap, path_list): +def file_output(cap, path_list, newest_first=False): + """ + If newest_first is True, the list of files in path_list is sorted + by file modification time in descending order, else its sorted + in ascending order. 
+ """ if cap in entries: - for p in path_list: - if os.path.exists(p): - if unlimited_data or caps[cap][MAX_SIZE] == -1 or \ - cap_sizes[cap] < caps[cap][MAX_SIZE]: - data[p] = {'cap': cap, 'filename': p} - try: - s = os.stat(p) - cap_sizes[cap] += s.st_size - except: - pass - else: - output("Omitting %s, size constraint of %s exceeded" % (p, cap)) + path_entries = [] + for path in path_list: + try: + s = os.stat(path) + except OSError, e: + continue + path_entries.append((path, s)) -def tree_output(cap, path, pattern = None, negate = False): + mtime = lambda(path, stat): stat.st_mtime + path_entries.sort(key=mtime, reverse=newest_first) + for p in path_entries: + if unlimited_data or caps[cap][MAX_SIZE] == -1 or \ + cap_sizes[cap] < caps[cap][MAX_SIZE]: + data[p] = {'cap': cap, 'filename': p[0]} + cap_sizes[cap] += p[1].st_size + else: + output("Omitting %s, size constraint of %s exceeded" % (p[0], cap)) + +def tree_output(cap, path, pattern=None, negate=False, newest_first=False): + """ + Walks the directory tree rooted at path. Files in current dir are processed + before files in sub-dirs. + """ if cap in entries: if os.path.exists(path): - for f in os.listdir(path): - fn = os.path.join(path, f) - if os.path.isfile(fn) and matches(fn, pattern, negate): - file_output(cap, [fn]) - elif os.path.isdir(fn): - tree_output(cap, fn, pattern, negate) + for root, dirs, files in os.walk(path): + fns = [fn for fn in [os.path.join(root, f) for f in files] + if os.path.isfile(fn) and matches(fn, pattern, negate)] + file_output(cap, fns, newest_first=newest_first) def func_output(cap, label, func): if cap in entries: @@ -388,10 +399,14 @@ def collect_data(): run_procs(process_lists.values()) -def main(argv = None): +def main(argv=None): global ANSWER_YES_TO_ALL, SILENT_MODE global entries, data, dbg, unlimited_data + # Filter flags + only_ovs_info = False + collect_all_info = True + # we need access to privileged files, exit if we are not running as root if os.getuid() != 0: print >>sys.stderr, "Error: ovs-bugtool must be run as root" @@ -408,7 +423,7 @@ def main(argv = None): (options, params) = getopt.gnu_getopt( argv, 'sy', ['capabilities', 'silent', 'yestoall', 'entries=', 'output=', 'outfd=', 'outfile=', 'all', 'unlimited', - 'debug']) + 'debug', 'ovs']) except getopt.GetoptError, opterr: print >>sys.stderr, opterr return 2 @@ -466,6 +481,10 @@ def main(argv = None): dbg = True ProcOutput.debug = True + if k == '--ovs': + only_ovs_info = True + collect_all_info = False + if len(params) != 1: print >>sys.stderr, "Invalid additional arguments", str(params) return 2 @@ -535,7 +554,7 @@ exclude those logs from the archive. for d in disk_list(): cmd_output(CAP_HDPARM_T, [HDPARM, '-tT', '/dev/%s' % d]) - file_output(CAP_KERNEL_INFO, [PROC_VERSION, PROC_MODULES, PROC_DEVICES, + file_output(CAP_KERNEL_INFO, [PROC_VERSION, PROC_MODULES, PROC_DEVICES, PROC_FILESYSTEMS, PROC_CMDLINE]) cmd_output(CAP_KERNEL_INFO, [ZCAT, PROC_CONFIG], label='config') cmd_output(CAP_KERNEL_INFO, [SYSCTL, '-A']) @@ -549,7 +568,7 @@ exclude those logs from the archive. cmd_output(CAP_MULTIPATH, [DMSETUP, 'table']) func_output(CAP_MULTIPATH, 'multipathd_topology', multipathd_topology) cmd_output(CAP_MULTIPATH, [MPPUTIL, '-a']) - if CAP_MULTIPATH in entries: + if CAP_MULTIPATH in entries and collect_all_info: dump_rdac_groups(CAP_MULTIPATH) tree_output(CAP_NETWORK_CONFIG, SYSCONFIG_NETWORK_SCRIPTS, IFCFG_RE) @@ -564,20 +583,20 @@ exclude those logs from the archive. 
cmd_output(CAP_NETWORK_STATUS, [NETSTAT, '-an']) for dir in DHCP_LEASE_DIR: tree_output(CAP_NETWORK_STATUS, dir) - cmd_output(CAP_NETWORK_STATUS, [BRCTL, 'show']) cmd_output(CAP_NETWORK_STATUS, [IPTABLES, '-nL']) for p in os.listdir('/sys/class/net/'): try: f = open('/sys/class/net/%s/type' % p, 'r') t = f.readline() f.close() - if int(t) == 1: + if os.path.islink('/sys/class/net/%s/device' % p) and int(t) == 1: # ARPHRD_ETHER cmd_output(CAP_NETWORK_STATUS, [ETHTOOL, p]) cmd_output(CAP_NETWORK_STATUS, [ETHTOOL, '-S', p]) cmd_output(CAP_NETWORK_STATUS, [ETHTOOL, '-k', p]) cmd_output(CAP_NETWORK_STATUS, [ETHTOOL, '-i', p]) cmd_output(CAP_NETWORK_STATUS, [ETHTOOL, '-c', p]) + if int(t) == 1: cmd_output(CAP_NETWORK_STATUS, [TC, '-s', '-d', 'class', 'show', 'dev', p]) except: @@ -586,9 +605,8 @@ exclude those logs from the archive. tree_output(CAP_NETWORK_STATUS, PROC_NET_VLAN_DIR) cmd_output(CAP_NETWORK_STATUS, [TC, '-s', 'qdisc']) file_output(CAP_NETWORK_STATUS, [PROC_NET_SOFTNET_STAT]) - tree_output(CAP_NETWORK_STATUS, OPENVSWITCH_LOG_DIR) if os.path.exists(OPENVSWITCH_VSWITCHD_PID): - cmd_output(CAP_NETWORK_STATUS, [OVS_DPCTL, 'show']) + cmd_output(CAP_NETWORK_STATUS, [OVS_DPCTL, 'show', '-s']) for d in dp_list(): cmd_output(CAP_NETWORK_STATUS, [OVS_OFCTL, 'show', d]) cmd_output(CAP_NETWORK_STATUS, [OVS_OFCTL, 'dump-flows', d]) @@ -610,17 +628,18 @@ exclude those logs from the archive. cmd_output(CAP_PROCESS_LIST, [PS, 'wwwaxf', '-eo', 'pid,tty,stat,time,nice,psr,pcpu,pmem,nwchan,wchan:25,args'], label='process-tree') func_output(CAP_PROCESS_LIST, 'fd_usage', fd_usage) + logs = ([ VAR_LOG_DIR + x for x in + [ 'crit.log', 'kern.log', 'daemon.log', 'user.log', + 'syslog', 'messages', 'secure', 'debug', 'dmesg', 'boot' ]] + + [ OPENVSWITCH_LOG_DIR + x for x in + [ 'ovs-vswitchd.log', 'ovsdb-server.log', + 'ovs-xapi-sync.log', 'ovs-monitor-ipsec.log' ]]) + file_output(CAP_SYSTEM_LOGS, logs) file_output(CAP_SYSTEM_LOGS, - [ VAR_LOG_DIR + x for x in - [ 'crit.log', 'kern.log', 'daemon.log', 'user.log', 'syslog', - 'messages', 'secure', 'debug', 'dmesg', 'boot'] + - [ f % n for n in range(1, 20) \ - for f in ['crit.log.%d', 'crit.log.%d.gz', - 'kern.log.%d', 'kern.log.%d.gz', - 'daemon.log.%d', 'daemon.log.%d.gz', - 'user.log.%d', 'user.log.%d.gz', - 'messages.%d', 'messages.%d.gz', - 'syslog.%d', 'syslog.%d.gz']]]) + [ '%s.%d' % (f, n) for n in range(20) for f in logs ]) + file_output(CAP_SYSTEM_LOGS, + [ '%s.%d.gz' % (f, n) for n in range(20) for f in logs ]) + if not os.path.exists('/var/log/dmesg') and not os.path.exists('/var/log/boot'): cmd_output(CAP_SYSTEM_LOGS, [DMESG]) @@ -637,15 +656,44 @@ exclude those logs from the archive. 
tree_output(CAP_YUM, APT_SOURCES_LIST_D) cmd_output(CAP_YUM, [DPKG_QUERY, '-W', '-f=${Package} ${Version} ${Status}\n'], 'dpkg-packages') + # Filter out ovs relevant information if --ovs option passed + # else collect all information + filters = set() + if only_ovs_info: + filters.add('ovs') + ovs_info_caps = [CAP_NETWORK_STATUS, CAP_SYSTEM_LOGS, + CAP_NETWORK_CONFIG] + ovs_info_list = ['process-tree'] + # We cannot use iteritems, since we modify 'data' as we pass through + for (k, v) in data.items(): + cap = v['cap'] + if 'filename' in v: + info = k[0] + else: + info = k + if info not in ovs_info_list and cap not in ovs_info_caps: + del data[k] + + if filters: + filter = ",".join(filters) + else: + filter = None + try: - load_plugins() + load_plugins(filter=filter) except: pass - + # permit the user to filter out data - for k in sorted(data.keys()): - if not ANSWER_YES_TO_ALL and not yes("Include '%s'? [Y/n]: " % k): - del data[k] + # We cannot use iteritems, since we modify 'data' as we pass through + for (k, v) in sorted(data.items()): + cap = v['cap'] + if 'filename' in v: + key = k[0] + else: + key = k + if not ANSWER_YES_TO_ALL and not yes("Include '%s'? [Y/n]: " % key): + del data[k] # collect selected data now output_ts('Running commands to collect data') @@ -760,7 +808,7 @@ def module_info(cap): def multipathd_topology(cap): - pipe = Popen([MULTIPATHD, '-k'], bufsize=1, stdin=PIPE, + pipe = Popen([MULTIPATHD, '-k'], bufsize=1, stdin=PIPE, stdout=PIPE, stderr=dev_null) stdout, stderr = pipe.communicate('show topology') @@ -824,7 +872,7 @@ def dump_rdac_groups(cap): group, _ = line.split(None, 1) cmd_output(cap, [MPPUTIL, '-g', group]) -def load_plugins(just_capabilities = False): +def load_plugins(just_capabilities=False, filter=None): def getText(nodelist): rc = "" for node in nodelist: @@ -832,13 +880,13 @@ def load_plugins(just_capabilities = False): rc += node.data return rc.encode() - def getBoolAttr(el, attr, default = False): + def getBoolAttr(el, attr, default=False): ret = default val = el.getAttribute(attr).lower() if val in ['true', 'false', 'yes', 'no']: ret = val in ['true', 'yes'] return ret - + for dir in [d for d in os.listdir(PLUGIN_DIR) if os.path.isdir(os.path.join(PLUGIN_DIR, d))]: if not caps.has_key(dir): if not os.path.exists("%s/%s.xml" % (PLUGIN_DIR, dir)): @@ -868,20 +916,32 @@ def load_plugins(just_capabilities = False): if just_capabilities: continue - + plugdir = os.path.join(PLUGIN_DIR, dir) for file in [f for f in os.listdir(plugdir) if f.endswith('.xml')]: xmldoc = parse(os.path.join(plugdir, file)) assert xmldoc.documentElement.tagName == "collect" for el in xmldoc.documentElement.getElementsByTagName("*"): + filters_tmp = el.getAttribute("filters") + if filters_tmp == '': + filters = [] + else: + filters = filters_tmp.split(',') + if not(filter is None or filter in filters): + continue if el.tagName == "files": - file_output(dir, getText(el.childNodes).split()) + newest_first = getBoolAttr(el, 'newest_first') + file_output(dir, getText(el.childNodes).split(), + newest_first=newest_first) elif el.tagName == "directory": pattern = el.getAttribute("pattern") if pattern == '': pattern = None negate = getBoolAttr(el, 'negate') - tree_output(dir, getText(el.childNodes), pattern and re.compile(pattern) or None, negate) + newest_first = getBoolAttr(el, 'newest_first') + tree_output(dir, getText(el.childNodes), + pattern and re.compile(pattern) or None, + negate=negate, newest_first=newest_first) elif el.tagName == "command": label = 
el.getAttribute("label") if label == '': label = None @@ -965,7 +1025,7 @@ def make_zip(subdir, output_file): pass finally: zf.close() - + output ('Writing archive %s successful.' % filename) if SILENT_MODE: print filename @@ -1045,7 +1105,7 @@ def update_cap(cap, k, v): caps[cap] = tuple(l) -def size_of_dir(d, pattern = None, negate = False): +def size_of_dir(d, pattern=None, negate=False): if os.path.isdir(d): return size_of_all([os.path.join(d, fn) for fn in os.listdir(d)], pattern, negate) @@ -1053,7 +1113,7 @@ def size_of_dir(d, pattern = None, negate = False): return 0 -def size_of_all(files, pattern = None, negate = False): +def size_of_all(files, pattern=None, negate=False): return sum([size_of(f, pattern, negate) for f in files]) @@ -1159,6 +1219,7 @@ class ProcOutput: def terminate(self): if self.running: try: + self.proc.stdout.close() os.kill(self.proc.pid, SIGTERM) except: pass @@ -1171,6 +1232,7 @@ class ProcOutput: line = self.proc.stdout.readline() if line == '': # process exited + self.proc.stdout.close() self.status = self.proc.wait() self.proc = None self.running = False @@ -1233,7 +1295,7 @@ def pidof(name): class StringIOmtime(StringIO.StringIO): - def __init__(self, buf = ''): + def __init__(self, buf=''): StringIO.StringIO.__init__(self, buf) self.mtime = time.time()
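
The heart of the change to file_output() is that file lists can now be ordered by modification time before the per-capability size cap is applied. Below is a minimal, self-contained sketch of that ordering under the same assumptions the patch makes (files that vanish between listing and stat'ing are silently skipped); the helper name sort_paths_by_mtime is illustrative only, the patch inlines this logic inside file_output().

import os

def sort_paths_by_mtime(path_list, newest_first=False):
    # Pair each existing path with its stat result; missing paths are skipped.
    entries = []
    for path in path_list:
        try:
            entries.append((path, os.stat(path)))
        except OSError:
            continue
    # reverse=True puts the most recently modified file first, so a
    # size-capped capability keeps the newest logs and omits the oldest.
    entries.sort(key=lambda entry: entry[1].st_mtime, reverse=newest_first)
    return entries

# e.g. sort_paths_by_mtime(['/var/log/syslog', '/var/log/syslog.1'], newest_first=True)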
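
The new --ovs mode prunes the collected entries down to Open vSwitch-relevant capabilities. One detail worth noting is that, after this patch, keys in the data dictionary come in two kinds: command entries are keyed by their label string, while file entries are keyed by a (path, stat) tuple, which is why the code reads k[0] whenever 'filename' is present. A rough sketch of that pruning step, with the parameter names ovs_caps and keep_labels chosen here purely for illustration:

def filter_ovs_entries(data, ovs_caps, keep_labels):
    # Iterate over a copy because entries are deleted during the walk.
    for k, v in list(data.items()):
        name = k[0] if 'filename' in v else k
        if v['cap'] not in ovs_caps and name not in keep_labels:
            del data[k]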