# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# Copyright (c) 2005, 2007 XenSource Ltd.
-# Copyright (c) 2010, 2011, 2012 Nicira, Inc.
+# Copyright (c) 2010, 2011, 2012, 2013 Nicira, Inc.
#
# To add new entries to the bugtool, you need to:
OPENVSWITCH_LOG_DIR = '@LOGDIR@/'
OPENVSWITCH_DEFAULT_SWITCH = '/etc/default/openvswitch-switch' # Debian
OPENVSWITCH_SYSCONFIG_SWITCH = '/etc/sysconfig/openvswitch' # RHEL
-OPENVSWITCH_DEFAULT_CONTROLLER = '/etc/default/openvswitch-controller'
OPENVSWITCH_CONF_DB = '@DBDIR@/conf.db'
+OPENVSWITCH_COMPACT_DB = '@DBDIR@/bugtool-compact-conf.db'
OPENVSWITCH_VSWITCHD_PID = '@RUNDIR@/ovs-vswitchd.pid'
VAR_LOG_DIR = '/var/log/'
VAR_LOG_CORE_DIR = '/var/log/core'
CAP_NETWORK_CONFIG = 'network-config'
CAP_NETWORK_INFO = 'network-info'
CAP_NETWORK_STATUS = 'network-status'
-CAP_OPENVSWITCH_LOGS = 'ovs-system-logs'
+CAP_OPENVSWITCH_LOGS = 'ovs-system-logs'
CAP_PROCESS_LIST = 'process-list'
CAP_SYSTEM_LOGS = 'system-logs'
CAP_SYSTEM_SERVICES = 'system-services'
cap_sizes = {}
unlimited_data = False
dbg = False
-# Default value for the number of rotated logs.
+# Default value for the number of days to collect logs.
log_days = 20
+log_last_mod_time = None
+free_disk_space = None
def cap(key, pii=PII_MAYBE, min_size=-1, max_size=-1, min_time=-1,
max_time=-1, mime=MIME_TEXT, checked=True, hidden=False):
max_time=5)
cap(CAP_DISK_INFO, PII_MAYBE, max_size=50*KB,
max_time=20)
-cap(CAP_HARDWARE_INFO, PII_MAYBE, max_size=30*KB,
+cap(CAP_HARDWARE_INFO, PII_MAYBE, max_size=2*MB,
max_time=20)
cap(CAP_KERNEL_INFO, PII_MAYBE, max_size=120*KB,
max_time=5)
cap(CAP_MULTIPATH, PII_MAYBE, max_size=20*KB,
max_time=10)
cap(CAP_NETWORK_CONFIG, PII_IF_CUSTOMIZED,
- min_size=0, max_size=40*KB)
+ min_size=0, max_size=5*MB)
cap(CAP_NETWORK_INFO, PII_YES, max_size=50*MB,
max_time=30)
cap(CAP_NETWORK_STATUS, PII_YES, max_size=-1,
data[label] = {'cap': cap, 'cmd_args': args, 'filter': filter,
'binary': binary}
-def file_output(cap, path_list, newest_first=False):
+
+def file_output(cap, path_list, newest_first=False, last_mod_time=None):
"""
If newest_first is True, the list of files in path_list is sorted
by file modification time in descending order, else its sorted
s = os.stat(path)
except OSError, e:
continue
- path_entries.append((path, s))
+ if last_mod_time is None or s.st_mtime >= last_mod_time:
+ path_entries.append((path, s))
mtime = lambda(path, stat): stat.st_mtime
path_entries.sort(key=mtime, reverse=newest_first)
for p in path_entries:
- if unlimited_data or caps[cap][MAX_SIZE] == -1 or \
- cap_sizes[cap] < caps[cap][MAX_SIZE]:
+ if check_space(cap, p[0], p[1].st_size):
data[p] = {'cap': cap, 'filename': p[0]}
- cap_sizes[cap] += p[1].st_size
- else:
- output("Omitting %s, size constraint of %s exceeded" % (p[0], cap))
-def tree_output(cap, path, pattern=None, negate=False, newest_first=False):
+
+def tree_output(cap, path, pattern=None, negate=False, newest_first=False,
+ last_mod_time=None):
"""
Walks the directory tree rooted at path. Files in current dir are processed
before files in sub-dirs.
for root, dirs, files in os.walk(path):
fns = [fn for fn in [os.path.join(root, f) for f in files]
if os.path.isfile(fn) and matches(fn, pattern, negate)]
- file_output(cap, fns, newest_first=newest_first)
+ file_output(cap, fns, newest_first=newest_first,
+ last_mod_time=last_mod_time)
+
+
+def prefix_output(cap, prefix, newest_first=False, last_mod_time=None):
+    """
+    Collect every file whose full path starts with `prefix` — i.e. a log
+    file together with its rotated siblings (foo.log.1, foo.log.2.gz, ...).
+
+    Walks the directory containing `prefix` and hands all matching paths
+    to file_output(), forwarding the newest_first ordering flag and the
+    last_mod_time cutoff (files modified before it are skipped there).
+    """
+    fns = []
+    # os.walk also descends into subdirectories; startswith() keeps only
+    # paths under the prefix itself.
+    for root, dirs, files in os.walk(os.path.dirname(prefix)):
+        fns += [fn for fn in [os.path.join(root, f) for f in files]
+               if fn.startswith(prefix)]
+    file_output(cap, fns, newest_first=newest_first,
+                last_mod_time=last_mod_time)
+
def func_output(cap, label, func):
if cap in entries:
t = str(func).split()
data[label] = {'cap': cap, 'func': func}
-def log_output(cap, logs, newest_first=False):
- global log_days
- file_output(cap, logs)
- file_output(cap,
- ['%s.%d' % (f, n) for n in range(1, log_days+1) for f in logs], \
- newest_first=newest_first)
- file_output(cap,
- ['%s.%d.gz' % (f, n) for n in range(1, log_days+1) for f in logs], \
- newest_first=newest_first)
-
def collect_data():
process_lists = {}
f = open(v['filename'], 'r')
s = f.read()
f.close()
- if unlimited_data or caps[cap][MAX_SIZE] == -1 or \
- cap_sizes[cap] < caps[cap][MAX_SIZE]:
+ if check_space(cap, v['filename'], len(s)):
v['output'] = StringIOmtime(s)
- cap_sizes[cap] += len(s)
- else:
- output("Omitting %s, size constraint of %s exceeded" % (v['filename'], cap))
except:
pass
elif v.has_key('func'):
s = v['func'](cap)
except Exception, e:
s = str(e)
- if unlimited_data or caps[cap][MAX_SIZE] == -1 or \
- cap_sizes[cap] < caps[cap][MAX_SIZE]:
+ if check_space(cap, k, len(s)):
v['output'] = StringIOmtime(s)
- cap_sizes[cap] += len(s)
- else:
- output("Omitting %s, size constraint of %s exceeded" % (k, cap))
run_procs(process_lists.values())
def main(argv=None):
global ANSWER_YES_TO_ALL, SILENT_MODE
- global entries, data, dbg, unlimited_data, log_days
+ global entries, data, dbg, unlimited_data, free_disk_space
+ global log_days, log_last_mod_time
# Filter flags
only_ovs_info = False
if k == '--log-days':
log_days = int(v)
+
if len(params) != 1:
print >>sys.stderr, "Invalid additional arguments", str(params)
return 2
print >>sys.stderr, "Cannot set both '--outfd' and '--outfile'"
return 2
+ if output_file is not None and not unlimited_data:
+ free_disk_space = get_free_disk_space(output_file) * 90 / 100
+
+ log_last_mod_time = int(time.time()) - log_days * 86400
+
if ANSWER_YES_TO_ALL:
output("Warning: '--yestoall' argument provided, will not prompt for individual files.")
tree_output(CAP_NETWORK_CONFIG, SYSCONFIG_NETWORK_SCRIPTS, ROUTE_RE)
file_output(CAP_NETWORK_CONFIG, [SYSCONFIG_NETWORK, RESOLV_CONF, NSSWITCH_CONF, HOSTS])
file_output(CAP_NETWORK_CONFIG, [NTP_CONF, IPTABLES_CONFIG, HOSTS_ALLOW, HOSTS_DENY])
- file_output(CAP_NETWORK_CONFIG, [OPENVSWITCH_CONF_DB])
+ file_output(CAP_NETWORK_CONFIG, [OPENVSWITCH_DEFAULT_SWITCH,
+ OPENVSWITCH_SYSCONFIG_SWITCH])
cmd_output(CAP_NETWORK_INFO, [IFCONFIG, '-a'])
cmd_output(CAP_NETWORK_INFO, [ROUTE, '-n'])
tree_output(CAP_NETWORK_INFO, PROC_NET_VLAN_DIR)
cmd_output(CAP_NETWORK_INFO, [TC, '-s', 'qdisc'])
file_output(CAP_NETWORK_INFO, [PROC_NET_SOFTNET_STAT])
+
+ collect_ovsdb()
if os.path.exists(OPENVSWITCH_VSWITCHD_PID):
cmd_output(CAP_NETWORK_STATUS, [OVS_DPCTL, 'show', '-s'])
for d in dp_list():
system_logs = ([ VAR_LOG_DIR + x for x in
['crit.log', 'kern.log', 'daemon.log', 'user.log',
'syslog', 'messages', 'secure', 'debug', 'dmesg', 'boot']])
+ for log in system_logs:
+ prefix_output(CAP_SYSTEM_LOGS, log, last_mod_time=log_last_mod_time)
+
ovs_logs = ([ OPENVSWITCH_LOG_DIR + x for x in
['ovs-vswitchd.log', 'ovsdb-server.log',
'ovs-xapi-sync.log', 'ovs-monitor-ipsec.log', 'ovs-ctl.log']])
- log_output(CAP_SYSTEM_LOGS, system_logs)
- log_output(CAP_OPENVSWITCH_LOGS, ovs_logs)
+ for log in ovs_logs:
+ prefix_output(CAP_OPENVSWITCH_LOGS, log, last_mod_time=log_last_mod_time)
if not os.path.exists('/var/log/dmesg') and not os.path.exists('/var/log/boot'):
cmd_output(CAP_SYSTEM_LOGS, [DMESG])
for c in caps.keys():
print >>sys.stderr, " %s (%d, %d)" % (c, caps[c][MAX_SIZE],
cap_sizes[c])
+
+ cleanup_ovsdb()
return 0
def dump_scsi_hosts(cap):
return output.getvalue().splitlines()
return []
+def collect_ovsdb():
+    """
+    Schedule the OVSDB configuration database for collection under
+    CAP_NETWORK_STATUS.
+
+    If conf.db is larger than 10 MB, first compact it with `ovsdb-tool
+    compact` into OPENVSWITCH_COMPACT_DB and collect the compacted copy
+    instead; otherwise collect conf.db directly.  Silently does nothing
+    if conf.db is absent or a stat/unlink fails.
+    """
+    if not os.path.isfile(OPENVSWITCH_CONF_DB):
+        return
+
+    # Databases above this size get compacted before collection.
+    max_size = 10*MB
+
+    try:
+        if os.path.getsize(OPENVSWITCH_CONF_DB) > max_size:
+            # Remove any stale compacted copy from a previous run.
+            if os.path.isfile(OPENVSWITCH_COMPACT_DB):
+                os.unlink(OPENVSWITCH_COMPACT_DB)
+
+            output = StringIO.StringIO()
+            max_time = 5
+            procs = [ProcOutput(['ovsdb-tool', 'compact',
+                                OPENVSWITCH_CONF_DB, OPENVSWITCH_COMPACT_DB],
+                                max_time, output)]
+            # run_procs() takes a list of process lists, hence [procs].
+            run_procs([procs])
+            file_output(CAP_NETWORK_STATUS, [OPENVSWITCH_COMPACT_DB])
+        else:
+            file_output(CAP_NETWORK_STATUS, [OPENVSWITCH_CONF_DB])
+    except OSError, e:
+        # Best effort: skip database collection on filesystem errors.
+        return
+
+def cleanup_ovsdb():
+    """
+    Remove the temporary compacted database created by collect_ovsdb(),
+    if present.  Failures are ignored — this is best-effort cleanup at
+    the end of a bugtool run.
+    """
+    try:
+        if os.path.isfile(OPENVSWITCH_COMPACT_DB):
+            os.unlink(OPENVSWITCH_COMPACT_DB)
+    # NOTE(review): bare except also swallows KeyboardInterrupt/SystemExit;
+    # `except OSError` would suffice here — confirm before tightening.
+    except:
+        return
+
def fd_usage(cap):
output = ''
fd_dict = {}
cmd_output(cap, [MPPUTIL, '-g', group])
def load_plugins(just_capabilities=False, filter=None):
+ global log_last_mod_time
def getText(nodelist):
rc = ""
for node in nodelist:
if el.tagName == "files":
newest_first = getBoolAttr(el, 'newest_first')
if el.getAttribute("type") == "logs":
- log_output(dir, getText(el.childNodes).split(),
- newest_first=newest_first)
+ for fn in getText(el.childNodes).split():
+ prefix_output(dir, fn, newest_first=newest_first,
+ last_mod_time=log_last_mod_time)
else:
file_output(dir, getText(el.childNodes).split(),
newest_first=newest_first)
if pattern == '': pattern = None
negate = getBoolAttr(el, 'negate')
newest_first = getBoolAttr(el, 'newest_first')
- tree_output(dir, getText(el.childNodes),
- pattern and re.compile(pattern) or None,
- negate=negate, newest_first=newest_first)
+ if el.getAttribute("type") == "logs":
+ tree_output(dir, getText(el.childNodes),
+ pattern and re.compile(pattern) or None,
+ negate=negate, newest_first=newest_first,
+ last_mod_time=log_last_mod_time)
+ else:
+ tree_output(dir, getText(el.childNodes),
+ pattern and re.compile(pattern) or None,
+ negate=negate, newest_first=newest_first)
elif el.tagName == "command":
label = el.getAttribute("label")
if label == '': label = None
return pids
+def check_space(cap, name, size):
+    """
+    Decide whether `size` bytes of data for item `name` may be collected
+    under capability `cap`.
+
+    Returns False (and logs why) when collecting it would exceed either
+    the remaining free disk budget (`free_disk_space`, None = unlimited)
+    or the capability's MAX_SIZE quota.  On success, charges `size`
+    against both budgets and returns True.
+    """
+    global free_disk_space
+    if free_disk_space is not None and size > free_disk_space:
+        output("Omitting %s, out of disk space (requested: %u, allowed: %u)" %
+            (name, size, free_disk_space))
+        return False
+    elif unlimited_data or caps[cap][MAX_SIZE] == -1 or \
+        cap_sizes[cap] < caps[cap][MAX_SIZE]:
+        # Accepted: account for the data in the per-cap and disk budgets.
+        cap_sizes[cap] += size
+        if free_disk_space is not None:
+            free_disk_space -= size
+        return True
+    else:
+        output("Omitting %s, size constraint of %s exceeded" % (name, cap))
+        return False
+
+
+def get_free_disk_space(path):
+    """
+    Return the free space, in bytes, of the filesystem that `path` (or,
+    if it does not exist yet, its nearest existing ancestor) lives on.
+
+    Walking up the tree handles output paths whose file/directories have
+    not been created yet.  f_frsize * f_bfree counts blocks free to root
+    (not f_bavail) — presumably fine since bugtool runs as root; confirm.
+    """
+    path = os.path.abspath(path)
+    while not os.path.exists(path):
+        path = os.path.dirname(path)
+    s = os.statvfs(path)
+    return s.f_frsize * s.f_bfree
+
+
class StringIOmtime(StringIO.StringIO):
def __init__(self, buf=''):
StringIO.StringIO.__init__(self, buf)