From 4d67defe979b409f82bbee2168bfe90ffc184867 Mon Sep 17 00:00:00 2001 From: Stephen Soltesz Date: Fri, 19 Dec 2008 22:08:13 +0000 Subject: [PATCH] modified findbad and findbadpcu to use scanapi. need to combine these files. controllers.py references scanapi. --- findbad.py | 309 +----------------- findbadpcu.py | 277 +--------------- monitor/scanapi.py | 3 +- web/MonitorWeb/dev.cfg | 3 +- web/MonitorWeb/monitorweb/controllers.py | 6 +- .../monitorweb/templates/pculist.kid | 2 - 6 files changed, 18 insertions(+), 582 deletions(-) diff --git a/findbad.py b/findbad.py index 4d1beed..4c73e45 100755 --- a/findbad.py +++ b/findbad.py @@ -16,6 +16,7 @@ from monitor.database.info.model import FindbadNodeRecordSync, FindbadNodeRecord from monitor.sources import comon from monitor.wrapper import plc, plccache +from monitor.scanapi import * from nodequery import verify,query_to_dict,node_select import traceback @@ -36,314 +37,11 @@ round = 1 global_round = round count = 0 -def collectNMAP(nodename, cohash): - #### RUN NMAP ############################### - values = {} - nmap = util.command.CMD() - print "nmap -oG - -P0 -p22,80,806 %s | grep Host:" % nodename - (oval,eval) = nmap.run_noexcept("nmap -oG - -P0 -p22,80,806 %s | grep Host:" % nodename) - # NOTE: an empty / error value for oval, will still work. - (values['port_status'], continue_probe) = nmap_port_status(oval) - - values['date_checked'] = datetime.now() - - return (nodename, values) - -def collectPingAndSSH(nodename, cohash): - ### RUN PING ###################### - ping = command.CMD() - (oval,errval) = ping.run_noexcept("ping -c 1 -q %s | grep rtt" % nodename) - - try: - values = {} - - if oval == "": - # An error occurred - values['ping_status'] = False - else: - values['ping_status'] = True - - try: - for port in [22, 806]: - ssh = command.SSH('root', nodename, port) - - (oval, errval) = ssh.run_noexcept2(""" <<\EOF - echo "{" - echo ' "kernel_version":"'`uname -a`'",' - echo ' "bmlog":"'`ls /tmp/bm.log`'",' - echo ' "bootcd_version":"'`cat /mnt/cdrom/bootme/ID`'",' - echo ' "nm_status":"'`ps ax | grep nm.py | grep -v grep`'",' - echo ' "fs_status":"'`touch /var/log/monitor 2>&1`'",' - echo ' "dns_status":"'`host boot.planet-lab.org 2>&1`'",' - echo ' "princeton_comon_dir":"'`ls -d /vservers/princeton_comon`'",' - - ID=`grep princeton_comon /etc/passwd | awk -F : '{if ( $3 > 500 ) { print $3}}'` - echo ' "princeton_comon_running":"'`ls -d /proc/virtual/$ID`'",' - echo ' "princeton_comon_procs":"'`vps ax | grep $ID | grep -v grep | wc -l`'",' - echo "}" -EOF """) - - values['ssh_error'] = errval - if len(oval) > 0: - #print "OVAL: %s" % oval - values.update(eval(oval)) - values['ssh_portused'] = port - break - else: - values.update({'kernel_version': "", 'bmlog' : "", 'bootcd_version' : '', - 'nm_status' : '', - 'fs_status' : '', - 'dns_status' : '', - 'princeton_comon_dir' : "", - 'princeton_comon_running' : "", - 'princeton_comon_procs' : "", 'ssh_portused' : None}) - except: - print traceback.print_exc() - sys.exit(1) - - ### RUN SSH ###################### - b_getbootcd_id = True - #ssh = command.SSH('root', nodename) - #oval = "" - #errval = "" - #(oval, errval) = ssh.run_noexcept('echo `uname -a ; ls /tmp/bm.log`') - - oval = values['kernel_version'] - if "2.6.17" in oval or "2.6.2" in oval: - values['ssh_status'] = True - values['observed_category'] = 'PROD' - if "bm.log" in values['bmlog']: - values['observed_status'] = 'DEBUG' - else: - values['observed_status'] = 'BOOT' - elif "2.6.12" in oval or "2.6.10" in oval: - 
values['ssh_status'] = True - values['observed_category'] = 'OLDPROD' - if "bm.log" in values['bmlog']: - values['observed_status'] = 'DEBUG' - else: - values['observed_status'] = 'BOOT' - - # NOTE: on 2.6.8 kernels, with 4.2 bootstrapfs, the chroot command fails. I have no idea why. - elif "2.4" in oval or "2.6.8" in oval: - b_getbootcd_id = False - values['ssh_status'] = True - values['observed_category'] = 'OLDBOOTCD' - values['observed_status'] = 'DEBUG' - elif oval != "": - values['ssh_status'] = True - values['observed_category'] = 'UNKNOWN' - if "bm.log" in values['bmlog']: - values['observed_status'] = 'DEBUG' - else: - values['observed_status'] = 'BOOT' - else: - # An error occurred. - b_getbootcd_id = False - values['ssh_status'] = False - values['observed_category'] = 'ERROR' - values['observed_status'] = 'DOWN' - val = errval.strip() - values['ssh_error'] = val - values['kernel_version'] = "" - - #values['kernel_version'] = val - - if b_getbootcd_id: - # try to get BootCD for all nodes that are not 2.4 nor inaccessible - #(oval, errval) = ssh.run_noexcept('cat /mnt/cdrom/bootme/ID') - oval = values['bootcd_version'] - if "BootCD" in oval: - values['bootcd_version'] = oval - if "v2" in oval and \ - ( nodename is not "planetlab1.cs.unc.edu" and \ - nodename is not "planetlab2.cs.unc.edu" ): - values['observed_category'] = 'OLDBOOTCD' - else: - values['bootcd_version'] = "" - else: - values['bootcd_version'] = "" - - # TODO: get bm.log for debug nodes. - # 'zcat /tmp/bm.log' - - #(oval, errval) = ssh.run_noexcept('ps ax | grep nm.py | grep -v grep') - oval = values['nm_status'] - if "nm.py" in oval: - values['nm_status'] = "Y" - else: - values['nm_status'] = "N" - - continue_slice_check = True - #(oval, errval) = ssh.run_noexcept('ls -d /vservers/princeton_comon') - oval = values['princeton_comon_dir'] - if "princeton_comon_dir" in oval: - values['princeton_comon_dir'] = True - else: - values['princeton_comon_dir'] = False - continue_slice_check = False - - if continue_slice_check: - #(oval, errval) = ssh.run_noexcept('ID=`grep princeton_comon /etc/passwd | awk -F : "{if ( \\\$3 > 500 ) { print \\\$3}}"`; ls -d /proc/virtual/$ID') - oval = values['princeton_comon_running'] - if len(oval) > len('/proc/virtual/'): - values['princeton_comon_running'] = True - else: - values['princeton_comon_running'] = False - continue_slice_check = False - else: - values['princeton_comon_running'] = False - - if continue_slice_check: - #(oval, errval) = ssh.run_noexcept('ID=`grep princeton_comon /etc/passwd | awk -F : "{if ( \\\$3 > 500 ) { print \\\$3}}"`; vps ax | grep $ID | grep -v grep | wc -l') - oval = values['princeton_comon_procs'] - values['princeton_comon_procs'] = int(oval) - else: - values['princeton_comon_procs'] = None - - - if nodename in cohash: - values['comon_stats'] = cohash[nodename] - else: - values['comon_stats'] = {'resptime': '-1', - 'uptime': '-1', - 'sshstatus': '-1', - 'lastcotop': '-1', - 'cpuspeed' : "null", - 'disksize' : 'null', - 'memsize' : 'null'} - # include output value - ### GET PLC NODE ###################### - plc_lock.acquire() - d_node = None - try: - d_node = plc.getNodes({'hostname': nodename}, ['pcu_ids', 'site_id', 'date_created', - 'last_updated', 'last_contact', 'boot_state', 'nodegroup_ids'])[0] - except: - traceback.print_exc() - plc_lock.release() - values['plc_node_stats'] = d_node - - ##### NMAP ################### - (n, v) = collectNMAP(nodename, None) - values.update(v) - - ### GET PLC PCU ###################### - site_id = -1 - d_pcu = None - if 
d_node: - pcu = d_node['pcu_ids'] - if len(pcu) > 0: - d_pcu = pcu[0] - - site_id = d_node['site_id'] - - values['plc_pcuid'] = d_pcu - - ### GET PLC SITE ###################### - plc_lock.acquire() - d_site = None - values['loginbase'] = "" - try: - d_site = plc.getSites({'site_id': site_id}, - ['max_slices', 'slice_ids', 'node_ids', 'login_base'])[0] - values['loginbase'] = d_site['login_base'] - except: - traceback.print_exc() - plc_lock.release() - - values['plc_site_stats'] = d_site - values['date_checked'] = datetime.now() - except: - print traceback.print_exc() - - return (nodename, values) - -def recordPingAndSSH(request, result): - global global_round - global count - (nodename, values) = result - - try: - if values is not None: - #fbsync = FindbadNodeRecordSync.findby_or_create(hostname="global", - # if_new_set={'round' : global_round}) - #global_round = fbsync.round - fbnodesync = FindbadNodeRecordSync.findby_or_create(hostname=nodename, - if_new_set={'round' : global_round}) - - # NOTE: This code will either add a new record for the new global_round, - # OR it will find the previous value, and update it - # with new information. - # The data that is 'lost' is not that important, b/c older - # history still exists. - fbrec = FindbadNodeRecord.findby_or_create( - round=global_round, - hostname=nodename) - - fbrec.set( **values ) - #date_checked=values['date_checked'], - #loginbase=values['loginbase'], - #kernel_version=values['kernel_version'], - #bootcd_version=values['bootcd_version'], - #nm_status=values['nm_status'], - #fs_status=values['fs_status'], - #dns_status=values['dns_status'], - #princeton_comon_dir=values['princeton_comon_dir'], - #princeton_comon_running=values['princeton_comon_running'], - #princeton_comon_procs=values['princeton_comon_procs'], - #plc_node_stats = values['plc_node_stats'], - #plc_site_stats = values['plc_site_stats'], - #plc_pcuid = values['plc_pcuid'], - #comon_stats = values['comon_stats'], - #ping_status = values['ping_status'], - #ssh_portused = values['ssh_portused'], - #ssh_status = values['ssh_status'], - #ssh_error = values['ssh_error'], - #observed_status = values['observed_status'], - #observed_category = values['observed_category']) - - #for v in before.keys(): - # if before[v] == after[v]: - # print "SAME FOR KEY %s" % v - # print "%s : %s\t%s" % ( v, before[v], after[v] ) - - fbrec.flush() - fbnodesync.round = global_round - fbnodesync.flush() - #fbsync.flush() - - count += 1 - print "%d %s %s" % (count, nodename, values) - except: - print "ERROR:" - print traceback.print_exc() - # this will be called when an exception occurs within a thread def handle_exception(request, result): print "Exception occured in request %s" % request.requestID for i in result: print "Result: %s" % i - -def externalprobe(hostname): - try: - (nodename, values) = collectNMAP(hostname, {}) - recordPingAndSSH(None, (nodename, values)) - session.flush() - return True - except: - print traceback.print_exc() - return False - -def probe(hostname): - try: - (nodename, values) = collectPingAndSSH(hostname, {}) - recordPingAndSSH(None, (nodename, values)) - session.flush() - return True - except: - print traceback.print_exc() - return False def checkAndRecordState(l_nodes, cohash): @@ -351,6 +49,7 @@ def checkAndRecordState(l_nodes, cohash): global count tp = threadpool.ThreadPool(20) + scannode = ScanNodeInternal(global_round) # CREATE all the work requests for nodename in l_nodes: @@ -361,8 +60,8 @@ def checkAndRecordState(l_nodes, cohash): if node_round < 
global_round or config.force: # recreate node stats when refreshed #print "%s" % nodename - req = threadpool.WorkRequest(collectPingAndSSH, [nodename, cohash], {}, - None, recordPingAndSSH, handle_exception) + req = threadpool.WorkRequest(scannode.collectInternal, [nodename, cohash], {}, + None, scannode.record, handle_exception) tp.putRequest(req) else: # We just skip it, since it's "up to date" diff --git a/findbadpcu.py b/findbadpcu.py index 0d06d1e..9feb62d 100755 --- a/findbadpcu.py +++ b/findbadpcu.py @@ -21,285 +21,25 @@ from monitor import util from monitor.wrapper import plc, plccache from nodequery import pcu_select from nodecommon import nmap_port_status +from monitor.scanapi import * plc_lock = threading.Lock() global_round = 1 errorState = {} count = 0 -def get_pcu(pcuname): - plc_lock.acquire() - try: - #print "GetPCU from PLC %s" % pcuname - l_pcu = plc.GetPCUs({'pcu_id' : pcuname}) - #print l_pcu - if len(l_pcu) > 0: - l_pcu = l_pcu[0] - except: - try: - #print "GetPCU from file %s" % pcuname - l_pcus = plccache.l_pcus - for i in l_pcus: - if i['pcu_id'] == pcuname: - l_pcu = i - except: - traceback.print_exc() - l_pcu = None - - plc_lock.release() - return l_pcu - -def get_nodes(node_ids): - plc_lock.acquire() - l_node = [] - try: - l_node = plc.getNodes(node_ids, ['hostname', 'last_contact', 'node_id', 'ports']) - except: - try: - plc_nodes = plccache.l_plcnodes - for n in plc_nodes: - if n['node_id'] in node_ids: - l_node.append(n) - except: - traceback.print_exc() - l_node = None - - plc_lock.release() - if l_node == []: - l_node = None - return l_node - - -def get_plc_pcu_values(pcuname): - """ - Try to contact PLC to get the PCU info. - If that fails, try a backup copy from the last run. - If that fails, return None - """ - values = {} - - l_pcu = get_pcu(pcuname) - - if l_pcu is not None: - site_id = l_pcu['site_id'] - node_ids = l_pcu['node_ids'] - l_node = get_nodes(node_ids) - - if l_node is not None: - for node in l_node: - values[node['hostname']] = node['ports'][0] - - values['nodenames'] = [node['hostname'] for node in l_node] - - # NOTE: this is for a dry run later. It doesn't matter which node. 
- values['node_id'] = l_node[0]['node_id'] - - values.update(l_pcu) - else: - values = None - - return values - -def get_plc_site_values(site_id): - ### GET PLC SITE ###################### - plc_lock.acquire() - values = {} - d_site = None - - try: - d_site = plc.getSites({'site_id': site_id}, ['max_slices', 'slice_ids', 'node_ids', 'login_base']) - if len(d_site) > 0: - d_site = d_site[0] - except: - try: - plc_sites = plccache.l_plcsites - for site in plc_sites: - if site['site_id'] == site_id: - d_site = site - break - except: - traceback.print_exc() - values = None - - plc_lock.release() - - if d_site is not None: - max_slices = d_site['max_slices'] - num_slices = len(d_site['slice_ids']) - num_nodes = len(d_site['node_ids']) - loginbase = d_site['login_base'] - values['plcsite'] = {'num_nodes' : num_nodes, - 'max_slices' : max_slices, - 'num_slices' : num_slices, - 'login_base' : loginbase, - 'status' : 'SUCCESS'} - else: - values = None - - - return values - - -def collectPingAndSSH(pcuname, cohash): - - continue_probe = True - errors = None - values = {'reboot' : 'novalue'} - ### GET PCU ###################### - try: - b_except = False - try: - v = get_plc_pcu_values(pcuname) - if v['hostname'] is not None: v['hostname'] = v['hostname'].strip() - if v['ip'] is not None: v['ip'] = v['ip'].strip() - - if v is not None: - values['plc_pcu_stats'] = v - else: - continue_probe = False - except: - b_except = True - traceback.print_exc() - continue_probe = False - - if b_except or not continue_probe: return (None, None, None) - - #### RUN NMAP ############################### - if continue_probe: - nmap = util.command.CMD() - print "nmap -oG - -P0 -p22,23,80,443,5869,9100,16992 %s | grep Host:" % reboot.pcu_name(values['plc_pcu_stats']) - (oval,eval) = nmap.run_noexcept("nmap -oG - -P0 -p22,23,80,443,5869,9100,16992 %s | grep Host:" % reboot.pcu_name(values['plc_pcu_stats'])) - # NOTE: an empty / error value for oval, will still work. - (values['port_status'], continue_probe) = nmap_port_status(oval) - else: - values['port_status'] = None - - #### COMPLETE ENTRY ####################### - - values['entry_complete'] = [] - #if values['protocol'] is None or values['protocol'] is "": - # values['entry_complete'] += ["protocol"] - if values['plc_pcu_stats']['model'] is None or values['plc_pcu_stats']['model'] is "": - values['entry_complete'] += ["model"] - # Cannot continue due to this condition - continue_probe = False - - if values['plc_pcu_stats']['password'] is None or values['plc_pcu_stats']['password'] is "": - values['entry_complete'] += ["password"] - # Cannot continue due to this condition - continue_probe = False - - if len(values['entry_complete']) > 0: - continue_probe = False - - if values['plc_pcu_stats']['hostname'] is None or values['plc_pcu_stats']['hostname'] is "": - values['entry_complete'] += ["hostname"] - if values['plc_pcu_stats']['ip'] is None or values['plc_pcu_stats']['ip'] is "": - values['entry_complete'] += ["ip"] - - # If there are no nodes associated with this PCU, then we cannot continue. 
- if len(values['plc_pcu_stats']['node_ids']) == 0: - continue_probe = False - values['entry_complete'] += ['nodeids'] - - - #### DNS and IP MATCH ####################### - if values['plc_pcu_stats']['hostname'] is not None and values['plc_pcu_stats']['hostname'] is not "" and \ - values['plc_pcu_stats']['ip'] is not None and values['plc_pcu_stats']['ip'] is not "": - #print "Calling socket.gethostbyname(%s)" % values['hostname'] - try: - ipaddr = socket.gethostbyname(values['plc_pcu_stats']['hostname']) - if ipaddr == values['plc_pcu_stats']['ip']: - values['dns_status'] = "DNS-OK" - else: - values['dns_status'] = "DNS-MISMATCH" - continue_probe = False - - except Exception, err: - values['dns_status'] = "DNS-NOENTRY" - values['plc_pcu_stats']['hostname'] = values['plc_pcu_stats']['ip'] - #print err - else: - if values['plc_pcu_stats']['ip'] is not None and values['plc_pcu_stats']['ip'] is not "": - values['dns_status'] = "NOHOSTNAME" - values['plc_pcu_stats']['hostname'] = values['plc_pcu_stats']['ip'] - else: - values['dns_status'] = "NO-DNS-OR-IP" - values['plc_pcu_stats']['hostname'] = "No_entry_in_DB" - continue_probe = False - - - ###### DRY RUN ############################ - if 'node_ids' in values['plc_pcu_stats'] and len(values['plc_pcu_stats']['node_ids']) > 0: - rb_ret = reboot.reboot_test_new(values['plc_pcu_stats']['nodenames'][0], - values, 1, True) - else: - rb_ret = "Not_Run" # No nodes to test" - - values['reboot'] = rb_ret - - except: - print "____________________________________" - print values - errors = values - print "____________________________________" - errors['traceback'] = traceback.format_exc() - print errors['traceback'] - values['reboot'] = errors['traceback'] - - values['date_checked'] = time.time() - return (pcuname, values, errors) - -def recordPingAndSSH(request, result): - global errorState - global count - global global_round - (nodename, values, errors) = result - - if values is not None: - pcu_id = int(nodename) - #fbsync = FindbadPCURecordSync.findby_or_create(plc_pcuid=0, - # if_new_set={'round': global_round}) - #global_round = fbsync.round - fbnodesync = FindbadPCURecordSync.findby_or_create(plc_pcuid=pcu_id, - if_new_set={'round' : global_round}) - - fbrec = FindbadPCURecord( - date_checked=datetime.fromtimestamp(values['date_checked']), - round=global_round, - plc_pcuid=pcu_id, - plc_pcu_stats=values['plc_pcu_stats'], - dns_status=values['dns_status'], - port_status=values['port_status'], - entry_complete=" ".join(values['entry_complete']), - reboot_trial_status="%s" % values['reboot'], - ) - fbnodesync.round = global_round - - fbnodesync.flush() - #fbsync.flush() - fbrec.flush() - - count += 1 - print "%d %s %s" % (count, nodename, values) - - if errors is not None: - pcu_id = "id_%s" % nodename - errorState[pcu_id] = errors - database.dbDump("findbadpcu_errors", errorState) - # this will be called when an exception occurs within a thread def handle_exception(request, result): print "Exception occured in request %s" % request.requestID for i in result: print "Result: %s" % i - -def checkAndRecordState(l_pcus, cohash): +def checkPCUs(l_pcus, cohash): global global_round global count tp = threadpool.ThreadPool(10) + scanpcu = ScanPCU(global_round) # CREATE all the work requests for pcuname in l_pcus: @@ -311,8 +51,8 @@ def checkAndRecordState(l_pcus, cohash): if node_round < global_round or config.force: # recreate node stats when refreshed #print "%s" % nodename - req = threadpool.WorkRequest(collectPingAndSSH, [pcuname, cohash], {}, - None, 
recordPingAndSSH, handle_exception) + req = threadpool.WorkRequest(scanpcu.collectInternal, [int(pcuname), cohash], {}, + None, scanpcu.record, handle_exception) tp.putRequest(req) else: # We just skip it, since it's "up to date" @@ -344,15 +84,14 @@ def checkAndRecordState(l_pcus, cohash): def main(): global global_round - # monitor.database.if_cached_else_refresh(1, config.refresh, "pculist", lambda : plc.GetPCUs()) l_pcus = plccache.l_pcus cohash = {} - fbsync = FindbadPCURecordSync.findby_or_create(plc_pcuid=0, if_new_set={'round' : global_round}) + fbsync = FindbadPCURecordSync.findby_or_create(plc_pcuid=0, + if_new_set={'round' : global_round}) global_round = fbsync.round - if config.site is not None: api = plc.getAuthAPI() site = api.GetSites(config.site) @@ -382,7 +121,7 @@ def main(): # update global round number to force refreshes across all nodes global_round += 1 - checkAndRecordState(l_pcus, cohash) + checkPCUs(l_pcus, cohash) if config.increment: # update global round number to force refreshes across all nodes diff --git a/monitor/scanapi.py b/monitor/scanapi.py index 3e95ef2..4a00eef 100644 --- a/monitor/scanapi.py +++ b/monitor/scanapi.py @@ -342,7 +342,7 @@ class ScanNodeInternal(ScanInterface): values['plc_node_stats'] = d_node ##### NMAP ################### - (n, v) = collectNMAP(nodename, None) + (n, v) = self.collectNMAP(nodename, None) values.update(v) ### GET PLC PCU ###################### @@ -376,7 +376,6 @@ class ScanNodeInternal(ScanInterface): return (nodename, values) - def internalprobe(hostname): fbsync = FindbadNodeRecordSync.findby_or_create(hostname="global", if_new_set={'round' : 1}) diff --git a/web/MonitorWeb/dev.cfg b/web/MonitorWeb/dev.cfg index d5915c7..8743983 100644 --- a/web/MonitorWeb/dev.cfg +++ b/web/MonitorWeb/dev.cfg @@ -26,7 +26,8 @@ sqlalchemy.dburi="sqlite:///devdata.sqlite" # Enable the debug output at the end on pages. # log_debug_info_filter.on = False -server.environment="development" +#server.environment="development" +server.environment="production" autoreload.package="monitorweb" diff --git a/web/MonitorWeb/monitorweb/controllers.py b/web/MonitorWeb/monitorweb/controllers.py index a3e3021..0cb05d4 100644 --- a/web/MonitorWeb/monitorweb/controllers.py +++ b/web/MonitorWeb/monitorweb/controllers.py @@ -19,7 +19,7 @@ from monitor.wrapper.plccache import plcdb_lb2hn as site_lb2hn from monitorweb.templates.links import * -import findbad +from monitor import scanapi def query_to_dict(query): @@ -251,10 +251,10 @@ class Root(controllers.RootController): flash("Reboot appeared to work. All at most 5 minutes. Run ExternalScan to check current status.") elif action == "ExternalScan": - findbad.externalprobe(str(hostname)) + scanapi.externalprobe(str(hostname)) flash("External Scan Successful!") elif action == "InternalScan": - findbad.probe(str(hostname)) + scanapi.internalprobe(str(hostname)) flash("Internal Scan Successful!") else: # unknown action diff --git a/web/MonitorWeb/monitorweb/templates/pculist.kid b/web/MonitorWeb/monitorweb/templates/pculist.kid index f6043dd..c06717d 100644 --- a/web/MonitorWeb/monitorweb/templates/pculist.kid +++ b/web/MonitorWeb/monitorweb/templates/pculist.kid @@ -29,7 +29,6 @@ from links import * Site PCU Name - Config Port Status Test Results Model @@ -53,7 +52,6 @@ from links import * - 80 -- 2.43.0
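
Editor's note, not part of the patch: a minimal sketch of the scan loop that findbad.py and findbadpcu.py share after this change, condensed from the hunks above. It assumes the project's own `threadpool` module and that `ScanNodeInternal.collectInternal`, `ScanPCU.collectInternal` and their `record` callbacks behave as the diff shows; `handle_exception` is copied from the scripts, and the final `wait()` call is an assumption about the pool API rather than something this diff confirms.

```python
# Sketch only -- illustrates the post-patch structure, not a drop-in module.
import threadpool                       # project-local thread pool used by both scripts
from monitor.scanapi import ScanNodeInternal, ScanPCU   # classes introduced in monitor/scanapi.py

def handle_exception(request, result):
    # unchanged from findbad.py / findbadpcu.py: report a failed work request
    print "Exception occured in request %s" % request.requestID
    for i in result:
        print "Result: %s" % i

def scan_nodes(nodenames, cohash, global_round):
    """Queue one probe per node; scannode.record persists each result."""
    tp = threadpool.ThreadPool(20)
    scannode = ScanNodeInternal(global_round)
    for nodename in nodenames:
        req = threadpool.WorkRequest(scannode.collectInternal, [nodename, cohash], {},
                                     None, scannode.record, handle_exception)
        tp.putRequest(req)
    tp.wait()                           # assumed: block until all probes finish

def scan_pcus(pcu_ids, cohash, global_round):
    """Same pattern for PCUs; note the id is coerced to int, as in findbadpcu.py."""
    tp = threadpool.ThreadPool(10)
    scanpcu = ScanPCU(global_round)
    for pcuname in pcu_ids:
        req = threadpool.WorkRequest(scanpcu.collectInternal, [int(pcuname), cohash], {},
                                     None, scanpcu.record, handle_exception)
        tp.putRequest(req)
    tp.wait()
```

For single-host checks, controllers.py no longer imports findbad; it calls `scanapi.internalprobe(str(hostname))` and `scanapi.externalprobe(str(hostname))` directly, which wrap the same collect/record pair for one node.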