Integrate the zabbix Elixir model into the monitor module.
author     Stephen Soltesz <soltesz@cs.princeton.edu>
           Fri, 14 Nov 2008 23:46:18 +0000 (23:46 +0000)
committer  Stephen Soltesz <soltesz@cs.princeton.edu>
           Fri, 14 Nov 2008 23:46:18 +0000 (23:46 +0000)
Added two scripts under zabbix/ for syncing site info from PLCdb to ZABBIXdb
Added these directories to setup.py for the monitor module

monitor/database/__init__.py
monitor/database/dborm.py
monitor/database/zabbixapi/emailZabbix.py [new file with mode: 0644]
monitor/database/zabbixapi/model.py [new file with mode: 0644]
monitor/database/zabbixapi/site.py [new file with mode: 0644]
setup.py
zabbix/zabbixsite.py [new file with mode: 0755]
zabbix/zabbixsync.py [new file with mode: 0755]
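
A rough usage sketch of the new layout (illustrative only; zabbix/zabbixsync.py is not shown below, so the exact call sequence, the "princeton" loginbase, and the example addresses are assumptions): importing monitor.database.dborm binds the global Elixir session and metadata to the Zabbix database, after which the helpers added in monitor/database/zabbixapi/site.py can be run once per site.

    # hypothetical driver; assumes config.zabbix_dburi points at ZABBIXdb (see dborm.py below)
    import monitor.database.dborm                  # binds elixir.session/metadata to the Zabbix engine
    from monitor.database.zabbixapi import site

    site.setup_global()                            # fix up the 'Email' media type and mail templates
    site.setup_site("princeton",                   # loginbase from PLCdb
                    ["tech@example.edu"],          # tech contact emails
                    ["pi@example.edu"],            # PI emails
                    "128.112.139.0/25")            # IP range for the site's discovery rule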

index 653b346..1c90997 100644 (file)
@@ -1,2 +1 @@
 from dbpickle import *
-from dborm import *
index 3e47ea2..e663a05 100644 (file)
@@ -3,7 +3,17 @@
 import sqlalchemy
 import elixir
 import monitor.config as config
-elixir.metadata.bind = sqlalchemy.create_engine(config.databaseuri, echo=False)
-elixir.session = sqlalchemy.orm.scoped_session(sqlalchemy.orm.sessionmaker(autoflush=True,autocommit=True))
 
-from infovacuum.model import *
+#elixir.metadata.bind = sqlalchemy.create_engine(config.databaseuri, echo=False)
+#elixir.session = sqlalchemy.orm.scoped_session(sqlalchemy.orm.sessionmaker(autoflush=True,autocommit=True))
+#infovacuum_db = sqlalchemy.MetaData(bind=sqlalchemy.create_engine(config.monitor_dburi, echo=False))
+#infovacuum_session = sqlalchemy.orm.scoped_session(sqlalchemy.orm.sessionmaker(autoflush=True,autocommit=True))
+
+zabbix_engine = sqlalchemy.create_engine(config.zabbix_dburi, echo=config.echo)
+metadata = sqlalchemy.MetaData()
+metadata.bind = zabbix_engine
+session = sqlalchemy.orm.scoped_session(sqlalchemy.orm.sessionmaker(autoflush=False,autocommit=True))
+elixir.session, elixir.metadata = session, metadata
+
+#from monitor.database.infovacuum.model import *
+from monitor.database.zabbixapi.model import *
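
With elixir.session and elixir.metadata bound to the Zabbix engine above, the entity classes declared in monitor/database/zabbixapi/model.py (below) are queried through that shared session. A small sketch, assuming the "<loginbase>_hostgroup" naming used by site.py:

    from monitor.database.zabbixapi.model import HostGroup

    hg = HostGroup.get_by(name="princeton_hostgroup")    # get_by() is defined on ZabbixEntity
    if hg is not None:
        print [h.host for h in hg.host_list]             # hosts reached via the hosts_groups m2m table
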
diff --git a/monitor/database/zabbixapi/emailZabbix.py b/monitor/database/zabbixapi/emailZabbix.py
new file mode 100644 (file)
index 0000000..828095c
--- /dev/null
@@ -0,0 +1,132 @@
+class mailtxt:
+       @classmethod
+       def reformat(cls, arguments={'hostname' : "your.host.name", 
+                                                                'support_email' : 'support@your.host.name'}):
+               fields = dir(cls)
+               for f in fields:
+                       #print "looking at %s" % f
+                       if "__" not in f and 'reformat' not in f:
+                               attr = getattr(cls,f)
+                               #print attr
+                               setattr(cls, f, attr % arguments)
+                               #print getattr(cls,f)
+               return
+
+       nodedown_one_subject="Server {HOSTNAME} is unreachable: First Notice"
+       nodedown_one = """
+Hello,
+
+We hope that you're having a good day.  As part of PlanetLab node monitoring, we noticed the following node is down at your site:
+
+    {HOSTNAME} : Since {EVENT.AGE}
+
+We're writing because we need your help returning it to its regular operation.
+
+To help, please confirm that a version 3.0 or greater BootCD is installed in the machine.  Then, after checking that the node is properly networked, power cycle the machine.  Note that rebooting the machine may not fully resolve the problems we are seeing.  Once the machine has come back up, please visit the monitor status page to verify that your node is accessible.
+
+If the machine has booted successfully, you may check directly by logging in with your site_admin account, and running:
+
+    sudo /usr/sbin/vps ax
+
+If you have a BootCD older than 3.0, you will need to create a new BootImage on CD or USB.  You can find instructions for this at the Technical Contact's Guide:
+
+    https://www.planet-lab.org/doc/guides/bootcdsetup
+
+There is no need to respond to this message unless there are console messages relating to the node's failure.  In that case, please report them to PlanetLab support (%(support_email)s) so we can help resolve the issue.  Including this message in your reply will help us coordinate our records with the actions you've taken.
+
+Finally, you can track the current status of your machines using this Google Gadget:
+
+    http://fusion.google.com/add?source=atgs&moduleurl=http://%(hostname)s/monitor/sitemonitor.xml
+
+Thank you for your help,
+  -- PlanetLab Central (%(support_email)s)
+"""
+
+       nodedown_two_subject="Server {HOSTNAME} is unreachable: Second Notice"
+       nodedown_two = """
+Hello, 
+
+We hope that you're having a good day.  As part of PlanetLab node monitoring, we noticed the following node is down at your site:
+
+    {HOSTNAME} : Since {EVENT.AGE}
+
+We're writing again because our previous correspondence, sent only to the registered Technical Contact, has gone unacknowledged for at least a week, and we need your help returning these machines to their regular operation.  We understand that machine maintenance can take time.  So, while we wait for the machines to return to their regular operation, slice creation has been suspended at your site.  No new slices may be created, but the existing slices and the services running within them will be unaffected.
+
+To help, please confirm that a version 3.0 or greater BootCD is installed in the machine.  Then, after checking that the node is properly networked, power cycle the machine.  Note that rebooting the machine may not fully resolve the problems we are seeing.  Once the machine has come back up, please visit the monitor status page to verify that your node is accessible.
+
+If the machine has booted successfully, you may check directly by logging in with your site_admin account, and running:
+
+    sudo /usr/sbin/vps ax
+
+If you have a BootCD older than 3.0, you will need to create a new BootImage on CD or USB.  You can find instructions for this at the Technical Contact's Guide:
+
+    https://www.planet-lab.org/doc/guides/bootcdsetup
+
+If, after following these directions, you are still experiencing problems, you can acknowledge this notice by visiting one of the links below and letting us know what the problem is at mailto:%(support_email)s
+
+    http://%(hostname)s/zabbix/acknow.php?eventid={EVENT.ID}
+    http://%(hostname)s/zabbix/tr_events.php?triggerid={TRIGGER.ID}&eventid={EVENT.ID}
+
+After another week, we will disable all of your site's slices currently running on PlanetLab.  Because this action will directly affect all users of these slices, those users will also be notified at that time.
+
+Thank you for your help,
+  -- PlanetLab Central (%(support_email)s)
+
+
+"""
+
+       nodedown_three_subject="Server {HOSTNAME} is unreachable: Third Notice"
+       nodedown_three ="""
+Hello,
+
+We hope that you're having a good day.  As part of PlanetLab node monitoring, we noticed the following node is down at your site:
+
+    {HOSTNAME} : Since {EVENT.AGE}
+
+We understand that machine maintenance can take time.  We're writing again because our previous correspondences, sent first to the registered Technical Contact and then to the Site PI, have gone unacknowledged for at least two weeks, and we need your help returning these machines to their regular operation.  This is our third attempt to contact someone regarding these machines at your site.  So, while we wait for the machines to return to their regular operation, all current slice activity will be suspended.  Current experiments will be stopped and will not be able to start again until there is evidence that you have begun to help with the maintenance of these machines.
+
+To help, please confirm that a version 3.0 or greater BootCD is installed in the machine.  Then, after checking that the node is properly networked, power cycle the machine.  Note that rebooting the machine may not fully resolve the problems we are seeing.  Once the machine has come back up, please visit the monitor status page to verify that your node is accessible.
+
+If the machine has booted successfully, you may check directly by logging in with your site_admin account, and running:
+
+    sudo /usr/sbin/vps ax
+
+If you have a BootCD older than 3.0, you will need to create a new BootImage on CD or USB.  You can find instructions for this at the Technical Contact's Guide:
+
+    https://www.planet-lab.org/doc/guides/bootcdsetup
+
+If, after following these directions, you are still experiencing problems, you can acknowledge this notice by visiting one of the links below and letting us know what the problem is at mailto:%(support_email)s
+
+    http://%(hostname)s/zabbix/acknow.php?eventid={EVENT.ID}
+    http://%(hostname)s/zabbix/tr_events.php?triggerid={TRIGGER.ID}&eventid={EVENT.ID}
+
+Thank you for your help,
+  -- PlanetLab Central (%(support_email)s)
+"""
+       nodedown_four_subject="Server {HOSTNAME} is unreachable: Waiting Forever"
+       nodedown_four=""" 
+Hello,
+
+We hope that you're having a good day.  As part of PlanetLab node monitoring, we noticed the following node is down at your site:
+
+    {HOSTNAME} : Since {EVENT.AGE}
+
+We have not heard a response from you regarding this machine.  We will continue sending messages until we receive an acknowledgment and a description of the issue preventing the node from remaining online.
+
+You can acknowledge this notice by visiting the link below or by letting us know what the problem is by replying to this message.
+
+    http://%(hostname)s/zabbix/acknow.php?eventid={EVENT.ID}
+    http://%(hostname)s/zabbix/tr_events.php?triggerid={TRIGGER.ID}&eventid={EVENT.ID}
+
+Thank you for your help,
+  -- PlanetLab Central (%(support_email)s)
+"""
+       thankyou_nodeup = """
+While monitoring your site, we noticed that the following nodes *improved* their states:
+
+    {HOSTNAME} : Available
+
+Often, system administration is a thankless job, but not today. :-)
+
+Thank you!
+  -- PlanetLab Central (%(support_email)s)
+"""
diff --git a/monitor/database/zabbixapi/model.py b/monitor/database/zabbixapi/model.py
new file mode 100644 (file)
index 0000000..45fbd75
--- /dev/null
@@ -0,0 +1,762 @@
+import pkg_resources
+pkg_resources.require("SQLAlchemy>=0.3.10")
+pkg_resources.require("Elixir>=0.4.0")
+# import the basic Elixir classes and functions for declaring the data model
+# (see http://elixir.ematia.de/trac/wiki/TutorialDivingIn)
+from elixir import EntityMeta, Entity, Field, OneToMany, ManyToOne, ManyToMany
+from elixir import options_defaults, using_options, setup_all, metadata, entities
+# import some datatypes for table columns from Elixir
+# (see http://www.sqlalchemy.org/docs/04/types.html for more)
+from elixir import String, Unicode, Integer, DateTime
+from sqlalchemy import ColumnDefault
+from sqlalchemy import Table
+from sqlalchemy.orm import ColumnProperty, object_session, object_mapper
+
+from xml.marshal.generic import Marshaller
+from xml.dom.ext import PrettyPrint
+from xml.dom.ext.reader.Sax import FromXml
+from elementtree import ElementTree
+
+options_defaults['autosetup'] = False
+
+from elixir.statements import Statement
+from sqlalchemy import Sequence
+
+import defines
+
+
+#from elixir import metadata
+#from monitor.database.dborm import zabbix_db, zabbix_session
+#__metadata__ = zabbix_db
+#__session__  = zabbix_session
+
+# TODO:
+#   - declare association between Media and MediaType so that lookups can
+#     occur on 'description'
+
+class ZabbixSerialize(object):
+
+       @classmethod
+       def xmlDeserialize(cls, xml):
+               d = cls.xml2dict(xml)
+               return cls.dict2object(d)
+
+       def xmlSerialize(self, elem=None):
+               dict = self.convert_dict(self.to_dict())
+
+               if hasattr(self, 'deepcopy'):
+                       for val in self.deepcopy:
+                               dict[val] = getattr(self, val)
+
+               skip_keys = [self._descriptor.auto_primarykey]
+               if hasattr(self, 'skip_keys'):
+                       skip_keys += self.skip_keys
+
+               return self.xmlMessage(dict, skip_keys, elem)
+
+       @classmethod
+       def xmlMessage(cls, dict=None, skip_keys=[], use_elem=None):
+
+               elem = ElementTree.Element(cls.classname())
+
+               if isinstance(dict, type({})):
+                       for key, value in dict.items():
+                               if key in skip_keys:
+                                       continue
+
+                               if isinstance(value, type(0)):
+                                       ElementTree.SubElement(elem, key, type="int").text = str(value)
+
+                               elif isinstance(value, type(0L)):
+                                       ElementTree.SubElement(elem, key, type="long").text = str(value)
+
+                               elif isinstance(value, type([])):
+                                       if len(value) > 0:
+                                               e = ElementTree.SubElement(elem, key, type="list") 
+                                               for obj in value:
+                                                       d = obj.convert_dict(obj.to_dict())
+                                                       obj.xmlSerialize(e) 
+                               else:
+                                       ElementTree.SubElement(elem, key).text = value
+
+               elif isinstance(dict, type([])):
+                       if len(dict) > 0:
+                               o = dict[0]
+                               key = "%s_list" % o.__class__.__name__.lower()
+                               e = ElementTree.SubElement(elem, key, type="list") 
+                               for obj in dict:
+                                       d = obj.convert_dict(obj.to_dict())
+                                       obj.xmlSerialize(e) 
+
+               if use_elem is not None:
+                       use_elem.append(elem)
+                               
+               return ElementTree.tostring(elem)
+
+       @classmethod
+       def xml2dict(cls, message, elem=None):
+               em = get_zabbix_entitymap()
+
+               if message and elem is None:
+                       elem = ElementTree.XML(message)
+               elif elem is None:
+                       raise Exception("Cannot proceed with empty xml, and no elem")
+
+               #print "tag: %s : classname : %s" % (elem.tag, cls.classname())
+               if cls is not ZabbixSerialize:
+                       assert elem.tag == cls.classname()
+               dict = {}
+               for child in elem:
+                       if child.get("type") == "int":
+                               dict[child.tag] = int(child.text)
+                       elif child.get("type") == "long":
+                               dict[child.tag] = long(child.text)
+                       elif child.get("type") == "list":
+                               if cls is not ZabbixSerialize:
+                                       assert child.tag in cls.deepcopy, "List (%s) in XML is not a recognized type for this object (%s)" % (child.tag, cls.classname())
+                               dict[child.tag] = []
+                               for e in child:
+                                       dict[child.tag].append( em[e.tag].xml2dict(None, e) )
+                       elif child.text is None:
+                               dict[child.tag] = ""
+                       else:
+                               dict[child.tag] = child.text
+               return dict
+
+       @classmethod
+       def dict2object(cls, dict):
+               em = get_zabbix_entitymap()
+               if cls is ZabbixSerialize:
+                       # note: assume that there's only one type of class
+                       retdict = {}
+                       for key in dict.keys():
+                               clsobj = get_zabbix_class_from_name(key)
+                               retdict[key] = [ clsobj.dict2object(data) for data in dict[key] ]
+                       return retdict
+
+               # take deepcopy values out of dict.
+               backup = {}
+               if hasattr(cls, 'deepcopy'):
+                       for val in cls.deepcopy:
+                               if val in dict:
+                                       backup[val] = dict[val]
+                                       del dict[val]
+
+               # instantiate them
+               # for each deepcopy object, convert all values in list
+               for k in backup.keys():
+                       clsobj = get_zabbix_class_from_name(k)
+                       l = [ clsobj.dict2object(data) for data in backup[k] ]
+                       backup[k] = l
+
+               # find or create the primary object
+               obj = cls.find_or_create(**dict)
+               #if cls is DiscoveryCheck or \
+               #       cls is ActionCondition or \
+               #       cls is ActionOperation:
+               #       # NOTE: Some objects should always be created. like DiscoveryCheck
+               #       obj = None
+               #else:
+               #       obj = cls.get_by(**dict)
+#
+#              if obj is None:
+#                      print "CREATING NEW %s" % cls.classname()
+#                      obj = cls(**dict)
+#              else:
+#                      print "FOUND EXISTING OBJECT: %s"% obj
+
+               # add deepcopy values to primary object
+               for k in backup.keys():
+                       print type(backup[k][0])
+
+                       if isinstance(obj, User) and isinstance(backup[k][0], UsrGrp):
+                               print "adding groups to user"
+                               for g in backup[k]:
+                                       obj.append_group(g)
+
+                       elif isinstance(obj, User) and isinstance(backup[k][0], Media):
+                               print "adding media to user"
+                               for g in backup[k]:
+                                       obj.media_list.append(g)
+
+                       elif isinstance(obj, UsrGrp) and isinstance(backup[k][0], HostGroup):
+                               print "adding hostgroup to usergroup"
+                               print "NOT IMPLEMENTED!!!"
+                               for g in backup[k]:
+                                       obj.append_hostgroup(g)
+                                       pass
+
+                       elif isinstance(obj, Action) and isinstance(backup[k][0], ActionCondition):
+                               print "adding actionconditon to action"
+                               for g in backup[k]:
+                                       obj.actioncondition_list.append(g)
+
+                       elif isinstance(obj, Action) and isinstance(backup[k][0], ActionOperation):
+                               print "adding actionoperation to action"
+                               for g in backup[k]:
+                                       obj.actionoperation_list.append(g)
+
+                       elif isinstance(obj, ActionOperation) and \
+                                isinstance(backup[k][0], OperationCondition):
+                               print "adding operationcondition to actionoperation"
+                               for g in backup[k]:
+                                       obj.operationcondition_list.append(g)
+
+                       elif isinstance(obj, DiscoveryRule) and isinstance(backup[k][0], DiscoveryCheck):
+                               print "adding discoverycheck to discoveryrule"
+                               for v in backup[k]:
+                                       obj.discoverycheck_list.append(v)
+
+               return obj
+
+       def convert_dict(self, d):
+               rd = {}
+               for key in d.keys():
+                       if type(d[key]) == type([]):
+                               rd[str(key)] = [ self.convert_dict(v) for v in d[key] ]
+                       else:
+                               rd[str(key)] = d[key]
+               return rd
+
+       @classmethod
+       def classname(cls):
+               return cls.__name__
+
+       def prettyserialize(self):
+               xml = self.xmlSerialize()
+               d = FromXml(xml)
+               PrettyPrint(d)
+       
+class ZabbixEntity(ZabbixSerialize):
+       __metaclass__ = EntityMeta
+
+       def __init__(self, **kwargs):
+               print "__INIT__ %s" % self.classname()
+               tablename = self._descriptor.tablename
+               fieldname = self._descriptor.auto_primarykey
+               index = IDs.get_by(table_name=tablename, field_name=fieldname)
+               if not index:
+                       index = IDs(table_name=tablename, field_name=fieldname, nodeid=0, nextid=10)
+                       index.flush()
+               index.nextid = index.nextid + 1
+               kwargs[fieldname] = index.nextid
+               self.set(**kwargs)
+
+       def __repr__(self):
+               rd = {}
+               if hasattr(self, 'deepcopy'):
+                       for k in self.deepcopy:
+                               rd[k] = [ str(v) for v in getattr(self, k) ]
+
+               rd.update(self.to_dict())
+               val = ""
+               for k in rd.keys():
+                       val += k
+                       val += "="
+                       val += str(rd[k])
+                       val += ", "
+               return self.classname() + "(" + val + ")"
+
+       @classmethod
+       def classname(cls):
+               return cls.__name__
+
+       def set(self, **kwargs):
+               for key, value in kwargs.iteritems():
+                       setattr(self, key, value)
+       
+       @classmethod
+       def find_or_create(cls, exec_if_new=None, set_if_new={}, **kwargs):
+               if cls is DiscoveryCheck or cls is ActionCondition or \
+                       cls is ActionOperation:
+                       # NOTE: Some objects should always be created. like DiscoveryCheck
+                       obj = None
+               else:
+                       # NOTE: ignore *_list items
+                       query = {}
+                       for key in kwargs:
+                               if "_list" not in key:
+                                       query[key] = kwargs[key]
+                       print "SEARCHING USING %s" % query
+                       obj = cls.get_by(**query)
+
+               if obj is None:
+                       print "CREATING NEW %s" % cls.classname()
+                       print "USING %s" % kwargs
+                       obj = cls(**kwargs)
+                       obj.set(**set_if_new)
+                       if exec_if_new:
+                               exec_if_new(obj)
+               else:
+                       print "FOUND EXISTING OBJECT: %s"% obj
+
+               return obj
+
+       def update_or_create(cls, data, surrogate=True):
+               pk_props = cls._descriptor.primary_key_properties
+
+               # if all pk are present and not None
+               if not [1 for p in pk_props if data.get(p.key) is None]:
+                       pk_tuple = tuple([data[prop.key] for prop in pk_props])
+                       record = cls.query.get(pk_tuple)
+                       if record is None:
+                               if surrogate:
+                                       raise Exception("cannot create surrogate with pk")
+                               else:
+                                       record = cls()
+               else:
+                       if surrogate:
+                               record = cls()
+                       else:
+                               raise Exception("cannot create non surrogate without pk")
+               record.from_dict(data)
+               return record
+       update_or_create = classmethod(update_or_create)
+
+       def from_dict(self, data):
+               """
+               Update a mapped class with data from a JSON-style nested dict/list
+               structure.
+               """
+               # surrogate can be guessed from autoincrement/sequence but I guess
+               # that's not 100% reliable, so we'll need an override
+
+               mapper = object_mapper(self)
+
+               for key, value in data.iteritems():
+                       if isinstance(value, dict):
+                               dbvalue = getattr(self, key)
+                               rel_class = mapper.get_property(key).mapper.class_
+                               pk_props = rel_class._descriptor.primary_key_properties
+
+                               # If the data doesn't contain any pk, and the relationship
+                               # already has a value, update that record.
+                               if not [1 for p in pk_props if p.key in data] and \
+                                  dbvalue is not None:
+                                       dbvalue.from_dict(value)
+                               else:
+                                       record = rel_class.update_or_create(value)
+                                       setattr(self, key, record)
+                       elif isinstance(value, list) and \
+                                value and isinstance(value[0], dict):
+
+                               rel_class = mapper.get_property(key).mapper.class_
+                               new_attr_value = []
+                               for row in value:
+                                       if not isinstance(row, dict):
+                                               raise Exception(
+                                                               'Cannot send mixed (dict/non dict) data '
+                                                               'to list relationships in from_dict data.')
+                                       record = rel_class.update_or_create(row)
+                                       new_attr_value.append(record)
+                               setattr(self, key, new_attr_value)
+                       else:
+                               setattr(self, key, value)
+
+       def to_dict(self, deep={}, exclude=[]):
+               """Generate a JSON-style nested dict/list structure from an object."""
+               col_prop_names = [p.key for p in self.mapper.iterate_properties \
+                                                                         if isinstance(p, ColumnProperty)]
+               data = dict([(name, getattr(self, name))
+                                        for name in col_prop_names if name not in exclude])
+               for rname, rdeep in deep.iteritems():
+                       dbdata = getattr(self, rname)
+                       #FIXME: use attribute names (ie coltoprop) instead of column names
+                       fks = self.mapper.get_property(rname).remote_side
+                       exclude = [c.name for c in fks]
+                       if isinstance(dbdata, list):
+                               data[rname] = [o.to_dict(rdeep, exclude) for o in dbdata]
+                       else:
+                               data[rname] = dbdata.to_dict(rdeep, exclude)
+               return data
+
+       # session methods
+       def flush(self, *args, **kwargs):
+               return object_session(self).flush([self], *args, **kwargs)
+
+       def delete(self, *args, **kwargs):
+               return object_session(self).delete(self, *args, **kwargs)
+
+       def expire(self, *args, **kwargs):
+               return object_session(self).expire(self, *args, **kwargs)
+
+       def refresh(self, *args, **kwargs):
+               return object_session(self).refresh(self, *args, **kwargs)
+
+       def expunge(self, *args, **kwargs):
+               return object_session(self).expunge(self, *args, **kwargs)
+
+       # This bunch of session methods, along with all the query methods below
+       # only make sense when using a global/scoped/contextual session.
+       def _global_session(self):
+               return self._descriptor.session.registry()
+       _global_session = property(_global_session)
+
+       def merge(self, *args, **kwargs):
+               return self._global_session.merge(self, *args, **kwargs)
+
+       def save(self, *args, **kwargs):
+               return self._global_session.save(self, *args, **kwargs)
+
+       def update(self, *args, **kwargs):
+               return self._global_session.update(self, *args, **kwargs)
+
+       # only exist in SA < 0.5
+       # IMO, the replacement (session.add) doesn't sound good enough to be added
+       # here. For example: "o = Order(); o.add()" is not very telling. It's
+       # better to leave it as "session.add(o)"
+       def save_or_update(self, *args, **kwargs):
+               return self._global_session.save_or_update(self, *args, **kwargs)
+
+       # query methods
+       def get_by(cls, *args, **kwargs):
+               return cls.query.filter_by(*args, **kwargs).first()
+       get_by = classmethod(get_by)
+
+       def get(cls, *args, **kwargs):
+               return cls.query.get(*args, **kwargs)
+       get = classmethod(get)
+
+class IDs(Entity):
+       using_options(
+               tablename='ids',
+               autoload=True,
+       )
+
+class Right(ZabbixEntity):
+       # rights of a usergroup to interact with hosts of a hostgroup
+       using_options(
+               tablename='rights',
+               autoload=True,
+               auto_primarykey='rightid',
+       )
+       # column groupid is an index to usrgrp.usrgrpid
+       # column id is an index into the host-groups.groupid
+       # permission is 3=rw, 2=ro, 1=r_list, 0=deny
+
+       # TODO: NOTE: When serialization occurs, the 'permissions' field is lost,
+       # currently since the rights table is merely treated as an intermediate
+       # table for the m2m between usrgrp and groups.
+
+rights = Table('rights', metadata, autoload=True)
+hostsgroups = Table('hosts_groups', metadata, autoload=True)
+
+
+# m2m table between hosts and groups below
+class HostsGroups(ZabbixEntity):
+       using_options(
+               tablename='hosts_groups',
+               autoload=True,
+               auto_primarykey='hostgroupid',
+       )
+
+class Host(ZabbixEntity):
+       using_options(
+               tablename='hosts',
+               autoload=True,
+               auto_primarykey='hostid',
+       )
+       hostgroup_list = ManyToMany(
+               'HostGroup',
+               table=hostsgroups,
+               foreign_keys=lambda: [hostsgroups.c.groupid, hostsgroups.c.hostid],
+               primaryjoin=lambda: Host.hostid==hostsgroups.c.hostid,
+               secondaryjoin=lambda: HostGroup.groupid==hostsgroups.c.groupid,
+       )
+       def delete(self):
+               # NOTE: media objects are automatically handled.
+               hosts_groups_match = HostsGroups.query.filter_by(hostid=self.hostid).all()
+               for row in hosts_groups_match:
+                       row.delete()
+               super(Host, self).delete()
+
+class HostGroup(ZabbixEntity):
+       using_options(
+               tablename='groups',
+               autoload=True,
+               auto_primarykey='groupid',
+       )
+       usrgrp_list = ManyToMany(
+               'UsrGrp',
+               table=rights,
+               foreign_keys=lambda: [rights.c.groupid, rights.c.id],
+               primaryjoin=lambda: HostGroup.groupid==rights.c.id,
+               secondaryjoin=lambda: UsrGrp.usrgrpid==rights.c.groupid,
+       )
+       host_list = ManyToMany(
+               'Host',
+               table=hostsgroups,
+               foreign_keys=lambda: [hostsgroups.c.groupid, hostsgroups.c.hostid],
+               primaryjoin=lambda: HostGroup.groupid==hostsgroups.c.groupid,
+               secondaryjoin=lambda: Host.hostid==hostsgroups.c.hostid,
+       )
+       def delete(self):
+               # NOTE: media objects are automatically handled.
+               hosts_groups_match = HostsGroups.query.filter_by(groupid=self.groupid).all()
+               for row in hosts_groups_match:
+                       row.delete()
+               super(HostGroup, self).delete()
+
+class UsersGroups(ZabbixEntity):
+       using_options(
+               tablename='users_groups',
+               autoload=True,
+               auto_primarykey='id',
+       )
+
+class MediaType(ZabbixEntity):
+       using_options(
+               tablename='media_type',
+               autoload=True,
+               auto_primarykey='mediatypeid',
+       )
+
+class Script(ZabbixEntity):
+       using_options(
+               tablename='scripts',
+               autoload=True,
+               auto_primarykey='scriptid',
+       )
+
+
+# DISCOVERY #################################################
+
+class DiscoveryCheck(ZabbixEntity):
+       using_options(
+               tablename='dchecks',
+               autoload=True,
+               auto_primarykey='dcheckid',
+       )
+       skip_keys = ['druleid']
+       discoveryrule = ManyToOne('DiscoveryRule', 
+                                       primaryjoin=lambda: DiscoveryCheck.druleid == DiscoveryRule.druleid,
+                                       foreign_keys=lambda: [DiscoveryCheck.druleid],
+                                       ondelete='cascade') 
+
+class DiscoveryRule(ZabbixEntity):  # parent of dchecks
+       using_options(
+               tablename='drules',
+               autoload=True,
+               auto_primarykey='druleid',
+       )
+       deepcopy = ['discoverycheck_list']
+       discoverycheck_list = OneToMany('DiscoveryCheck', cascade='all, delete-orphan',
+                                       primaryjoin=lambda: DiscoveryCheck.druleid == DiscoveryRule.druleid,
+                                       foreign_keys=lambda: [DiscoveryCheck.druleid])
+
+       discoveredhost_list = OneToMany('DiscoveredHost', cascade='all, delete-orphan',
+                                       primaryjoin=lambda: DiscoveredHost.druleid == DiscoveryRule.druleid,
+                                       foreign_keys=lambda: [DiscoveredHost.druleid])
+
+class DiscoveredHost(ZabbixEntity):
+       using_options(
+               tablename='dhosts',
+               autoload=True,
+               auto_primarykey='dhostid',
+       )
+       discoveryrule = ManyToOne('DiscoveryRule',
+                                       primaryjoin=lambda: DiscoveredHost.druleid == DiscoveryRule.druleid,
+                                       foreign_keys=lambda: [DiscoveredHost.druleid],
+                                       ondelete='cascade') 
+
+       discoveryservice_list = OneToMany('DiscoveryService', cascade='all, delete-orphan',
+                                       primaryjoin=lambda: DiscoveryService.dhostid== DiscoveredHost.dhostid,
+                                       foreign_keys=lambda: [DiscoveryService.dhostid],) 
+
+class DiscoveryService(ZabbixEntity):
+       using_options(
+               tablename='dservices',
+               autoload=True,
+               auto_primarykey='dserviceid',
+       )
+       discoveryrule = ManyToOne('DiscoveredHost',
+                                       primaryjoin=lambda: DiscoveryService.dhostid== DiscoveredHost.dhostid,
+                                       foreign_keys=lambda: [DiscoveryService.dhostid],
+                                       ondelete='cascade') 
+                                               
+
+# ACTIONS #################################################
+
+class ActionOperation(ZabbixEntity):
+       using_options(
+               tablename='operations', autoload=True, auto_primarykey='operationid',
+       )
+       deepcopy = ['operationcondition_list']
+       skip_keys = ['actionid']
+       action = ManyToOne('Action', ondelete='cascade',
+                                       primaryjoin=lambda: ActionOperation.actionid == Action.actionid,
+                                       foreign_keys=lambda: [ActionOperation.actionid])
+                                       
+       operationcondition_list = OneToMany('OperationCondition', cascade='all, delete-orphan',
+                                       primaryjoin=lambda: OperationCondition.operationid == ActionOperation.operationid,
+                                       foreign_keys=lambda: [OperationCondition.operationid])
+
+class OperationCondition(ZabbixEntity):
+       using_options(
+               tablename='opconditions', autoload=True, auto_primarykey='opconditionid',
+       )
+       skip_keys = ['operationid']
+       actionoperation = ManyToOne('ActionOperation', ondelete='cascade',
+                                       primaryjoin=lambda: OperationCondition.operationid == ActionOperation.operationid,
+                                       foreign_keys=lambda: [OperationCondition.operationid])
+
+class ActionCondition(ZabbixEntity):
+       using_options(
+               tablename='conditions', autoload=True, auto_primarykey='conditionid',
+       )
+       skip_keys = ['actionid']
+       action = ManyToOne('Action', ondelete='cascade',
+                                       primaryjoin=lambda: ActionCondition.actionid == Action.actionid,
+                                       foreign_keys=lambda: [ActionCondition.actionid])
+
+class Action(ZabbixEntity):
+       using_options(
+               tablename='actions', autoload=True, auto_primarykey='actionid',
+       )
+       deepcopy = ['actionoperation_list', 'actioncondition_list']
+       actionoperation_list = OneToMany('ActionOperation', cascade='all, delete-orphan',
+                                       primaryjoin=lambda: ActionOperation.actionid == Action.actionid,
+                                       foreign_keys=lambda: [ActionOperation.actionid])
+                                       
+       actioncondition_list = OneToMany('ActionCondition', cascade='all, delete-orphan',
+                                       primaryjoin=lambda: ActionCondition.actionid == Action.actionid,
+                                       foreign_keys=lambda: [ActionCondition.actionid])
+
+# USERS & EMAIL MEDIA #################################################
+
+class Media(ZabbixEntity):
+       using_options(
+               tablename='media',
+               autoload=True,
+               auto_primarykey='mediaid',
+       )
+       skip_keys = ['userid']
+       user = ManyToOne('User', 
+                                       primaryjoin=lambda: Media.userid == User.userid,
+                                       foreign_keys=lambda: [Media.userid],
+                                       ondelete='cascade') 
+
+users_groups = Table('users_groups', metadata, autoload=True)
+
+class User(ZabbixEntity): # parent of media
+       using_options(
+               tablename='users',
+               autoload=True,
+               auto_primarykey='userid',
+       )
+       deepcopy = ['media_list', 'usrgrp_list']
+       media_list = OneToMany('Media', 
+                                         primaryjoin=lambda: Media.userid == User.userid,
+                                         foreign_keys=lambda: [Media.userid],
+                                         cascade='all, delete-orphan')
+
+       # READ-ONLY: do not append or remove groups here.
+       usrgrp_list = ManyToMany('UsrGrp',
+                               table=users_groups,
+                               foreign_keys=lambda: [users_groups.c.userid, users_groups.c.usrgrpid],
+                               primaryjoin=lambda: User.userid==users_groups.c.userid,
+                               secondaryjoin=lambda: UsrGrp.usrgrpid==users_groups.c.usrgrpid)
+
+       def delete(self):
+               # NOTE: media objects are automatically handled.
+               users_groups_match = UsersGroups.query.filter_by(userid=self.userid).all()
+               for row in users_groups_match:
+                       row.delete()
+               super(User, self).delete()
+               
+       def append_group(self, group):
+               ug_row = UsersGroups(usrgrpid=group.usrgrpid, userid=self.userid)
+               return group
+
+       def remove_group(self, group):
+               ug_row = UsersGroups.get_by(usrgrpid=group.usrgrpid, userid=self.userid)
+               if ug_row is not None:
+                       ug_row.delete()
+                       #ug_row.flush()
+               return
+               
+class UsrGrp(ZabbixEntity):
+       using_options(
+               tablename='usrgrp',
+               autoload=True,
+               auto_primarykey='usrgrpid',
+       )
+       deepcopy= ['hostgroup_list']
+
+       user_list = ManyToMany(
+               'User',
+               table=users_groups,
+               foreign_keys=lambda: [users_groups.c.userid, users_groups.c.usrgrpid],
+               secondaryjoin=lambda: User.userid==users_groups.c.userid,
+               primaryjoin=lambda: UsrGrp.usrgrpid==users_groups.c.usrgrpid,
+       )
+
+       hostgroup_list = ManyToMany(
+               'HostGroup',
+               table=rights,
+               foreign_keys=lambda: [rights.c.groupid, rights.c.id],
+               primaryjoin=lambda: UsrGrp.usrgrpid==rights.c.groupid,
+               secondaryjoin=lambda: HostGroup.groupid==rights.c.id,
+       )
+
+       def delete(self):
+               rights_match = Right.query.filter_by(groupid=self.usrgrpid).all()
+               for row in rights_match:
+                       row.delete()
+
+               users_groups_match = UsersGroups.query.filter_by(usrgrpid=self.usrgrpid).all()
+               for row in users_groups_match:
+                       row.delete()
+
+               super(UsrGrp, self).delete()
+
+       def append_hostgroup(self, hg):
+               # NOTE: I know it looks wrong, but this is how the keys are mapped.
+               print "APPENDING HOSTGROUP %s!!!!!!!!!!" % hg.name
+               ug_row = Right(groupid=self.usrgrpid, id=hg.groupid, permission=3)
+               ug_row.save()
+               return
+
+       def append_user(self, user):
+               ug_row = UsersGroups(userid=user.userid, usrgrpid=self.usrgrpid)
+               ug_row.save()
+               return
+
+       def remove_user(self, user):
+               ug_row = UsersGroups.get_by(userid=user.userid, usrgrpid=self.usrgrpid)
+               if ug_row is not None:
+                       ug_row.delete()
+                       #ug_row.flush()
+               return
+
+setup_all()
+
+def get_zabbix_class_from_name(name):
+       em = get_zabbix_entitymap()
+       cls = None
+       if "_list" in name:
+               name=name[:-5]  # strip off the _list part.
+
+       for k in em.keys():
+               if name == k.lower():
+                       cls = em[k]
+       return cls
+       
+def get_zabbix_entitymap():
+       entity_map = {}
+       for n,c in zip([ u.__name__ for u in entities], entities): 
+               entity_map[n] = c
+       return entity_map
+
+# COMMON OBJECT TYPES
+class OperationConditionNotAck(object):
+       def __new__(cls):
+               o = OperationCondition(
+                               conditiontype=defines.CONDITION_TYPE_EVENT_ACKNOWLEDGED, 
+                               operator=defines.CONDITION_OPERATOR_EQUAL, 
+                               value=0 ) # NOT_ACK
+               return  o
+
+#import md5
+#u = User(alias="stephen.soltesz@gmail.com", name="stephen.soltesz@gmail.com", surname="", passwd=md5.md5("test").hexdigest(), url="", autologin=0, autologout=900, lang="en_gb", refresh=30, type=1, theme="default.css")
+#u.flush()
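
A rough round-trip sketch for the serialization layer above (assumes the Zabbix schema is reachable and that a "princeton_usergroup" row exists; exact field handling depends on the autoloaded columns):

    grp = UsrGrp.get_by(name="princeton_usergroup")
    xml = grp.xmlSerialize()            # "<UsrGrp><name>princeton_usergroup</name>...</UsrGrp>"
    same = UsrGrp.xmlDeserialize(xml)   # xml2dict() + dict2object(); find_or_create()s the matching row
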
diff --git a/monitor/database/zabbixapi/site.py b/monitor/database/zabbixapi/site.py
new file mode 100644 (file)
index 0000000..94f01e1
--- /dev/null
@@ -0,0 +1,280 @@
+# -*- coding: utf-8 -*-\r
+"""This module contains functions called from console script entry points."""\r
+\r
+from os import getcwd\r
+from os.path import dirname, exists, join\r
+import sys\r
+\r
+from model import *\r
+from emailZabbix import *\r
+from monitor import config\r
+\r
+import defines\r
+import md5\r
+\r
+HOSTGROUP_NAME="%s_hostgroup"\r
+USERGROUP_NAME="%s_usergroup"\r
+       \r
+DISCOVERY_RULE_NAME="discovery rule for %s"\r
+DISCOVERY_ACTION_NAME="Auto-discover %s action"\r
+ESCALATION_ACTION_NAME="Escalation Action for %s"\r
+\r
+def delete_site(loginbase):\r
+\r
+       # get host group, usrgrp\r
+       # get all users in usrgrp, delete each\r
+       usergroupname = USERGROUP_NAME % loginbase\r
+       hostgroupname = HOSTGROUP_NAME % loginbase\r
+       discovery_action_name = DISCOVERY_ACTION_NAME % loginbase\r
+       discovery_rule_name = DISCOVERY_RULE_NAME % loginbase\r
+       escalation_action_name = ESCALATION_ACTION_NAME % loginbase\r
+\r
+       ug = UsrGrp.get_by(name=usergroupname)\r
+       if ug:\r
+               for user in ug.user_list:\r
+                       # remove user from group, if a member of no other groups, \r
+                       # delete user.\r
+                       #user.delete()\r
+                       pass\r
+               ug.delete()\r
+\r
+       hg = HostGroup.get_by(name=hostgroupname)\r
+       if hg: \r
+               # figure out how to delete all the hosts...\r
+               # NOTE: hosts are listed in hg.host_list\r
+               for host in hg.host_list:\r
+                       host.delete()\r
+               hg.delete()\r
+\r
+       # delete dr\r
+       dr = DiscoveryRule.get_by(name=discovery_rule_name)\r
+       if dr: dr.delete()\r
+\r
+       da = Action.get_by(name=discovery_action_name)\r
+       if da: da.delete()\r
+\r
+       ea = Action.get_by(name=escalation_action_name)\r
+       if ea: ea.delete()\r
+\r
+       return\r
+\r
+\r
+def setup_global():\r
+       # GLOBAL:\r
+       #       update mediatype for email.\r
+       mediatype = MediaType.get_by(description="Email")\r
+       if not mediatype:\r
+               print "ERROR:  There is no defined media type for 'Email'"\r
+               raise Exception("No Email Media type in Zabbix db")\r
+\r
+       # NOTE: assumes smtp server is local to this machine.\r
+       mediatype.smtp_server='localhost'\r
+       mediatype.smtp_helo=".".join(config.MONITOR_HOSTNAME.split('.')[1:])\r
+       mediatype.smtp_email=config.from_email\r
+\r
+       # GLOBAL: \r
+       #       update email messages with local url references.\r
+       mailtxt.reformat({'hostname' : config.MONITOR_HOSTNAME, \r
+                                         'support_email' : config.support_email})\r
+\r
+       # pltemplate - via web, xml import\r
+       # TODO: os.system("curl --post default_templates.xml")\r
+\r
+\r
+def setup_site(loginbase, techemail, piemail, iplist):\r
+\r
+       # TODO: Initially adding this info is ok. what about updates to users,\r
+       # additional hosts, removed users from plc, \r
+       # TODO: send a message when host is discovered.\r
+       # TODO: update 'discovered' hosts with dns name.\r
+       # TODO: remove old nodes that are no longer in the plcdb.\r
+\r
+       BI_WEEKLY_ESC_PERIOD = int(60*60*24)\r
+       BI_WEEKLY_ESC_PERIOD = int(60) # testing...\r
+\r
+       # User Group\r
+       site_user_group = UsrGrp.find_or_create(name=USERGROUP_NAME % loginbase)\r
+       for user in set(techemail + piemail):\r
+               # USER\r
+               u = User.find_or_create(alias=user, type=1,\r
+                                                               set_if_new={'passwd' : md5.md5(user).hexdigest()},\r
+                                                               exec_if_new=lambda obj: \\r
+                                                               obj.media_list.append( Media(mediatypeid=1, sendto=user)))\r
+\r
+               if site_user_group not in u.usrgrp_list:\r
+                       u.append_group(site_user_group)\r
+\r
+       # HOST GROUP\r
+       plc_host_group = HostGroup.find_or_create(name="MyPLC Hosts")\r
+       site_host_group = HostGroup.find_or_create(name=HOSTGROUP_NAME % loginbase)\r
+       plctemplate = Host.get_by(host="Template_Linux_PLHost")\r
+       escalation_action_name = ESCALATION_ACTION_NAME % loginbase\r
+       discovery_action_name = DISCOVERY_ACTION_NAME % loginbase\r
+       discovery_rule_name = DISCOVERY_RULE_NAME % loginbase\r
+\r
+       # ADD hg to ug\r
+       if site_host_group not in site_user_group.hostgroup_list:\r
+               site_user_group.append_hostgroup(site_host_group)\r
+\r
+       # DISCOVERY RULE & CHECK\r
+       dr = DiscoveryRule.find_or_create(name=discovery_rule_name,\r
+                         iprange=iplist,\r
+                         delay=3600,\r
+                         proxy_hostid=0,\r
+                         exec_if_new=lambda obj: \\r
+                               obj.discoverycheck_list.append( DiscoveryCheck(type=9, \r
+                                                                               key_="system.uname", ports=10050) )\r
+                       )\r
+\r
+       # DISCOVERY ACTION for these servers\r
+       a = Action.find_or_create(name=discovery_action_name,\r
+                       eventsource=defines.EVENT_SOURCE_DISCOVERY,\r
+                       status=defines.DRULE_STATUS_ACTIVE,\r
+                       evaltype=defines.ACTION_EVAL_TYPE_AND_OR)\r
+       if len(a.actioncondition_list) == 0:\r
+               a.actioncondition_list=[\r
+                                       # Host IP Matches\r
+                                       ActionCondition(\r
+                                               conditiontype=defines.CONDITION_TYPE_DHOST_IP,\r
+                                               operator=defines.CONDITION_OPERATOR_EQUAL,\r
+                                               value=iplist),\r
+                                       # AND, Service type is Zabbix agent\r
+                                       ActionCondition(\r
+                                               conditiontype=defines.CONDITION_TYPE_DSERVICE_TYPE,\r
+                                               operator=defines.CONDITION_OPERATOR_EQUAL,\r
+                                               value=defines.SVC_AGENT),\r
+                                       # AND, Received system.uname value like 'Linux'\r
+                                       ActionCondition(\r
+                                               conditiontype=defines.CONDITION_TYPE_DVALUE,\r
+                                               operator=defines.CONDITION_OPERATOR_LIKE,\r
+                                               value="Linux"),\r
+                                       # AND, Discovery status is Discover\r
+                                       ActionCondition(\r
+                                               conditiontype=defines.CONDITION_TYPE_DSTATUS,\r
+                                               operator=defines.CONDITION_OPERATOR_EQUAL,\r
+                                               value=defines.DOBJECT_STATUS_DISCOVER),\r
+                               ]\r
+                               # THEN\r
+               a.actionoperation_list=[\r
+                                       # Add Host\r
+                                       ActionOperation(\r
+                                               operationtype=defines.OPERATION_TYPE_HOST_ADD,\r
+                                               object=0, objectid=0,\r
+                                               esc_period=0, esc_step_from=1, esc_step_to=1),\r
+                                       # Add To Group PLC Hosts\r
+                                       ActionOperation(\r
+                                               operationtype=defines.OPERATION_TYPE_GROUP_ADD,\r
+                                               object=0, objectid=plc_host_group.groupid,\r
+                                               esc_period=0, esc_step_from=1, esc_step_to=1),\r
+                                       # Add To Group LoginbaseSiteGroup\r
+                                       ActionOperation(\r
+                                               operationtype=defines.OPERATION_TYPE_GROUP_ADD,\r
+                                               object=0, objectid=site_host_group.groupid,\r
+                                               esc_period=0, esc_step_from=1, esc_step_to=1),\r
+                                       # Link to Template 'Template_Linux_Minimal'\r
+                                       ActionOperation(\r
+                                               operationtype=defines.OPERATION_TYPE_TEMPLATE_ADD,\r
+                                               object=0, objectid=plctemplate.hostid,\r
+                                               esc_period=0, esc_step_from=1, esc_step_to=1),\r
+                               ]\r
+       else:\r
+               # TODO: verify iplist is up-to-date\r
+               pass\r
+\r
+       # ESCALATION ACTION for these servers\r
+       ea = Action.find_or_create(name=escalation_action_name,\r
+                       eventsource=defines.EVENT_SOURCE_TRIGGERS,\r
+                       status=defines.ACTION_STATUS_ENABLED,\r
+                       evaltype=defines.ACTION_EVAL_TYPE_AND_OR,\r
+                       esc_period=BI_WEEKLY_ESC_PERIOD,        # escalation step period (one day; 60s while testing, see above)\r
+                       recovery_msg=1,\r
+                       set_if_new={\r
+                               'r_shortdata':"Thank you for maintaining {HOSTNAME}!",\r
+                               'r_longdata': mailtxt.thankyou_nodeup, }\r
+                       )\r
+       if len(ea.actioncondition_list) == 0:\r
+                       # THEN this is a new entry\r
+               print "SETTING UP ESCALATION ACTION"\r
+               ea.actioncondition_list=[\r
+                               ActionCondition(conditiontype=defines.CONDITION_TYPE_TRIGGER_VALUE, \r
+                                                               operator=defines.CONDITION_OPERATOR_EQUAL, \r
+                                                               value=defines.TRIGGER_VALUE_TRUE),\r
+                               ActionCondition(conditiontype=defines.CONDITION_TYPE_TRIGGER_NAME, \r
+                                                               operator=defines.CONDITION_OPERATOR_LIKE, \r
+                                                               value="is unreachable"),\r
+                               ActionCondition(conditiontype=defines.CONDITION_TYPE_HOST_GROUP, \r
+                                                               operator=defines.CONDITION_OPERATOR_EQUAL, \r
+                                                               value=site_host_group.groupid),\r
+                       ]\r
+               ea.actionoperation_list=[\r
+                               # STAGE 1\r
+                               ActionOperation(operationtype=defines.OPERATION_TYPE_MESSAGE,\r
+                                       shortdata=mailtxt.nodedown_one_subject,\r
+                                       longdata=mailtxt.nodedown_one,\r
+                                       object=defines.OPERATION_OBJECT_GROUP, \r
+                                       objectid=site_user_group.usrgrpid, \r
+                                       esc_period=0, esc_step_to=3, esc_step_from=3, \r
+                                       operationcondition_list=[ OperationConditionNotAck() ] ),\r
+                               ActionOperation(operationtype=defines.OPERATION_TYPE_MESSAGE,\r
+                                       shortdata=mailtxt.nodedown_one_subject,\r
+                                       longdata=mailtxt.nodedown_one,\r
+                                       object=defines.OPERATION_OBJECT_GROUP, \r
+                                       objectid=site_user_group.usrgrpid, \r
+                                       esc_period=0, esc_step_to=7, esc_step_from=7, \r
+                                       operationcondition_list=[ OperationConditionNotAck() ] ),\r
+                               # STAGE 2\r
+                               ActionOperation(operationtype=defines.OPERATION_TYPE_COMMAND, \r
+                                       esc_step_from=10, esc_step_to=10, \r
+                                       esc_period=0,\r
+                                       shortdata="",\r
+                                       longdata="zabbixserver:/usr/share/monitor-server/checkslices.py {HOSTNAME} disablesite", \r
+                                       operationcondition_list=[ OperationConditionNotAck() ]),\r
+                               ActionOperation(operationtype=defines.OPERATION_TYPE_MESSAGE, \r
+                                       shortdata=mailtxt.nodedown_two_subject,\r
+                                       longdata=mailtxt.nodedown_two,\r
+                                       esc_step_from=10, esc_step_to=10, \r
+                                       esc_period=0, \r
+                                       object=defines.OPERATION_OBJECT_GROUP, \r
+                                       objectid=site_user_group.usrgrpid, \r
+                                       operationcondition_list=[ OperationConditionNotAck() ] ), \r
+                               ActionOperation(operationtype=defines.OPERATION_TYPE_MESSAGE, \r
+                                       shortdata=mailtxt.nodedown_two_subject,\r
+                                       longdata=mailtxt.nodedown_two,\r
+                                       esc_step_from=14, esc_step_to=14, \r
+                                       esc_period=0, \r
+                                       object=defines.OPERATION_OBJECT_GROUP, \r
+                                       objectid=site_user_group.usrgrpid, \r
+                                       operationcondition_list=[ OperationConditionNotAck() ] ), \r
+\r
+                               # STAGE 3\r
+                               ActionOperation(operationtype=defines.OPERATION_TYPE_COMMAND, \r
+                                       esc_step_from=17, esc_step_to=17, \r
+                                       esc_period=0, \r
+                                       shortdata="",\r
+                                       longdata="zabbixserver:/usr/share/monitor-server/checkslices.py {HOSTNAME} disableslices", \r
+                                       # TODO: send notice to users of slices\r
+                                       operationcondition_list=[ OperationConditionNotAck() ]),\r
+                               ActionOperation(operationtype=defines.OPERATION_TYPE_MESSAGE, \r
+                                       shortdata=mailtxt.nodedown_three_subject,\r
+                                       longdata=mailtxt.nodedown_three,\r
+                                       esc_step_from=17, esc_step_to=17, \r
+                                       esc_period=0, \r
+                                       object=defines.OPERATION_OBJECT_GROUP, \r
+                                       objectid=site_user_group.usrgrpid, \r
+                                       operationcondition_list=[ OperationConditionNotAck() ] ), \r
+                               # STAGE 4++\r
+                               ActionOperation(operationtype=defines.OPERATION_TYPE_COMMAND, \r
+                                       esc_step_from=21, esc_step_to=0, \r
+                                       esc_period=int(BI_WEEKLY_ESC_PERIOD*3.5),\r
+                                       shortdata="",\r
+                                       longdata="zabbixserver:/usr/share/monitor-server/checkslices.py {HOSTNAME} forever", \r
+                                       operationcondition_list=[ OperationConditionNotAck() ]),\r
+                               ActionOperation(operationtype=defines.OPERATION_TYPE_MESSAGE, \r
+                                       shortdata=mailtxt.nodedown_four_subject,\r
+                                       longdata=mailtxt.nodedown_four,\r
+                                       esc_step_from=21, esc_step_to=0, \r
+                                       esc_period=int(BI_WEEKLY_ESC_PERIOD*3.5),\r
+                                       object=defines.OPERATION_OBJECT_GROUP, \r
+                                       objectid=site_user_group.usrgrpid, \r
+                                       operationcondition_list=[ OperationConditionNotAck() ] ), \r
+                       ]\r
index 8d166b8..421e673 100644 (file)
--- a/setup.py
+++ b/setup.py
@@ -3,8 +3,9 @@
 from distutils.core import setup
 #from setuptools import setup, find_packages
 
-packages=['monitor', 'monitor.database', 'monitor.pcu',
-               'monitor.sources', 'monitor.util', 'monitor.wrapper' ]
+packages=['monitor', 'monitor.database', 'monitor.database.zabbixapi', 
+               'monitor.database.infovacuum', 'monitor.database.infovacuum.model', 'monitor.pcu',
+               'monitor.sources', 'monitor.util', 'monitor.wrapper' ]
 
 #packages = find_packages(exclude=('Rpyc', 'www', 'ssh', 'pyssh',
 #'Rpyc.Demo', 'Rpyc.Servers', 'www.HyperText'))
diff --git a/zabbix/zabbixsite.py b/zabbix/zabbixsite.py
new file mode 100755 (executable)
index 0000000..9c0267f
--- /dev/null
@@ -0,0 +1,320 @@
+#!/usr/bin/python
+
+from os import getcwd
+from os.path import dirname, exists, join
+import sys
+import md5
+
+from monitor import config
+from monitor.database.dborm import * 
+from monitor.database.zabbixapi.emailZabbix import *
+from monitor.database.zabbixapi import defines
+
+
+
+HOSTGROUP_NAME="%s_hostgroup"
+USERGROUP_NAME="%s_usergroup"
+       
+DISCOVERY_RULE_NAME="discovery rule for %s"
+DISCOVERY_ACTION_NAME="Auto-discover %s action"
+ESCALATION_ACTION_NAME="Escalation Action for %s"
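+# For a hypothetical loginbase such as 'examplesite', these templates expand to
+# 'examplesite_hostgroup', 'examplesite_usergroup', and so on.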
+
+def delete_site(loginbase):
+
+       # get host group, usrgrp
+       # get all users in usrgrp, delete each
+       usergroupname = USERGROUP_NAME % loginbase
+       hostgroupname = HOSTGROUP_NAME % loginbase
+       discovery_action_name = DISCOVERY_ACTION_NAME % loginbase
+       discovery_rule_name = DISCOVERY_RULE_NAME % loginbase
+       escalation_action_name = ESCALATION_ACTION_NAME % loginbase
+
+       ug = UsrGrp.get_by(name=usergroupname)
+       if ug:
+               for user in ug.user_list:
+                       # remove user from group, if a member of no other groups, 
+                       # delete user.
+                       #user.delete()
+                       pass
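+                       # One possible shape for this cleanup (relationship API assumed):
+                       #   user.usrgrp_list.remove(ug)
+                       #   if len(user.usrgrp_list) == 0: user.delete()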
+               ug.delete()
+
+       hg = HostGroup.get_by(name=hostgroupname)
+       if hg: 
+               # figure out how to delete all the hosts...
+               # NOTE: hosts are listed in hg.host_list
+               for host in hg.host_list:
+                       host.delete()
+               hg.delete()
+
+       # delete dr
+       dr = DiscoveryRule.get_by(name=discovery_rule_name)
+       if dr: dr.delete()
+
+       da = Action.get_by(name=discovery_action_name)
+       if da: da.delete()
+
+       ea = Action.get_by(name=escalation_action_name)
+       if ea: ea.delete()
+
+       return
+
+
+def setup_global():
+       # GLOBAL:
+       #       update mediatype for email.
+       ############################### MAIL
+       print "checking for MediaType Email"
+       mediatype = MediaType.get_by(description="Email")
+       if not mediatype:
+               print "ERROR:  There is no defined media type for 'Email'"
+               raise Exception("No Email Media type in Zabbix db")
+
+       print "checking for correct configuration"
+       # update the Email media type fetched above if it is not configured to send from this server
+       if mediatype.smtp_email != config.from_email:
+               # NOTE: assumes smtp server is local to this machine.
+               print "updating email server configuration"
+               mediatype.smtp_server='localhost'
+               mediatype.smtp_helo=".".join(config.MONITOR_HOSTNAME.split('.')[1:])
+               mediatype.smtp_email=config.from_email
+
+       ############################# EMAIL
+       mailtxt.reformat({'hostname' : config.MONITOR_HOSTNAME, 
+                                         'support_email' : config.support_email})
+
+       ############################### CENTRAL SERVER
+       print "checking zabbix server host info"
+       zabbixserver = Host.get_by(host="ZABBIX Server")
+       if zabbixserver:
+               print "UPDATING Primary Zabbix server entry"
+               zabbixserver.host="MyPLC Server"
+               zabbixserver.ip=config.MONITOR_IP
+               zabbixserver.dns=config.MONITOR_HOSTNAME
+               zabbixserver.useip=1
+
+       ############################ DEFAULT TEMPLATES
+       # pltemplate - via web, xml import
+       # TODO: os.system("curl --post default_templates.xml")
+
+       ##################### SCRIPTS 
+       ## TODO: add calls to check/reset the boot states.
+       print "checking scripts"
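+       # NOTE: find_or_create comes from the zabbixapi model; it appears to look up
+       # an existing row matching the given keyword filters and to apply the
+       # 'set_if_new' values (and run 'exec_if_new') only when it creates the record.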
+       script1 = Script.find_or_create(name="RebootNode",
+                                                                       set_if_new = {
+                                                                               'command':"/usr/share/monitor-server/reboot.py {HOST.CONN}",
+                                                                               'host_access':3 # read/write
+                                                                       })
+       script2 = Script.find_or_create(name="NMap",
+                                                       set_if_new = {
+                                                               'command':"/usr/bin/nmap -A {HOST.CONN}",
+                                                               'host_access':2 # read-only
+                                               })
+       return
+
+def setup_site(loginbase, techemail, piemail, iplist):
+
+       # TODO: Initially adding this info is ok. what about updates to users,
+       # additional hosts, removed users from plc, 
+       # TODO: send a message when host is discovered.
+       # TODO: update 'discovered' hosts with dns name.
+       # TODO: remove old nodes that are no longer in the plcdb.
+
+       BI_WEEKLY_ESC_PERIOD = int(60*60*24)
+       BI_WEEKLY_ESC_PERIOD = int(60) # testing...
+
+       # User Group
+       site_user_group = UsrGrp.find_or_create(name="%s_usergroup" % loginbase)
+       for user in set(techemail + piemail):
+               # USER
+               u = User.find_or_create(alias=user, type=1,
+                                                               set_if_new={'passwd' : md5.md5(user).hexdigest()},
+                                                               exec_if_new=lambda obj: \
+                                                               obj.media_list.append( Media(mediatypeid=1, sendto=user)))
+
+               if site_user_group not in u.usrgrp_list:
+                       u.append_group(site_user_group)
+
+       # HOST GROUP
+       plc_host_group = HostGroup.find_or_create(name="MyPLC Hosts")
+       site_host_group = HostGroup.find_or_create(name="%s_hostgroup" % loginbase)
+       plctemplate = Host.get_by(host="Template_Linux_PLHost")
+       escalation_action_name = ESCALATION_ACTION_NAME % loginbase
+       discovery_action_name = DISCOVERY_ACTION_NAME % loginbase
+       discovery_rule_name = DISCOVERY_RULE_NAME % loginbase
+
+       # ADD hg to ug
+       if site_host_group not in site_user_group.hostgroup_list:
+               site_user_group.append_hostgroup(site_host_group)
+
+       # DISCOVERY RULE & CHECK
+       dr = DiscoveryRule.find_or_create(name=discovery_rule_name,
+                         delay=3600,
+                         proxy_hostid=0,
+                         set_if_new = {'iprange':iplist},
+                         exec_if_new=lambda obj: \
+                               obj.discoverycheck_list.append( DiscoveryCheck(type=9, 
+                                                                               key_="system.uname", ports=10050) )
+                       )
+       if dr.iprange != iplist:
+               if len(iplist) < 255:
+                       dr.iprange = iplist
+               else:
+                       raise Exception("iplist is too long (it must be under 255 characters)")
+               
+
+       # DISCOVERY ACTION for these servers
+       a = Action.find_or_create(name=discovery_action_name,
+                       eventsource=defines.EVENT_SOURCE_DISCOVERY,
+                       status=defines.DRULE_STATUS_ACTIVE,
+                       evaltype=defines.ACTION_EVAL_TYPE_AND_OR)
+       if len(a.actioncondition_list) == 0:
+               a.actioncondition_list=[
+                                       # Host IP Matches
+                                       ActionCondition(
+                                               conditiontype=defines.CONDITION_TYPE_DHOST_IP,
+                                               operator=defines.CONDITION_OPERATOR_EQUAL,
+                                               value=iplist),
+                                       # AND, Service type is Zabbix agent
+                                       ActionCondition(
+                                               conditiontype=defines.CONDITION_TYPE_DSERVICE_TYPE,
+                                               operator=defines.CONDITION_OPERATOR_EQUAL,
+                                               value=defines.SVC_AGENT),
+                                       # AND, Received system.uname value like 'Linux'
+                                       ActionCondition(
+                                               conditiontype=defines.CONDITION_TYPE_DVALUE,
+                                               operator=defines.CONDITION_OPERATOR_LIKE,
+                                               value="Linux"),
+                                       # AND, Discovery status is Discover
+                                       ActionCondition(
+                                               conditiontype=defines.CONDITION_TYPE_DSTATUS,
+                                               operator=defines.CONDITION_OPERATOR_EQUAL,
+                                               value=defines.DOBJECT_STATUS_DISCOVER),
+                               ]
+               # THEN
+               a.actionoperation_list=[
+                                       # Add Host
+                                       ActionOperation(
+                                               operationtype=defines.OPERATION_TYPE_HOST_ADD,
+                                               object=0, objectid=0,
+                                               esc_period=0, esc_step_from=1, esc_step_to=1),
+                                       # Add To Group PLC Hosts
+                                       ActionOperation(
+                                               operationtype=defines.OPERATION_TYPE_GROUP_ADD,
+                                               object=0, objectid=plc_host_group.groupid,
+                                               esc_period=0, esc_step_from=1, esc_step_to=1),
+                                       # Add To Group LoginbaseSiteGroup
+                                       ActionOperation(
+                                               operationtype=defines.OPERATION_TYPE_GROUP_ADD,
+                                               object=0, objectid=site_host_group.groupid,
+                                               esc_period=0, esc_step_from=1, esc_step_to=1),
+                                       # Link to Template 'Template_Linux_PLHost'
+                                       ActionOperation(
+                                               operationtype=defines.OPERATION_TYPE_TEMPLATE_ADD,
+                                               object=0, objectid=plctemplate.hostid,
+                                               esc_period=0, esc_step_from=1, esc_step_to=1),
+                               ]
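+               # Taken together, these operations register the newly discovered host,
+               # add it to the global "MyPLC Hosts" group and to this site's host group,
+               # and link it to the Template_Linux_PLHost template.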
+       else:
+               # TODO: verify iplist is up-to-date
+               pass
+
+       # ESCALATION ACTION for these servers
+       ea = Action.find_or_create(name=escalation_action_name,
+                       eventsource=defines.EVENT_SOURCE_TRIGGERS,
+                       status=defines.ACTION_STATUS_ENABLED,
+                       evaltype=defines.ACTION_EVAL_TYPE_AND_OR,
+                       esc_period=BI_WEEKLY_ESC_PERIOD,        # three days
+                       recovery_msg=1,
+                       set_if_new={
+                               'r_shortdata':"Thank you for maintaining {HOSTNAME}!",
+                               'r_longdata': mailtxt.thankyou_nodeup, }
+                       )
+       if len(ea.actioncondition_list) == 0:
+                       # THEN this is a new entry
+               print "SETTING UP ESCALATION ACTION"
+               ea.actioncondition_list=[
+                               ActionCondition(conditiontype=defines.CONDITION_TYPE_TRIGGER_VALUE, 
+                                                               operator=defines.CONDITION_OPERATOR_EQUAL, 
+                                                               value=defines.TRIGGER_VALUE_TRUE),
+                               ActionCondition(conditiontype=defines.CONDITION_TYPE_TRIGGER_NAME, 
+                                                               operator=defines.CONDITION_OPERATOR_LIKE, 
+                                                               value="is unreachable"),
+                               ActionCondition(conditiontype=defines.CONDITION_TYPE_HOST_GROUP, 
+                                                               operator=defines.CONDITION_OPERATOR_EQUAL, 
+                                                               value=site_host_group.groupid),
+                       ]
+               ea.actionoperation_list=[
+                               # STAGE 1
+                               ActionOperation(operationtype=defines.OPERATION_TYPE_MESSAGE,
+                                       shortdata=mailtxt.nodedown_one_subject,
+                                       longdata=mailtxt.nodedown_one,
+                                       object=defines.OPERATION_OBJECT_GROUP, 
+                                       objectid=site_user_group.usrgrpid, 
+                                       esc_period=0, esc_step_to=3, esc_step_from=3, 
+                                       operationcondition_list=[ OperationConditionNotAck() ] ),
+                               ActionOperation(operationtype=defines.OPERATION_TYPE_MESSAGE,
+                                       shortdata=mailtxt.nodedown_one_subject,
+                                       longdata=mailtxt.nodedown_one,
+                                       object=defines.OPERATION_OBJECT_GROUP, 
+                                       objectid=site_user_group.usrgrpid, 
+                                       esc_period=0, esc_step_to=7, esc_step_from=7, 
+                                       operationcondition_list=[ OperationConditionNotAck() ] ),
+                               # STAGE 2
+                               ActionOperation(operationtype=defines.OPERATION_TYPE_COMMAND, 
+                                       esc_step_from=10, esc_step_to=10, 
+                                       esc_period=0,
+                                       shortdata="",
+                                       longdata="zabbixserver:/usr/share/monitor-server/checkslices.py {HOSTNAME} disablesite", 
+                                       operationcondition_list=[ OperationConditionNotAck() ]),
+                               ActionOperation(operationtype=defines.OPERATION_TYPE_MESSAGE, 
+                                       shortdata=mailtxt.nodedown_two_subject,
+                                       longdata=mailtxt.nodedown_two,
+                                       esc_step_from=10, esc_step_to=10, 
+                                       esc_period=0, 
+                                       object=defines.OPERATION_OBJECT_GROUP, 
+                                       objectid=site_user_group.usrgrpid, 
+                                       operationcondition_list=[ OperationConditionNotAck() ] ), 
+                               ActionOperation(operationtype=defines.OPERATION_TYPE_MESSAGE, 
+                                       shortdata=mailtxt.nodedown_two_subject,
+                                       longdata=mailtxt.nodedown_two,
+                                       esc_step_from=14, esc_step_to=14, 
+                                       esc_period=0, 
+                                       object=defines.OPERATION_OBJECT_GROUP, 
+                                       objectid=site_user_group.usrgrpid, 
+                                       operationcondition_list=[ OperationConditionNotAck() ] ), 
+
+                               # STAGE 3
+                               ActionOperation(operationtype=defines.OPERATION_TYPE_COMMAND, 
+                                       esc_step_from=17, esc_step_to=17, 
+                                       esc_period=0, 
+                                       shortdata="",
+                                       longdata="zabbixserver:/usr/share/monitor-server/checkslices.py {HOSTNAME} disableslices", 
+                                       # TODO: send notice to users of slices
+                                       operationcondition_list=[ OperationConditionNotAck() ]),
+                               ActionOperation(operationtype=defines.OPERATION_TYPE_MESSAGE, 
+                                       shortdata=mailtxt.nodedown_three_subject,
+                                       longdata=mailtxt.nodedown_three,
+                                       esc_step_from=17, esc_step_to=17, 
+                                       esc_period=0, 
+                                       object=defines.OPERATION_OBJECT_GROUP, 
+                                       objectid=site_user_group.usrgrpid, 
+                                       operationcondition_list=[ OperationConditionNotAck() ] ), 
+                               # STAGE 4++
+                               ActionOperation(operationtype=defines.OPERATION_TYPE_COMMAND, 
+                                       esc_step_from=21, esc_step_to=0, 
+                                       esc_period=int(BI_WEEKLY_ESC_PERIOD*3.5),
+                                       shortdata="",
+                                       longdata="zabbixserver:/usr/share/monitor-server/checkslices.py {HOSTNAME} forever", 
+                                       operationcondition_list=[ OperationConditionNotAck() ]),
+                               ActionOperation(operationtype=defines.OPERATION_TYPE_MESSAGE, 
+                                       shortdata=mailtxt.nodedown_four_subject,
+                                       longdata=mailtxt.nodedown_four,
+                                       esc_step_from=21, esc_step_to=0, 
+                                       esc_period=int(BI_WEEKLY_ESC_PERIOD*3.5),
+                                       object=defines.OPERATION_OBJECT_GROUP, 
+                                       objectid=site_user_group.usrgrpid, 
+                                       operationcondition_list=[ OperationConditionNotAck() ] ), 
+                       ]
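+               # NOTE: with the non-testing esc_period above (one step per day), this
+               # schedule roughly amounts to: reminder messages around steps 3 and 7,
+               # the disablesite/disableslices commands and matching notices around
+               # steps 10-17, and the open-ended "forever" stage from step 21 onward;
+               # each operation is skipped once the event has been acknowledged.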
+
+if __name__ == "__main__":
+       setup_global()
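+       # the shared session is configured with autoflush disabled, so changes are
+       # not written out until flush() is called explicitly.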
+       session.flush()
diff --git a/zabbix/zabbixsync.py b/zabbix/zabbixsync.py
new file mode 100755 (executable)
index 0000000..8c078e7
--- /dev/null
@@ -0,0 +1,75 @@
+#!/usr/bin/python
+
+import sys
+import site
+from monitor.wrapper import plc
+from monitor import database
+
+import zabbixsite
+from monitor.database.dborm import session
+
+print "test"
+
+plcdb = database.dbLoad("l_plcsites")
+netid2ip = database.dbLoad("plcdb_netid2ip")
+lb2hn = database.dbLoad("plcdb_lb2hn")
+
+def get_site_iplist(loginbase):
+       node_list = lb2hn[loginbase]
+
+       # TODO: ip_list string cannot be longer than 255 characters.
+       # TODO: if it is, then we need to break up the discovery rule.
+       ip_list = ""
+       for node in node_list:
+               ip = netid2ip[node['nodenetwork_ids'][0]]
+               if len(ip_list) > 0: ip_list += ","
+               ip_list += ip
+
+       return ip_list
+       
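+# Rough sketch (not wired in anywhere yet) of one way to honor the 255 character
+# limit noted above: split a long comma-separated ip_list into several shorter
+# chunks, each of which could feed its own discovery rule.
+def split_iplist(ip_list, maxlen=255):
+       chunks = []
+       current = ""
+       for ip in ip_list.split(","):
+               if len(current) == 0:
+                       current = ip
+               elif len(current) + 1 + len(ip) > maxlen:
+                       chunks.append(current)
+                       current = ip
+               else:
+                       current = current + "," + ip
+       if len(current) > 0:
+               chunks.append(current)
+       return chunks
+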
+def add_loginbase(loginbase):
+       
+       techs = plc.getTechEmails(loginbase)
+       pis = plc.getPIEmails(loginbase)
+       iplist = get_site_iplist(loginbase)
+
+       print "zabbixsite.setup_site('%s', %s, %s, '%s')" % (loginbase,techs, pis, iplist)
+       zabbixsite.setup_site(loginbase, techs, pis, iplist)
+
+if __name__=="__main__":
+       #sites = api.GetSites({'peer_id' : None}, ['login_base'])
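+       # NOTE: the hard-coded loginbases below look like a stand-in for the
+       # GetSites() call commented out above.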
+       for loginbase in ['princeton', 'princetondsl', 'monitorsite']:
+               add_loginbase(loginbase)
+
+       session.flush()
+
+## Scripts : includes external scripts:
+#                - reboot.py
+#                - nmap
+
+## UserGroups
+# define technical contact, principal investigator groups
+# define a Group for every site
+
+## Users
+# define a User for every user with admin/tech/pi roles
+#              get passwords from a combination of site&name, perhaps?
+#              I'm guessing we could use the grpid or userid as part of the passwd,
+#              so that it varies per user but can still be derived from the templates
+# add user to groups
+
+## Discovery Rules and Actions
+# define a discovery rule for every site's hosts.
+# define discovery action for online hosts.
+
+## Messages & Escalations
+# define actions and escalations for trigger sources:
+#              - unreachable host
+
+## HostGroups
+# add host group for every site
+# add host group for global network (PLC name)
+
+## Hosts & Templates
+# no need to define hosts?
+# add template?  It appears that the xml-based template system is sufficient.