-# Copyright (c) 2009, 2010, 2011 Nicira Networks
+# Copyright (c) 2009, 2010, 2011, 2012 Nicira Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
purpose of the return value of Idl.run() and Idl.change_seqno. This is
useful for columns that the IDL's client will write but not read.
+ As a convenience to users, 'schema' may also be an instance of the
+ SchemaHelper class.
+
The IDL uses and modifies 'schema' directly."""
+ assert isinstance(schema, SchemaHelper)
+ schema = schema.get_idl_schema()
+
self.tables = schema.tables
self._db = schema
self._session = ovs.jsonrpc.Session.open(remote)
def __txn_abort_all(self):
while self._outstanding_txns:
txn = self._outstanding_txns.popitem()[1]
- txn._status = Transaction.AGAIN_WAIT
+ txn._status = Transaction.TRY_AGAIN
def __txn_process_reply(self, msg):
txn = self._outstanding_txns.pop(msg.id, None)
if 'column_name' changed in this row (or if this row was deleted)
between the time that the IDL originally read its contents and the time
that the transaction commits, then the transaction aborts and
- Transaction.commit() returns Transaction.AGAIN_WAIT or
- Transaction.AGAIN_NOW (depending on whether the database change has
- already been received).
+ Transaction.commit() returns Transaction.TRY_AGAIN.
The intention is that, to ensure that no transaction commits based on
dirty reads, an application should call Row.verify() on each data item
self.__dict__["_changes"] = None
del self._table.rows[self.uuid]
+    def increment(self, column_name):
+        """Causes the transaction, when committed, to increment the value of
+        'column_name' within this row by 1. 'column_name' must have an integer
+        type. After the transaction commits successfully, the client may
+        retrieve the final (incremented) value of 'column_name' with
+        Transaction.get_increment_new_value().
+
+        The client could accomplish something similar by reading and writing
+        and verify()ing columns. However, increment() will never (by itself)
+        cause a transaction to fail because of a verify error.
+
+        The intended use is for incrementing the "next_cfg" column in
+        the Open_vSwitch table."""
+        # Hand off to the enclosing transaction, which records the pending
+        # increment (at most one per transaction; see Transaction._increment).
+        self._idl.txn._increment(self, column_name)
+
def _uuid_name_from_uuid(uuid):
+    # Derive a "uuid-name" identifier from 'uuid'.  Hyphens are replaced
+    # with underscores, presumably because "-" is not valid in an OVSDB
+    # <id>-style name -- TODO confirm against the OVSDB protocol spec.
    return "row%s" % str(uuid).replace("-", "_")
INCOMPLETE = "incomplete" # Commit in progress, please wait.
ABORTED = "aborted" # ovsdb_idl_txn_abort() called.
SUCCESS = "success" # Commit successful.
- AGAIN_WAIT = "wait then try again"
- # Commit failed because a "verify" operation
+ TRY_AGAIN = "try again" # Commit failed because a "verify" operation
# reported an inconsistency, due to a network
# problem, or other transient failure. Wait
# for a change, then try again.
- AGAIN_NOW = "try again now" # Same as AGAIN_WAIT but try again right away.
NOT_LOCKED = "not locked" # Server hasn't given us the lock yet.
ERROR = "error" # Commit failed due to a hard error.
self._comments = []
self._commit_seqno = self.idl.change_seqno
- self._inc_table = None
+ self._inc_row = None
self._inc_column = None
- self._inc_where = None
self._inserted_rows = {} # Map from UUID to _InsertedRow
relatively human-readable form.)"""
self._comments.append(comment)
- def increment(self, table, column, where):
- assert not self._inc_table
- self._inc_table = table
- self._inc_column = column
- self._inc_where = where
-
def wait(self, poller):
if self._status not in (Transaction.UNCOMMITTED,
Transaction.INCOMPLETE):
operations.append(op)
# Add increment.
- if self._inc_table and any_updates:
+ if self._inc_row and any_updates:
self._inc_index = len(operations) - 1
operations.append({"op": "mutate",
- "table": self._inc_table,
+ "table": self._inc_row._table.name,
"where": self._substitute_uuids(
- self._inc_where),
+ _where_uuid_equals(self._inc_row.uuid)),
"mutations": [[self._inc_column, "+=", 1]]})
operations.append({"op": "select",
- "table": self._inc_table,
+ "table": self._inc_row._table.name,
"where": self._substitute_uuids(
- self._inc_where),
+ _where_uuid_equals(self._inc_row.uuid)),
"columns": [self._inc_column]})
# Add comment.
self.idl._outstanding_txns[self._request_id] = self
self._status = Transaction.INCOMPLETE
else:
- self._status = Transaction.AGAIN_WAIT
+ self._status = Transaction.TRY_AGAIN
self.__disassemble()
return self._status
return inserted_row.real
return None
+    def _increment(self, row, column):
+        # Records a pending increment of 'column' within 'row', later turned
+        # into "mutate" and "select" operations at commit time.  Only one
+        # increment per transaction is supported (hence the assertion).
+        assert not self._inc_row
+        self._inc_row = row
+        self._inc_column = column
+
def _write(self, row, column, datum):
assert row._changes is not None
vlog.warn("operation reply is not JSON null or object")
if not soft_errors and not hard_errors and not lock_errors:
- if self._inc_table and not self.__process_inc_reply(ops):
+ if self._inc_row and not self.__process_inc_reply(ops):
hard_errors = True
for insert in self._inserted_rows.itervalues():
elif lock_errors:
self._status = Transaction.NOT_LOCKED
elif soft_errors:
- if self._commit_seqno == self.idl.change_seqno:
- self._status = Transaction.AGAIN_WAIT
- else:
- self._status = Transaction.AGAIN_NOW
+ self._status = Transaction.TRY_AGAIN
else:
self._status = Transaction.SUCCESS
insert.real = uuid_
return True
+
+
+class SchemaHelper(object):
+    """IDL Schema helper.
+
+    This class encapsulates the logic required to generate schemas suitable
+    for creating 'ovs.db.idl.Idl' objects. Clients should register columns
+    they are interested in using register_columns(). When finished, the
+    get_idl_schema() function may be called.
+
+    The location on disk of the schema used may be found in the
+    'schema_location' variable."""
+
+    def __init__(self, location=None):
+        """Creates a new SchemaHelper object.
+
+        'location' is the path of the schema file to read.  If None, the
+        vswitch schema installed under ovs.dirs.PKGDATADIR is used."""
+
+        if location is None:
+            location = "%s/vswitch.ovsschema" % ovs.dirs.PKGDATADIR
+
+        self.schema_location = location
+        self._tables = {}   # Map from table name to set of column names.
+        self._all = False   # True once register_all() has been called.
+
+    def register_columns(self, table, columns):
+        """Registers interest in the given 'columns' of 'table'. Future calls
+        to get_idl_schema() will include 'table':column for each column in
+        'columns'. This function automatically avoids adding duplicate entries
+        to the schema.
+
+        'table' must be a string.
+        'columns' must be a list of strings.
+        """
+
+        assert type(table) is str
+        assert type(columns) is list
+
+        # Union with any columns previously registered for the same table,
+        # which is what makes duplicate registrations harmless.
+        columns = set(columns) | self._tables.get(table, set())
+        self._tables[table] = columns
+
+    def register_all(self):
+        """Registers interest in every column of every table."""
+        # get_idl_schema() skips all pruning when this flag is set.
+        self._all = True
+
+    def get_idl_schema(self):
+        """Gets a schema appropriate for the creation of an 'ovs.db.idl.Idl'
+        object based on columns registered using the register_columns()
+        function."""
+
+        schema = ovs.db.schema.DbSchema.from_json(
+            ovs.json.from_file(self.schema_location))
+
+        if not self._all:
+            # Prune the freshly parsed schema down to the registered tables
+            # and columns.  Note that _keep_table_columns() modifies the
+            # table objects held by 'schema' in place.
+            schema_tables = {}
+            for table, columns in self._tables.iteritems():
+                schema_tables[table] = (
+                    self._keep_table_columns(schema, table, columns))
+
+            schema.tables = schema_tables
+        return schema
+
+    def _keep_table_columns(self, schema, table_name, columns):
+        # Returns the table named 'table_name' from 'schema', with its column
+        # dictionary narrowed to exactly 'columns'.  The table object is
+        # mutated in place, not copied.
+        assert table_name in schema.tables
+        table = schema.tables[table_name]
+
+        new_columns = {}
+        for column_name in columns:
+            assert type(column_name) is str
+            assert column_name in table.columns
+
+            new_columns[column_name] = table.columns[column_name]
+
+        table.columns = new_columns
+        return table