if (schemap) {
error = ovsdb_schema_from_json(json, &schema);
if (error) {
- json_destroy(json);
error = ovsdb_wrap_error(error,
"failed to parse \"%s\" as ovsdb schema",
file_name);
&date, &txn);
json_destroy(json);
if (error) {
+ ovsdb_log_unread(log);
break;
}
oldest_commit = date;
}
- ovsdb_txn_commit(txn, false);
+ error = ovsdb_txn_commit(txn, false);
+ if (error) {
+ ovsdb_log_unread(log);
+ break;
+ }
}
if (error) {
/* Log error but otherwise ignore it. Probably the database just got
* truncated due to power failure etc. and we should use its current
* contents. */
char *msg = ovsdb_error_to_string(error);
- VLOG_WARN("%s", msg);
+ VLOG_ERR("%s", msg);
free(msg);
ovsdb_error_destroy(error);
if (!table) {
if (!strcmp(table_name, "_date")
&& node_json->type == JSON_INTEGER) {
- if (date) {
- *date = json_integer(node_json);
- }
+ *date = json_integer(node_json);
continue;
} else if (!strcmp(table_name, "_comment") || converting) {
continue;
/* If it has been at least COMPACT_MIN_MSEC milliseconds since the last time
* we compacted (or at least COMPACT_RETRY_MSEC since the last time we
* tried), and if there are at least 100 transactions in the database, and
- * if the database is at least 1 MB, then compact the database. */
+ * if the database is at least 10 MB, then compact the database. */
if (time_msec() >= file->next_compact
&& file->n_transactions >= 100
&& ovsdb_log_get_offset(file->log) >= 10 * 1024 * 1024)
char *s = ovsdb_error_to_string(error);
ovsdb_error_destroy(error);
VLOG_WARN("%s: compacting database failed (%s), retrying in "
- "60 seconds", file->file_name, s);
+ "%d seconds",
+ file->file_name, s, COMPACT_RETRY_MSEC / 1000);
free(s);
file->next_compact = time_msec() + COMPACT_RETRY_MSEC;