blind and brutal 2to3
[nodemanager.git] / database.py
diff --git a/database.py b/database.py
index 0680b6c..aef900d 100644 (file)
--- a/database.py
+++ b/database.py
@@ -15,7 +15,7 @@ partition, the database is constantly being dumped to disk.
 import sys
-import cPickle
+import pickle
 import threading
 import time
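Note on this hunk: Python 3 merged cPickle into the standard pickle module, whose C accelerator (_pickle) is used automatically when available. A minimal sketch of the post-conversion idiom (the dict literal is illustrative, not from the repo):

    import pickle

    # "import cPickle" simply becomes "import pickle" in Python 3;
    # the fast C implementation is picked up transparently.
    blob = pickle.dumps({'net_share': 1}, pickle.HIGHEST_PROTOCOL)
    assert pickle.loads(blob) == {'net_share': 1}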
@@ -39,7 +39,7 @@ MINIMUM_ALLOCATION = {'cpu_pct': 0,
                       'net_i2_max_rate': 8,
                       'net_share': 1,
                       }
-LOANABLE_RESOURCES = MINIMUM_ALLOCATION.keys()
+LOANABLE_RESOURCES = list(MINIMUM_ALLOCATION.keys())
 DB_FILE = '/var/lib/nodemanager/database.pickle'
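Note on this hunk: in Python 2, dict.keys() returned a list; in Python 3 it returns a dynamic view object, so 2to3 wraps it in list() to restore snapshot semantics. A small sketch (abbreviated dict, not the repo's full one):

    # Python 3: .keys() is a view; list() materializes it.
    MINIMUM_ALLOCATION = {'cpu_pct': 0, 'net_i2_max_rate': 8, 'net_share': 1}
    LOANABLE_RESOURCES = list(MINIMUM_ALLOCATION.keys())
    assert isinstance(LOANABLE_RESOURCES, list)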
@@ -77,11 +77,11 @@ In order to do the accounting, we store three different rspecs:
         * and variable resid_rspec, which is the amount of resources the sliver
           has after giving out loans but not receiving any."""
         slivers = {}
-        for name, rec in self.iteritems():
+        for name, rec in self.items():
             if 'rspec' in rec:
                 rec['_rspec'] = rec['rspec'].copy()
                 slivers[name] = rec
-        for rec in slivers.itervalues():
+        for rec in slivers.values():
             eff_rspec = rec['_rspec']
             resid_rspec = rec['rspec'].copy()
             for target, resource_name, amount in rec.get('_loans', []):
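Note on this hunk: Python 3 dropped iteritems() and itervalues(); items() and values() now return lightweight views that iterate lazily, much like the old iter* variants, so no list() wrapper is needed when the dict is not mutated during the loop. A minimal sketch with illustrative data:

    recs = {'sliver_a': {'rspec': {'cpu_pct': 10}}, 'sliver_b': {}}
    # iteritems() -> items(); itervalues() -> values()
    for name, rec in recs.items():
        if 'rspec' in rec:
            rec['_rspec'] = rec['rspec'].copy()
    for rec in recs.values():
        pass  # same one-for-one rename as the second loop above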
@@ -100,7 +100,7 @@ keys."""
         old_rec = self.get(name)
         if old_rec == None: self[name] = rec
         elif rec['timestamp'] > old_rec['timestamp']:
-            for key in old_rec.keys():
+            for key in list(old_rec.keys()):
                 if not key.startswith('_'): del old_rec[key]
             old_rec.update(rec)
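Note on this hunk (and the two that follow): here the loop deletes entries, and in Python 3 deleting from a dict while iterating over a live view raises "RuntimeError: dictionary changed size during iteration", so the list() snapshot is required rather than cosmetic. A self-contained sketch with illustrative keys:

    old_rec = {'timestamp': 1, '_loans': [], 'expires': 0}
    # Snapshot the keys first; mutating the dict inside the loop is then safe.
    for key in list(old_rec.keys()):
        if not key.startswith('_'):
            del old_rec[key]
    assert old_rec == {'_loans': []}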
@@ -109,7 +109,7 @@ keys."""
         We use it to determine if a record is stale.
         This method should be called whenever new GetSlivers() data comes in."""
         self._min_timestamp = ts
-        for name, rec in self.items():
+        for name, rec in list(self.items()):
             if rec['timestamp'] < ts: del self[name]
     def sync(self):
@@ -120,7 +120,7 @@ It may be necessary in the future to do something smarter."""
         # delete expired records
         now = time.time()
-        for name, rec in self.items():
+        for name, rec in list(self.items()):
             if rec.get('expires', now) < now: del self[name]
         self._compute_effective_rspecs()
@@ -138,7 +138,7 @@ It may be necessary in the future to do something smarter."""
             if name not in self:
                 logger.verbose("database: sync : ensure_destroy'ing %s"%name)
                 account.get(name).ensure_destroyed()
-        for name, rec in self.iteritems():
+        for name, rec in self.items():
             # protect this; if anything fails for a given sliver
             # we still need the other ones to be handled
             try:
@@ -179,7 +179,7 @@ It proceeds to handle dump requests forever."""
     while True:
         db_lock.acquire()
         while not dump_requested: db_cond.wait()
-        db_pickle = cPickle.dumps(db, cPickle.HIGHEST_PROTOCOL)
+        db_pickle = pickle.dumps(db, pickle.HIGHEST_PROTOCOL)
         dump_requested = False
         db_lock.release()
         try:
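Note on this hunk: beyond the module rename, pickle.dumps() returns bytes in Python 3, not str, so whatever code writes db_pickle out (not shown in this hunk) has to open its target file in binary mode. A minimal sketch of that constraint:

    import pickle

    db = {'sliver_a': {'timestamp': 0}}
    db_pickle = pickle.dumps(db, pickle.HIGHEST_PROTOCOL)
    assert isinstance(db_pickle, bytes)  # bytes, not str, under Python 3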
@@ -190,7 +190,7 @@ It proceeds to handle dump requests forever."""
     global db
     try:
         f = open(DB_FILE)
-        try: db = cPickle.load(f)
+        try: db = pickle.load(f)
         finally: f.close()
     except IOError:
         logger.log ("database: Could not load %s -- starting from a fresh database"%DB_FILE)