# Copyright (C) 2013 INRIA
#
# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation;
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
import time
import tempfile
+# Matches one line of `ip -o addr list` output carrying a global-scope
+# address; groups: interface name, inet/inet6 address (with prefix length).
+# Raw string: the pattern contains \d, \s, ... which are invalid string
+# escapes and raise DeprecationWarning on Python 3.6+ when not raw.
+_re_inet = re.compile(r"\d+:\s+(?P<name>[a-z0-9_-]+)\s+inet6?\s+(?P<inet>[a-f0-9.:/]+)\s+(brd\s+[0-9.]+)?.*scope\s+global.*")
+
logger = logging.getLogger("sshfuncs")
-def log(msg, level, out = None, err = None):
+def log(msg, level = logging.DEBUG, out = None, err = None):
if out:
msg += " - OUT: %s " % out
-
if err:
msg += " - ERROR: %s " % err
-
logger.log(level, msg)
if hasattr(os, "devnull"):
Special value that when given to rspawn in stderr causes stderr to
redirect to whatever stdout was redirected to.
"""
+ pass
class ProcStatus:
"""
hostbyname_cache = dict()
hostbyname_cache_lock = threading.Lock()
+def resolve_hostname(host):
+    """
+    Resolves a hostname to an IP address.
+
+    Loopback names are special-cased: callers want an address reachable
+    from other hosts, so the output of `ip -o addr list` is scanned for
+    the first interface carrying a global-scope address instead of
+    returning 127.0.0.1.
+
+    :param host: hostname or address to resolve
+    :type host: str
+
+    :rtype: str
+
+    returns the resolved IP address, or None when the local host exposes
+    no global-scope address.
+    """
+    if host in ["localhost", "127.0.0.1", "::1"]:
+        p = subprocess.Popen(
+            "ip -o addr list",
+            shell=True,
+            stdout=subprocess.PIPE,
+            stderr=subprocess.PIPE,
+            universal_newlines = True,
+        )
+        stdout, stderr = p.communicate()
+        matches = _re_inet.findall(stdout)
+        # guard: indexing matches[0] unconditionally raised IndexError on
+        # hosts with no global-scope address (e.g. isolated containers)
+        if not matches:
+            return None
+        # second group is the inet address; strip the /prefix-length part
+        return matches[0][1].split("/")[0]
+    return socket.gethostbyname(host)
+
def gethostbyname(host):
global hostbyname_cache
global hostbyname_cache_lock
hostbyname = hostbyname_cache.get(host)
if not hostbyname:
with hostbyname_cache_lock:
- hostbyname = socket.gethostbyname(host)
+ hostbyname = resolve_hostname(host)
hostbyname_cache[host] = hostbyname
msg = " Added hostbyname %s - %s " % (host, hostbyname)
"""
global OPENSSH_HAS_PERSIST
if OPENSSH_HAS_PERSIST is None:
- proc = subprocess.Popen(["ssh","-v"],
+ proc = subprocess.Popen(
+ ["ssh", "-v"],
stdout = subprocess.PIPE,
stderr = subprocess.STDOUT,
- stdin = open("/dev/null","r") )
+ stdin = subprocess.DEVNULL,
+ universal_newlines = True,
+ )
out,err = proc.communicate()
proc.wait()
return s
else:
# unsafe string - escape
- def escp(c):
+ def escape(c):
if (32 <= ord(c) < 127 or c in ('\r','\n','\t')) and c not in ("'",'"'):
return c
else:
return "'$'\\x%02x''" % (ord(c),)
- s = ''.join(map(escp,s))
+ s = ''.join(map(escape, s))
return "'%s'" % (s,)
def eintr_retry(func):
@functools.wraps(func)
def rv(*p, **kw):
retry = kw.pop("_retry", False)
- for i in xrange(0 if retry else 4):
+ for i in range(0 if retry else 4):
try:
return func(*p, **kw)
-            except (select.error, socket.error), args:
-                if args[0] == errno.EINTR:
+            except (select.error, socket.error) as args:
+                # Python 3: OSError instances are not indexable; the
+                # errno must be read through .args
+                if args.args[0] == errno.EINTR:
continue
else:
raise
- except OSError, e:
+ except OSError as e:
if e.errno == errno.EINTR:
continue
else:
args.extend(['-o', 'StrictHostKeyChecking=no'])
if gw:
- if gwuser:
- proxycommand = 'ProxyCommand=ssh %s@%s -W %%h:%%p' % (gwuser, gw)
- else:
- proxycommand = 'ProxyCommand=ssh %%r@%s -W %%h:%%p' % gw
+ proxycommand = _proxy_command(gw, gwuser, identity)
args.extend(['-o', proxycommand])
if agent:
stdout = stderr = stdin = None
return _retry_rexec(args, log_msg,
- stderr = stderr,
- stdin = stdin,
- stdout = stdout,
- env = env,
- retry = retry,
- tmp_known_hosts = tmp_known_hosts,
- blocking = blocking)
+ stderr = stderr,
+ stdin = stdin,
+ stdout = stdout,
+ env = env,
+ retry = retry,
+ tmp_known_hosts = tmp_known_hosts,
+ blocking = blocking)
def rcopy(source, dest,
port = None,
elif isinstance(source, str) and ':' in source:
remspec, path = source.split(':',1)
else:
- raise ValueError, "Both endpoints cannot be local"
+ raise ValueError("Both endpoints cannot be local")
user,host = remspec.rsplit('@',1)
# plain scp
tmp_known_hosts = None
args = ['scp', '-q', '-p', '-C',
+ # 2015-06-01 Thierry: I am commenting off blowfish
+ # as this is not available on a plain ubuntu 15.04 install
+ # this IMHO is too fragile, shoud be something the user
+ # decides explicitly (so he is at least aware of that dependency)
# Speed up transfer using blowfish cypher specification which is
# faster than the default one (3des)
- '-c', 'blowfish',
+ # '-c', 'blowfish',
# Don't bother with localhost. Makes test easier
'-o', 'NoHostAuthenticationForLocalhost=yes',
'-o', 'ConnectTimeout=60',
args.append('-P%d' % port)
if gw:
- if gwuser:
- proxycommand = 'ProxyCommand=ssh %s@%s -W %%h:%%p' % (gwuser, gw)
- else:
- proxycommand = 'ProxyCommand=ssh %%r@%s -W %%h:%%p' % gw
+ proxycommand = _proxy_command(gw, gwuser, identity)
args.extend(['-o', proxycommand])
if recursive:
blocking = True)
def rspawn(command, pidfile,
- stdout = '/dev/null',
- stderr = STDOUT,
- stdin = '/dev/null',
- home = None,
- create_home = False,
- sudo = False,
- host = None,
- port = None,
- user = None,
- gwuser = None,
- gw = None,
- agent = None,
- identity = None,
- server_key = None,
- tty = False):
+ stdout = '/dev/null',
+ stderr = STDOUT,
+ stdin = '/dev/null',
+ home = None,
+ create_home = False,
+ sudo = False,
+ host = None,
+ port = None,
+ user = None,
+ gwuser = None,
+ gw = None,
+ agent = None,
+ identity = None,
+ server_key = None,
+ tty = False,
+ strict_host_checking = True):
"""
Spawn a remote command such that it will continue working asynchronously in
background.
:param sudo: Flag forcing execution with sudo user
:type sudo: bool
- :rtype: touple
+ :rtype: tuple
(stdout, stderr), process
agent = agent,
identity = identity,
server_key = server_key,
- tty = tty ,
+ tty = tty,
+ strict_host_checking = strict_host_checking ,
)
if proc.wait():
- raise RuntimeError, "Failed to set up application on host %s: %s %s" % (host, out,err,)
+ raise RuntimeError("Failed to set up application on host %s: %s %s" % (host, out,err,))
return ((out, err), proc)
@eintr_retry
def rgetpid(pidfile,
- host = None,
- port = None,
- user = None,
- gwuser = None,
- gw = None,
- agent = None,
- identity = None,
- server_key = None):
+ host = None,
+ port = None,
+ user = None,
+ gwuser = None,
+ gw = None,
+ agent = None,
+ identity = None,
+ server_key = None,
+ strict_host_checking = True):
"""
Returns the pid and ppid of a process from a remote file where the
information was stored.
gw = gw,
agent = agent,
identity = identity,
- server_key = server_key
+ server_key = server_key,
+ strict_host_checking = strict_host_checking
)
if proc.wait():
if out:
try:
- return map(int,out.strip().split(' ',1))
+            return [ int(x) for x in out.strip().split(' ', 1) ]
except:
# Ignore, many ways to fail that don't matter that much
return None
gw = None,
agent = None,
identity = None,
- server_key = None):
+ server_key = None,
+ strict_host_checking = True):
"""
Returns a code representing the the status of a remote process
gw = gw,
agent = agent,
identity = identity,
- server_key = server_key
+ server_key = server_key,
+ strict_host_checking = strict_host_checking
)
if proc.wait():
sudo = False,
identity = None,
server_key = None,
- nowait = False):
+ nowait = False,
+ strict_host_checking = True):
"""
Sends a kill signal to a remote process.
gw = gw,
agent = agent,
identity = identity,
- server_key = server_key
+ server_key = server_key,
+ strict_host_checking = strict_host_checking
)
# wait, don't leave zombies around
return (out, err), proc
def _retry_rexec(args,
- log_msg,
- stdout = subprocess.PIPE,
- stdin = subprocess.PIPE,
- stderr = subprocess.PIPE,
- env = None,
- retry = 3,
- tmp_known_hosts = None,
- blocking = True):
-
- for x in xrange(retry):
+ log_msg,
+ stdout = subprocess.PIPE,
+ stdin = subprocess.PIPE,
+ stderr = subprocess.PIPE,
+ env = None,
+ retry = 3,
+ tmp_known_hosts = None,
+ blocking = True):
+
+ for x in range(retry):
+ # display command actually invoked when debug is turned on
+ message = " ".join( [ "'{}'".format(arg) for arg in args ] )
+ log("sshfuncs: invoking {}".format(message), logging.DEBUG)
# connects to the remote host and starts a remote connection
- proc = subprocess.Popen(args,
- env = env,
- stdout = stdout,
- stdin = stdin,
- stderr = stderr)
-
+ proc = subprocess.Popen(
+ args,
+ env = env,
+ stdout = stdout,
+ stdin = stdin,
+ stderr = stderr,
+ universal_newlines = True,
+ )
# attach tempfile object to the process, to make sure the file stays
# alive until the process is finished with it
proc._known_hosts = tmp_known_hosts
# The method communicate was re implemented for performance issues
# when using python subprocess communicate method the ssh commands
# last one minute each
+ #log("BEFORE communicate", level=logging.INFO); import time; beg=time.time()
out, err = _communicate(proc, input=None)
+ #log("AFTER communicate - {}s".format(time.time()-beg), level=logging.INFO)
elif stdout:
out = proc.stdout.read()
time.sleep(t)
continue
break
- except RuntimeError, e:
+ except RuntimeError as e:
msg = " rexec EXCEPTION - TIMEOUT -> %s \n %s" % ( e.args, log_msg )
log(msg, logging.DEBUG, out, err)
try:
rlist, wlist, xlist = select.select(read_set, write_set, [], select_timeout)
-        except select.error,e:
-            if e[0] != 4:
+        except select.error as e:
+            # Python 3: select.error is an alias of OSError and cannot be
+            # indexed directly; read the errno through .args (EINTR == 4)
+            if e.args[0] != errno.EINTR:
raise
else:
write_set.remove(proc.stdin)
if proc.stdout in rlist:
- data = os.read(proc.stdout.fileno(), 1024)
- if data == "":
+ # python2 version used to do this
+ # data = os.read(proc.stdout.fileno(), 1024)
+ # however this always returned bytes...
+ data = proc.stdout.read()
+ log('we have read {}'.format(data))
+ # data should be str and not bytes because we use
+ # universal_lines = True, but to be clean
+ # instead of saying data != ""
+ if not data:
+ log('closing stdout')
proc.stdout.close()
read_set.remove(proc.stdout)
stdout.append(data)
if proc.stderr in rlist:
- data = os.read(proc.stderr.fileno(), 1024)
- if data == "":
+ # likewise (see above)
+ # data = os.read(proc.stderr.fileno(), 1024)
+ data = proc.stderr.read()
+ if not data:
proc.stderr.close()
read_set.remove(proc.stderr)
stderr.append(data)
if stderr is not None:
stderr = ''.join(stderr)
- # Translate newlines, if requested. We cannot let the file
- # object do the translation: It is based on stdio, which is
- # impossible to combine with select (unless forcing no
- # buffering).
- if proc.universal_newlines and hasattr(file, 'newlines'):
- if stdout:
- stdout = proc._translate_newlines(stdout)
- if stderr:
- stderr = proc._translate_newlines(stderr)
+# # Translate newlines, if requested. We cannot let the file
+# # object do the translation: It is based on stdio, which is
+# # impossible to combine with select (unless forcing no
+# # buffering).
+# if proc.universal_newlines and hasattr(file, 'newlines'):
+# if stdout:
+# stdout = proc._translate_newlines(stdout)
+# if stderr:
+# stderr = proc._translate_newlines(stderr)
if killed and err_on_timeout:
errcode = proc.poll()
- raise RuntimeError, ("Operation timed out", errcode, stdout, stderr)
+ raise RuntimeError("Operation timed out", errcode, stdout, stderr)
else:
if killed:
proc.poll()
proc.wait()
return (stdout, stderr)
+def _proxy_command(gw, gwuser, gwidentity):
+    """
+    Builds the value for ssh's ProxyCommand option, used to tunnel the
+    connection through a gateway host.
+
+    :param gw: SSH proxy hostname
+    :type gw: str
+
+    :param gwuser: SSH proxy username; when not given, ssh's literal %r
+        token is emitted so the remote user name is reused for the hop
+    :type gwuser: str
+
+    :param gwidentity: SSH proxy identity file
+    :type gwidentity: str
+
+    :rtype: str
+
+    returns the SSH ProxyCommand option.
+    """
+    # optional identity file for the hop, expanded to an absolute path
+    identity_opt = '-i %s ' % os.path.expanduser(gwidentity) if gwidentity else ''
+    # explicit proxy user, or ssh's %r token (expanded by ssh itself)
+    user_part = gwuser if gwuser else '%r'
+    # %%h/%%p survive the %-formatting as literal %h/%p for ssh to expand
+    return 'ProxyCommand=ssh -q ' + identity_opt + user_part + '@%s -W %%h:%%p' % gw