"""
Utilities to create a setup made of 2 different builds
read : 2 different node flavours
so that each myplc knows about the nodeflavour/slicefamily supported
by the other one

This would be the basics for running tests on multi-node myplc,
in particular for node upgrades
"""
#################### WARNING
#
# this feature relies on a few assumptions that need to be taken care of
# more or less manually; this is based on the onelab.eu setup
#
# (*) the build host is expected to have /root/git-build.sh reasonably up-to-date
#     with our build module, so we can locate partial-repo.sh
#     this utility needs to be run on the build host so we can point at a PARTIAL-RPMS
#     sub-repo that exposes the
#     bootcd/bootstraps/ and the like rpms from one flavour to another
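#
# as an illustration - using the paths hard-wired further down in this module -
# initializing the partial repo on the build host boils down to something like
#   /root/git-build/partial-repo.sh -i /build/<pldistro>/<buildname>
# after which the PARTIAL-RPMS/ sub-repo is expected to show up next to RPMS/<arch>,
# e.g. http://build.onelab.eu/onelab//2015.03.15--f14/PARTIAL-RPMS for the sample build below
#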
# a utility to create a bonding_plc_spec from
# a plc_spec and just a buildname
def onelab_bonding_spec (buildname):
    # essentially generic ..
    # visit the other build's test directory to figure its characteristics
    with open ("../{}/arg-fcdistro".format(buildname)) as input:
        fcdistro = input.read().strip()
    with open ("../{}/arg-pldistro".format(buildname)) as input:
        pldistro = input.read().strip()
    with open ("../{}/arg-ips-bplc".format(buildname)) as input:
        plc_box = input.read().strip().split()[0]
    # e.g. http://build.onelab.eu/onelab//2015.03.15--f14/RPMS/x86_64
    with open ("../{}/arg-arch-rpms-url".format(buildname)) as input:
        arch_rpms_url = input.read().strip()
    arch = arch_rpms_url.split('/')[-1]
    build_www_host = arch_rpms_url.split('/')[2]
    base_url = arch_rpms_url.replace("RPMS/{}".format(arch), "PARTIAL-RPMS")

    build_www_git = '/root/git-build/'
    build_www_dir = '/build/{}/{}'.format(pldistro, buildname)
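
    # hand the result back as a single bonding spec dict; this is a sketch, with the
    # key set inferred from how bonding_spec gets used in TestBonding below
    return { 'buildname'      : buildname,
             'fcdistro'       : fcdistro,
             'pldistro'       : pldistro,
             'arch'           : arch,
             'plc_box'        : plc_box,
             'build_www_host' : build_www_host,
             'build_www_git'  : build_www_git,
             'build_www_dir'  : build_www_dir,
             'base_url'       : base_url,
             }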
import socket

import utils
from TestSsh import TestSsh
class TestBonding(object):

    """
    Holds details about a 'bonding' build
    so we can configure the local myplc (test_plc)
    for multi-flavour nodes and slices

    options is a TestMain options object

    details for a bonding node (like hostname and IP) are
    computed from the underlying Substrate object and
    stored in arg-bonding-{buildname}
    """
    def __init__(self, test_plc, bonding_spec, substrate, options):
        """
        test_plc is one local TestPlc instance
        bonding_spec is a dictionary that gives details on
        the build we want to be bonding with
        """
        # the local build & plc is described in options
        # the bonding build is described in bonding_spec
        self.test_plc = test_plc
        self.bonding_spec = bonding_spec
        self.substrate = substrate
        self.options = options
        # a little hacky : minimal provisioning and modify plc_spec on the fly

    def nodefamily(self):
        return "{pldistro}-{fcdistro}-{arch}".format(**self.bonding_spec)
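    # e.g. 'onelab-f14-x86_64' with the sample arch-rpms-url quoted above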
    #################### provisioning
    # store only the hostname so it's easier to set this manually
    def persistent_name(self):
        return "arg-bonding-{}".format(self.bonding_spec['buildname'])

    def persistent_store(self):
        with open(self.persistent_name(), 'w') as f:
            f.write("{}\n".format(self.vnode_hostname))
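
    # with the sample build quoted earlier the persistent file would be named
    # arg-bonding-2015.03.15--f14 and hold just the bonding node's hostname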
    def persistent_load(self):
        # returns True on success, False otherwise, so the provisioning step
        # below can fall back to sensing for a free IP
        try:
            with open(self.persistent_name()) as f:
                self.vnode_hostname = f.read().strip().split()[0]
            self.vnode_ip = socket.gethostbyname(self.vnode_hostname)
            return True
        except Exception:
            return False
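
    # the method name below is an assumption - nothing above fixes it; what the body
    # does is allocate a free IP from the substrate and rewrite the first node of our
    # own plc_spec so it runs the bonding flavour
    def provision_node(self):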
        # locate the first node in our own spec
        site_spec = self.test_plc.plc_spec['sites'][0]
        node_spec = site_spec['nodes'][0]
        # find a free IP for node
        if self.persistent_load():
            print("Re-using bonding nodes attributes from {}".format(self.persistent_name()))
        else:
            print("Sensing for an available IP (could not load from {})".format(self.persistent_name()))
            vnode_pool = self.substrate.vnode_pool
            # guard the whole allocation - any failure is reported as a provisioning error
            try:
                hostname, mac = vnode_pool.next_free()
                self.vnode_hostname = self.substrate.fqdn(hostname)
                self.vnode_ip = vnode_pool.get_ip(hostname)
                self.persistent_store()
            except Exception:
                raise Exception("Cannot provision bonding node")

        print("Bonding on node {} - {}".format(self.vnode_hostname, self.vnode_ip))

        # implement the node on another IP
        node_spec['node_fields']['hostname'] = self.vnode_hostname
        node_spec['interface_fields']['ip'] = self.vnode_ip
        # with the node flavour that goes with bonding plc
        for tag in ['arch', 'fcdistro', 'pldistro']:
            node_spec['tags'][tag] = self.bonding_spec[tag]
    #################### steps
    def init_partial(self):
        """
        runs partial-repo.sh for the bonding build
        this action takes place on the build host
        """
        test_ssh = TestSsh (self.bonding_spec['build_www_host'])
        command = "{build_www_git}/partial-repo.sh -i {build_www_dir}".\
            format(**self.bonding_spec)
        return test_ssh.run (command, dry_run = self.options.dry_run) == 0
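
    # the method name below is an assumption; this step creates a yum .repo file
    # pointing at the partial repo, and pushes it onto our own myplc
    def add_yum(self):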
        """
        creates a separate yum .repo file in the myplc box
        where our own build runs, and that points at the partial
        repo for the bonding build
        """
        # create a .repo file locally
        # (the repo stanza below assumes the usual yum fields, with base_url -> PARTIAL-RPMS)
        yumrepo_contents = """
[{buildname}-partial]
name=Partial repo from bonding build {buildname}
baseurl={base_url}
enabled=1
gpgcheck=0
""".format(**self.bonding_spec)
        yumrepo_local = '{buildname}-partial.repo'.\
            format(**self.bonding_spec)
        with open(yumrepo_local, 'w') as yumrepo_file:
            yumrepo_file.write(yumrepo_contents)
        utils.header("(Over)wrote {}".format(yumrepo_local))
        # push onto our myplc instance
        test_ssh = TestSsh (self.test_plc.vserverip)

        yumrepo_remote = '/etc/yum.repos.d/{bonding_buildname}-partial.repo'.\
            format(bonding_buildname = self.bonding_spec['buildname'])

        if test_ssh.copy_abs (yumrepo_local, yumrepo_remote,
                              dry_run = self.options.dry_run) != 0:
            return False

        # xxx TODO looks like drupal also needs to be excluded
        # from the 2 entries in building.repo
        # otherwise subsequent yum update calls will fail
        return True
    def install_rpms(self):
        """
        once the 2 operations above have been performed, we can
        actually install the various rpms that provide support for the
        nodeflavour/slicefamily offered by the bonding build to our own build
        """
        test_ssh = TestSsh (self.test_plc.vserverip)

        command1 = "yum -y update --exclude drupal"
        if test_ssh.run (command1, dry_run = self.options.dry_run) != 0:
            return False
        nodefamily = self.nodefamily()
        extra_list = [ 'bootcd', 'nodeimage', 'noderepo' ]

        extra_rpms = [ "{}-{}".format(rpm, nodefamily) for rpm in extra_list]
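        # for the sample onelab/f14/x86_64 build quoted earlier this amounts to
        # bootcd-onelab-f14-x86_64 nodeimage-onelab-f14-x86_64 noderepo-onelab-f14-x86_64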
        command2 = "yum -y install " + " ".join(extra_rpms)
        if test_ssh.run (command2, dry_run = self.options.dry_run) != 0:
            return False

        command3 = "/etc/plc.d/packages force"
        if test_ssh.run (command3, dry_run = self.options.dry_run) != 0:
            return False

        return True

### probably obsolete already
if __name__ == '__main__':

    from TestPlc import TestPlc

    from config_default import sample_test_plc_spec
    test_plc_spec = sample_test_plc_spec()
    test_plc = TestPlc (test_plc_spec)

    print(test_plc.host_box)

    from argparse import ArgumentParser
    parser = ArgumentParser()
    parser.add_argument ("-n", "--dry-run", dest='dry_run', default=False,
                         action='store_true', help="dry run")
    parser.add_argument ("build_name")
    args = parser.parse_args()
    test_bonding = TestBonding (test_plc,
                                onelab_bonding_spec(args.build_name),
                                substrate = None,   # not needed for the yum-related steps
                                options = args)     # args carries dry_run, which is all the steps use