2 Utilities to create a setup made of 2 different builds
3 read : 2 different node flavours
4 so that each myplc knows about the nodeflavour/slicefamily supported
7 This would be the basics for running tests on multi-node myplc,
8 in particular for node upgrades
11 #################### WARNING
13 # this feature relies on a few assumptions that need to be taken care of
14 # more or less manually; this is based on the onelab.eu setup
16 # (*) the build host is expected to have /root/git-build.sh reasonably up-to-date
17 # with our build module, so we can locate partial-repo.sh
18 # this utility needs to be run on the build host so we can point at a PARTIAL-RPMS
# sub-repo that exposes the bootcd/bootstraps/ and similar rpms
# from one flavour to another
22 # a utility to create a bonding_plc_spec from
23 # a plc_spec and just a buildname
def onelab_bonding_spec(buildname):
    """
    Build the 'bonding spec' dictionary for another build named *buildname*.

    The other build's test directory is expected to be a sibling directory
    (../{buildname}/) holding the arg-* files that its run left behind;
    essentially generic ..

    Returns a dict with the keys that TestBonding consumes downstream:
    buildname, fcdistro, pldistro, plc_box, arch,
    build_www_host, build_www_git, build_www_dir, base_url.
    """
    # visit the other build's test directory to figure its characteristics
    with open("../{}/arg-fcdistro".format(buildname)) as feed:
        fcdistro = feed.read().strip()
    with open("../{}/arg-pldistro".format(buildname)) as feed:
        pldistro = feed.read().strip()
    # first token of arg-ips-bplc is the box that hosts the bonding myplc
    with open("../{}/arg-ips-bplc".format(buildname)) as feed:
        plc_box = feed.read().strip().split()[0]
    # e.g. http://build.onelab.eu/onelab//2015.03.15--f14/RPMS/x86_64
    with open("../{}/arg-arch-rpms-url".format(buildname)) as feed:
        arch_rpms_url = feed.read().strip()
    arch = arch_rpms_url.split('/')[-1]
    build_www_host = arch_rpms_url.split('/')[2]
    # point at the PARTIAL-RPMS sub-repo instead of the regular RPMS/{arch} one
    base_url = arch_rpms_url.replace("RPMS/{}".format(arch), "PARTIAL-RPMS")
    # assumes git-build is checked out at that location on the build host
    build_www_git = '/root/git-build/'
    build_www_dir = '/build/{}/{}'.format(pldistro, buildname)
    # NOTE(review): the return statement was missing from the (elided) dump;
    # reconstructed from the keys that TestBonding reads out of bonding_spec
    return {
        'buildname': buildname,
        'fcdistro': fcdistro,
        'pldistro': pldistro,
        'plc_box': plc_box,
        'arch': arch,
        'build_www_host': build_www_host,
        'build_www_git': build_www_git,
        'build_www_dir': build_www_dir,
        'base_url': base_url,
    }
54 from TestSsh import TestSsh
class TestBonding(object):
    """
    Holds details about a 'bonding' build
    so we can configure the local myplc (test_plc)
    for multi-flavour nodes and slices
    options is a TestMain options

    details for a bonding node (like hostname and IP) are
    computed from the underlying Substrate object and
    stored in arg-bonding-{buildname}
    """

    def __init__(self, test_plc, bonding_spec, substrate, options):
        """
        test_plc is one local TestPlc instance
        bonding_spec is a dictionary that gives details on
        the build we want to be bonding with
        """
        # the local build & plc is described in options
        # the bonding build is described in bonding_spec
        self.test_plc = test_plc
        self.bonding_spec = bonding_spec
        self.substrate = substrate
        self.options = options
        # a little hacky : minimal provisioning and modify plc_spec on the fly
        self.provision()

    def nodefamily(self):
        # e.g. onelab-f14-x86_64 : the flavour that the bonding build supports
        return "{pldistro}-{fcdistro}-{arch}".format(**self.bonding_spec)

    #################### provisioning
    def persistent_name(self):
        # the local file where this bonding node's (hostname, ip) is remembered
        return "arg-bonding-{}".format(self.bonding_spec['buildname'])
    def persistent_store(self):
        with open(self.persistent_name(), 'w') as f:
            f.write("{} {}\n".format(self.vnode_hostname, self.vnode_ip))
    def persistent_load(self):
        # return True if the attributes could be loaded, False otherwise
        # NOTE(review): try/except reconstructed - provision() branches on
        # this boolean, and a missing file must not be fatal here
        try:
            with open(self.persistent_name()) as f:
                self.vnode_hostname, self.vnode_ip = f.read().strip().split()
            return True
        except Exception:
            return False

    def provision(self):
        """
        Tweak the first node of our own plc_spec in place, so that it
        uses a hostname/IP of its own and the bonding build's node flavour.
        """
        # locate the first node in our own spec
        site_spec = self.test_plc.plc_spec['sites'][0]
        node_spec = site_spec['nodes'][0]
        # find a free IP for node
        if self.persistent_load():
            print("Re-using bonding nodes attributes from {}".format(self.persistent_name()))
        else:
            print("Sensing for an avail. IP (Could not load from {})".format(self.persistent_name()))
            vnode_pool = self.substrate.vnode_pool
            try:
                hostname, mac = vnode_pool.next_free()
                self.vnode_hostname = self.substrate.fqdn(hostname)
                self.vnode_ip = vnode_pool.get_ip(hostname)
                self.persistent_store()
            except Exception:
                raise Exception("Cannot provision bonding node")

        print("Bonding on node {} - {}".format(self.vnode_hostname, self.vnode_ip))

        # implement the node on another IP
        node_spec['node_fields']['hostname'] = self.vnode_hostname
        node_spec['interface_fields']['ip'] = self.vnode_ip
        # with the node flavour that goes with bonding plc
        for tag in ['arch', 'fcdistro', 'pldistro']:
            node_spec['tags'][tag] = self.bonding_spec[tag]

    #################### steps
    def init_partial(self):
        """
        runs partial-repo.sh for the bonding build
        this action takes place on the build host
        """
        test_ssh = TestSsh(self.bonding_spec['build_www_host'])
        command = "{build_www_git}/partial-repo.sh -i {build_www_dir}".\
            format(**self.bonding_spec)
        return test_ssh.run(command, dry_run=self.options.dry_run) == 0

    def add_yum(self):
        """
        creates a separate yum.repo file in the myplc box
        where our own build runs, and that points at the partial
        repo for the bonding build
        """
        # create a .repo file locally
        # NOTE(review): only the 'name=' line of this template survived the
        # elided dump; baseurl/enabled/gpgcheck reconstructed - TODO confirm
        yumrepo_contents = """
[{buildname}]
name=Partial repo from bonding build {buildname}
baseurl={base_url}
enabled=1
gpgcheck=0
""".format(**self.bonding_spec)
        yumrepo_local = '{buildname}-partial.repo'.\
            format(**self.bonding_spec)
        with open(yumrepo_local, 'w') as yumrepo_file:
            yumrepo_file.write(yumrepo_contents)
        utils.header("(Over)wrote {}".format(yumrepo_local))

        # push onto our myplc instance
        test_ssh = TestSsh(self.test_plc.vserverip)
        yumrepo_remote = '/etc/yum.repos.d/{bonding_buildname}-partial.repo'.\
            format(bonding_buildname=self.bonding_spec['buildname'])
        if test_ssh.copy_abs(yumrepo_local, yumrepo_remote,
                             dry_run=self.options.dry_run) != 0:
            return False
        # xxx TODO looks like drupal also needs to be excluded
        # from the 2 entries in building.repo
        # otherwise subsequent yum update calls will fail
        return True

    def install_rpms(self):
        """
        once the 2 operations above have been performed, we can
        actually install the various rpms that provide support for the
        nodeflavour/slicefamily offered by the bonding build to our own build
        """
        test_ssh = TestSsh(self.test_plc.vserverip)

        command1 = "yum -y update --exclude drupal"
        if test_ssh.run(command1, dry_run=self.options.dry_run) != 0:
            return False

        nodefamily = self.nodefamily()
        extra_list = ['bootcd', 'nodeimage', 'noderepo']
        extra_rpms = ["{}-{}".format(rpm, nodefamily) for rpm in extra_list]
        command2 = "yum -y install " + " ".join(extra_rpms)
        if test_ssh.run(command2, dry_run=self.options.dry_run) != 0:
            return False

        command3 = "/etc/plc.d/packages force"
        if test_ssh.run(command3, dry_run=self.options.dry_run) != 0:
            return False
        return True
210 ### probably obsolete already
if __name__ == '__main__':

    from TestPlc import TestPlc
    from config_default import sample_test_plc_spec

    test_plc_spec = sample_test_plc_spec()
    test_plc = TestPlc(test_plc_spec)
    print(test_plc.host_box)

    from argparse import ArgumentParser
    parser = ArgumentParser()
    parser.add_argument("-n", "--dry-run", dest='dry_run', default=False,
                        action='store_true', help="dry run")
    parser.add_argument("build_name")
    args = parser.parse_args()

    # fix: TestBonding.__init__ takes (test_plc, bonding_spec, substrate, options);
    # the old call omitted substrate/options and passed an unsupported
    # dry_run= keyword. args carries .dry_run, so it can serve as options.
    # NOTE(review): no Substrate instance is available in this obsolete driver,
    # so provisioning only works when arg-bonding-* can be re-loaded from disk
    test_bonding = TestBonding(test_plc,
                               onelab_bonding_spec(args.build_name),
                               substrate=None,
                               options=args)