Merge remote-tracking branch 'origin/5.0' into bootmanager-vender
author Git User <support@planet-lab.org>
Tue, 14 Oct 2014 07:17:39 +0000 (03:17 -0400)
committer Git User <support@planet-lab.org>
Tue, 14 Oct 2014 07:17:39 +0000 (03:17 -0400)
55 files changed:
Makefile [new file with mode: 0644]
README [new file with mode: 0644]
bootmanager.spec [new file with mode: 0644]
build.sh [new file with mode: 0755]
documentation/boot-manager-pdn.pdf [new file with mode: 0644]
documentation/boot-manager-pdn.xml [new file with mode: 0644]
documentation/pdn-pdf-style.xsl [new file with mode: 0644]
dummy_bootloader/Makefile [new file with mode: 0644]
dummy_bootloader/dummy_bootloader [new file with mode: 0644]
dummy_bootloader/dummy_bootloader.S [new file with mode: 0644]
nodeconfig/boot/getnodeid.php [new file with mode: 0755]
nodeconfig/boot/index.php [new file with mode: 0755]
nodeconfig/boot/upload-bmlog.php [new file with mode: 0755]
plc.d/bootmanager [new file with mode: 0755]
source/BootAPI.py [new file with mode: 0644]
source/BootManager.py [new file with mode: 0755]
source/BootServerRequest.py [new file with mode: 0644]
source/COPYRIGHT [new file with mode: 0644]
source/Exceptions.py [new file with mode: 0644]
source/ModelOptions.py [new file with mode: 0644]
source/RunlevelAgent.py [new file with mode: 0755]
source/configuration [new file with mode: 0644]
source/debug_files/debug_root_ssh_key [new file with mode: 0644]
source/debug_files/sshd_config [new file with mode: 0644]
source/libc-opendir-hack.c [new file with mode: 0644]
source/notify_messages.py [new file with mode: 0644]
source/setup_bash_history_scripts.sh [new file with mode: 0644]
source/steps/AuthenticateWithPLC.py [new file with mode: 0644]
source/steps/ChainBootNode.py [new file with mode: 0644]
source/steps/CheckForNewDisks.py [new file with mode: 0644]
source/steps/CheckHardwareRequirements.py [new file with mode: 0644]
source/steps/ConfirmInstallWithUser.py [new file with mode: 0644]
source/steps/GetAndUpdateNodeDetails.py [new file with mode: 0644]
source/steps/InitializeBootManager.py [new file with mode: 0644]
source/steps/InstallBootstrapFS.py [new file with mode: 0644]
source/steps/InstallInit.py [new file with mode: 0644]
source/steps/InstallPartitionDisks.py [new file with mode: 0644]
source/steps/InstallUninitHardware.py [new file with mode: 0644]
source/steps/InstallWriteConfig.py [new file with mode: 0644]
source/steps/MakeInitrd.py [new file with mode: 0644]
source/steps/ReadNodeConfiguration.py [new file with mode: 0644]
source/steps/SendHardwareConfigToPLC.py [new file with mode: 0644]
source/steps/StartDebug.py [new file with mode: 0644]
source/steps/StartRunlevelAgent.py [new file with mode: 0644]
source/steps/StopRunlevelAgent.py [new file with mode: 0644]
source/steps/UpdateBootStateWithPLC.py [new file with mode: 0644]
source/steps/UpdateLastBootOnce.py [new file with mode: 0644]
source/steps/UpdateNodeConfiguration.py [new file with mode: 0644]
source/steps/UpdateRunLevelWithPLC.py [new file with mode: 0644]
source/steps/ValidateNodeInstall.py [new file with mode: 0644]
source/steps/WriteModprobeConfig.py [new file with mode: 0644]
source/steps/WriteNetworkConfig.py [new file with mode: 0644]
source/steps/__init__.py [new file with mode: 0644]
source/systeminfo.py [new file with mode: 0755]
source/utils.py [new file with mode: 0644]

diff --git a/Makefile b/Makefile
new file mode 100644 (file)
index 0000000..c2540fa
--- /dev/null
+++ b/Makefile
@@ -0,0 +1,52 @@
+#
+
+########## sync
+# 2 forms are supported
+# (*) if your plc root context has direct ssh access:
+# make sync PLC=private.one-lab.org
+# (*) otherwise, for test deployments, use on your testmaster
+# $ run export
+# and cut'n paste the export lines before you run make sync
+
+PLCHOST ?= testplc.onelab.eu
+
+ifdef PLC
+SSHURL:=root@$(PLC):/
+SSHCOMMAND:=ssh root@$(PLC)
+else
+ifdef PLCHOSTLXC
+SSHURL:=root@$(PLCHOSTLXC):/var/lib/lxc/$(GUESTNAME)/rootfs
+SSHCOMMAND:=ssh root@$(PLCHOSTLXC) ssh $(GUESTHOSTNAME)
+else
+ifdef PLCHOSTVS
+SSHURL:=root@$(PLCHOSTVS):/vservers/$(GUESTNAME)
+SSHCOMMAND:=ssh root@$(PLCHOSTVS) vserver $(GUESTNAME) exec
+endif
+endif
+endif
+
+LOCAL_RSYNC_EXCLUDES   := --exclude '*.pyc' 
+RSYNC_EXCLUDES         := --exclude .svn --exclude .git --exclude '*~' --exclude TAGS $(LOCAL_RSYNC_EXCLUDES)
+RSYNC_COND_DRY_RUN     := $(if $(findstring n,$(MAKEFLAGS)),--dry-run,)
+RSYNC                  := rsync -a -v $(RSYNC_COND_DRY_RUN) $(RSYNC_EXCLUDES)
+
+DEPLOYMENT ?= regular
+
+sync:
+ifeq (,$(SSHURL))
+       @echo "sync: I need more info from the command line, e.g."
+       @echo "  make sync PLC=boot.planetlab.eu"
+       @echo "  make sync PLCHOSTVS=.. GUESTNAME=.."
+       @echo "  make sync PLCHOSTLXC=.. GUESTNAME=.. GUESTHOSTNAME=.."
+       @exit 1
+else
+       $(SSHCOMMAND) mkdir -p /usr/share/bootmanager/$(DEPLOYMENT)
+       +$(RSYNC) build.sh source $(SSHURL)/usr/share/bootmanager/$(DEPLOYMENT)
+       $(SSHCOMMAND) service plc start bootmanager
+endif
+
+##########
+tags:
+       find . -type f | egrep -v 'TAGS|/\.svn/|\.git/|~$$' | xargs etags
+
+.PHONY: tags
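
For reference, the sync target above is meant to be driven in one of the forms hinted at in the comments; a minimal sketch of the command lines (all host and guest names below are placeholders, not part of this commit):

    # (a) plc root context reachable directly over ssh
    $ make sync PLC=private.one-lab.org

    # (b) myplc running in an lxc guest, reached through its host
    $ make sync PLCHOSTLXC=plc-host.example.org GUESTNAME=myplc GUESTHOSTNAME=myplc.example.org

    # (c) myplc running in a vserver guest
    $ make sync PLCHOSTVS=plc-host.example.org GUESTNAME=myplc

    # optionally target an alternate deployment area
    $ make sync PLC=private.one-lab.org DEPLOYMENT=alpha

Each form simply fills in SSHURL and SSHCOMMAND so that build.sh and source/ get rsync'ed under /usr/share/bootmanager/$(DEPLOYMENT) on the plc side, after which the plc.d bootmanager step is restarted.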
diff --git a/README b/README
new file mode 100644 (file)
index 0000000..f86a201
--- /dev/null
+++ b/README
@@ -0,0 +1,57 @@
+Starting with 5.0
+
+===
+the entry point for the boot CD is installed in the location below
+this is expected to return the signed shell script that runs the bootmanager
+    /var/www/html/boot/index.php
+the bootmanager rpm installs most of its stuff in
+    /usr/share/bootmanager/regular/
+and the default signed script is created as
+    /var/www/html/boot/bootmanager_regular.sh.sgn
+
+===
+If you need to create an alternate version for a specific 'deployment'
+tag, say for 'alpha' nodes, you need to
+
+* create /usr/share/bootmanager/alpha/
+* populate its build.sh and source subdirs, just as in 'regular'
+* and run service plc start bootmanager
+
+---
+Alternatively from a source tree, if you have root ssh access to the plc box, you can do
+$ cd svntree/BootManager/trunk
+$ make sync
+.. see the usage text
+$ make sync DEPLOYMENT=alpha PLC=boot.onelab.eu
+
+which should do everything needed
+
+===
+It's also possible to attach a (signed) bm script to a specific node by renaming a .sgn file to
+/var/www/html/boot/bootmanager_<hostname>.sh.sgn
+
+======================================== bootstrapfs
+the actual name(s) of the bootstrapfs tarball(s) that need to be installed are
+computed in the PLCAPI, namely by the GetNodeFlavour method 
+
+In essence:
+
+* if the node has the 'deployment' tag set, then we fetch
+https://.../boot/bootstrapfs-<deployment>.tar.bz2
+* otherwise we get
+https://.../boot/bootstrapfs-<nodefamily>.tar.bz2
+
+* as for extensions, we fetch
+https://.../boot/bootstrapfs-<extension>-<fcdistro>-<arch>.tar.bz2
+  for all defined extensions
+
+* also if the plainbootstrapfs tag is set, then the uncompressed files
+  are fetched
+
+======================================== yum repo
+
+* if the node has the 'deployment' tag set, then we use
+http://.../install-rpms/alpha
+
+* otherwise 
+http://.../install-rpms/<nodefamily>
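
To make the README's 'alternate deployment' recipe concrete, here is a minimal sketch of the plc-side steps for a hypothetical 'alpha' deployment (copying from 'regular' as a starting point is an assumption, not something this commit mandates):

    # create the alternate deployment area
    mkdir -p /usr/share/bootmanager/alpha
    # populate build.sh and the source/ subdir, e.g. by starting from 'regular'
    cp -a /usr/share/bootmanager/regular/build.sh \
          /usr/share/bootmanager/regular/source \
          /usr/share/bootmanager/alpha/
    # regenerate the signed scripts; this should also produce
    # /var/www/html/boot/bootmanager_alpha.sh.sgn
    service plc start bootmanager

Nodes carrying the deployment='alpha' tag would then presumably be handed bootmanager_alpha.sh.sgn by boot/index.php instead of the regular script.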
diff --git a/bootmanager.spec b/bootmanager.spec
new file mode 100644 (file)
index 0000000..398276a
--- /dev/null
@@ -0,0 +1,284 @@
+#
+%define name bootmanager
+%define version 5.0
+%define taglevel 24
+
+%define release %{taglevel}%{?pldistro:.%{pldistro}}%{?date:.%{date}}
+
+Vendor: PlanetLab
+Packager: PlanetLab Central <support@planet-lab.org>
+Distribution: PlanetLab %{plrelease}
+URL: %{SCMURL}
+
+Summary: The PlanetLab Boot Manager
+Name: %{name}
+Version: %{version}
+Release: %{release}
+License: BSD
+Group: System Environment/Base
+Source0: %{name}-%{version}.tar.gz
+BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-root
+BuildArch: noarch
+
+Requires: tar, gnupg, sharutils, bzip2, pypcilib
+# need the apache user at install-time
+Requires: httpd 
+
+Requires: PLCAPI >= 5.0
+# the python code packaged in these are shipped on the node as well
+Requires: pypcilib pyplnet
+
+### avoid having yum complain about updates, as stuff is moving around
+# plc.d/bootmanager
+Conflicts: myplc <= 4.3
+# nodeconfig/boot/*
+Conflicts: nodeconfig <= 4.3
+
+AutoReqProv: no
+%define debug_package %{nil}
+
+%description
+The PlanetLab Boot Manager securely authenticates and boots PlanetLab
+nodes.
+
+%prep
+%setup -q
+
+%build
+gcc -shared -fPIC -ldl -Os -o source/libc-opendir-hack.so source/libc-opendir-hack.c
+
+%install
+rm -rf $RPM_BUILD_ROOT
+
+# Install source so that it can be rebuilt
+find build.sh source | cpio -p -d -u $RPM_BUILD_ROOT/%{_datadir}/%{name}/regular/
+
+install -m 644 README  $RPM_BUILD_ROOT/%{_datadir}/%{name}/README
+
+# formerly in the nodeconfig module
+install -D -m 755 nodeconfig/boot/index.php $RPM_BUILD_ROOT/var/www/html/boot/index.php
+install -D -m 755 nodeconfig/boot/upload-bmlog.php $RPM_BUILD_ROOT/var/www/html/boot/upload-bmlog.php
+install -D -m 755 nodeconfig/boot/getnodeid.php $RPM_BUILD_ROOT/var/www/html/boot/getnodeid.php
+
+# formerly in the MyPLC module
+install -D -m 755 plc.d/bootmanager $RPM_BUILD_ROOT/etc/plc.d/bootmanager
+
+%clean
+rm -rf $RPM_BUILD_ROOT
+
+%post
+# initialize the boot manager upload area
+mkdir -p /var/log/bm
+chown apache:apache /var/log/bm
+chmod 700 /var/log/bm
+
+%files
+%defattr(-,root,root,-)
+%{_datadir}/%{name}
+/var/www/html/boot/index.php
+/var/www/html/boot/upload-bmlog.php
+/var/www/html/boot/getnodeid.php
+/etc/plc.d/bootmanager
+
+%changelog
+* Fri Aug 31 2012 Thierry Parmentelat <thierry.parmentelat@sophia.inria.fr> - bootmanager-5.0-24
+- run parted with --script to keep it from hanging
+
+* Mon Jul 09 2012 Thierry Parmentelat <thierry.parmentelat@sophia.inria.fr> - bootmanager-5.0-23
+- added support for disks larger than 2Tb using gpt instead of msdos
+
+* Tue May 15 2012 Thierry Parmentelat <thierry.parmentelat@sophia.inria.fr> - bootmanager-5.0-22
+- bootmanager log clearly states duration of download and extraction of node image
+
+* Fri Apr 13 2012 Thierry Parmentelat <thierry.parmentelat@sophia.inria.fr> - bootmanager-5.0-21
+- no significant change, just checkpoint as 5.1 is addressing lxc
+
+* Thu Jul 07 2011 Thierry Parmentelat <thierry.parmentelat@sophia.inria.fr> - bootmanager-5.0-20
+- be more explicit on the node conf_file actually used
+- did this after a former PLC node tried to boot at PLE with its PLC plnode.txt still on a usb stick
+
+* Fri Jun 10 2011 Thierry Parmentelat <thierry.parmentelat@sophia.inria.fr> - bootmanager-5.0-19
+- nicer log - was intended for previous tag
+
+* Wed Jun 08 2011 Thierry Parmentelat <thierry.parmentelat@sophia.inria.fr> - bootmanager-5.0-18
+- {Start,Stop,}RunLevelAgent now ship with bootmanager
+- new UpdateLastBootOnce
+- root_size bumped to 14Gb which is more in line with modern h/w
+- more safely tries to umount /dev/ and /sys
+- support for raid partitions
+- mkswap -f
+- blacklist files from /etc/modprobe.conf/* instead
+
+* Thu Feb 17 2011 Thierry Parmentelat <thierry.parmentelat@sophia.inria.fr> - bootmanager-5.0-17
+- on install of bootstrapfs, keep track in /bm-install.log with date & flavour
+
+* Sun Jan 23 2011 Thierry Parmentelat <thierry.parmentelat@sophia.inria.fr> - bootmanager-5.0-16
+- for f14 : try to mount /dev as devtmpfs before bind-mounting onto the hdd's /dev
+- fix for choosing version of parted - for f14
+- added support for virtio devices in /dev/vd
+- fixed scanning of new disks
+- slightly reviewed logs - default mode is verbose
+- removed deprecated mkinitrd.sh
+
+* Fri Dec 10 2010 S.Çağlar Onur <caglar@cs.princeton.edu> - bootmanager-5.0-15
+- Fix problems caused by shell redirection
+
+* Thu Dec 09 2010 Thierry Parmentelat <thierry.parmentelat@sophia.inria.fr> - bootmanager-5.0-14
+- tag 5.0-13 is broken
+
+* Wed Dec 08 2010 S.Çağlar Onur <caglar@cs.princeton.edu> - bootmanager-5.0-13
+- Add support for uploading bash_history to a central server for failboot nodes.
+- Start to use subprocess instead of deprecated popen2 module
+- Fix typo for VSERVERS_SIZE
+- Add --allow-missing parameter to support different kernel configs with mkinitrd
+
+* Thu Aug 26 2010 S.Çağlar Onur <caglar@cs.princeton.edu> - bootmanager-5.0-12
+- Revert "replace deprecated popen2 with subprocess"
+
+* Wed Aug 11 2010 S.Çağlar Onur <caglar@cs.princeton.edu> - bootmanager-5.0-11
+- replace deprecated popen2 with subprocess and handle fsck return codes in a different code path
+
+* Fri Jul 30 2010 S.Çağlar Onur <caglar@cs.princeton.edu> - bootmanager-5.0-10
+- Fix typo
+
+* Fri Jul 30 2010 Baris Metin <Talip-Baris.Metin@sophia.inria.fr> - bootmanager-5.0-9
+- fix typo
+
+* Wed Jul 28 2010 S.Çağlar Onur <caglar@cs.princeton.edu> - bootmanager-5.0-8
+- disable time/count based filesystem checks
+
+* Tue Jul 27 2010 S.Çağlar Onur <caglar@cs.princeton.edu> - bootmanager-5.0-7
+- Fix new disk additions to LVM array
+
+* Wed Jul 07 2010 Thierry Parmentelat <thierry.parmentelat@sophia.inria.fr> - BootManager-5.0-6
+- bugfix for centos5/python2.4 missing hashlib
+
+* Mon Jul 05 2010 Baris Metin <Talip-Baris.Metin@sophia.inria.fr> - BootManager-5.0-5
+- check sha1sum of downloaded bootstrapfs
+- try recovering filesystem errors
+
+* Wed Jun 23 2010 Thierry Parmentelat <thierry.parmentelat@sophia.inria.fr> - BootManager-5.0-4
+- nicer initscript now uses 'action' from /etc/init.d/functions
+- bugfix for nodes with extensions
+
+* Fri Apr 02 2010 Thierry Parmentelat <thierry.parmentelat@sophia.inria.fr> - BootManager-5.0-3
+- create /etc/planetlab if missing
+- uses key 'ssh_rsa_key' in BootUpdateNode (requires PLCAPI-5.0.5)
+
+* Sat Feb 13 2010 Thierry Parmentelat <thierry.parmentelat@sophia.inria.fr> - BootManager-5.0-2
+- caglar's change to run MkInitrd right before kexec
+- plus clean up old code
+
+* Fri Jan 29 2010 Thierry Parmentelat <thierry.parmentelat@sophia.inria.fr> - BootManager-5.0-1
+- first working version of 5.0:
+- plc.d/, db-config.d/ and nodeconfig/ scripts should now sit in the module they belong to
+- uses PLCAPI's GetNodeFlavour to get all info on the bootstrapfs tarball(s) to install
+- installation layout on the plc side has changed, more consistent for e.g. 'alpha' bootmanagers
+
+* Sat Jan 09 2010 Thierry Parmentelat <thierry.parmentelat@sophia.inria.fr> - BootManager-4.3-16
+- support for fedora 12
+
+* Sat Dec 19 2009 Marc Fiuczynski <mef@cs.princeton.edu> - BootManager-4.3-15
+- - support for when the node is behind a NAT
+- - clean up RUN_LEVEL support
+- - support for early sshd
+
+* Thu Nov 19 2009 Daniel Hokka Zakrisson <daniel@hozac.com> - BootManager-4.3-14
+- Add NAT model option for nodes which don't resolve properly.
+
+* Mon Sep 07 2009 Stephen Soltesz <soltesz@cs.princeton.edu> - BootManager-4.3-12
+- Moved some configuration values from BootServerRequest.py to 'configuration' file.
+- BootServerRequest takes the 'VARS' variable to read these values.
+- UPLOAD_LOG_SCRIPT can point optionally to the 'upload-bmlog.php' or 'monitor/upload'
+- (or any other interface that accepts a POST file)
+- build.sh bundles cacerts for boot and monitor servers (if present) to
+- authenticate the UPLOAD_LOG_SCRIPT.
+- Previously, these certs were re-used from the bootcd, now they are bundled
+- with BM.  This allows the BM to point to a completely different myplc if
+- desired, and it is still secure, because the original download is
+- authenticated.
+
+* Wed Aug 26 2009 Stephen Soltesz <soltesz@cs.princeton.edu> - BootManager-4.3-11
+- raise a single exception for nodes with authentication errors
+- fix syntax error in MakeInitrd.py
+
+* Mon Aug 10 2009 Stephen Soltesz <soltesz@cs.princeton.edu> - BootManager-4.3-10
+- Replace UpdateBootstate with UpdateRunlevel where appropriate.
+- Removed failboot and install from forced states.
+- Removed checks for initrd in Validate
+- Added extra messages for Validate failures, not-installed, no kernel, failed fsck
+- Added libc-opendir-hack.so patch from 3.2 branch for 2.6.12 bootcds on PL.
+
+* Mon Jun 29 2009 Marc Fiuczynski <mef@cs.princeton.edu> - BootManager-4.3-9
+- Special handling for "forcedeth" ethernet NIC.
+
+* Mon Jun 15 2009 Stephen Soltesz <soltesz@cs.princeton.edu> - BootManager-4.3-8
+- include a fix for public pl dealing with old/new boot images and root
+- environments
+
+* Fri May 15 2009 Thierry Parmentelat <thierry.parmentelat@sophia.inria.fr> - BootManager-4.3-7
+- review selection nodefamily at bootstrapfs install-time
+- now based on (1) tags (2) nodefamily and (3) defaults
+- this is required on very old bootcd
+
+* Wed Apr 29 2009 Marc Fiuczynski <mef@cs.princeton.edu> - BootManager-4.3-6
+- Use modprobe module to write out /etc/modprobe.conf.
+
+* Wed Apr 22 2009 Thierry Parmentelat <thierry.parmentelat@sophia.inria.fr> - BootManager-4.3-5
+- minor updates - using the new modprobe module *not* in this tag
+
+* Wed Apr 08 2009 Thierry Parmentelat <thierry.parmentelat@sophia.inria.fr> - BootManager-4.3-4
+- load device mapper if needed, for centos5-based bootcd variant
+
+* Wed Mar 25 2009 Thierry Parmentelat <thierry.parmentelat@sophia.inria.fr> - BootManager-4.3-3
+- renumbered 4.3
+- New step StartRunLevelAgent
+- various other tweaks
+
+* Wed Jan 28 2009 Thierry Parmentelat <thierry.parmentelat@sophia.inria.fr> - BootManager-4.3-2
+- most of the actual network config job moved to (py)plnet
+- support for RAWDISK
+- network interfaces deterministically sorted
+- does not use nodegroups anymore for getting node arch and other extensions
+- drop yum-based extensions
+- debug sshd started as early as possible
+- timestamped and uploadable logs (requires upload-bmlog.php from nodeconfig/)
+- cleaned up (drop support for bootcdv2)
+- still needs testing
+
+* Wed Sep 10 2008 Thierry Parmentelat <thierry.parmentelat@sophia.inria.fr> - BootManager-4.3-1
+- reflects new names from the data model
+
+* Sat May 24 2008 Thierry Parmentelat <thierry.parmentelat@sophia.inria.fr> - BootManager-3.2-7
+- dont unload cpqphp
+
+* Thu Apr 24 2008 Thierry Parmentelat <thierry.parmentelat@sophia.inria.fr> - BootManager-3.2-6
+- changes in the state automaton logic 
+- root+swap = 7G
+- usb-key threshold increased to 17 G
+- bootstrapfs selection logic altered - uses /etc/planetlab/nodefamily instead of GetPlcRelease
+
+* Wed Mar 26 2008 Thierry Parmentelat <thierry.parmentelat@sophia.inria.fr> - BootManager-3.2-4 BootManager-3.2-5
+- renamed step InstallBootstrapRPM into InstallBootstrapFS
+- reviewed selection of bootstrapfs, based on nodegroups, for multi-arch deployment
+- import pypcimap rather than pypciscan
+- initial downloading of plc_config made more robust
+- root and /vservers file systems mounted ext3
+- calls to BootGetNodeDetails replaced with GetNodes/GetNodeNetworks
+- also seems to be using session-based authentication rather than former hmac-based one
+
+* Fri Feb 08 2008 Thierry Parmentelat <thierry.parmentelat@sophia.inria.fr> - bootmanager-3.2-3 bootmanager-3.2-4
+- usage of wireless attributes fixed and tested
+- breakpoints cleaned up (no change for production)
+- less alarming message when floppy does not get unloaded
+
+* Thu Jan 31 2008 Thierry Parmentelat <thierry.parmentelat@sophia.inria.fr> - bootmanager-3.2-2 bootmanager-3.2-3
+- network config : support the full set of settings from ifup-wireless - see also http://svn.planet-lab.org/svn/MyPLC/tags/myplc-4.2-1/db-config
+- removes legacy calls to PlanetLabConf.py 
+- refrains from unloading floppy 
+- first draft of the dual-method for implementing extensions (bootstrapfs-like images or yum install)
+
+* Fri Sep  2 2005 Mark Huang <mlhuang@cotton.CS.Princeton.EDU> - 
+- Initial build.
+
+%define module_current_branch 4.3
diff --git a/build.sh b/build.sh
new file mode 100755 (executable)
index 0000000..ee4bd32
--- /dev/null
+++ b/build.sh
@@ -0,0 +1,111 @@
+#!/bin/bash
+#
+# Builds bootmanager.sh[.sgn], which is the PlanetLab Boot Manager script.
+#
+# The bootmanager.sh script contains in it a uuencoded tarball of the
+# Boot Manager, customized for this PLC installation.
+#
+# Aaron Klingaman <alk@absarokasoft.com>
+# Mark Huang <mlhuang@cs.princeton.edu>
+# Marc E. Fiuczynski <mef@cs.princeton.edu>
+# Copyright (C) 2004-2007 The Trustees of Princeton University
+#
+
+# Source PLC configuration
+. /etc/planetlab/plc_config
+
+# Do not tolerate errors
+set -e
+
+# this is set by plc.d/bootmanager
+DEPLOYMENT=$1
+
+BOOTSTRAPDIR="/boot"
+
+# Change to our source directory
+cd $(dirname $0)
+
+# Source bootmanager configuration
+. source/configuration
+
+# Write boot script. nodeconfig/boot/index.php retrieves the contents of this script
+# after checking the node id
+
+BMDIR=/var/www/html/boot
+mkdir -p $BMDIR
+
+DEST_SCRIPT="$BMDIR/bootmanager_${DEPLOYMENT}.sh"
+# Remove the old version or any sym links prior to re-writing
+rm -f ${DEST_SCRIPT}
+rm -f ${DEST_SCRIPT}.sgn
+
+
+# hard code 443 here.
+sed -i -e "s@^BOOT_API_SERVER.*@BOOT_API_SERVER=https://$PLC_API_HOST:443/$PLC_API_PATH/@" source/configuration
+
+sed -i -e "s@^BOOT_SERVER.*@BOOT_SERVER=$PLC_BOOT_HOST@" source/configuration
+if [ "$PLC_MONITOR_ENABLED" = "1" ]; then
+    MONITOR_SERVER=$PLC_MONITOR_HOST
+else
+    MONITOR_SERVER=$PLC_BOOT_HOST
+fi
+sed -i -e "s@^MONITOR_SERVER.*@MONITOR_SERVER=$MONITOR_SERVER@" source/configuration
+
+install -D -m 644 $PLC_BOOT_CA_SSL_CRT source/cacert/$PLC_BOOT_HOST/cacert.pem
+if [ -f "$PLC_MONITOR_CA_SSL_CRT" ] ; then 
+       install -D -m 644 "$PLC_MONITOR_CA_SSL_CRT" source/cacert/$PLC_MONITOR_HOST/cacert.pem
+fi
+
+# Replace the default debug SSH key
+if [ -f "$PLC_DEBUG_SSH_KEY_PUB" ] ; then
+    install -D -m 644 "$PLC_DEBUG_SSH_KEY_PUB" source/debug_files/debug_root_ssh_key
+fi
+
+# Add python code from the following packages
+# make sure they are in the 'Requires' header of the specfile
+required_rpms="pypcilib pyplnet"
+extra_libs=`mktemp -d "/tmp/.bootmanager.XXXXXX"`
+mkdir $extra_libs/source
+cp -p $(rpm -ql $required_rpms | grep -v '\.py[co]$') $extra_libs/source
+
+
+########## create the bootmanager script
+cat <<EOF > $DEST_SCRIPT
+#!/bin/bash
+#
+# PlanetLab Boot Manager $VERSION
+#
+# DO NOT EDIT. Generated by $USER@$HOSTNAME at
+# $(date) 
+#
+
+# Do not tolerate errors
+set -e
+
+(/usr/bin/uudecode | /bin/tar -C /tmp -xj) << _EOF_
+EOF
+
+
+# Embed the uuencoded tarball in the script
+tar -cj source/ -C $extra_libs source/ | uuencode -m - >> $DEST_SCRIPT
+
+# wrap up the script
+echo '_EOF_' >> $DEST_SCRIPT
+echo 'cd /tmp/source' >> $DEST_SCRIPT
+echo 'chmod +x BootManager.py && ./BootManager.py' >> $DEST_SCRIPT
+
+# Remove temp directory
+rm -fr $extra_libs
+
+# Sign the whole script, if the keyring is on this machine.
+if [ -f "$PLC_ROOT_GPG_KEY" -a -f "$PLC_ROOT_GPG_KEY_PUB" ] ; then
+    gpg --homedir=/root \
+       --no-default-keyring \
+       --keyring "$PLC_ROOT_GPG_KEY_PUB" \
+       --secret-keyring "$PLC_ROOT_GPG_KEY" \
+       --yes --sign --output $DEST_SCRIPT.sgn \
+       $DEST_SCRIPT
+else
+    echo "Warning: Remember to sign $PWD/$DEST_SCRIPT!" >&2
+fi
+
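
Since the generated bootmanager_<deployment>.sh is just a thin wrapper around a uuencoded tarball (see the here-document above), its payload can be inspected by hand; a minimal sketch, mirroring the extraction the wrapper itself performs (paths are illustrative):

    # pull out the base64 block and unpack it into /tmp/source,
    # exactly as the wrapper's "uudecode | tar -C /tmp -xj" line does
    sed -n '/^begin-base64/,/^====$/p' /var/www/html/boot/bootmanager_regular.sh \
        | uudecode | tar -C /tmp -xj
    ls /tmp/source     # BootManager.py, steps/, configuration, cacert/, ...

For the signed variant, gpg can verify and strip the signature wrapper in one go (assuming the PLC public key is in the local keyring):

    gpg --output bootmanager_regular.sh --decrypt bootmanager_regular.sh.sgn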
diff --git a/documentation/boot-manager-pdn.pdf b/documentation/boot-manager-pdn.pdf
new file mode 100644 (file)
index 0000000..19999d8
--- /dev/null
Binary files /dev/null and b/documentation/boot-manager-pdn.pdf differ
+0000024018 00000 n 
+0000024126 00000 n 
+0000028641 00000 n 
+0000028749 00000 n 
+0000032253 00000 n 
+0000032361 00000 n 
+0000035869 00000 n 
+0000035992 00000 n 
+0000036019 00000 n 
+0000036204 00000 n 
+0000036317 00000 n 
+0000036427 00000 n 
+0000036538 00000 n 
+0000036646 00000 n 
+0000036752 00000 n 
+trailer
+<<
+/Size 64
+/Root 2 0 R
+/Info 4 0 R
+>>
+startxref
+38196
+%%EOF
diff --git a/documentation/boot-manager-pdn.xml b/documentation/boot-manager-pdn.xml
new file mode 100644 (file)
index 0000000..2f28c94
--- /dev/null
@@ -0,0 +1,829 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE article PUBLIC "-//OASIS//DTD DocBook XML V4.3//EN"
+"http://www.oasis-open.org/docbook/xml/4.3/docbookx.dtd">
+<article>
+  <articleinfo>
+    <title>The PlanetLab Boot Manager</title>
+
+    <author>
+      <firstname>Aaron</firstname>
+
+      <surname>Klingaman</surname>
+
+      <email>alk@cs.princeton.edu</email>
+    </author>
+
+    <affiliation>
+      <orgname>Princeton University</orgname>
+    </affiliation>
+
+    <abstract>
+      <para>This document outlines the design and policy decisions of a new
+      PlanetLab component called the Boot Manager. The Boot Manager
+      encompasses several systems and all policy regarding how new nodes are
+      brought into the system, how they are authenticated with PlanetLab
+      Central (PLC), what authenticated operations they can perform, and what
+      constitutes a node's identity.</para>
+    </abstract>
+
+    <revhistory>
+      <revision>
+        <revnumber>1.0</revnumber>
+
+        <date>January 14, 2005</date>
+
+        <authorinitials>AK</authorinitials>
+
+        <revdescription>
+          <para>Initial draft.</para>
+        </revdescription>
+      </revision>
+    </revhistory>
+  </articleinfo>
+
+  <section>
+    <title>Overview</title>
+
+    <para>This document describes the history of and groups several previously
+    separate, undocumented components and policy decisions of the PlanetLab
+    infrastructure into one logical group, which will be called the
+    <firstterm>Boot Manager</firstterm>. In addition, specific recommendations
+    are made for changes and additions to these parts to support new features
+    and better security outlined in detail later. These include:</para>
+
+    <orderedlist>
+      <listitem>
+        <para>How new nodes are added to the PlanetLab system, and the chain
+        of trust that accompanies that addition</para>
+      </listitem>
+
+      <listitem>
+        <para>How to prevent unauthorized nodes from becoming part of the
+        system, and the consequences of that happening</para>
+      </listitem>
+
+      <listitem>
+        <para>How any existing node authenticates itself with PlanetLab
+        Central (PLC), and what operations it can perform</para>
+      </listitem>
+
+      <listitem>
+        <para>What constitutes node identity, and when this identity should
+        and should not change</para>
+      </listitem>
+    </orderedlist>
+
+    <para>Not covered by this document are topics including node-to-node
+    authentication, and any service or system running after a node is fully
+    booted, when the Boot Manager is no longer applicable.</para>
+  </section>
+
+  <section>
+    <title>Terminology</title>
+
+    <para>Before continuing, the terms used throughout this document,
+    including what a site is, what nodes are, and what PlanetLab consists of,
+    will be defined. The current organizational structure consists of groups
+    of <firstterm>sites</firstterm>, usually a geographical location
+    corresponding one to one with a company or university. These sites have
+    any number of <firstterm>users</firstterm> or
+    <firstterm>researchers</firstterm>, including a <firstterm>principal
+    investigator</firstterm>, or <firstterm>PI</firstterm>, responsible for
+    the users, and one or more <firstterm>technical contacts</firstterm>.
+    Sites are usually composed of at least two machines running the PlanetLab
+    software, usually referred to as <firstterm>nodes</firstterm>. All user
+    and node management operations are done through a set of servers located
+    in one physical location, known as <firstterm>PlanetLab
+    Central</firstterm>, or <firstterm>PLC</firstterm>. There is also a set of
+    PlanetLab <firstterm>administrators</firstterm>, not necessarily
+    affiliated with a particular site. <firstterm>PlanetLab</firstterm> then
+    collectively refers to all sites and their nodes and users, and PlanetLab
+    Central.</para>
+  </section>
+
+  <section>
+    <title>Background</title>
+
+    <section>
+      <title>How Sites Become Part of PlanetLab</title>
+
+      <para>A full discussion and evaluation of the process and security
+      implications of sites becoming part of PlanetLab is outside the scope of
+      this document. It will be assumed that the process is relatively secure,
+      and that user and PI accounts at that site are legitimate. However, it
+      is necessary to provide some basic information about the process.</para>
+
+      <para>What does it mean for a site to be part of PlanetLab?
+      Primarily:</para>
+
+      <orderedlist>
+        <listitem>
+          <para>The site's record (e.g. name, url, geographical location,
+          contact information) is in the PLC database</para>
+        </listitem>
+
+        <listitem>
+          <para>There are a set of users (their email address, password,
+          personal information) associated with the site in the PLC
+          database</para>
+        </listitem>
+
+        <listitem>
+          <para>The ability for those users and PIs to perform some operations
+          at PLC, and gain direct access to the nodes</para>
+        </listitem>
+      </orderedlist>
+
+      <para>The process for allowing new sites to become part of PlanetLab has
+      been continually evolving since the beginning of PlanetLab. Initially,
+      the first sites were selected and invited, and record of their existence
+      in PLC was entered in by hand by an administrator. With a site now part
+      of PlanetLab, users and PIs at those sites could then register for
+      accounts to perform operations at PLC. Privileged accounts, such as PI
+      accounts, were enabled by administrators. At the time, this
+      administrative overhead was not a problem given the relatively limited
+      number of total sites.</para>
+
+      <para>Over time, parts of these operations have been streamlined. Now, a
+      site can submit all their relevant info on the PLC website, for review
+      and approval by administrators. They also no longer require an explicit
+      invitation. With the creation of the PlanetLab Consortium, there is now
+      an additional paperwork step before a site becomes a member of
+      PlanetLab.</para>
+
+      <para>With the introduction of the additional consortium step, the
+      process now exists as:</para>
+
+      <orderedlist>
+        <listitem>
+          <para>A site either requests to join PlanetLab by contacting
+          administrators over email, or through other external
+          communication</para>
+        </listitem>
+
+        <listitem>
+          <para>Necessary consortium paperwork is signed by all
+          parties</para>
+        </listitem>
+
+        <listitem>
+          <para>PI(s) submit connect (join) requests with remaining site and
+          personal information</para>
+        </listitem>
+
+        <listitem>
+          <para>Administrators verify that the PI is who they say they are,
+          and enable their site and accounts at PLC</para>
+        </listitem>
+      </orderedlist>
+    </section>
+
+    <section>
+      <title>How Nodes Become Part of PlanetLab</title>
+
+      <para>After a site has been approved and added to PLC, it is required
+      to install and make available to other users at least two nodes (as per
+      current policy).</para>
+
+      <para>In the first revisions of the PLC software, nodes were only added
+      to the system by hand. Usually a PI or technical contact would
+      communicate the network settings of the node, and it was then added to
+      PLC by an administrator. This prevented any nodes that weren't part of
+      PlanetLab from being recognized by PLC. No mechanisms existed to ensure
+      that the node's network settings (effectively its identity) were not
+      hijacked by another machine.</para>
+
+      <para>Since the beginning of PlanetLab, there have been few to no
+      restrictions on what machines the PlanetLab software can run on. This is
+      primarily due to the fact that all source code is now available, and it
+      is technically feasible for anyone to bring up a machine that is running
+      the PlanetLab software, or closely resembles it. What is important,
+      however, is when these nodes become recognized by PLC, and then
+      available to the users via PLC. Otherwise, a user would have to go
+      through non-PLC channels in order to find these nodes. Even then, they
+      could not use PLC to run their experiments on the nodes, because PLC
+      does not know about those nodes.</para>
+
+      <para>When a node becomes part of PlanetLab, it:</para>
+
+      <orderedlist>
+        <listitem>
+          <para>Is recognized by PLC as being at the site by its existence in
+          our database</para>
+        </listitem>
+
+        <listitem>
+          <para>The existing node boot mechanisms allow the machine to come
+          online after communicating its identity to PLC</para>
+        </listitem>
+
+        <listitem>
+          <para>Researchers can use the node for their experiments by using
+          administrative interfaces at PLC</para>
+        </listitem>
+      </orderedlist>
+
+      <para>Rather than adding each node by hand, the current system instead
+      allows an entire network subnet to be authorized to contain nodes.
+      When a site joins, a PLC administrator authorizes the subnet the nodes
+      will be on, and any machines on that network are allowed to become
+      recognized by PLC automatically. This had an immediate advantage: PLC
+      administrators no longer incurred the overhead of adding each node by
+      hand, as was done in the beginning. Given that a common
+      interest was to see PlanetLab grow in terms of number of nodes (as one
+      metric), the assumption was made that allowing any node to come online
+      on an authorized subnet without explicit approval from an administrator
+      or PI would benefit everyone.</para>
+    </section>
+
+    <section>
+      <title>Node Installation</title>
+
+      <para>To date, there have been three major revisions of the software
+      that installs a PlanetLab node. Not only have the mechanisms by which
+      the nodes get installed changed, but also the context in which the
+      installation runs.</para>
+
+      <para>The first revision of the installer was primarily nothing more
+      than a customized RedHat (version 7.3) boot disk, with a PlanetLab
+      specific post-install script to perform final initialization steps. The
+      network settings and the list of packages to install were all stored on
+      the disk, so a custom disk was generated on demand for each node. Anyone
+      with one of these disks could install a PlanetLab node.</para>
+
+      <para>The second revision of the installer was released in conjunction
+      with the release of the new PlanetLab boot cd. The intention was not
+      necessarily to have the node packages on the cd (as they would quickly
+      go out of date), but, to provide a mechanism to allow administrators to
+      regain control of a machine, in the event that the node was compromised,
+      or the installed software was corrupted. The nodes were configured to
+      always start off the cd, and, rather than have a custom cd per node, the
+      network settings were stored on a floppy disk. Both the floppy disk and
+      the boot cd were to remain in the machine at all times. The RedHat
+      installer, Anaconda <citation>1</citation>, that was used prior to the
+      boot cd was modified to run in the context of this boot cd. This allowed
+      us a great deal of flexibility, as the cd was built so that all it would
+      do was:</para>
+
+      <orderedlist>
+        <listitem>
+          <para>Bring a full Linux system online, running only off the
+          cd</para>
+        </listitem>
+
+        <listitem>
+          <para>Load any network and other drivers necessary, based on the
+          hardware of the node</para>
+        </listitem>
+
+        <listitem>
+          <para>Configure the network interface with the settings from the
+          floppy disk</para>
+        </listitem>
+
+        <listitem>
+          <para>Contact a special PLC boot server, and download and execute a
+          script.</para>
+        </listitem>
+      </orderedlist>
+
+      <para>The boot cd uses HTTPS to contact the boot server, and uses a
+      certification authority (CA) certificate to verify the identity of the
+      machine at PLC. This way, it can be assured that the installation of a
+      particular node is correct, at least in that all packages originated
+      from PLC. The script downloaded by the boot cd for a node depends on the
+      current state of that node in the PLC database. The PLC database must
+      identify the node in order to accomplish that; this is covered below, in
+      Node Identity.</para>
+
+      <para>The third and current version of the installer still runs in the
+      context of the boot cd, but was a complete rewrite to better handle
+      packages, and remove much unneeded complexity in the previous
+      installer.</para>
+    </section>
+
+    <section>
+      <title>Node Identity</title>
+
+      <para>In the first revisions of the PlanetLab software, nodes were
+      solely identified by their network settings, primarily, the hostname and
+      the physical address of the network adapter (MAC address). This worked
+      well then, as this set of information was unique, and allowed for the
+      direct mapping of node identity to a physical machine. It was stored
+      this way in the PLC database as well.</para>
+
+      <para>As the design of the database progressed, the PlanetLab software
+      needed to identify nodes not by any one aspect of the physical machine,
+      but by a more generic identifier (as this identifier needed to be used
+      internally to refer to other aspects of a node, like which site it is
+      at) - what has been called a node id. Although better in some respects,
+      there are still drawbacks. For example, deleting a node entry from the
+      database and recreating a similar one could result in a new node id,
+      when nothing on the node itself has really changed. These problems are
+      primarily due to the lack of a documented policy; instead, the
+      implementation details define the policy.</para>
+
+      <para>Currently, when a node requests a script from the boot server as
+      the last step of the boot cd operation, it sends to PLC the output of
+      the program 'ifconfig' (among other data), which contains the network
+      settings the machine was configured with. From the network settings, the
+      primary MAC address is extracted by PLC and used to check whether the
+      node exists in the database. Here, the MAC address is used to look up a
+      corresponding numeric node id, which is used internally. The MAC address
+      and the node id are tied - if a new MAC address is used, a new node id
+      will be generated. If the node does exist, an appropriate script is sent
+      in response, based on the current node state. Again, this was fine, as
+      long as a node was identified correctly.</para>
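+
+      <para>For illustration, a minimal sketch (not taken from the actual PLC
+      implementation; the names and the lookup table are assumptions) of how
+      the primary MAC address might be extracted from the submitted
+      'ifconfig' output and mapped to the internal numeric node id:</para>
+
+      <programlisting>
+# Illustrative only; stands in for the server-side lookup described above.
+import re
+
+def primary_mac(ifconfig_output):
+    # Linux-era 'ifconfig' prints the hardware address as "HWaddr xx:xx:..."
+    match = re.search(r"HWaddr\s+([0-9A-Fa-f:]{17})", ifconfig_output)
+    return match.group(1).lower() if match else None
+
+def lookup_node_id(mac_to_node_id, ifconfig_output):
+    # mac_to_node_id stands in for the PLC database table that ties a
+    # lower-cased MAC address to its numeric node id
+    mac = primary_mac(ifconfig_output)
+    return mac_to_node_id.get(mac)
+      </programlisting>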
+    </section>
+
+    <section>
+      <title>Node Authentication</title>
+
+      <para>What does a node (or PI, for that matter) have to do to prove that
+      it is one of the real, or legitimate, PlanetLab nodes? At first, this
+      was not an issue because the nodes were added to the system by
+      administrators, and all communication paths led only from PLC to the
+      nodes. Everything was downloaded from PLC, including information about
+      which experimenters can use the system and what packages to install for
+      updates. For this, a node only needed to send enough information in the
+      request to identify itself with PLC. From the PLC point of view, it did
+      not matter which node downloaded the packages for a node, so long as the
+      node was identified correctly and received the packages it was supposed
+      to. This was acceptable since the node was added to PLC by hand, thus it
+      was already 'authenticated'. During this period, a number of assumptions
+      were made:</para>
+
+      <orderedlist>
+        <listitem>
+          <para>That a rogue node with the same network settings would not be
+          a problem, as the site technical contacts could prevent or detect
+          that</para>
+        </listitem>
+
+        <listitem>
+          <para>No check was made to ensure that a particular node had
+          already been authenticated (aside from assuring that the host's
+          public ssh key fingerprint did not change from one login to the
+          next)</para>
+        </listitem>
+      </orderedlist>
+
+      <para>As more previously manual steps became automated, a number of
+      situations came up in which a node would need to initiate and perform
+      some operation at PLC. There is only a small set of these operations,
+      and they are limited to items such as adding a node to the system (under
+      a previously authorized subnet), changing the 'boot state' (a record of
+      whether the machine is being installed, or is in a debug mode) of a
+      node, or uploading the logs of an installation.</para>
+
+      <para>To handle this new node authentication, a 32 byte random nonce
+      value was generated and sent to PLC during node boot time (at the same
+      time the network settings are sent). The nonce value in the PLC database
+      for that particular node is updated if the node is identified correctly,
+      and is used for authenticating subsequent, node initiated operations.
+      Then, for example, when a node install finished, the node could request
+      that its state be updated, and all it would need to do is resend its
+      network settings and the original nonce for authentication. If the
+      nonce in the database matched what was sent, then the requested
+      operation was performed.</para>
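+
+      <para>A minimal sketch of this nonce scheme follows (purely
+      illustrative; the data structures and function names are assumptions,
+      not the real PLC interfaces):</para>
+
+      <programlisting>
+# Illustrative sketch of the nonce scheme described above.
+import os
+
+def boot_time_registration(node_records, node_id, network_settings):
+    # node side: generate a 32-byte random nonce and send it along with the
+    # network settings; PLC side: record it for this node
+    nonce = os.urandom(32).hex()
+    node_records[node_id] = {'settings': network_settings, 'nonce': nonce}
+    return nonce
+
+def authorize_request(node_records, node_id, presented_nonce):
+    # a later node-initiated request is honoured only if the nonce matches
+    # the one recorded at boot time -- which, as noted above, is no stronger
+    # than the node's claim of identity
+    record = node_records.get(node_id)
+    return record is not None and record['nonce'] == presented_nonce
+      </programlisting>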
+
+      <para>The problem here is obvious: now, any node that can be identified
+      is essentially automatically authenticated. For a node to be identified,
+      it has to be in the database, and, new nodes can be automatically added
+      on any authorized subnets without intervention of an administrator or
+      tech contact. With this system, it is trivial to add a rogue node to the
+      system, even at a different site that was not originally authorized,
+      because the whole system is based on what a node sends PLC, which is
+      trivial to spoof.</para>
+    </section>
+  </section>
+
+  <section>
+    <title>Recommendations</title>
+
+    <section>
+      <title>How PLC Will Identify Nodes</title>
+
+      <para>Before any suggestions on what to change regarding the node
+      identity policy can be made, the question of what makes a node a node
+      should be answered. This primarily depends on who is asking. From an
+      administrator's point of view, a node could be tied to a particular
+      installation of the software. Reinstall the node, and it becomes a new
+      node with a new identity. However, from an end user's perspective, the
+      machine still has the same network address and hostname, and their
+      software was simply removed. For them, changing the node identity in
+      this situation does not make any sense, and usually causes them
+      unnecessary work, as they have to re-add that machine to their
+      experiment (because, as far as the PLC database is concerned, the node
+      never existed before then). This question is particularly important for
+      several reasons:</para>
+
+      <orderedlist>
+        <listitem>
+          <para>It gives users a way to identify a node, in order to use it
+          for their research</para>
+        </listitem>
+
+        <listitem>
+          <para>The node identity could be used by other external systems, as
+          a universal identifier</para>
+        </listitem>
+      </orderedlist>
+
+      <para>The following recommendation is made for a new node identity
+      policy. Rather than tie node identity to some attribute of the physical
+      machine, such as its hardware configuration as is done currently, PLC
+      will instead assign an arbitrary, unused identity to the node upon its
+      creation, and that identity will be stored locally at the node (most
+      likely on an external medium like a floppy disk). Then, as long as that
+      identity is still on the node, any hardware or software changes will not
+      necessarily require a change of the node identity. This will then allow
+      PLC, if necessary in the future, to change the node identity policy as
+      needed.</para>
+
+      <para>The following policy will apply to this new node identity:</para>
+
+      <orderedlist>
+        <listitem>
+          <para>In the past, a tech contact was able to change the network
+          settings on a node automatically by updating the network
+          configuration floppy. Now, these changes will have to be done at PLC
+          (with the option of assigning a new node identity). Thus, the node's
+          network settings (excluding the MAC address) are tied to the
+          identity.</para>
+        </listitem>
+
+        <listitem>
+          <para>Attempting to move the node identity to another machine will
+          prevent that machine from being used by researchers until the change
+          is dealt with by either a PLC administrator or a site technical
+          contact. If approved, the node would reconfigure itself
+          appropriately.</para>
+        </listitem>
+
+        <listitem>
+          <para>A node identity cannot be reused after the node has been
+          deleted from the PLC database.</para>
+        </listitem>
+
+        <listitem>
+          <para>The node identity will not change across software reinstalls,
+          changes of the harddisks or network adapters (as long as the network
+          settings remain), or any other hardware changes.</para>
+        </listitem>
+      </orderedlist>
+
+      <para>Given the current design of the PLC database, there is still a
+      need to use, at least internally, a numeric node identifier. Other
+      software and APIs available to researchers also use this identifier, so
+      the question becomes whether or not the above policy can be applied to
+      it without significantly changing either the PLC software or the
+      researchers' experiments. Answering this question is beyond the scope of
+      this document, and is left as an implementation decision.</para>
+    </section>
+
+    <section>
+      <title>Authenticating Node Identity</title>
+
+      <para>It is clear that the previous model for authentication, which
+      assumes that with identity comes authorization, will need to change to
+      one where a node can present its identity, then authenticate it as a
+      separate step in order to become authorized. During the boot process, a
+      node can still send sufficient information to identify itself, but a new
+      system is required to prove that what it sends does in fact come from
+      the node, and not someone attempting to impersonate the node. This is
+      especially important as node identities are made public knowledge.</para>
+
+      <para>Authentication in distributed systems is a fairly widely
+      researched problem, and the goal here is not to build a new mechanism
+      from scratch, but rather to identify an existing method that can be used
+      to fulfill our requirements. Our requirements are fairly simple, and
+      include:</para>
+
+      <orderedlist>
+        <listitem>
+          <para>The ability to trace the origin of a node added to PlanetLab,
+          including the party responsible for the addition.</para>
+        </listitem>
+
+        <listitem>
+          <para>Authenticating requests initiated by nodes to change
+          information at PLC. These requests involve little actual
+          communication between the nodes and PLC, and the overhead for
+          authenticating each request is small given their number and
+          frequency. This also means that keeping an authenticated channel
+          open for multiple requests will not be necessary.</para>
+        </listitem>
+      </orderedlist>
+
+      <para>Given the public nature of PlanetLab, encrypting data during
+      these system processes to prevent other parties from seeing it is not
+      necessary (also, simply hiding the details of the authentication
+      process is not a valid security model). Assuring that the requests are
+      not modified during transmission is necessary, however. A
+      public/private key pair system could be used, where each site would be
+      responsible for generating a private key and signing its nodes'
+      identities. PLC could then have a list of all public keys, and could
+      validate the identities. However, this is not recommended for several
+      reasons:</para>
+
+      <orderedlist>
+        <listitem>
+          <para>It places an additional burden on the site to generate and
+          keep secure these private keys. Having a private key for each node
+          would be unreasonable, so one key would be used for all nodes at a
+          particular site.</para>
+        </listitem>
+
+        <listitem>
+          <para>Using one key for all nodes not only increases the cost of a
+          compromised key (all identities would have to be re-signed), but
+          also makes it harder to detect use of the key to add unauthorized
+          nodes.</para>
+        </listitem>
+
+        <listitem>
+          <para>Differences in versions of the software used to generate keys
+          would have to be handled, increasing the complexity of supporting
+          such a system at PLC</para>
+        </listitem>
+      </orderedlist>
+
+      <para>To fulfill the above requirements for node identity, the
+      recommendation is made to use a message authentication system based on
+      hash functions and shared secrets, such as in <citation>2</citation>.
+      In such a system, the shared secret (also referred to as a key, though
+      not in the public/private key pair sense) is as simple as a fixed-size,
+      randomly generated number. Of primary importance in such a system is
+      the control and distribution of the key.</para>
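+
+      <para>A minimal sketch of such a scheme (illustrative only; the key
+      size, hash function, and names are assumptions rather than the actual
+      Boot Manager implementation):</para>
+
+      <programlisting>
+# Illustrative sketch of the shared-secret message authentication scheme.
+import hashlib
+import hmac
+import os
+
+def generate_node_key():
+    # a fixed-size random shared secret, generated by PLC when the node
+    # record is created and also written to the node's configuration floppy
+    return os.urandom(32)
+
+def message_auth_code(node_key, serialized_request):
+    # both the node and PLC compute this over the same serialized request
+    # data; PLC accepts the request only if the two codes match
+    return hmac.new(node_key, serialized_request.encode('utf-8'),
+                    hashlib.sha1).hexdigest()
+      </programlisting>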
+
+      <para>Securing a key at PLC is relatively straightforward. Only a
+      limited number of administrators have direct access to the PLC database,
+      so keys can be stored there with relative confidence, provided access to
+      the PLC machines is secure. Should any of these keys be compromised, all
+      keys would need to be regenerated and redistributed, so security here is
+      highly important.</para>
+
+      <para>However, securing the secret on the client side, at the node, is
+      more difficult. The key could be placed on some removable media that
+      will not be erased, such as a floppy disk or a small usb based disk, but
+      mechanisms must be in place to prevent the key from being read by anyone
+      except the boot manager and the boot cd processes, and not by any users
+      of the machine. In a situation like this, physical security is a
+      problem. Anyone who could get access to the machine can easily copy that
+      key and use it elsewhere. One possible solution to such a problem is to
+      instead make the key a combination of two different values, one stored
+      on the floppy disk, the other being a value that is only known to the
+      PI, and must be entered by hand for each message authentication. Then,
+      in order to compromise the entire key, not only must the attacker have
+      physical access to the machine, but would have to know the other half of
+      the key, which would not be recorded anywhere except in the PLC
+      database. This ultimately cannot work because of the need for human
+      intervention each time a node needs to be authenticated.</para>
+
+      <para>Ultimately, the best solution for the circumstances here is to
+      leave the entire key on the disk; leave physical security to the
+      individual sites; and put checks in place to attempt to identify if the
+      key is being reused elsewhere. As before, the post-boot manager system
+      (running the real PlanetLab kernel), can be configured to prevent the
+      floppy disk from being read by any logged in user (local or not).</para>
+
+      <para>If the key were identified as being reused elsewhere, appropriate
+      actions would include deleting the key from the PLC database
+      (effectively halting any use of it), and notifying the technical
+      contacts and PIs at the site. If necessary, they could generate new
+      keys after corrective actions had been taken.</para>
+    </section>
+
+    <section>
+      <title>Adding New Nodes</title>
+
+      <para>It is important to have control over the process by which nodes
+      are added to the PlanetLab system, and to be able to derive which party
+      is responsible for a machine at any point in the future. This is
+      because several different parties come to PLC for the list of nodes, and
+      PLC needs to provide a list that only includes nodes that have been
+      authorized. For one, researchers who are looking to run experiments
+      need to identify a set of PlanetLab machines. For another, people
+      unrelated to PlanetLab may have traffic-related concerns or complaints,
+      and be trying to track down who is responsible for a node and/or the
+      researcher's experiment.</para>
+
+      <para>It is possible to envision several scenarios where having
+      a non-authorized node in the PLC database would be a problem. One would
+      be a researcher inadvertently using a rogue node (those who installed it
+      could easily have root access) to run an experiment, and that experiment
+      being compromised across all of PlanetLab, or the results of their
+      research being tampered with. Another could involve a
+      rogue node being used for malicious purposes, such as a spam relay, and
+      the (initial) blame being directed at PLC, simply because of the
+      association.</para>
+
+      <para>As shown previously, simply authorizing an entire network is
+      insufficient, as there is no way to identify who authorized an
+      individual node on that subnet. Having the PlanetLab administrators add
+      all nodes by hand involves too much overhead, given the number of
+      nodes and the current growth of PlanetLab. This also places the
+      administrators in a position where they may not have the contact
+      information for the responsible party. A decent compromise is to
+      require either the PIs or technical contacts at each site to enter
+      their own nodes using the existing PLC interfaces. Given that one of the
+      existing steps for bringing a node online involves generating a
+      floppy-based network configuration file on the PlanetLab website, this
+      process can be extended to also add a record of the node with little
+      additional impact to PIs and tech contacts. At this point, the per-node
+      shared secret and the node identity necessary for node authentication
+      would be generated and saved at PLC as well.</para>
+    </section>
+
+    <section>
+      <title>How To Remove Nodes</title>
+
+      <para>There may be a need for an administrator, PI, or technical
+      contact to remove a node from the system. This can be done simply by
+      removing the node record from the PLC database, thereby preventing it
+      from successfully authenticating at boot time. In addition, a node could
+      be effectively disabled (but not removed) by deleting its key (the
+      per-node shared secret) from the database. Once restarted, it would not
+      be able to come back online until a new key is generated.</para>
+    </section>
+
+    <section>
+      <title>Node Installation</title>
+
+      <para>The node installer shall be integrated into the Boot Manager,
+      rather than continue to be a standalone component. This will allow the
+      boot manager, when appropriate, to invoke the installer directly.</para>
+    </section>
+  </section>
+
+  <section>
+    <title>Conclusion</title>
+
+    <para>As outlined above, this new system effectively encapsulates a new
+    policy for node identity, and a new mechanism for verifying the node
+    identity and authenticating node-initiated PLC changes. In total, the
+    boot manager will consist of:</para>
+
+    <orderedlist>
+      <listitem>
+        <para>A set of interfaces at PLC that are used to perform
+        authenticated, node-initiated changes.</para>
+      </listitem>
+
+      <listitem>
+        <para>A set of interfaces at PLC that are used to add new nodes to the
+        system.</para>
+      </listitem>
+
+      <listitem>
+        <para>A package downloaded by the boot cd at every boot, which is
+        used to install nodes, update configurations, or boot nodes, using
+        the interfaces above.</para>
+      </listitem>
+
+      <listitem>
+        <para>The policy for identifying nodes, and when that identity should
+        change.</para>
+      </listitem>
+    </orderedlist>
+
+    <para>Given the above recommendations, the bootstrap process and the
+    chain of trust for adding a new node now exist as detailed below. A site,
+    a principal investigator, and a tech contact are assumed to already be
+    present and authorized.</para>
+
+    <orderedlist>
+      <listitem>
+        <para>The technical contact downloads a boot cd for the new node.
+        Since the HTTPS certificate for the public web server is signed by a
+        trusted third party, the image can be verified by either ensuring it
+        was downloaded via HTTPS, or by downloading the PlanetLab public key
+        and verifying a signed copy of the cd, also available on the
+        website.</para>
+      </listitem>
+
+      <listitem>
+        <para>The now validated boot cd contains the CA certificate for the
+        boot server, so any host initiated communication that is using this
+        certificate on the cd can be sure that the server is in fact the
+        PlanetLab boot server.</para>
+      </listitem>
+
+      <listitem>
+        <para>The PI logs into their account on the PlanetLab website, also
+        over HTTPS, verifying the SSL certificates. Once logged in, they
+        use a tool to generate a configuration file for the new node, which
+        includes the network settings and node identity. During this
+        configuration file generation, a record of the node's existence is
+        entered into PLC, and a random, shared secret is generated for this
+        machine. The shared secret is saved in the PLC database, and is also
+        included in this configuration file.</para>
+      </listitem>
+
+      <listitem>
+        <para>Both the cd and the new configuration file (on a floppy disk)
+        are inserted into the machine. The machine is configured such that it
+        always starts off the cd, and never the floppy disk or the machine's
+        hard disks.</para>
+      </listitem>
+
+      <listitem>
+        <para>After the boot cd finishes bringing the machine online, loading
+        all hardware and network settings from the floppy, it contacts the
+        boot server using HTTPS and the certificate on the cd, and downloads
+        and executes the boot manager.</para>
+      </listitem>
+
+      <listitem>
+        <para>The boot manager then contacts PLC to get the current state of
+        the node it is currently running on.</para>
+      </listitem>
+
+      <listitem>
+        <para>Based on this state, the boot manager can either continue
+        booting the node (if already installed), install the machine if
+        necessary, or take any other action as appropriate. Since this is a
+        new machine, the installation will be initiated.</para>
+      </listitem>
+
+      <listitem>
+        <para>After successful installation, the boot manager needs to change
+        the state of the node such that the next time it starts, it will
+        instead continue the normal boot process. The boot manager contacts
+        PLC and requests a change of node state. This request consists of the
+        node identity, data pertaining to the request itself, and a message
+        authentication code based on the shared secret from the floppy disk
+        and the request data.</para>
+      </listitem>
+
+      <listitem>
+        <para>PLC, in order to authenticate the request, generates its own
+        message authentication code based on the submitted data and its own
+        copy of the shared secret. If the message authentication codes match,
+        then the requested action is performed and the boot manager is
+        notified of success.</para>
+      </listitem>
+
+      <listitem>
+        <para>If the node is already installed, and no actions are necessary,
+        the machine is booted. To protect the shared secret on the floppy disk
+        from users of the machine, the kernel during runtime cannot access the
+        floppy disk. At this point, control of the system is removed from the
+        boot manager and run-time software takes control.</para>
+      </listitem>
+    </orderedlist>
+
+    <para>Any action the boot manager may need to take that requires some
+    value to be changed in PLC can use the steps outlined in 8 through 10. As
+    an extra precaution to prevent unauthorized nodes from booting, the
+    process in step 7 should also use the authentication steps in 8 through
+    10.</para>
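+
+    <para>To make steps 8 through 10 concrete, the following sketch shows the
+    PLC-side check (illustrative only; the serialization of the request data
+    is assumed to match whatever the node used when computing its code):</para>
+
+    <programlisting>
+# Illustrative sketch of the PLC-side verification in steps 9 and 10.
+import hashlib
+import hmac
+
+def verify_node_request(node_key, serialized_request, presented_code):
+    # PLC recomputes the message authentication code with its own copy of
+    # the shared secret and compares it with the code the node sent
+    expected = hmac.new(node_key, serialized_request.encode('utf-8'),
+                        hashlib.sha1).hexdigest()
+    # compare_digest performs a timing-safe comparison
+    return hmac.compare_digest(expected, presented_code)
+    </programlisting>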
+
+    <para>Given that the shared secret on the floppy disk can only be accessed
+    in the cd environment (when the boot manager is running and the boot cd
+    kernel provides floppy disk access), any operation that a node can perform
+    that results in a change in data at PLC must be performed during this
+    stage. During runtime, a node can still present its identity to PLC to
+    receive node-specific packages or configuration files, but all interfaces
+    that provide these packages or files cannot change any record or data at
+    PLC.</para>
+  </section>
+
+  <bibliography>
+    <biblioentry>
+      <abbrev>1</abbrev>
+
+      <title><ulink
+      url="http://rhlinux.redhat.com/anaconda">Anaconda</ulink></title>
+    </biblioentry>
+
+    <biblioentry>
+      <abbrev>2</abbrev>
+
+      <title>Message Authentication using Hash Functions - The HMAC
+      construction</title>
+
+      <authorgroup>
+        <author>
+          <firstname>Mihir</firstname>
+
+          <surname>Bellare</surname>
+        </author>
+
+        <author>
+          <firstname>Ran</firstname>
+
+          <surname>Canetti</surname>
+        </author>
+
+        <author>
+          <firstname>Hugo</firstname>
+
+          <surname>Krawczyk</surname>
+        </author>
+      </authorgroup>
+
+      <date>Spring 1996</date>
+    </biblioentry>
+  </bibliography>
+</article>
diff --git a/documentation/pdn-pdf-style.xsl b/documentation/pdn-pdf-style.xsl
new file mode 100644 (file)
index 0000000..ff5b631
--- /dev/null
@@ -0,0 +1,17 @@
+<?xml version="1.0" encoding="utf-8"?>
+<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform" version="1.0">
+
+<xsl:import href="http://docbook.sourceforge.net/release/xsl/current/fo/docbook.xsl"/>
+
+<xsl:param name="header.rule" select="0"></xsl:param>
+<xsl:param name="footer.rule" select="0"></xsl:param>
+<xsl:param name="section.autolabel" select="1"></xsl:param>
+
+<!-- more room for the titles at the top of each page -->
+<xsl:param name="header.column.widths" select="'1 2 1'"></xsl:param>
+
+<!-- remove revision history -->
+<xsl:template match="revhistory" mode="titlepage.mode">
+</xsl:template>
+
+</xsl:stylesheet>
diff --git a/dummy_bootloader/Makefile b/dummy_bootloader/Makefile
new file mode 100644 (file)
index 0000000..4eafd6a
--- /dev/null
@@ -0,0 +1,17 @@
+all: dummy_bootloader
+
+dummy_bootloader: dummy_bootloader.S
+       nasm $< -o $@
+
+testbootdisk: dummy_bootloader
+       dd if=/dev/zero of=testbootdisk bs=512 count=2880
+
+run: testbootdisk
+       dd if=dummy_bootloader of=testbootdisk conv=notrunc
+       qemu -fda testbootdisk -boot a
+
+clean:
+       rm -f dummy_bootloader
+       rm -f testbootdisk
+
+.PHONY: all run clean
diff --git a/dummy_bootloader/dummy_bootloader b/dummy_bootloader/dummy_bootloader
new file mode 100644 (file)
index 0000000..ea5d7e5
Binary files /dev/null and b/dummy_bootloader/dummy_bootloader differ
diff --git a/dummy_bootloader/dummy_bootloader.S b/dummy_bootloader/dummy_bootloader.S
new file mode 100644 (file)
index 0000000..c8f1512
--- /dev/null
@@ -0,0 +1,44 @@
+SCREEN_COLS     equ 80
+SCREEN_ROWS     equ 25
+STACK_SEGMENT   equ 09000h     ; top of memory
+STACK_SIZE      equ 00fffh     ; 4K - 1 bytes of stack
+       
+TEXT_COLOR     equ 0x07        ; white on black
+
+       jmp 07c0h:start
+
+message                db "PlanetLab nodes require a boot cd at all times to function.",0
+       
+start:
+       mov ax, cs
+       mov ds, ax
+       mov es, ax
+       
+       mov sp, STACK_SEGMENT   ; setup stack (not really used)
+       mov ss, sp
+       mov sp, STACK_SIZE
+
+       ;; clear out the screen, using the scroll down bios int.
+       mov ah, 0x07            ; for int 0x10, 0x07 is scroll down window
+       mov al, 0               ; entire window
+       mov cx, 0               ; upper left corner = (0,0)
+       mov dh, SCREEN_ROWS     ; row of bottom
+       mov dl, SCREEN_COLS     ; column of right
+       mov bh, 7
+       int 10h                 
+       
+       mov si, message
+
+strout: lodsb
+       cmp al, 0
+       je done
+       mov ah, 0x0E            ; for int 0x10, 0x0E is teletype char output
+       mov bx, TEXT_COLOR
+       int 0x10
+       jmp strout
+
+done:  
+       jmp done
+       
+       times 510 - ($ - $$) db 0 ;  last two bytes are magic for x86 boot sectors
+       dw 0aa55h
diff --git a/nodeconfig/boot/getnodeid.php b/nodeconfig/boot/getnodeid.php
new file mode 100755 (executable)
index 0000000..f160bdd
--- /dev/null
@@ -0,0 +1,28 @@
+<?php
+
+//
+// Returns node ID of requestor
+//
+// Mark Huang <mlhuang@cs.princeton.edu>
+// Copyright (C) 2006 The Trustees of Princeton University
+//
+
+// Get admin API handle
+require_once 'plc_api.php';
+global $adm;
+
+if (!empty($_REQUEST['mac_addr'])) {
+  $mac_lower = strtolower(trim($_REQUEST['mac_addr']));
+  $mac_upper = strtoupper(trim($_REQUEST['mac_addr']));
+  $interfaces = $adm->GetInterfaces(array('mac' => array($mac_lower, $mac_upper)));
+} else {
+  $interfaces = $adm->GetInterfaces(array('ip' => $_SERVER['REMOTE_ADDR']));
+}
+
+if (!empty($interfaces)) {
+  print $interfaces[0]['node_id'];
+} else {
+  print "-1";
+}
+
+?>
diff --git a/nodeconfig/boot/index.php b/nodeconfig/boot/index.php
new file mode 100755 (executable)
index 0000000..97500ac
--- /dev/null
@@ -0,0 +1,80 @@
+<?php
+
+//
+// Returns node boot script
+//
+// Mark Huang <mlhuang@cs.princeton.edu>
+// Copyright (C) 2006 The Trustees of Princeton University
+//
+
+// Get admin API handle
+require_once 'plc_api.php';
+global $adm;
+
+// location for signed scripts
+$bmdir="/var/www/html/boot";
+$bmext=".sh.sgn";
+
+$candidates = array();
+
+// Look up the node
+$interfaces = $adm->GetInterfaces(array('ip' => $_SERVER['REMOTE_ADDR']));
+if (!empty($interfaces)) {
+  $nodes = $adm->GetNodes(array($interfaces[0]['node_id']));
+  if (!empty($nodes)) {
+    $node = $nodes[0];
+  }
+}
+
+if (isset($node)) {
+  // Allow very old nodes that do not have a node key in their
+  // configuration files to use their "boot nonce" instead. The boot
+  // nonce is a random value generated by the node itself and POSTed
+  // by the Boot CD when it requests the Boot Manager. This is
+  // obviously not very secure, so we only allow it to be used if the
+  // requestor IP is the same as the IP address we have on record for
+  // the node.
+
+  // 3.x CDs post 'version', 2.x CDs post 'id'.
+  if (!empty($_REQUEST['version'])) {
+    $version = trim($_REQUEST['version']);
+  } elseif (!empty($_REQUEST['id'])) {
+    $version = trim($_REQUEST['id']);
+  } else {
+    $version = "2.0";
+  }
+
+  if (empty($node['key']) && !empty($_REQUEST['nonce'])) {
+    // 3.x CDs post the boot nonce in ASCII hex. 2.x CDs post it in binary.
+    if (strstr($version, "2.0") === FALSE) {
+      // 3.x CDs post a trailing newline...sigh
+      $nonce = trim($_REQUEST['nonce']);
+    } else {
+      $nonce = bin2hex($_REQUEST['nonce']);
+    }
+    $adm->UpdateNode($node['node_id'], array('boot_nonce' => $nonce));
+  }
+
+  // Custom bootmanager for the node, e.g.
+  // bootmanager_planetlab-1.cs.princeton.edu.sh.sgn
+  $candidates[] = "bootmanager" . "_" . strtolower($node['hostname']);
+
+  // Custom bootmanager for the deployment tag, e.g.
+  $deployment = $adm->GetNodeDeployment($node['node_id']);
+  if ( ! empty ($deployment) ) {
+    $candidates[] = "bootmanager" . "_" . $deployment;
+  }
+}
+
+// Default bootmanager
+$candidates[] = "bootmanager_regular";
+
+foreach ($candidates as $bootmanager) {
+  $candidate=$bmdir . "/" . $bootmanager . $bmext ;
+  if (file_exists($candidate)) {
+    readfile($candidate);
+    exit();
+  }
+}
+
+?>
diff --git a/nodeconfig/boot/upload-bmlog.php b/nodeconfig/boot/upload-bmlog.php
new file mode 100755 (executable)
index 0000000..6e29e23
--- /dev/null
@@ -0,0 +1,112 @@
+<?php
+
+// Thierry Parmentelat -- INRIA
+// first draft for a revival of former (3.x?) alpina-logs in 5.0
+
+// this needs be created with proper permissions at package install time
+$logdir="/var/log/bm";
+
+// limit: applies to uploads coming from an unrecognized IP
+$limit_bytes=4*1024;
+
+$default_hostname="unknown";
+
+function mkdir_if_needed ($dirname) {
+  if (is_dir ($dirname))
+    return;
+  mkdir ($dirname) or die ("Cannot create dir " . $dirname);
+}
+  
+// Get admin API handle
+require_once 'plc_api.php';
+global $adm;
+
+// find the node that these logs should belong to by looking for a node_id
+// with an ip the same as the http requestor ip
+$ip = $_SERVER['REMOTE_ADDR'];
+
+$hostname=$default_hostname;
+// locate hostname from DB based on this IP
+$interfaces=$adm->GetInterfaces(array("ip"=>$ip));
+if (! empty($interfaces) ) {
+  $interface=$interfaces[0];
+  $node_id=$interface['node_id'];
+  $nodes=$adm->GetNodes($node_id,array("hostname"));
+  if (!empty($nodes)) {
+    $hostname=$nodes[0]['hostname'];
+  }
+ }
+
+// store the actual data in /var/log/bm/raw/2008-11-31-20-02-onelab01.inria.fr-138.96.250.141.txt
+
+$rawdir=$logdir . "/raw";
+$date=strftime("%Y-%m-%d-%H-%M");
+$log_name=$date . "-" . $hostname . "-" . $ip . ".txt";
+$log_path=$rawdir . "/" . $log_name;
+$month=strftime("%Y-%m");
+$time=strftime("%d-%H-%M");
+
+mkdir_if_needed ($rawdir);
+
+////////////////////////////////////////
+
+$log=fopen($log_path,"w") or die ("Cannot open logfile " . $log_path);
+
+$uploaded_name= $_FILES['log']['tmp_name'];
+$uploaded_size=filesize($uploaded_name);
+
+fprintf ($log, "BootManager log created on: %s-%s\n",$month,$time);
+fprintf( $log, "From IP: %s\n",$ip);
+fprintf( $log, "hostname: %s\n",$hostname);
+fprintf ( $log, "uploaded file: %s (%d bytes)\n",$uploaded_name,$uploaded_size);
+if ( ( strcmp($hostname,$default_hostname)==0) && ( $uploaded_size >= $limit_bytes) ) {
+  fprintf ( $log, "contents from an unrecognized IP address was truncated to %d bytes\n",$limit_bytes);
+  $truncated=TRUE;
+  $uploaded_size=$limit_bytes;
+ } else {
+  $truncated=FALSE;
+ }
+
+fprintf( $log, "-----------------\n\n" );
+$uploaded = fopen($uploaded_name,'r');
+$contents = fread($uploaded, $uploaded_size);
+fclose($uploaded);
+fwrite($log,$contents);
+if ($truncated)
+  fwrite ($log, " ..<" . "truncated" . ">..\n");
+fclose($log);
+
+////////////////////////////////////////
+
+// create symlinks for easy browsing
+
+// /var/log/bm/per-month/2008-11/onelab1.inria.fr/31-20-02.bmlog
+$linkdir=$logdir;
+$linkdir=$linkdir . "/per-month";
+mkdir_if_needed ($linkdir);
+$linkdir=$linkdir . "/" . $month;
+mkdir_if_needed ($linkdir);
+$linkdir = $linkdir . "/" . $hostname;
+mkdir_if_needed ($linkdir);
+$link = $linkdir . "/" . $time ;
+symlink ("../../../raw/".$log_name,$link);
+
+# /var/log/bm/per-hostname/onelab1.inria.fr/2008-11-31-20-02.bmlog
+$linkdir=$logdir;
+$linkdir=$linkdir . "/per-hostname";
+mkdir_if_needed ($linkdir);
+$linkdir=$linkdir . "/" . $hostname;
+mkdir_if_needed ($linkdir);
+$link = $linkdir . "/" . $month . "-" . $time ;
+symlink ("../../raw/".$log_name,$link);
+
+# /var/log/bm/per-ip/138.96.250.141/2008-11-31-20-02.bmlog
+$linkdir=$logdir;
+$linkdir=$linkdir . "/per-ip";
+mkdir_if_needed ($linkdir);
+$linkdir=$linkdir . "/" . $ip;
+mkdir_if_needed ($linkdir);
+$link = $linkdir . "/" . $month . "-" . $time ;
+symlink ("../../raw/".$log_name,$link);
+
+?>
diff --git a/plc.d/bootmanager b/plc.d/bootmanager
new file mode 100755 (executable)
index 0000000..4f82b09
--- /dev/null
@@ -0,0 +1,46 @@
+#!/bin/bash
+#
+# priority: 1100
+#
+# Rebuild the Boot Manager
+#
+# Mark Huang <mlhuang@cs.princeton.edu>
+# Copyright (C) 2006 The Trustees of Princeton University
+#
+
+# Source function library and configuration
+. /etc/plc.d/functions
+. /etc/planetlab/plc_config
+
+# Be verbose
+set -x
+
+case "$1" in
+    start)
+       if [ "$PLC_BOOT_ENABLED" != "1" -a \
+            "$PLC_WWW_ENABLED" != "1" ] ; then
+           exit 0
+       fi
+
+       shopt -s nullglob
+       for topdir in /usr/share/bootmanager/* ; do
+           [ -d "$topdir" ] || continue
+           deployment=$(basename $topdir)
+           if [ "$deployment" = "regular" ] ; then
+               action $"Rebuilding Boot Manager"  $topdir/build.sh regular
+               check
+           elif [ -x $topdir/build.sh ] ; then
+               action $"Rebuilding Boot Manager for deployment $deployment" $topdir/build.sh $deployment
+               check
+           elif [ "$deployment" == "source" ] ; then
+               action $"Ignoring obsolete dir, please cleanup $topdir"
+           else
+               action $"WARNING: missing build.sh script in $topdir"
+           fi
+       done
+
+       result "$MESSAGE"
+       ;;
+esac
+
+exit $ERRORS
diff --git a/source/BootAPI.py b/source/BootAPI.py
new file mode 100644 (file)
index 0000000..c7d8105
--- /dev/null
@@ -0,0 +1,197 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2003 Intel Corporation
+# All rights reserved.
+#
+# Copyright (c) 2004-2006 The Trustees of Princeton University
+# All rights reserved.
+
+
+import xmlrpclib
+import xml.parsers.expat
+import hmac
+import string
+import sha
+import cPickle
+import utils
+import os
+
+from Exceptions import *
+
+stash = None
+
+def create_auth_structure( vars, call_params ):
+    """
+    create and return an authentication structure for a Boot API
+    call. Vars contains the boot manager runtime variables, and
+    call_params is a tuple of the parameters that will be passed to the
+    API call. Return None if unable to (typically due to missing
+    keys in vars, such as node_id or node_key)
+    """
+
+    auth= {}
+
+    try:
+        auth_session = {}
+        auth_session['AuthMethod'] = 'session'
+
+        if not vars.has_key('NODE_SESSION'):
+            # Try to load /etc/planetlab/session if it exists.
+            sessionfile = open('/etc/planetlab/session', 'r')
+            session = sessionfile.read().strip()
+
+            auth_session['session'] = session
+            # Test session.  Faults if it's no good.
+            vars['API_SERVER_INST'].AuthCheck(auth_session)
+            vars['NODE_SESSION'] = session
+
+            sessionfile.close()
+        else:
+            auth_session['session'] = vars['NODE_SESSION']
+
+        auth = auth_session
+
+    except:
+        auth['AuthMethod']= 'hmac'
+
+        try:
+            auth['node_id'] = vars['NODE_ID']
+            auth['node_ip'] = vars['INTERFACE_SETTINGS']['ip']
+        except KeyError, e:
+            return None
+
+        node_hmac= hmac.new(vars['NODE_KEY'], "[]".encode('utf-8'), sha).hexdigest()
+        auth['value']= node_hmac
+        try:
+            auth_session = {}
+            if not vars.has_key('NODE_SESSION'):
+                session = vars['API_SERVER_INST'].GetSession(auth)
+                auth_session['session'] = session
+                vars['NODE_SESSION'] = session
+                # NOTE: save session value to /etc/planetlab/session for 
+                # RunlevelAgent and future BootManager runs
+                if not os.path.exists("/etc/planetlab"):
+                    os.makedirs("/etc/planetlab")
+                sessionfile = open('/etc/planetlab/session', 'w')
+                sessionfile.write( vars['NODE_SESSION'] )
+                sessionfile.close()
+            else:
+                auth_session['session'] = vars['NODE_SESSION']
+
+            auth_session['AuthMethod'] = 'session'
+            auth = auth_session
+
+        except Exception, e:
+            # NOTE: BM has failed to authenticate utterly.
+            raise BootManagerAuthenticationException, "%s" % e
+
+    return auth
+
+
+def serialize_params( call_params ):
+    """
+    convert a list of parameters into a format that will be used in the
+    hmac generation. both the boot manager and plc must have a common
+    format. full documentation is in the boot manager technical document,
+    but essentially we are going to take all the values (for dictionary
+    objects, the values they contain), put them into a list, sort them,
+    and combine them into one long string encased in a set of braces.
+    """
+
+    values= []
+    
+    for param in call_params:
+        if isinstance(param,list) or isinstance(param,tuple):
+            values += serialize_params(param)
+        elif isinstance(param,dict):
+            values += serialize_params(param.values())
+        elif isinstance(param,xmlrpclib.Boolean):
+            # bool was not a real type in Python <2.3 and had to be
+            # marshalled as a custom type in xmlrpclib. Make sure that
+            # bools serialize consistently.
+            if param:
+                values.append("True")
+            else:
+                values.append("False")
+        else:
+            values.append(unicode(param))
+                
+    return values
+
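+# Illustrative sketch, not part of the original file: for a hypothetical
+# parameter tuple such as (42, {'ip': '10.0.0.1'}, True), serialize_params
+# would return the flat list [u'42', u'10.0.0.1', u'True']; per the docstring
+# above, the caller then sorts and joins these values before computing the HMAC.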
+    
+def call_api_function( vars, function, user_params ):
+    """
+    call the named api function with params, and return the
+    value to the caller. the authentication structure is handled
+    automatically, and doesn't need to be passed in with params.
+
+    If the call fails, a BootManagerException is raised.
+    """
+    global stash
+
+    try:
+        api_server= vars['API_SERVER_INST']
+    except KeyError, e:
+        raise BootManagerException, "No connection to the API server exists."
+
+    if api_server is None:
+        if not stash:
+            load(vars)
+        for i in stash:
+            if i[0] == function and i[1] == user_params:
+               return i[2]
+        raise BootManagerException, \
+              "Disconnected operation failed, insufficient stash."
+
+    auth= create_auth_structure(vars,user_params)
+    if auth is None:
+        raise BootManagerException, \
+              "Could not create auth structure, missing values."
+    
+    params= (auth,)
+    params= params + user_params
+
+    try:
+        exec( "rc= api_server.%s(*params)" % function )
+        if stash is None:
+            stash = []
+        stash += [ [ function, user_params, rc ] ]
+        return rc
+    except xmlrpclib.Fault, fault:
+        raise BootManagerException, "API Fault: %s" % fault
+    except xmlrpclib.ProtocolError, err:
+        raise BootManagerException,"XML RPC protocol error: %s" % err
+    except xml.parsers.expat.ExpatError, err:
+        raise BootManagerException,"XML parsing error: %s" % err
+
+
+class Stash(file):
+    mntpnt = '/tmp/stash'
+    def __init__(self, vars, mode):
+        utils.makedirs(self.mntpnt)
+        try:
+            utils.sysexec('mount -t auto -U %s %s' % (vars['DISCONNECTED_OPERATION'], self.mntpnt))
+            # make sure it's not read-only
+            f = file('%s/api.cache' % self.mntpnt, 'a')
+            f.close()
+            file.__init__(self, '%s/api.cache' % self.mntpnt, mode)
+        except:
+            utils.sysexec_noerr('umount %s' % self.mntpnt)
+            raise BootManagerException, "Couldn't find API-cache for disconnected operation"
+
+    def close(self):
+        file.close(self)
+        utils.sysexec_noerr('umount %s' % self.mntpnt)
+
+def load(vars):
+    global stash
+    s = Stash(vars, 'r')
+    stash = cPickle.load(s)
+    s.close()
+
+def save(vars):
+    global stash
+    if vars['DISCONNECTED_OPERATION']:
+        s = Stash(vars, 'w')
+        cPickle.dump(stash, s)
+        s.close()
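+
+# Note on disconnected operation: the stash is a pickled list of
+# [function, params, result] triples built up in call_api_function above;
+# save() writes it to api.cache on the filesystem whose UUID is given by
+# vars['DISCONNECTED_OPERATION'], so a later disconnected run can replay
+# cached API results via load().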
diff --git a/source/BootManager.py b/source/BootManager.py
new file mode 100755 (executable)
index 0000000..3c3f1d4
--- /dev/null
@@ -0,0 +1,416 @@
+#!/usr/bin/python -u
+#
+# Copyright (c) 2003 Intel Corporation
+# All rights reserved.
+#
+# Copyright (c) 2004-2006 The Trustees of Princeton University
+# All rights reserved.
+
+import string
+import sys, os, traceback
+import time
+import gzip
+
+from steps import *
+from Exceptions import *
+import notify_messages
+import BootServerRequest
+import utils
+
+# all output is written to this file
+BM_NODE_LOG= "/tmp/bm.log"
+VARS_FILE = "configuration"
+
+# the new contents of PATH when the boot manager is running
+BIN_PATH= ('/usr/local/bin',
+           '/usr/local/sbin',
+           '/usr/bin',
+           '/usr/sbin',
+           '/bin',
+           '/sbin')
+
+def read_configuration_file(filename):
+    # read in and store all variables from the given file. each line
+    # is in the format name=val (any whitespace around the = is
+    # removed); everything after the = to the end of the line is
+    # the value
+    vars = {}
+    vars_file= file(filename,'r')
+    validConfFile = True
+    for line in vars_file:
+        # if it's a comment or a whitespace line, ignore
+        if line[:1] == "#" or string.strip(line) == "":
+            continue
+
+        parts= string.split(line,"=")
+        if len(parts) != 2:
+            validConfFile = False
+            raise Exception( "Invalid line in vars file: %s" % line )
+
+        name= string.strip(parts[0])
+        value= string.strip(parts[1])
+        value= value.replace("'", "")   # remove quotes
+        value= value.replace('"', "")   # remove quotes
+        vars[name]= value
+
+    vars_file.close()
+    if not validConfFile:
+        raise Exception( "Unable to read configuration vars." )
+
+    # find out which directory we are running in, and set a variable
+    # for that. future steps may need to get files out of the bootmanager
+    # directory
+    current_dir= os.getcwd()
+    vars['BM_SOURCE_DIR']= current_dir
+
+    return vars
+
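+# Illustrative example of the format read_configuration_file() accepts
+# (the hostname below is hypothetical): whitespace around '=' is stripped
+# and any single or double quotes around the value are removed, e.g.
+#   VERSION=3.2
+#   BOOT_SERVER = boot.example.org
+#   UPLOAD_LOG_SCRIPT="/boot/upload-bmlog.php"
+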
+##############################
+class log:
+
+    format="%H:%M:%S(%Z) "
+
+    def __init__( self, OutputFilePath= None ):
+        try:
+            self.OutputFile= open( OutputFilePath, "w")
+            self.OutputFilePath= OutputFilePath
+        except:
+            print( "bootmanager log : Unable to open output file %r, continuing"%OutputFilePath )
+            self.OutputFile= None
+
+        self.VARS = None
+        try:
+            vars = read_configuration_file(VARS_FILE)
+            self.VARS = vars
+        except Exception, e:
+            self.LogEntry( str(e) )
+            return
+    
+    def LogEntry( self, str, inc_newline= 1, display_screen= 1 ):
+        now=time.strftime(log.format, time.localtime())
+        if self.OutputFile:
+            self.OutputFile.write( now+str )
+        if display_screen:
+            sys.stdout.write( now+str )
+            
+        if inc_newline:
+            if display_screen:
+                sys.stdout.write( "\n" )
+            if self.OutputFile:
+                self.OutputFile.write( "\n" )
+
+        if self.OutputFile:
+            self.OutputFile.flush()
+
+    def write( self, str ):
+        """
+        make log behave like a writable file object (for traceback
+        prints)
+        """
+        self.LogEntry( str, 0, 1 )
+    
+    # bm log uploading is available again, as of nodeconfig-5.0-2
+    def Upload( self, extra_file=None ):
+        """
+        upload the contents of the log to the server
+        """
+        if self.OutputFile is not None:
+            self.OutputFile.flush()
+
+            self.LogEntry( "Uploading logs to %s" % self.VARS['UPLOAD_LOG_SCRIPT'] )
+            
+            self.OutputFile.close()
+            self.OutputFile= None
+
+            hostname= self.VARS['INTERFACE_SETTINGS']['hostname'] + "." + \
+                      self.VARS['INTERFACE_SETTINGS']['domainname']
+            bs_request = BootServerRequest.BootServerRequest(self.VARS)
+            try:
+                # this was working until f10
+                bs_request.MakeRequest(PartialPath = self.VARS['UPLOAD_LOG_SCRIPT'],
+                                       GetVars = None, PostVars = None,
+                                       DoSSL = True, DoCertCheck = True,
+                                       FormData = ["log=@" + self.OutputFilePath,
+                                                   "hostname=" + hostname, 
+                                                   "type=bm.log"])
+            except:
+                # new pycurl
+                import pycurl
+                bs_request.MakeRequest(PartialPath = self.VARS['UPLOAD_LOG_SCRIPT'],
+                                       GetVars = None, PostVars = None,
+                                       DoSSL = True, DoCertCheck = True,
+                                       FormData = [('log',(pycurl.FORM_FILE, self.OutputFilePath)),
+                                                   ("hostname",hostname),
+                                                   ("type","bm.log")])
+        if extra_file is not None:
+            # NOTE: for code-reuse, evoke the bash function 'upload_logs'; 
+            # by adding --login, bash reads .bash_profile before execution.
+            # Also, never fail, since this is an optional feature.
+            utils.sysexec_noerr( """bash --login -c "upload_logs %s" """ % extra_file, self)
+
+
+##############################
+class BootManager:
+
+    # file containing initial variables/constants
+
+    # the set of valid node run states
+    NodeRunStates = {'reinstall':None,
+                     'boot':None,
+                     'safeboot':None,
+                     'disabled':None,
+                     }
+    
+    def __init__(self, log, forceState):
+        # override machine's current state from the command line
+        self.forceState = forceState
+
+        # the main logging point
+        self.LOG= log
+
+        # set to 1 if we can run after initialization
+        self.CAN_RUN = 0
+
+        if log.VARS:
+            # this contains a set of information used and updated by each step
+            self.VARS= log.VARS
+        else:
+            return
+             
+        # not sure what the current PATH is set to, replace it with what
+        # we know will work with all the boot cds
+        os.environ['PATH']= string.join(BIN_PATH,":")
+
+        self.CAN_RUN= 1
+
+    def Run(self):
+        """
+        core boot manager logic.
+
+        errors are handled as follows: if any particular step cannot
+        continue or unexpectedly fails, an exception is thrown. in this
+        case, the boot manager cannot continue running.
+
+        these step functions can also return 0/1 depending on whether
+        or not they succeeded. In the case of steps like ConfirmInstallWithUser,
+        a 0 is returned and no exception is thrown if the user chose not
+        to confirm the install. The same goes for CheckHardwareRequirements:
+        if the requirements are not met but the tests themselves succeeded,
+        0 is returned.
+
+        steps that run within the installer are expected to either
+        complete successfully and return 1, or throw an exception.
+
+        For exact return values and expected operations, see the comments
+        at the top of each of the individual step functions.
+        """
+
+        def _nodeNotInstalled(message='MSG_NODE_NOT_INSTALLED'):
+            # called by the _xxxState() functions below upon failure
+            self.VARS['RUN_LEVEL']= 'failboot'
+            notify = getattr(notify_messages, message)
+            self.VARS['STATE_CHANGE_NOTIFY']= 1
+            self.VARS['STATE_CHANGE_NOTIFY_MESSAGE']= notify
+            raise BootManagerException, notify
+
+        def _bootRun():
+            # implements the boot logic, which consists of first
+            # double-checking that the node was properly installed,
+            # checking whether someone added or changed disks, and
+            # then finally chain-booting.
+
+            # starting the fallback/debug ssh daemon for safety:
+            # if the node install somehow hangs, or if it simply takes ages, 
+            # we can still enter and investigate
+            try:
+                StartDebug.Run(self.VARS, self.LOG, last_resort = False)
+            except:
+                pass
+
+            InstallInit.Run( self.VARS, self.LOG )                    
+            ret = ValidateNodeInstall.Run( self.VARS, self.LOG )
+            if ret == 1:
+                WriteModprobeConfig.Run( self.VARS, self.LOG )
+                WriteNetworkConfig.Run( self.VARS, self.LOG )
+                CheckForNewDisks.Run( self.VARS, self.LOG )
+                SendHardwareConfigToPLC.Run( self.VARS, self.LOG )
+                ChainBootNode.Run( self.VARS, self.LOG )
+            elif ret == -1:
+                _nodeNotInstalled('MSG_NODE_FILESYSTEM_CORRUPT')
+            elif ret == -2:
+                _nodeNotInstalled('MSG_NODE_MOUNT_FAILED')
+            elif ret == -3:
+                _nodeNotInstalled('MSG_NODE_MISSING_KERNEL')
+            else:
+                _nodeNotInstalled()
+
+        def _reinstallRun():
+
+            # starting the fallback/debug ssh daemon for safety:
+            # if the node install somehow hangs, or if it simply takes ages, 
+            # we can still enter and investigate
+            try:
+                StartDebug.Run(self.VARS, self.LOG, last_resort = False)
+            except:
+                pass
+
+            # implements the reinstall logic, which will check whether
+            # the min. hardware requirements are met, install the
+            # software, and upon correct installation will switch to
+            # 'boot' state and chainboot into the production system
+            if not CheckHardwareRequirements.Run( self.VARS, self.LOG ):
+                self.VARS['RUN_LEVEL']= 'failboot'
+                raise BootManagerException, "Hardware requirements not met."
+
+            # runinstaller
+            InstallInit.Run( self.VARS, self.LOG )                    
+            InstallPartitionDisks.Run( self.VARS, self.LOG )            
+            InstallBootstrapFS.Run( self.VARS, self.LOG )            
+            InstallWriteConfig.Run( self.VARS, self.LOG )
+            InstallUninitHardware.Run( self.VARS, self.LOG )
+            self.VARS['BOOT_STATE']= 'boot'
+            self.VARS['STATE_CHANGE_NOTIFY']= 1
+            self.VARS['STATE_CHANGE_NOTIFY_MESSAGE']= \
+                 notify_messages.MSG_INSTALL_FINISHED
+            UpdateBootStateWithPLC.Run( self.VARS, self.LOG )
+            _bootRun()
+            
+        def _installRun():
+            # implements the new install logic, which will first check
+            # with the user whether it is ok to install on this
+            # machine, switch to 'reinstall' state and then invoke the reinstall
+            # logic.  See reinstallState logic comments for further
+            # details.
+            if not ConfirmInstallWithUser.Run( self.VARS, self.LOG ):
+                return 0
+            self.VARS['BOOT_STATE']= 'reinstall'
+            _reinstallRun()
+
+        def _debugRun(state='failboot'):
+            # implements debug logic, which starts the sshd and just waits around
+            self.VARS['RUN_LEVEL']=state
+            StartDebug.Run( self.VARS, self.LOG )
+            # fsck/mount fs if present, and ignore return value if it's not.
+            ValidateNodeInstall.Run( self.VARS, self.LOG )
+
+        def _badstateRun():
+            # should never happen; log event
+            self.LOG.write( "\nInvalid BOOT_STATE = %s\n" % self.VARS['BOOT_STATE'])
+            _debugRun()
+
+        # setup state -> function hash table
+        BootManager.NodeRunStates['reinstall']  = _reinstallRun
+        BootManager.NodeRunStates['boot']       = _bootRun
+        BootManager.NodeRunStates['safeboot']   = lambda : _debugRun('safeboot')
+        BootManager.NodeRunStates['disabled']   = lambda : _debugRun('disabled')
+
+        success = 0
+        try:
+            InitializeBootManager.Run( self.VARS, self.LOG )
+            ReadNodeConfiguration.Run( self.VARS, self.LOG )
+            AuthenticateWithPLC.Run( self.VARS, self.LOG )
+            UpdateLastBootOnce.Run( self.VARS, self.LOG )
+            StartRunlevelAgent.Run( self.VARS, self.LOG )
+            GetAndUpdateNodeDetails.Run( self.VARS, self.LOG )
+
+            # override machine's current state from the command line
+            if self.forceState is not None:
+                self.VARS['BOOT_STATE']= self.forceState
+                UpdateBootStateWithPLC.Run( self.VARS, self.LOG )
+
+            stateRun = BootManager.NodeRunStates.get(self.VARS['BOOT_STATE'],_badstateRun)
+            stateRun()
+            success = 1
+
+        except KeyError, e:
+            self.LOG.write( "\n\nKeyError while running: %s\n" % str(e) )
+        except BootManagerException, e:
+            self.LOG.write( "\n\nException while running: %s\n" % str(e) )
+        except BootManagerAuthenticationException, e:
+            self.LOG.write( "\n\nFailed to Authenticate Node: %s\n" % str(e) )
+            # sets /tmp/CANCEL_BOOT flag
+            StartDebug.Run(self.VARS, self.LOG )
+            # Return immediately b/c any other calls to API will fail
+            return success
+        except:
+            self.LOG.write( "\n\nImplementation Error\n")
+            traceback.print_exc(file=self.LOG.OutputFile)
+            traceback.print_exc()
+
+        if not success:
+            try:
+                _debugRun()
+            except BootManagerException, e:
+                self.LOG.write( "\n\nException while running: %s\n" % str(e) )
+            except:
+                self.LOG.write( "\n\nImplementation Error\n")
+                traceback.print_exc(file=self.LOG.OutputFile)
+                traceback.print_exc()
+
+        return success
+            
+            
+def main(argv):
+
+    import utils
+    utils.prompt_for_breakpoint_mode()
+
+#    utils.breakpoint ("Entering BootManager::main")
+    
+    # set to 1 if error occurred
+    error= 0
+    
+    # all output goes through this class so we can save it and post
+    # the data back to PlanetLab central
+    LOG= log( BM_NODE_LOG )
+
+    # NOTE: assume CWD is BM's source directory, but never fail
+    utils.sysexec_noerr("./setup_bash_history_scripts.sh", LOG)
+
+    LOG.LogEntry( "BootManager started at: %s" % \
+                  time.strftime("%a, %d %b %Y %H:%M:%S +0000", time.gmtime()) )
+
+    try:
+        forceState = None
+        if len(argv) == 2:
+            fState = argv[1]
+            if BootManager.NodeRunStates.has_key(fState):
+                forceState = fState
+            else:
+                LOG.LogEntry("FATAL: cannot force node run state to=%s" % fState)
+                error = 1
+    except:
+        traceback.print_exc(file=LOG.OutputFile)
+        traceback.print_exc()
+        
+    if error:
+        LOG.LogEntry( "BootManager finished at: %s" % \
+                      time.strftime("%a, %d %b %Y %H:%M:%S +0000", time.gmtime()) )
+        LOG.Upload()
+        return error
+
+    try:
+        bm= BootManager(LOG,forceState)
+        if bm.CAN_RUN == 0:
+            LOG.LogEntry( "Unable to initialize BootManager." )
+        else:
+            LOG.LogEntry( "Running version %s of BootManager." % bm.VARS['VERSION'] )
+            success= bm.Run()
+            if success:
+                LOG.LogEntry( "\nDone!" );
+            else:
+                LOG.LogEntry( "\nError occurred!" );
+                error = 1
+    except:
+        traceback.print_exc(file=LOG.OutputFile)
+        traceback.print_exc()
+
+    LOG.LogEntry( "BootManager finished at: %s" % \
+                  time.strftime("%a, %d %b %Y %H:%M:%S +0000", time.gmtime()) )
+    LOG.Upload()
+
+    return error
+
+    
+if __name__ == "__main__":
+    error = main(sys.argv)
+    sys.exit(error)
diff --git a/source/BootServerRequest.py b/source/BootServerRequest.py
new file mode 100644 (file)
index 0000000..88cd081
--- /dev/null
@@ -0,0 +1,466 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2003 Intel Corporation
+# All rights reserved.
+#
+# Copyright (c) 2004-2006 The Trustees of Princeton University
+# All rights reserved.
+
+import os, sys
+import re
+import string
+import urllib
+import tempfile
+
+# try to load pycurl
+try:
+    import pycurl
+    PYCURL_LOADED= 1
+except:
+    PYCURL_LOADED= 0
+
+
+# if there is no cStringIO, fall back to the original
+try:
+    from cStringIO import StringIO
+except:
+    from StringIO import StringIO
+
+
+
+class BootServerRequest:
+
+    VERBOSE = 0
+
+    # all possible places to check the cdrom mount point.
+    # /mnt/cdrom is typically after the machine has come up,
+    # and /usr is when the boot cd is running
+    CDROM_MOUNT_PATH = ("/mnt/cdrom/","/usr/")
+    BOOTSERVER_CERTS= {}
+    MONITORSERVER_CERTS= {}
+    BOOTCD_VERSION=""
+    HTTP_SUCCESS=200
+    HAS_BOOTCD=0
+    USE_PROXY=0
+    PROXY=0
+
+    # in seconds, maximum time allowed for connect
+    DEFAULT_CURL_CONNECT_TIMEOUT=30
+    # in seconds, maximum time allowed for any transfer
+    DEFAULT_CURL_MAX_TRANSFER_TIME=3600
+    # location of the curl executable, used if pycurl isn't available
+    # and the DownloadFile method is called (backup path, only really
+    # needed for the boot cd environment where pycurl doesn't exist)
+    CURL_CMD = 'curl'
+    CURL_SSL_VERSION=3
+
+    def __init__(self, vars, verbose=0):
+
+        self.VERBOSE= verbose
+        self.VARS=vars
+            
+        # see if we have a boot cd mounted by checking for the version file
+        # if HAS_BOOTCD == 0 then either the machine doesn't have
+        # a boot cd, or something else is mounted
+        self.HAS_BOOTCD = 0
+
+        for path in self.CDROM_MOUNT_PATH:
+            self.Message( "Checking existance of boot cd on %s" % path )
+
+            os.system("/bin/mount %s > /dev/null 2>&1" % path )
+                
+            version_file= self.VARS['BOOTCD_VERSION_FILE'] % {'path' : path}
+            self.Message( "Looking for version file %s" % version_file )
+
+            if os.access(version_file, os.R_OK) == 0:
+                self.Message( "No boot cd found." );
+            else:
+                self.Message( "Found boot cd." )
+                self.HAS_BOOTCD=1
+                break
+
+        if self.HAS_BOOTCD:
+
+            # check the version of the boot cd, and locate the certs
+            self.Message( "Getting boot cd version." )
+        
+            versionRegExp= re.compile(r"PlanetLab BootCD v(\S+)")
+                
+            bootcd_version_f= file(version_file,"r")
+            line= string.strip(bootcd_version_f.readline())
+            bootcd_version_f.close()
+            
+            match= versionRegExp.findall(line)
+            if match:
+                (self.BOOTCD_VERSION)= match[0]
+            
+            # right now, all the versions of the bootcd are supported,
+            # so no need to check it
+            
+            self.Message( "Getting server from configuration" )
+            
+            bootservers= [ self.VARS['BOOT_SERVER'] ]
+            for bootserver in bootservers:
+                bootserver = string.strip(bootserver)
+                cacert_path= "%s/%s/%s" % \
+                             (self.VARS['SERVER_CERT_DIR'] % {'path' : path},
+                              bootserver,self.VARS['CACERT_NAME'])
+                if os.access(cacert_path, os.R_OK):
+                    self.BOOTSERVER_CERTS[bootserver]= cacert_path
+
+            monitorservers= [ self.VARS['MONITOR_SERVER'] ]
+            for monitorserver in monitorservers:
+                monitorserver = string.strip(monitorserver)
+                cacert_path= "%s/%s/%s" % \
+                             (self.VARS['SERVER_CERT_DIR'] % {'path' : path},
+                              monitorserver,self.VARS['CACERT_NAME'])
+                if os.access(cacert_path, os.R_OK):
+                    self.MONITORSERVER_CERTS[monitorserver]= cacert_path
+
+            self.Message( "Set of servers to contact: %s" %
+                          str(self.BOOTSERVER_CERTS) )
+            self.Message( "Set of servers to upload to: %s" %
+                          str(self.MONITORSERVER_CERTS) )
+        else:
+            self.Message( "Using default boot server address." )
+            self.BOOTSERVER_CERTS[self.VARS['DEFAULT_BOOT_SERVER']]= ""
+            self.MONITORSERVER_CERTS[self.VARS['DEFAULT_BOOT_SERVER']]= ""
+
+
+    def CheckProxy( self ):
+        # see if we have any proxy info from the machine
+        self.USE_PROXY= 0
+        self.Message( "Checking existance of proxy config file..." )
+        
+        if os.access(self.VARS['PROXY_FILE'], os.R_OK) and \
+               os.path.isfile(self.VARS['PROXY_FILE']):
+            self.PROXY= string.strip(file(self.VARS['PROXY_FILE'],'r').readline())
+            self.USE_PROXY= 1
+            self.Message( "Using proxy %s." % self.PROXY)
+        else:
+            self.Message( "Not using any proxy." )
+
+
+
+    def Message( self, Msg ):
+        if( self.VERBOSE ):
+            print( Msg )
+
+
+
+    def Error( self, Msg ):
+        sys.stderr.write( Msg + "\n" )
+
+
+
+    def Warning( self, Msg ):
+        self.Error(Msg)
+
+
+
+    def MakeRequest( self, PartialPath, GetVars,
+                     PostVars, DoSSL, DoCertCheck,
+                     ConnectTimeout= DEFAULT_CURL_CONNECT_TIMEOUT,
+                     MaxTransferTime= DEFAULT_CURL_MAX_TRANSFER_TIME,
+                     FormData= None):
+
+        (fd, buffer_name) = tempfile.mkstemp("MakeRequest-XXXXXX")
+        os.close(fd)
+        buffer = open(buffer_name, "w+b")
+
+        # the file "buffer_name" will be deleted by DownloadFile()
+
+        ok = self.DownloadFile(PartialPath, GetVars, PostVars,
+                               DoSSL, DoCertCheck, buffer_name,
+                               ConnectTimeout,
+                               MaxTransferTime,
+                               FormData)
+
+        # check the ok code, return the string only if it was successful
+        if ok:
+            buffer.seek(0)
+            ret = buffer.read()
+        else:
+            ret = None
+
+        buffer.close()
+        try:
+            # just in case it is not deleted by DownloadFile()
+            os.unlink(buffer_name)
+        except OSError:
+            pass
+            
+        return ret
+
+    def DownloadFile(self, PartialPath, GetVars, PostVars,
+                     DoSSL, DoCertCheck, DestFilePath,
+                     ConnectTimeout= DEFAULT_CURL_CONNECT_TIMEOUT,
+                     MaxTransferTime= DEFAULT_CURL_MAX_TRANSFER_TIME,
+                     FormData= None):
+
+        self.Message( "Attempting to retrieve %s" % PartialPath )
+
+        # we can't do ssl and check the cert if we don't have a bootcd
+        if DoSSL and DoCertCheck and not self.HAS_BOOTCD:
+            self.Error( "No boot cd exists (needed to use -c and -s.\n" )
+            return 0
+
+        if DoSSL and not PYCURL_LOADED:
+            self.Warning( "Using SSL without pycurl will by default " \
+                          "check at least standard certs." )
+
+        # ConnectTimeout has to be greater than 0
+        if ConnectTimeout <= 0:
+            self.Error( "Connect timeout must be greater than zero.\n" )
+            return 0
+
+
+        self.CheckProxy()
+
+        dopostdata= 0
+
+        # setup the post and get vars for the request
+        if PostVars:
+            dopostdata= 1
+            postdata = urllib.urlencode(PostVars)
+            self.Message( "Posting data:\n%s\n" % postdata )
+            
+        getstr= ""
+        if GetVars:
+            getstr= "?" + urllib.urlencode(GetVars)
+            self.Message( "Get data:\n%s\n" % getstr )
+
+        # now, attempt to make the request, starting at the first
+        # server in the list
+        if FormData:
+            cert_list = self.MONITORSERVER_CERTS
+        else:
+            cert_list = self.BOOTSERVER_CERTS
+        
+        for server in cert_list:
+            self.Message( "Contacting server %s." % server )
+                        
+            certpath = cert_list[server]
+
+            
+            # output what we are going to be doing
+            self.Message( "Connect timeout is %s seconds" % \
+                          ConnectTimeout )
+
+            self.Message( "Max transfer time is %s seconds" % \
+                          MaxTransferTime )
+
+            if DoSSL:
+                url = "https://%s/%s%s" % (server,PartialPath,getstr)
+                
+                if DoCertCheck and PYCURL_LOADED:
+                    self.Message( "Using SSL version %d and verifying peer." %
+                             self.CURL_SSL_VERSION )
+                else:
+                    self.Message( "Using SSL version %d." %
+                             self.CURL_SSL_VERSION )
+            else:
+                url = "http://%s/%s%s" % (server,PartialPath,getstr)
+                
+            self.Message( "URL: %s" % url )
+            
+            # setup a new pycurl instance, or a curl command line string
+            # if we don't have pycurl
+            
+            if PYCURL_LOADED:
+                curl= pycurl.Curl()
+
+                # don't want curl sending any signals
+                curl.setopt(pycurl.NOSIGNAL, 1)
+            
+                curl.setopt(pycurl.CONNECTTIMEOUT, ConnectTimeout)
+                curl.setopt(pycurl.TIMEOUT, MaxTransferTime)
+
+                # do not follow location when attempting to download a file
+                curl.setopt(pycurl.FOLLOWLOCATION, 0)
+
+                if self.USE_PROXY:
+                    curl.setopt(pycurl.PROXY, self.PROXY )
+
+                if DoSSL:
+                    curl.setopt(pycurl.SSLVERSION, self.CURL_SSL_VERSION)
+                
+                    if DoCertCheck:
+                        curl.setopt(pycurl.CAINFO, certpath)
+                        curl.setopt(pycurl.SSL_VERIFYPEER, 2)
+                        
+                    else:
+                        curl.setopt(pycurl.SSL_VERIFYPEER, 0)
+                
+                if dopostdata:
+                    curl.setopt(pycurl.POSTFIELDS, postdata)
+
+                # setup multipart/form-data upload
+                if FormData:
+                    curl.setopt(pycurl.HTTPPOST, FormData)
+
+                curl.setopt(pycurl.URL, url)
+            else:
+
+                cmdline = "%s " \
+                          "--connect-timeout %d " \
+                          "--max-time %d " \
+                          "--header Pragma: " \
+                          "--output %s " \
+                          "--fail " % \
+                          (self.CURL_CMD, ConnectTimeout,
+                           MaxTransferTime, DestFilePath)
+
+                if dopostdata:
+                    cmdline = cmdline + "--data '" + postdata + "' "
+
+                if FormData:
+                    cmdline = cmdline + "".join(["--form '" + field + "' " for field in FormData])
+
+                if not self.VERBOSE:
+                    cmdline = cmdline + "--silent "
+                    
+                if self.USE_PROXY:
+                    cmdline = cmdline + "--proxy %s " % self.PROXY
+
+                if DoSSL:
+                    cmdline = cmdline + "--sslv%d " % self.CURL_SSL_VERSION
+                    if DoCertCheck:
+                        cmdline = cmdline + "--cacert %s " % certpath
+                 
+                cmdline = cmdline + url
+
+                self.Message( "curl command: %s" % cmdline )
+                
+                
+            if PYCURL_LOADED:
+                try:
+                    # setup the output file
+                    outfile = open(DestFilePath,"wb")
+                    
+                    self.Message( "Opened output file %s" % DestFilePath )
+                
+                    curl.setopt(pycurl.WRITEDATA, outfile)
+                
+                    self.Message( "Fetching..." )
+                    curl.perform()
+                    self.Message( "Done." )
+                
+                    http_result= curl.getinfo(pycurl.HTTP_CODE)
+                    curl.close()
+                
+                    outfile.close()
+                    self.Message( "Results saved in %s" % DestFilePath )
+
+                    # check the code, return 1 if successful
+                    if http_result == self.HTTP_SUCCESS:
+                        self.Message( "Successful!" )
+                        return 1
+                    else:
+                        self.Message( "Failure, resultant http code: %d" % \
+                                      http_result )
+
+                except pycurl.error, err:
+                    errno, errstr= err
+                    self.Error( "connect to %s failed; curl error %d: '%s'\n" %
+                       (server,errno,errstr) )
+        
+                if not outfile.closed:
+                    try:
+                        os.unlink(DestFilePath)
+                        outfile.close()
+                    except OSError:
+                        pass
+
+            else:
+                self.Message( "Fetching..." )
+                rc = os.system(cmdline)
+                self.Message( "Done." )
+                
+                if rc != 0:
+                    try:
+                        os.unlink( DestFilePath )
+                    except OSError:
+                        pass
+                    self.Message( "Failure, resultant curl code: %d" % rc )
+                    self.Message( "Removed %s" % DestFilePath )
+                else:
+                    self.Message( "Successfull!" )
+                    return 1
+            
+        self.Error( "Unable to successfully contact any boot servers.\n" )
+        return 0
+
+
+
+
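+# Illustrative use (the path and variable names are hypothetical): given a
+# populated vars dictionary such as the one BootManager builds,
+#   req = BootServerRequest(vars, verbose=1)
+#   body = req.MakeRequest("some/partial/path", None, None, True, True)
+# MakeRequest returns the response body as a string on success, None otherwise.
+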
+def usage():
+    print(
+    """
+Usage: BootServerRequest.py [options] <partialpath>
+Options:
+ -c/--checkcert        Check SSL certs. Ignored if -s/--ssl missing.
+ -h/--help             This help text
+ -o/--output <file>    Write result to file
+ -s/--ssl              Make the request over HTTPS
+ -v                    Makes the operation more talkative
+""");  
+
+
+
+if __name__ == "__main__":
+    import getopt
+    
+    # check out our command line options
+    try:
+        opt_list, arg_list = getopt.getopt(sys.argv[1:],
+                                           "o:vhsc",
+                                           [ "output=", "verbose", \
+                                             "help","ssl","checkcert"])
+
+        ssl= 0
+        checkcert= 0
+        output_file= None
+        verbose= 0
+        
+        for opt, arg in opt_list:
+            if opt in ("-h","--help"):
+                usage()
+                sys.exit()
+            
+            if opt in ("-c","--checkcert"):
+                checkcert= 1
+            
+            if opt in ("-s","--ssl"):
+                ssl= 1
+
+            if opt in ("-o","--output"):
+                output_file= arg
+
+            if opt == "-v":
+                verbose= 1
+    
+        if len(arg_list) != 1:
+            raise Exception
+
+        partialpath= arg_list[0]
+        if string.lower(partialpath[:4]) == "http":
+            raise Exception
+
+    except:
+        usage()
+        sys.exit(2)
+
+    # got the command line args straightened out
+    requestor= BootServerRequest(verbose)
+        
+    if output_file:
+        requestor.DownloadFile( partialpath, None, None, ssl,
+                                checkcert, output_file)
+    else:
+        result= requestor.MakeRequest( partialpath, None, None, ssl, checkcert)
+        if result:
+            print result
+        else:
+            sys.exit(1)
diff --git a/source/COPYRIGHT b/source/COPYRIGHT
new file mode 100644 (file)
index 0000000..6bf1167
--- /dev/null
@@ -0,0 +1,55 @@
+The BootManager source code was initially developed by Intel
+Corporation and subsequently rewritten by Princeton University.  The
+copyright for the BootManager source code is as follows:
+
+Copyright (c) 2003 Intel Corporation
+All rights reserved.
+
+Copyright (c) 2004-2006 The Trustees of Princeton University
+All rights reserved.
+
+The License from both Intel and Princeton for this software is as
+follows:
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions
+are met: 
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+
+    * Redistributions in binary form must reproduce the above
+      copyright notice, this list of conditions and the following
+      disclaimer in the documentation and/or other materials provided
+      with the distribution.
+
+    * Neither the name of the copyright holder nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL PRINCETON
+UNIVERSITY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY
+WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE. 
+
+Intel Corporation places the following export restrictions on the
+software:
+
+EXPORT LAWS: THIS LICENSE ADDS NO RESTRICTIONS TO THE EXPORT LAWS OF
+YOUR JURISDICTION. It is licensee's responsibility to comply with any
+export regulations applicable in licensee's jurisdiction. Under
+CURRENT (May 2000) U.S. export regulations this software is eligible
+for export from the U.S. and can be downloaded by or otherwise
+exported or reexported worldwide EXCEPT to U.S. embargoed destinations
+which include Cuba, Iraq, Libya, North Korea, Iran, Syria, Sudan,
+Afghanistan and any other country to which the U.S. has embargoed
+goods and services.
+
diff --git a/source/Exceptions.py b/source/Exceptions.py
new file mode 100644 (file)
index 0000000..de2a670
--- /dev/null
@@ -0,0 +1,21 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2003 Intel Corporation
+# All rights reserved.
+#
+# Copyright (c) 2004-2006 The Trustees of Princeton University
+# All rights reserved.
+
+class BootManagerException(Exception):
+    def __init__( self, err ):
+        self.__fault= err
+
+    def __str__( self ):
+        return self.__fault
+    
+class BootManagerAuthenticationException(Exception):
+    def __init__( self, err ):
+        self.__fault= err
+
+    def __str__( self ):
+        return self.__fault
diff --git a/source/ModelOptions.py b/source/ModelOptions.py
new file mode 100644 (file)
index 0000000..3eb89ba
--- /dev/null
@@ -0,0 +1,44 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2003 Intel Corporation
+# All rights reserved.
+#
+# Copyright (c) 2004-2006 The Trustees of Princeton University
+# All rights reserved.
+
+
+import string
+
+MINHW   = 0x001
+SMP     = 0x002
+X86_64  = 0x004
+INTEL   = 0x008
+AMD     = 0x010
+NUMA    = 0x020
+GEODE   = 0x040
+BADHD   = 0x080
+LAST    = 0x100
+RAWDISK = 0x200
+
+modeloptions = {'smp':SMP,
+                'x64':X86_64,
+                'i64':X86_64|INTEL,
+                'a64':X86_64|AMD,
+                'i32':INTEL,
+                'a32':AMD,
+                'numa':NUMA,
+                'geode':GEODE,
+                'badhd':BADHD,
+                'minhw':MINHW,
+                'rawdisk':RAWDISK}
+
+def Get(model):
+    modelinfo = string.split(model,'/')
+    options= 0
+    for mi in modelinfo:
+        info = string.strip(mi)
+        info = info.lower()
+        options = options | modeloptions.get(info,0)
+
+    return options
+
diff --git a/source/RunlevelAgent.py b/source/RunlevelAgent.py
new file mode 100755 (executable)
index 0000000..e3047b3
--- /dev/null
@@ -0,0 +1,210 @@
+#!/usr/bin/python
+#
+# RunlevelAgent - acts as a heartbeat back to myplc reporting that the node is
+#     online and whether it is in boot or pre-boot run-level.
+#   This is useful to identify nodes that are behind a firewall, as well as to
+#   have the machine report run-time status both in safeboot and boot modes,
+#   so that it is immediately visible at myplc (gui or api).
+# 
+
+import xml, xmlrpclib
+import logging
+import time
+import traceback
+import sys
+import os
+import string
+
+CONFIG_FILE="/tmp/source/configuration"
+SESSION_FILE="/etc/planetlab/session"
+RLA_PID_FILE="/var/run/rla.pid"
+
+def read_config_file(filename):
+    ## NOTE: text copied from BootManager.py 
+    # TODO: unify this code to make it common. i.e. use ConfigParser module
+    vars = {}
+    vars_file= file(filename,'r')
+    validConfFile = True
+    for line in vars_file:
+        # if it's a comment or a whitespace line, ignore
+        if line[:1] == "#" or string.strip(line) == "":
+            continue
+
+        parts= string.split(line,"=")
+        if len(parts) != 2:
+            print "Invalid line in vars file: %s" % line
+            validConfFile = False
+            break
+
+        name= string.strip(parts[0])
+        value= string.strip(parts[1])
+        vars[name]= value
+
+    vars_file.close()
+    if not validConfFile:
+        print "Unable to read configuration vars."
+
+    return vars
+
+try:
+    sys.path = ['/etc/planetlab'] + sys.path
+    import plc_config
+    api_server_url = "https://" + plc_config.PLC_API_HOST + plc_config.PLC_API_PATH
+except:
+    filename=CONFIG_FILE
+    vars = read_config_file(filename)
+    api_server_url = vars['BOOT_API_SERVER']
+
+
+class Auth:
+    def __init__(self, username=None, password=None, **kwargs):
+        if 'session' in kwargs:
+            self.auth= { 'AuthMethod' : 'session',
+                    'session' : kwargs['session'] }
+        else:
+            if username==None and password==None:
+                self.auth = {'AuthMethod': "anonymous"}
+            else:
+                self.auth = {'Username' : username,
+                            'AuthMethod' : 'password',
+                            'AuthString' : password}
+class PLC:
+    def __init__(self, auth, url):
+        self.auth = auth
+        self.url = url
+        self.api = xmlrpclib.Server(self.url, verbose=False, allow_none=True)
+
+    def __getattr__(self, name):
+        method = getattr(self.api, name)
+        if method is None:
+            raise AssertionError("method does not exist")
+
+        return lambda *params : method(self.auth.auth, *params)
+
+    def __repr__(self):
+        return self.api.__repr__()
+
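+# Illustrative use (the session value is hypothetical): the __getattr__ hook
+# above prepends the auth structure, so
+#   plc = PLC(Auth(session='abcd1234'), api_server_url)
+#   plc.ReportRunlevel({'run_level': 'boot'})
+# issues ReportRunlevel(auth, {'run_level': 'boot'}) against the XML-RPC server.
+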
+def extract_from(filename, pattern):
+    f = os.popen("grep -E %s %s" % (pattern, filename))
+    val = f.read().strip()
+    return val
+
+def check_running(commandname):
+    f = os.popen("ps ax | grep -E %s | grep -v grep" % (commandname))
+    val = f.read().strip()
+    return val
+
+
+def save_pid():
+    # save PID
+    try:
+        pid = os.getpid()
+        f = open(RLA_PID_FILE, 'w')
+        f.write("%s\n" % pid)
+        f.close()
+    except:
+        print "Uuuhhh.... this should not occur."
+        sys.exit(1)
+
+def start_and_run():
+
+    save_pid()
+
+    # Keep trying to authenticate session, waiting for NM to re-write the
+    # session file, or DNS to succeed, until AuthCheck succeeds.
+    while True:
+        try:
+            f=open(SESSION_FILE,'r')
+            session_str=f.read().strip()
+            api = PLC(Auth(session=session_str), api_server_url)
+            # NOTE: What should we do if this call fails?
+            # TODO: handle dns failure here.
+            api.AuthCheck()
+            break
+        except:
+            print "Retry in 30 seconds: ", os.popen("uptime").read().strip()
+            traceback.print_exc()
+            time.sleep(30)
+
+    try:
+        env = 'production'
+        if len(sys.argv) > 2:
+            env = sys.argv[2]
+    except:
+        traceback.print_exc()
+
+    while True:
+        try:
+            # NOTE: here we are inferring the runlevel by environmental
+            #         observations.  We know how this process was started by the
+            #         given command line argument.  Then in bootmanager
+            #         runlevel, the bm.log gives information about the current
+            #         activity.
+            # other options:
+            #   call plc for current boot state?
+            #   how long have we been running?
+            if env == "bootmanager":
+                bs_val = extract_from('/tmp/bm.log', "'Current boot state:'")
+                if len(bs_val) > 0: bs_val = bs_val.split()[-1]
+                ex_val = extract_from('/tmp/bm.log', 'Exception')
+                fs_val = extract_from('/tmp/bm.log', 'mke2fs')
+                bm_val = check_running("BootManager.py")
+
+                if bs_val in ['diag', 'diagnose', 'safeboot', 'disabled', 'disable']:
+                    api.ReportRunlevel({'run_level' : 'safeboot'})
+
+                elif len(ex_val) > len("Exception"):
+                    api.ReportRunlevel({'run_level' : 'failboot'})
+
+                elif len(fs_val) > 0 and len(bm_val) > 0:
+                    api.ReportRunlevel({'run_level' : 'reinstall'})
+
+                else:
+                    api.ReportRunlevel({'run_level' : 'failboot'})
+
+            elif env == "production":
+                api.ReportRunlevel({'run_level' : 'boot'})
+            else:
+                api.ReportRunlevel({'run_level' : 'failboot'})
+                
+        except:
+            print "reporting error: ", os.popen("uptime").read().strip()
+            traceback.print_exc()
+
+        sys.stdout.flush()
+        # TODO: change to a configurable value
+        time.sleep(60*15)
+
+def agent_running():
+    try:
+        os.stat(RLA_PID_FILE)
+        f = os.popen("ps ax | grep RunlevelAgent | grep -Ev 'grep|vim' | awk '{print $1}' | wc -l")
+        l = f.read().strip()
+        if int(l) >= 2:
+            return True
+        else:
+            try:
+                os.unlink(RLA_PID_FILE)
+            except:
+                pass
+            return False
+    except:
+        return False
+        
+
+def shutdown():
+    import signal
+
+    pid = open(RLA_PID_FILE, 'r').read().strip()
+
+    # Try three different ways to kill the process.  Just to be sure.
+    os.kill(int(pid), signal.SIGKILL)
+    os.system("pkill RunlevelAgent.py")
+    os.system("ps ax | grep RunlevelAgent | grep -v grep | awk '{print $1}' | xargs kill -9 ")
+
+if __name__ == "__main__":
+    if "start" in sys.argv and not agent_running():
+        start_and_run()
+
+    if "stop" in sys.argv and agent_running():
+        shutdown()
diff --git a/source/configuration b/source/configuration
new file mode 100644 (file)
index 0000000..9f0fe74
--- /dev/null
@@ -0,0 +1,95 @@
+# this file contains a list of variables
+# to import to the INSTALL_STORE before
+# any of the steps run.
+
+
+# the current version of the bootmanager
+VERSION=3.2
+
+# this is the server to contact if we don't have a bootcd
+DEFAULT_BOOT_SERVER=boot.planet-lab.org
+# full url to which api server to contact
+BOOT_API_SERVER=https://boot.planet-lab.org:443/PLCAPI/
+
+# keep redundant information to plc_config for simplicity
+BOOT_SERVER=boot.planet-lab.org
+
+# hostname for MyOps server
+MONITOR_SERVER=monitor.planet-lab.org
+#UPLOAD_LOG_SCRIPT=/monitor/upload
+UPLOAD_LOG_SCRIPT=/boot/upload-bmlog.php
+
+# bootcd variables : use %(path)s for path relative to bootcd
+BOOTCD_VERSION_FILE='%(path)s/bootme/ID'
+SERVER_CERT_DIR=/tmp/source/cacert
+CACERT_NAME=cacert.pem
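+# e.g. (illustrative) BootServerRequest substitutes the boot cd mount point
+# (such as /mnt/cdrom/) for %(path)s, and looks up a server's CA cert at
+# SERVER_CERT_DIR/<server>/cacert.pem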
+
+
+# path to store temporary files during the install,
+# do not include trailing slashes
+TEMP_PATH=/tmp/mnt
+
+
+# path to the system mount point
+SYSIMG_PATH=/tmp/mnt/sysimg
+
+
+# where the cacerts for the boot cd can be found
+# currently, this must start with /mnt/cdrom
+# which is hardcoded in the installer
+CACERT_PATH=/mnt/cdrom/bootme/cacert
+
+
+# the nonce the boot cd created, needed to authenticate
+# requests that are made to the boot server
+NONCE_FILE=/tmp/nonce
+
+
+# directory containing planetlab specific configuration
+# files, like the http_proxy file
+PLCONF_DIR=/etc/planetlab
+
+
+# this sets the size of the root logical volume;
+# after root and swap have been created, the remaining
+# space goes to the vserver partition
+ROOT_SIZE=14G
+
+
+# override the swap size
+SWAP_SIZE=1G
+
+
+# in raw disk mode, the size of /vservers
+# if unset or -1, use the entire first disk
+VSERVERS_SIZE=-1
+
+
+# whether or not to skip hardware requirement check
+SKIP_HARDWARE_REQUIREMENT_CHECK=0
+
+
+# minimum amount of memory needed for installer, in kb
+MINIMUM_MEMORY=511000
+
+
+# minimum block disk size in GB to be added to lvm.
+# if any block devices are smaller than this, they are ignored.
+MINIMUM_DISK_SIZE=17
+
+
+# total minimum disk size in GB; if all usable disks are below this
+# size, the node cannot be installed
+TOTAL_MINIMUM_DISK_SIZE=50
+
+
+# set of languages for install (used in /etc/rpm/macros)
+INSTALL_LANGS=en_US
+
+
+# number of auth failures before starting debug mode
+NUM_AUTH_FAILURES_BEFORE_DEBUG=2
+
+
+# location of file containing http/https proxy info, if needed
+PROXY_FILE=/etc/planetlab/http_proxy
diff --git a/source/debug_files/debug_root_ssh_key b/source/debug_files/debug_root_ssh_key
new file mode 100644 (file)
index 0000000..0f4105e
--- /dev/null
@@ -0,0 +1 @@
+ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAs3jl1PRq97O4WKngafKUe4LTkQrKqgaHUj6sUKfC9KT40ek19jlzU2YWnuoaxEpSLks+Z0KPnSAIyZW5fnFYasIh9mrLSbY06d2Mor5919sCv9fIm/6QHq6gBiFjs50HITx53jWjeu/nmZeLOBsBtioLkNW2vBMQKHz6+q+wea2nh+YX3X5ZRpSp6znPR5fjaWzm0TEfA6oStUfsOIBds98XswghfT0GtWehG5FpPT/X9g7EObQKN/fzSSe1SdMSEMLPl+e0+KQ0+jB/pCULfSm9Qlw6I5cYQXwxKeT2tEPIcmLPe/U1hhoqGyaADo+a0OmCQ84yJ3obMNMWGH0uIQ== debug@planet-lab.org
diff --git a/source/debug_files/sshd_config b/source/debug_files/sshd_config
new file mode 100644 (file)
index 0000000..2a8f428
--- /dev/null
@@ -0,0 +1,92 @@
+# boot cd version 3.x sshd configuration file for debug mode
+
+#Port 22
+Protocol 2
+#ListenAddress 0.0.0.0
+#ListenAddress ::
+
+# HostKey for protocol version 1
+#HostKey /etc/ssh/ssh_host_key
+# HostKeys for protocol version 2
+#HostKey /etc/ssh/ssh_host_rsa_key
+#HostKey /etc/ssh/ssh_host_dsa_key
+
+# Lifetime and size of ephemeral version 1 server key
+#KeyRegenerationInterval 1h
+#ServerKeyBits 768
+
+# Logging
+#obsoletes QuietMode and FascistLogging
+#SyslogFacility AUTH
+SyslogFacility AUTHPRIV
+#LogLevel INFO
+
+# Authentication:
+
+#LoginGraceTime 2m
+#PermitRootLogin yes
+#StrictModes yes
+#MaxAuthTries 6
+
+#RSAAuthentication yes
+#PubkeyAuthentication yes
+#AuthorizedKeysFile    .ssh/authorized_keys
+
+# For this to work you will also need host keys in /etc/ssh/ssh_known_hosts
+#RhostsRSAAuthentication no
+# similar for protocol version 2
+#HostbasedAuthentication no
+# Change to yes if you don't trust ~/.ssh/known_hosts for
+# RhostsRSAAuthentication and HostbasedAuthentication
+#IgnoreUserKnownHosts no
+# Don't read the user's ~/.rhosts and ~/.shosts files
+#IgnoreRhosts yes
+
+# To disable tunneled clear text passwords, change to no here!
+#PermitEmptyPasswords no
+PasswordAuthentication no
+
+# Change to no to disable s/key passwords
+#ChallengeResponseAuthentication yes
+ChallengeResponseAuthentication no
+
+# Kerberos options
+#KerberosAuthentication no
+#KerberosOrLocalPasswd yes
+#KerberosTicketCleanup yes
+#KerberosGetAFSToken no
+
+# Set this to 'yes' to enable PAM authentication, account processing, 
+# and session processing. If this is enabled, PAM authentication will 
+# be allowed through the ChallengeResponseAuthentication mechanism. 
+# Depending on your PAM configuration, this may bypass the setting of 
+# PasswordAuthentication, PermitEmptyPasswords, and 
+# "PermitRootLogin without-password". If you just want the PAM account and 
+# session checks to run without PAM authentication, then enable this but set 
+# ChallengeResponseAuthentication=no
+
+#AllowTcpForwarding yes
+#GatewayPorts no
+#X11Forwarding no
+X11Forwarding yes
+#X11DisplayOffset 10
+#X11UseLocalhost yes
+#PrintMotd yes
+#PrintLastLog yes
+#TCPKeepAlive yes
+#UseLogin no
+#UsePrivilegeSeparation yes
+#PermitUserEnvironment no
+#Compression yes
+#ClientAliveInterval 0
+#ClientAliveCountMax 3
+#UseDNS yes
+#PidFile /var/run/sshd.pid
+#MaxStartups 10
+#ShowPatchLevel no
+
+# no default banner path
+#Banner /some/path
+
+# override default of no subsystems
+Subsystem      sftp    /usr/libexec/openssh/sftp-server
diff --git a/source/libc-opendir-hack.c b/source/libc-opendir-hack.c
new file mode 100644 (file)
index 0000000..182a59d
--- /dev/null
@@ -0,0 +1,139 @@
+#define _GNU_SOURCE 1
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <dlfcn.h>
+#include <sys/types.h>
+#include <fcntl.h>
+#include <dirent.h>
+#include <glob.h>
+#include <stdarg.h>
+#include <string.h>
+#include <sys/stat.h>
+
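+/*
+ * Illustrative build/use sketch (assumed, not taken from the repo's build
+ * scripts): this file is meant to be compiled as a shared object and
+ * preloaded so that the wrappers below shadow the libc symbols, e.g.
+ *   gcc -shared -fPIC -o libc-opendir-hack.so libc-opendir-hack.c -ldl
+ *   LD_PRELOAD=./libc-opendir-hack.so <command>
+ */
+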
+#define INIT(x)        real_ ## x = dlsym(RTLD_NEXT, #x); \
+               if (!real_ ## x) { \
+                 fprintf(stderr, "Would the real " #x " please stand up? %s\n", dlerror()); \
+                 exit(1); \
+               }
+
+DIR *opendir(const char *name)
+{
+  int fd = open(name, O_RDONLY|O_NDELAY|O_DIRECTORY|O_LARGEFILE);
+  if (fd == -1)
+    return NULL;
+  return fdopendir(fd);
+}
+
+DIR *__opendir(const char *name)
+{
+  return opendir(name);
+}
+
+static int (*real_glob)(const char *pattern, int flags,
+         int (*errfunc) (const char *epath, int eerrno),
+         glob_t *pglob);
+
+int glob(const char *pattern, int flags,
+         int (*errfunc) (const char *epath, int eerrno),
+         glob_t *pglob)
+{
+  if (!(flags & GLOB_ALTDIRFUNC)) {
+    pglob->gl_closedir = closedir;
+    pglob->gl_readdir = readdir;
+    pglob->gl_opendir = opendir;
+    pglob->gl_lstat = lstat;
+    pglob->gl_stat = stat;
+    flags |= GLOB_ALTDIRFUNC;
+  }
+  if (!real_glob) {
+    INIT(glob)
+  }
+  return real_glob(pattern, flags, errfunc, pglob);
+}
+
+#define PWD_LOCKFILE "/etc/.pwd.lock"
+
+static int lock_fd = -1;
+
+/* FIXME: Ignores multi-thread issues.
+ *        Doesn't wait for the file to become lockable
+ */
+int lckpwdf(void)
+{
+  struct flock fl = { 0 };
+
+  /* This process already holds the lock */
+  if (lock_fd != -1)
+    return -1;
+
+  lock_fd = open(PWD_LOCKFILE, O_WRONLY|O_CREAT, 0600);
+  if (lock_fd == -1)
+    return -1;
+
+  if (fcntl(lock_fd, F_SETFD, fcntl(lock_fd, F_GETFD, 0) | FD_CLOEXEC) == -1) {
+    close(lock_fd);
+    return -1;
+  }
+
+  fl.l_type = F_WRLCK;
+  fl.l_whence = SEEK_SET;
+  return fcntl(lock_fd, F_SETLKW, &fl);
+}
+
+int ulckpwdf(void)
+{
+  int result;
+
+  if (lock_fd == -1)
+    return -1;
+
+  result = close(lock_fd);
+  lock_fd = -1;
+  return result;
+}
+
+static int (*real_open)(const char *name, int flags, ...);
+int open(const char *name, int flags, ...)
+{
+  mode_t mode = 0;
+  if (flags & O_CREAT) {
+    va_list va;
+    va_start(va, flags);
+    mode = va_arg(va, mode_t);
+    va_end(va);
+  }
+  if (!real_open) {
+    INIT(open)
+  }
+  return real_open(name, flags, mode);
+}
+
+static FILE *(*real_fopen)(const char *name, const char *flags);
+FILE *fopen(const char *name, const char *flags)
+{
+  char *str, *ptr = strchr(flags, 'e');
+  FILE *ret;
+  if (ptr) {
+    str = strdup(flags);
+    ptr = (str + (ptr - flags));
+    memmove(ptr, ptr + 1, strlen(ptr + 1) + 1);  /* regions overlap, so memmove */
+  }
+  else
+    str = (char *)flags;
+  if (!real_fopen) {
+    INIT(fopen)
+  }
+  ret = real_fopen(name, str);
+  if (ptr)
+    free(str);
+  return ret;
+}
+
+static void _init() __attribute__((constructor));
+static void _init()
+{
+  INIT(glob)
+  INIT(open)
+  INIT(fopen)
+}
diff --git a/source/notify_messages.py b/source/notify_messages.py
new file mode 100644 (file)
index 0000000..682c854
--- /dev/null
@@ -0,0 +1,24 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2003 Intel Corporation
+# All rights reserved.
+#
+# Copyright (c) 2004-2006 The Trustees of Princeton University
+# All rights reserved.
+
+"""
+This file contains the ids of the messages that can be sent to the contacts
+at a site through the BootNotifyOwners call.
+"""
+
+MSG_INSTALL_FINISHED= "installfinished"
+MSG_INSUFFICIENT_DISK= "insufficientdisk"
+MSG_INSUFFICIENT_MEMORY= "insufficientmemory"
+MSG_NO_NODE_CONFIG_FILE= "noconfig"
+MSG_AUTH_FAIL= "authfail"
+MSG_NODE_NOT_INSTALLED= "notinstalled"
+MSG_NODE_FILESYSTEM_CORRUPT= "filesystemcorrupted"
+MSG_NODE_MOUNT_FAILED= "mountfailed"
+MSG_NODE_MISSING_KERNEL= "missingkernel"
+MSG_HOSTNAME_NOT_RESOLVE= "hostnamenotresolve"
+MSG_NO_DETECTED_NETWORK= "nodetectednetwork"
diff --git a/source/setup_bash_history_scripts.sh b/source/setup_bash_history_scripts.sh
new file mode 100644 (file)
index 0000000..6298d16
--- /dev/null
@@ -0,0 +1,125 @@
+#!/bin/bash
+
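+# This script installs several pieces on the node: a root .bash_profile with
+# helpers to upload incremental history diffs, a profile.d hook and prompt
+# scripts that record commands into /root/.bash_eternal_history, and a
+# periodic uploader that is started in the background at the end.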
+BASH_PROFILE=/root/.bash_profile
+HISTORY_PROFILE=/etc/profile.d/histlog.sh
+PERIODIC_SCRIPT=/usr/bin/periodic_upload.sh
+
+cat <<\EOF > $BASH_PROFILE
+# NOTE: only upload incremental diffs
+if [ -f /tmp/source/configuration ] ; then
+    source /tmp/source/configuration
+fi
+if [ -z "$MONITOR_SERVER" ] ; then
+    MONITOR_SERVER=monitor.planet-lab.org
+fi
+function upload_log ()
+{
+    file=$1
+    path=$2
+    base=$( basename $file )
+    old=/tmp/${base}.old
+    new=/tmp/${base}.new
+    log=/tmp/${base}.log
+    if [ ! -f $file ] ; then
+        return
+    fi
+    if [ -f $new ] ; then
+        cp $new $old
+    else
+        touch $old
+    fi
+    cp $file $new
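+    # keep only the lines added since the last snapshot:
+    # comm -1 -3 drops lines unique to $old and lines common to both files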
+    comm -1 -3 $old $new > $log
+    if [ $( stat -c %s $log ) -ne 0 ] ; then
+        curl --max-time 60 --silent --insecure https://$MONITOR_SERVER/monitor/uploadlogs --form "dir=$path" --form "log=@$log"
+        if [ $? -ne 0 ] ; then
+            # the upload failed, so remove the new snapshot; the next run
+            # will re-diff against the old copy and retry these lines
+            rm -f /tmp/$( basename $file ).new
+        fi
+    fi
+}
+
+function upload_logs ()
+{
+    upload_log $1 histfail
+}
+
+# NOTE: these aliases aim to upload the history before losing it.
+alias reboot="upload_logs /root/.bash_eternal_history ; /sbin/reboot"
+alias shutdown="upload_logs /root/.bash_eternal_history ; /sbin/shutdown"
+EOF
+
+cat <<\EOF > $HISTORY_PROFILE
+export HISTTIMEFORMAT="%s ";
+# NOTE: HOSTNAME is not reliably set in failboot or safeboot mode
+# NOTE: These steps assign at least a default hostname based on IP
+# NOTE: This hostname is used in the bash-prompt-script commands
+if [[ -z "$HOSTNAME" || "$HOSTNAME" = "(none)" ]] ; then
+    HOSTNAME=`ip addr show dev eth0 | grep inet | tr '/' ' ' | sed -e 's/^ *//g' | cut -f2 -d' '`
+fi
+if [ -f /etc/sysconfig/network-scripts/ifcfg-eth0 ] ; then
+    source /etc/sysconfig/network-scripts/ifcfg-eth0 
+    if [ -n "$DHCP_HOSTNAME" ] ; then
+        HOSTNAME=$DHCP_HOSTNAME
+    else 
+        if [ -n "$IPADDR" ] ; then
+            HOSTNAME=$IPADDR
+        fi
+    fi
+fi
+hostname $HOSTNAME &> /dev/null
+if [ -n "$BASH_EXECUTION_STRING" ]; then
+    # NOTE: does not work on 2.x versions of bash.
+    # NOTE: log commands executed over ssh
+    echo "$HOSTNAME $$ ssh:$USER xx `date +%s` $BASH_EXECUTION_STRING" >> /root/.bash_eternal_history;
+fi
+if [ -e /etc/sysconfig/bash-prompt-xterm ]; then
+    PROMPT_COMMAND=/etc/sysconfig/bash-prompt-xterm
+fi
+EOF
+chmod 755 $HISTORY_PROFILE
+
+cat <<\EOF > bash-prompt-script
+# NOTE: intended to run (via PROMPT_COMMAND) after, and log, every interactive command
+echo $HOSTNAME $$ $USER "$(history 1)" >> /root/.bash_eternal_history
+EOF
+
+for f in bash-prompt-default bash-prompt-xterm ; do
+    cp bash-prompt-script /etc/sysconfig/$f
+    chmod 755 /etc/sysconfig/$f
+done
+
+# NOTE: allow command run directly over ssh to be logged also.
+echo "source /etc/profile ; source $BASH_PROFILE" > /root/.bashrc
+
+# NOTE 1: crond is not installed on the boot image, so this maintains a
+#         persistent process to upload logs on legacy nodes.
+# NOTE 2: A day has 86400 seconds and $RANDOM is between 0-32767,
+#         so $RANDOM * 3 is between 0 and roughly 27 hours.
+# NOTE 3: The initial delay is randomized in case many nodes reboot at the
+#         same time.
+initial_delay=$(( $RANDOM * 3 )) 
+
+cat <<EOF > $PERIODIC_SCRIPT
+#!/bin/bash
+if [ -f $BASH_PROFILE ] ; then
+    source $BASH_PROFILE
+else
+    echo "Cannot source upload_logs() definition!"
+    exit 1
+fi
+
+# NOTE: exit if another process is already running.
+if [ \$$ -ne \`pgrep -o periodic\` ] ; then
+    # the current PID differs from the oldest periodic_upload pid
+    exit 0
+fi
+sleep $initial_delay
+while /bin/true ; do
+    upload_logs /root/.bash_eternal_history
+    sleep 86400   # sleep for a day
+done
+EOF
+
+chmod 755 $PERIODIC_SCRIPT
+$PERIODIC_SCRIPT < /dev/null > /tmp/upload.log 2>&1 &
diff --git a/source/steps/AuthenticateWithPLC.py b/source/steps/AuthenticateWithPLC.py
new file mode 100644 (file)
index 0000000..72d5ec2
--- /dev/null
@@ -0,0 +1,91 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2003 Intel Corporation
+# All rights reserved.
+#
+# Copyright (c) 2004-2006 The Trustees of Princeton University
+# All rights reserved.
+
+
+import os
+
+from Exceptions import *
+import BootAPI
+
+
+AUTH_FAILURE_COUNT_FILE= "/tmp/authfailurecount"
+
+
+def Run( vars, log ):
+    """
+    Authenticate this node with PLC. This ensures that the node can operate
+    as normal, and that our management authority has authorized it.
+
+    For this, just call the PLC api function BootCheckAuthentication
+
+    Return 1 if authorized; raise a BootManagerException if not, or if
+    the call fails entirely.
+
+    If there are two consecutive authentication failures, put the node
+    into debug mode and exit the bootmanager.
+
+    Expect the following variables from the store:
+    NUM_AUTH_FAILURES_BEFORE_DEBUG    How many failures before debug
+    """
+
+    log.write( "\n\nStep: Authenticating node with PLC.\n" )
+
+    # make sure we have the variables we need
+    try:
+        NUM_AUTH_FAILURES_BEFORE_DEBUG= int(vars["NUM_AUTH_FAILURES_BEFORE_DEBUG"])
+    except KeyError, var:
+        raise BootManagerException, "Missing variable in vars: %s\n" % var
+    except ValueError, var:
+        raise BootManagerException, "Variable in vars, shouldn't be: %s\n" % var
+
+    try:
+        authorized= BootAPI.call_api_function( vars, "BootCheckAuthentication", () )
+        if authorized == 1:
+            log.write( "Authentication successful.\n" )
+
+            try:
+                os.unlink( AUTH_FAILURE_COUNT_FILE )
+            except OSError, e:
+                pass
+            
+            return 1
+    except BootManagerException, e:
+        log.write( "Authentication failed: %s.\n" % e )
+    except:
+        # This is ugly.
+        if vars['DISCONNECTED_OPERATION']:
+            vars['API_SERVER_INST']= None
+            return 1
+        else:
+            raise
+
+    # increment auth failure
+    auth_failure_count= 0
+    try:
+        auth_failure_count= int(file(AUTH_FAILURE_COUNT_FILE,"r").read().strip())
+    except IOError:
+        pass
+    except ValueError:
+        pass
+
+    auth_failure_count += 1
+
+    try:
+        fail_file= file(AUTH_FAILURE_COUNT_FILE,"w")
+        fail_file.write( str(auth_failure_count) )
+        fail_file.close()
+    except IOError:
+        pass
+
+    if auth_failure_count >= NUM_AUTH_FAILURES_BEFORE_DEBUG:
+        log.write( "Maximum number of authentication failures reached.\n" )
+        log.write( "Canceling boot process and going into debug mode.\n" )
+
+    raise BootManagerException, "Unable to authenticate node."
+    
+
diff --git a/source/steps/ChainBootNode.py b/source/steps/ChainBootNode.py
new file mode 100644 (file)
index 0000000..848cb3d
--- /dev/null
@@ -0,0 +1,289 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2003 Intel Corporation
+# All rights reserved.
+#
+# Copyright (c) 2004-2006 The Trustees of Princeton University
+# All rights reserved.
+
+
+import string
+import re
+import os
+
+import UpdateNodeConfiguration
+import MakeInitrd
+import StopRunlevelAgent
+from Exceptions import *
+import utils
+import systeminfo
+import BootAPI
+import notify_messages
+import time
+
+import ModelOptions
+
+def Run( vars, log ):
+    """
+    Load the kernel off of the node's disk and boot into it.
+    This step assumes the disks are mounted on SYSIMG_PATH.
+    If successful, this function will not return. If it returns, no chain
+    booting has occurred.
+    
+    Expect the following variables:
+    SYSIMG_PATH           the path where the system image will be mounted
+                          (always starts with TEMP_PATH)
+    ROOT_MOUNTED          the node root file system is mounted
+    NODE_SESSION             the unique session val set when we requested
+                             the current boot state
+    PLCONF_DIR               The directory to store PL configuration files in
+    
+    Sets the following variables:
+    ROOT_MOUNTED          the node root file system is mounted
+    """
+
+    log.write( "\n\nStep: Chain booting node.\n" )
+
+    # make sure we have the variables we need
+    try:
+        SYSIMG_PATH= vars["SYSIMG_PATH"]
+        if SYSIMG_PATH == "":
+            raise ValueError, "SYSIMG_PATH"
+
+        PLCONF_DIR= vars["PLCONF_DIR"]
+        if PLCONF_DIR == "":
+            raise ValueError, "PLCONF_DIR"
+
+        # it's ok if this is blank
+        NODE_SESSION= vars["NODE_SESSION"]
+
+        NODE_MODEL_OPTIONS= vars["NODE_MODEL_OPTIONS"]
+
+        PARTITIONS= vars["PARTITIONS"]
+        if PARTITIONS == None:
+            raise ValueError, "PARTITIONS"
+
+    except KeyError, var:
+        raise BootManagerException, "Missing variable in vars: %s\n" % var
+    except ValueError, var:
+        raise BootManagerException, "Variable in vars, shouldn't be: %s\n" % var
+
+    ROOT_MOUNTED= 0
+    if vars.has_key('ROOT_MOUNTED'):
+        ROOT_MOUNTED= vars['ROOT_MOUNTED']
+    
+    if ROOT_MOUNTED == 0:
+        log.write( "Mounting node partitions\n" )
+
+        # simply creating an instance of this class and listing the system
+        # block devices will make them show up so vgscan can find the planetlab
+        # volume group
+        systeminfo.get_block_device_list(vars, log)
+        
+        utils.sysexec( "vgscan", log )
+        utils.sysexec( "vgchange -ay planetlab", log )
+
+        utils.makedirs( SYSIMG_PATH )
+
+        cmd = "mount %s %s" % (PARTITIONS["root"],SYSIMG_PATH)
+        utils.sysexec( cmd, log )
+        cmd = "mount -t proc none %s/proc" % SYSIMG_PATH
+        utils.sysexec( cmd, log )
+        cmd = "mount %s %s/vservers" % (PARTITIONS["vservers"],SYSIMG_PATH)
+        utils.sysexec( cmd, log )
+
+        ROOT_MOUNTED= 1
+        vars['ROOT_MOUNTED']= 1
+        
+
+    # write out the session value /etc/planetlab/session
+    try:
+        session_file_path= "%s/%s/session" % (SYSIMG_PATH,PLCONF_DIR)
+        session_file= file( session_file_path, "w" )
+        session_file.write( str(NODE_SESSION) )
+        session_file.close()
+        session_file= None
+        log.write( "Updated /etc/planetlab/session\n" )
+    except IOError, e:
+        log.write( "Unable to write out /etc/planetlab/session, continuing anyway\n" )
+
+    # update configuration files
+    log.write( "Updating configuration files.\n" )
+    try:
+        cmd = "/etc/init.d/conf_files start --noscripts"
+        utils.sysexec_chroot( SYSIMG_PATH, cmd, log )
+    except IOError, e:
+        log.write("conf_files failed with \n %s" % e)
+
+    # update node packages
+    log.write( "Running node update.\n" )
+    if os.path.exists( SYSIMG_PATH + "/usr/bin/NodeUpdate.py" ):
+        cmd = "/usr/bin/NodeUpdate.py start noreboot"
+    else:
+        # for backwards compatibility
+        cmd = "/usr/local/planetlab/bin/NodeUpdate.py start noreboot"
+    utils.sysexec_chroot( SYSIMG_PATH, cmd, log )
+
+    # Re-generate initrd right before kexec call
+    MakeInitrd.Run( vars, log )
+
+    # the following step should be done by NM
+    UpdateNodeConfiguration.Run( vars, log )
+
+    log.write( "Updating ssh public host key with PLC.\n" )
+    ssh_host_key= ""
+    try:
+        ssh_host_key_file= file("%s/etc/ssh/ssh_host_rsa_key.pub"%SYSIMG_PATH,"r")
+        ssh_host_key= ssh_host_key_file.read().strip()
+        ssh_host_key_file.close()
+        ssh_host_key_file= None
+    except IOError, e:
+        pass
+
+    update_vals= {}
+    update_vals['ssh_rsa_key']= ssh_host_key
+    BootAPI.call_api_function( vars, "BootUpdateNode", (update_vals,) )
+
+
+    # pick the kernel variant to boot (smp or not) from the node model options
+    option = ''
+    if NODE_MODEL_OPTIONS & ModelOptions.SMP:
+        option = 'smp'
+
+    log.write( "Copying kernel and initrd for booting.\n" )
+    utils.sysexec( "cp %s/boot/kernel-boot%s /tmp/kernel" % (SYSIMG_PATH,option), log )
+    utils.sysexec( "cp %s/boot/initrd-boot%s /tmp/initrd" % (SYSIMG_PATH,option), log )
+
+    BootAPI.save(vars)
+
+    log.write( "Unmounting disks.\n" )
+    utils.sysexec( "umount %s/vservers" % SYSIMG_PATH, log )
+    utils.sysexec( "umount %s/proc" % SYSIMG_PATH, log )
+    utils.sysexec_noerr( "umount %s/dev" % SYSIMG_PATH, log )
+    utils.sysexec_noerr( "umount %s/sys" % SYSIMG_PATH, log )
+    utils.sysexec( "umount %s" % SYSIMG_PATH, log )
+    utils.sysexec( "vgchange -an", log )
+
+    ROOT_MOUNTED= 0
+    vars['ROOT_MOUNTED']= 0
+
+    # Change runlevel to 'boot' prior to kexec.
+    StopRunlevelAgent.Run( vars, log )
+
+    log.write( "Unloading modules and chain booting to new kernel.\n" )
+
+    # further use of log after Upload will only output to screen
+    log.Upload("/root/.bash_eternal_history")
+
+    # regardless of whether kexec works or not, we need to stop trying to
+    # run anything
+    cancel_boot_flag= "/tmp/CANCEL_BOOT"
+    utils.sysexec( "touch %s" % cancel_boot_flag, log )
+
+    # on 2.x cds (2.4 kernel) for sure, we need to shutdown everything
+    # to get kexec to work correctly. Even on 3.x cds (2.6 kernel),
+    # there are a few buggy drivers that don't disable their hardware
+    # correctly unless they are first unloaded.
+    
+    utils.sysexec_noerr( "ifconfig eth0 down", log )
+
+    utils.sysexec_noerr( "killall dhclient", log )
+        
+    utils.sysexec_noerr( "umount -a -r -t ext2,ext3", log )
+    utils.sysexec_noerr( "modprobe -r lvm-mod", log )
+    
+    # modules that should not get unloaded
+    # unloading cpqphp causes a kernel panic
+    blacklist = [ "floppy", "cpqphp", "i82875p_edac", "mptspi"]
+    try:
+        modules= file("/tmp/loadedmodules","r")
+        
+        for line in modules:
+            module= string.strip(line)
+            if module in blacklist :
+                log.write("Skipping unload of kernel module '%s'.\n"%module)
+            elif module != "":
+                log.write( "Unloading %s\n" % module )
+                utils.sysexec_noerr( "modprobe -r %s" % module, log )
+                if "e1000" in module:
+                    log.write("Unloading e1000 driver; sleeping 4 seconds...\n")
+                    time.sleep(4)
+
+        modules.close()
+    except IOError:
+        log.write( "Couldn't read /tmp/loadedmodules, continuing.\n" )
+
+    try:
+        modules= file("/proc/modules", "r")
+
+        # Get usage count for USB
+        usb_usage = 0
+        for line in modules:
+            try:
+                # Module Size UsageCount UsedBy State LoadAddress
+                parts= string.split(line)
+
+                if parts[0] == "usb_storage":
+                    usb_usage += int(parts[2])
+            except IndexError, e:
+                log.write( "Couldn't parse /proc/modules, continuing.\n" )
+
+        modules.seek(0)
+
+        for line in modules:
+            try:
+                # Module Size UsageCount UsedBy State LoadAddress
+                parts= string.split(line)
+
+                # While we would like to remove all "unused" modules,
+                # you can't trust usage count, especially for things
+                # like network drivers or RAID array drivers. Just try
+                # and unload a few specific modules that we know cause
+                # problems during chain boot, such as USB host
+                # controller drivers (HCDs) (PL6577).
+                # if int(parts[2]) == 0:
+                if False and re.search('_hcd$', parts[0]):
+                    if usb_usage > 0:
+                        log.write( "NOT unloading %s since USB may be in use\n" % parts[0] )
+                    else:
+                        log.write( "Unloading %s\n" % parts[0] )
+                        utils.sysexec_noerr( "modprobe -r %s" % parts[0], log )
+            except IndexError, e:
+                log.write( "Couldn't parse /proc/modules, continuing.\n" )
+    except IOError:
+        log.write( "Couldn't read /proc/modules, continuing.\n" )
+
+
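+    # kexec arguments: root on the LVM root volume (device-mapper path),
+    # plus a fixed ramdisk_size for the initrd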
+    kargs = "root=%s ramdisk_size=8192" % PARTITIONS["mapper-root"]
+    if NODE_MODEL_OPTIONS & ModelOptions.SMP:
+        kargs = kargs + " " + "acpi=off"
+    try:
+        kargsfb = open("/kargs.txt","r")
+        moreargs = kargsfb.readline()
+        kargsfb.close()
+        moreargs = moreargs.strip()
+        log.write( 'Parsed in "%s" kexec args from /kargs.txt\n' % moreargs )
+        kargs = kargs + " " + moreargs
+    except IOError:
+        # /kargs.txt does not exist, which is fine. Just kexec with the
+        # default kargs built above.
+        pass 
+
+    utils.sysexec_noerr( 'hwclock --systohc --utc ', log )
+    utils.breakpoint ("Before kexec");
+    try:
+        utils.sysexec( 'kexec --force --initrd=/tmp/initrd --append="%s" /tmp/kernel' % kargs, log)
+    except BootManagerException, e:
+        # if kexec fails, we've shut the machine down to a point where nothing
+        # can run usefully anymore (network down, all modules unloaded, file
+        # systems unmounted). Write out the error, and cancel the boot process
+
+        log.write( "\n\n" )
+        log.write( "-------------------------------------------------------\n" )
+        log.write( "kexec failed with the following error. Please report\n" )
+        log.write( "this problem to support@planet-lab.org.\n\n" )
+        log.write( str(e) + "\n\n" )
+        log.write( "The boot process has been canceled.\n" )
+        log.write( "-------------------------------------------------------\n\n" )
+
+    return
diff --git a/source/steps/CheckForNewDisks.py b/source/steps/CheckForNewDisks.py
new file mode 100644 (file)
index 0000000..09a001d
--- /dev/null
@@ -0,0 +1,202 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2003 Intel Corporation
+# All rights reserved.
+#
+# Copyright (c) 2004-2006 The Trustees of Princeton University
+# All rights reserved.
+
+import string
+
+import InstallPartitionDisks
+from Exceptions import *
+import systeminfo
+import utils
+import os
+
+import ModelOptions
+
+
+def Run( vars, log ):
+    """
+    Find any new large block devices we can add to the vservers volume group
+    
+    Expect the following variables to be set:
+    SYSIMG_PATH          the path where the system image will be mounted
+    MINIMUM_DISK_SIZE       any disks smaller than this size, in GB, are not used
+    NODE_MODEL_OPTIONS   the node's model options
+    
+    Set the following variables upon successfully running:
+    ROOT_MOUNTED             the node root file system is mounted
+    """
+
+    log.write( "\n\nStep: Checking for unused disks to add to LVM.\n" )
+
+    # make sure we have the variables we need
+    try:
+        SYSIMG_PATH= vars["SYSIMG_PATH"]
+        if SYSIMG_PATH == "":
+            raise ValueError, "SYSIMG_PATH"
+
+        MINIMUM_DISK_SIZE= int(vars["MINIMUM_DISK_SIZE"])
+
+        PARTITIONS= vars["PARTITIONS"]
+        if PARTITIONS == None:
+            raise ValueError, "PARTITIONS"
+        
+        NODE_MODEL_OPTIONS= vars["NODE_MODEL_OPTIONS"]
+    except KeyError, var:
+        raise BootManagerException, "Missing variable in vars: %s\n" % var
+    except ValueError, var:
+        raise BootManagerException, "Variable in vars, shouldn't be: %s\n" % var
+
+    all_devices= systeminfo.get_block_device_list(vars, log)
+    
+    # will contain the new devices to add to the volume group
+    new_devices= []
+
+    # total amount of new space in gb
+    extended_gb_size= 0
+    
+    for device in all_devices.keys():
+
+        (major,minor,blocks,gb_size,readonly)= all_devices[device]
+
+        if device[:14] == "/dev/planetlab":
+            log.write( "Skipping device %s in volume group.\n" % device )
+            continue
+
+        if readonly:
+            log.write( "Skipping read only device %s\n" % device )
+            continue
+
+        if gb_size < MINIMUM_DISK_SIZE:
+            log.write( "Skipping too small device %s (%4.2f)\n" %
+                       (device,gb_size) )
+            continue
+
+        log.write( "Checking device %s to see if it is part " \
+                   "of the volume group.\n" % device )
+
+        # this is the lvm partition, if it exists on that device
+        lvm_partition= InstallPartitionDisks.get_partition_path_from_device( device, vars, log )
+        cmd = "pvdisplay %s | grep -q 'planetlab'" % lvm_partition
+        already_added = utils.sysexec_noerr(cmd, log, shell=True)
+
+        if already_added:
+            log.write( "It appears %s is part of the volume group, continuing.\n" %
+                       device )
+            continue
+
+        # just to be extra paranoid, ignore the device if it already has
+        # an lvm partition on it (new disks won't have this, and that is
+        # what this code is for, so it should be ok).
+        cmd = "parted --script --list %s | grep -q lvm$" % device
+        has_lvm= utils.sysexec_noerr(cmd, log)
+        if has_lvm:
+            log.write( "It appears %s has lvm already setup on it.\n" % device)
+            paranoid = False
+            if paranoid:
+                log.write("To paranoid to add %s to vservers lvm.\n" % device)
+                continue
+        
+        if not InstallPartitionDisks.single_partition_device( device, vars, log ):
+            log.write( "Unable to partition %s, not using it.\n" % device )
+            continue
+
+        log.write( "Successfully partitioned %s\n" % device )
+
+        if NODE_MODEL_OPTIONS & ModelOptions.RAWDISK:
+            log.write( "Running on a raw disk node, not using it.\n" )
+            continue
+
+        part_path= InstallPartitionDisks.get_partition_path_from_device( device,
+                                                                         vars, log )
+
+        log.write( "Attempting to add %s to the volume group\n" % device )
+
+        if not InstallPartitionDisks.create_lvm_physical_volume( part_path,
+                                                                 vars, log ):
+            log.write( "Unable to create lvm physical volume %s, not using it.\n" %
+                       part_path )
+            continue
+
+        log.write( "Adding %s to list of devices to add to " \
+                   "planetlab volume group.\n" % device )
+
+        extended_gb_size= extended_gb_size + gb_size
+        new_devices.append( part_path )
+        
+
+    if len(new_devices) > 0:
+
+        log.write( "Extending planetlab volume group.\n" )
+        
+        log.write( "Unmounting disks.\n" )
+        try:
+            # backwards compat, though, we should never hit this case post PL 3.2
+            os.stat("%s/rcfs/taskclass"%SYSIMG_PATH)
+            utils.sysexec_chroot_noerr( SYSIMG_PATH, "umount /rcfs", log )
+        except OSError, e:
+            pass
+
+        # umount in order to extend disk size
+        utils.sysexec_noerr( "umount %s/proc" % SYSIMG_PATH, log )
+        utils.sysexec_noerr( "umount %s/vservers" % SYSIMG_PATH, log )
+        utils.sysexec_noerr( "umount %s" % SYSIMG_PATH, log )
+        utils.sysexec( "vgchange -an", log )
+        
+        vars['ROOT_MOUNTED']= 0
+
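+        # note: this is a one-shot loop; every path below breaks out after
+        # the first pass, it only exists to simplify error handling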
+        while True:
+            cmd = "vgextend planetlab %s" % string.join(new_devices," ")
+            if not utils.sysexec_noerr( cmd, log ):
+                log.write( "Failed to add physical volumes %s to " \
+                           "volume group, continuing.\n" % string.join(new_devices," "))
+                res = 1
+                break
+            
+            # now, get the number of unused extents, and extend the vserver
+            # logical volume by that much.
+            remaining_extents= \
+               InstallPartitionDisks.get_remaining_extents_on_vg( vars, log )
+
+            log.write( "Extending vservers logical volume.\n" )
+            utils.sysexec( "vgchange -ay", log )
+            cmd = "lvextend -l +%s %s" % (remaining_extents, PARTITIONS["vservers"])
+            if not utils.sysexec_noerr(cmd, log):
+                log.write( "Failed to extend vservers logical volume, continuing\n" )
+                res = 1
+                break
+
+            log.write( "making the ext filesystem match new logical volume size.\n" )
+
+            vars['ROOT_MOUNTED']= 1
+            cmd = "mount %s %s" % (PARTITIONS["root"],SYSIMG_PATH)
+            utils.sysexec_noerr( cmd, log )
+            cmd = "mount %s %s/vservers" % \
+                (PARTITIONS["vservers"],SYSIMG_PATH)
+            utils.sysexec_noerr( cmd, log )
+            cmd = "resize2fs %s" % PARTITIONS["vservers"]
+            resize = utils.sysexec_noerr(cmd,log)
+            utils.sysexec_noerr( "umount %s/vservers" % SYSIMG_PATH, log )
+            utils.sysexec_noerr( "umount %s" % SYSIMG_PATH, log )
+            vars['ROOT_MOUNTED']= 0
+
+            utils.sysexec( "vgchange -an", log )
+
+            if not resize:
+                log.write( "Failed to resize vservers partition, continuing.\n" )
+                res = 1
+                break
+            else:
+                log.write( "Extended vservers partition by %4.2f GB\n" %
+                           extended_gb_size )
+                res = 1
+                break
+
+    else:
+        log.write( "No new disk devices to add to volume group.\n" )
+        res = 1
+
+    return res
diff --git a/source/steps/CheckHardwareRequirements.py b/source/steps/CheckHardwareRequirements.py
new file mode 100644 (file)
index 0000000..3e8b0b5
--- /dev/null
@@ -0,0 +1,247 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2003 Intel Corporation
+# All rights reserved.
+#
+# Copyright (c) 2004-2006 The Trustees of Princeton University
+# All rights reserved.
+# expected /proc/partitions format
+
+import os
+import popen2
+import string
+
+import systeminfo
+from Exceptions import *
+import utils
+import notify_messages
+import BootAPI
+
+
+def Run( vars, log ):
+    """
+    Make sure the hardware we are running on is sufficient for
+    the PlanetLab OS to be installed on. In the process, identify
+    the list of block devices that may be used for a node installation,
+    and identify the cdrom device that we booted off of.
+
+    Return 1 if requirements are met, 0 if not. Raise a
+    BootManagerException if any problems occur that prevent the requirements
+    from being checked.
+
+    Expect the following variables from the store:
+
+    MINIMUM_MEMORY          minimum amount of memory in kb required
+                            for install
+    NODE_ID                 the node_id from the database for this node
+    MINIMUM_DISK_SIZE       any disks smaller than this size, in GB, are not used
+    TOTAL_MINIMUM_DISK_SIZE total disk size in GB; if the combined size of all
+                            usable disks is below this number, there isn't
+                            enough disk space for this node to be usable after install
+    SKIP_HARDWARE_REQUIREMENT_CHECK
+                            If set, don't check if minimum requirements are met
+    Sets the following variables:
+    INSTALL_BLOCK_DEVICES    list of block devices to install onto
+    """
+
+    log.write( "\n\nStep: Checking if hardware requirements met.\n" )        
+        
+    try:
+        MINIMUM_MEMORY= int(vars["MINIMUM_MEMORY"])
+        if MINIMUM_MEMORY == "":
+            raise ValueError, "MINIMUM_MEMORY"
+
+        NODE_ID= vars["NODE_ID"]
+        if NODE_ID == "":
+            raise ValueError("NODE_ID")
+
+        MINIMUM_DISK_SIZE= int(vars["MINIMUM_DISK_SIZE"])
+
+        TOTAL_MINIMUM_DISK_SIZE= \
+                   int(vars["TOTAL_MINIMUM_DISK_SIZE"])
+
+        SKIP_HARDWARE_REQUIREMENT_CHECK= \
+                   int(vars["SKIP_HARDWARE_REQUIREMENT_CHECK"])
+        
+    except KeyError, var:
+        raise BootManagerException, \
+              "Missing variable in install store: %s" % var
+    except ValueError, var:
+        raise BootManagerException, \
+              "Variable in install store blank, shouldn't be: %s" % var
+
+    # lets see if we have enough memory to run
+    log.write( "Checking for available memory.\n" )
+
+    total_mem= systeminfo.get_total_phsyical_mem(vars, log)
+    if total_mem is None:
+        raise BootManagerException, "Unable to read total physical memory"
+        
+    if total_mem < MINIMUM_MEMORY:
+        if not SKIP_HARDWARE_REQUIREMENT_CHECK:
+            log.write( "Insufficient memory to run node: %s kb\n" % total_mem )
+            log.write( "Required memory: %s kb\n" % MINIMUM_MEMORY )
+
+            include_pis= 0
+            include_techs= 1
+            include_support= 0
+            
+            sent= 0
+            try:
+                sent= BootAPI.call_api_function( vars, "BootNotifyOwners",
+                                         (notify_messages.MSG_INSUFFICIENT_MEMORY,
+                                          include_pis,
+                                          include_techs,
+                                          include_support) )
+            except BootManagerException, e:
+                log.write( "Call to BootNotifyOwners failed: %s.\n" % e )
+                
+            if sent == 0:
+                log.write( "Unable to notify site contacts of problem.\n" )
+            else:
+                log.write( "Notified contacts of problem.\n" )
+                
+            return 0
+        else:
+            log.write( "Memory requirements not met, but running anyway: %s kb\n"
+                       % total_mem )
+    else:
+        log.write( "Looks like we have enough memory: %s kb\n" % total_mem )
+
+
+
+    # get a list of block devices to attempt to install on
+    # (may include cdrom devices)
+    install_devices= systeminfo.get_block_device_list(vars, log)
+
+    # save the list of block devices in the log
+    log.write( "Detected block devices:\n" )
+    log.write( repr(install_devices) + "\n" )
+
+    if not install_devices or len(install_devices) == 0:
+        log.write( "No block devices detected.\n" )
+        
+        include_pis= 0
+        include_techs= 1
+        include_support= 0
+        
+        sent= 0
+        try:
+            sent= BootAPI.call_api_function( vars, "BootNotifyOwners",
+                                       (notify_messages.MSG_INSUFFICIENT_DISK,
+                                        include_pis,
+                                        include_techs,
+                                        include_support) )
+        except BootManagerException, e:
+            log.write( "Call to BootNotifyOwners failed: %s.\n" % e )
+            
+        if sent == 0:
+            log.write( "Unable to notify site contacts of problem.\n" )
+
+        return 0
+
+    # now, let's remove any block devices we know won't work (readonly, cdroms),
+    # or that could be other writable removable disks (usb keychains, zip disks, etc).
+    # I'm not aware of anything that helps with the latter test, so we simply
+    # don't use any block device below some size threshold (set in installstore)
+
+    # also, keep track of the total size for all devices that appear usable
+    total_size= 0
+
+    for device in install_devices.keys():
+
+        (major,minor,blocks,gb_size,readonly)= install_devices[device]
+        
+        # if the device string starts with
+        # planetlab or dm- (device mapper), ignore it (could be old lvm setup)
+        if device[:14] == "/dev/planetlab" or device[:8] == "/dev/dm-":
+            del install_devices[device]
+            continue
+
+        if gb_size < MINIMUM_DISK_SIZE:
+            log.write( "Device is too small to use: %s \n(appears" \
+                           " to be %4.2f GB)\n" % (device,gb_size) )
+            try:
+                del install_devices[device]
+            except KeyError, e:
+                pass
+            continue
+
+        if readonly:
+            log.write( "Device is readonly, not using: %s\n" % device )
+            try:
+                del install_devices[device]
+            except KeyError, e:
+                pass
+            continue
+            
+        # add this device's size to the total usable
+        # capacity (in GB) we've found.
+        total_size= total_size + gb_size
+
+
+    if len(install_devices) == 0:
+        log.write( "No suitable block devices found for install.\n" )
+
+        include_pis= 0
+        include_techs= 1
+        include_support= 0
+        
+        sent= 0
+        try:
+            sent= BootAPI.call_api_function( vars, "BootNotifyOwners",
+                                       (notify_messages.MSG_INSUFFICIENT_DISK,
+                                        include_pis,
+                                        include_techs,
+                                        include_support) )
+        except BootManagerException, e:
+            log.write( "Call to BootNotifyOwners failed: %s.\n" % e )
+            
+        if sent == 0:
+            log.write( "Unable to notify site contacts of problem.\n" )
+
+        return 0
+
+
+    # show the devices we found that are usable
+    log.write( "Usable block devices:\n" )
+    log.write( repr(install_devices.keys()) + "\n" )
+
+    # save the list of devices for the following steps
+    vars["INSTALL_BLOCK_DEVICES"]= install_devices.keys()
+
+
+    # ensure the total disk size is large enough. if
+    # not, we need to email the tech contacts the problem, and
+    # put the node into debug mode.
+    if total_size < TOTAL_MINIMUM_DISK_SIZE:
+        if not SKIP_HARDWARE_REQUIREMENT_CHECK:
+            log.write( "The total usable disk size of all disks is " \
+                       "insufficient to be usable as a PlanetLab node.\n" )
+            include_pis= 0
+            include_techs= 1
+            include_support= 0
+            
+            sent= 0
+            try:
+                sent= BootAPI.call_api_function( vars, "BootNotifyOwners",
+                                            (notify_messages.MSG_INSUFFICIENT_DISK,
+                                             include_pis,
+                                             include_techs,
+                                             include_support) )
+            except BootManagerException, e:
+                log.write( "Call to BootNotifyOwners failed: %s.\n" % e )
+            
+            if sent == 0:
+                log.write( "Unable to notify site contacts of problem.\n" )
+
+            return 0
+        
+        else:
+            log.write( "The total usable disk size of all disks is " \
+                       "insufficient, but running anyway.\n" )
+            
+    log.write( "Total size for all usable block devices: %4.2f GB\n" % total_size )
+
+    return 1
diff --git a/source/steps/ConfirmInstallWithUser.py b/source/steps/ConfirmInstallWithUser.py
new file mode 100644 (file)
index 0000000..8890fcc
--- /dev/null
@@ -0,0 +1,68 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2003 Intel Corporation
+# All rights reserved.
+#
+# Copyright (c) 2004-2006 The Trustees of Princeton University
+# All rights reserved.
+
+from Exceptions import *
+
+welcome_message= \
+"""
+********************************************************************************
+*                                                                              *
+*                             Welcome to PlanetLab                             *
+*                             ~~~~~~~~~~~~~~~~~~~~                             *
+*                                                                              *
+* The PlanetLab boot CD allows you to automatically install this machine as a  *
+* node within the PlanetLab overlay network.                                   *
+*                                                                              *
+* PlanetLab is a global overlay network for developing and accessing new       *
+* network services. Our goal is to grow to 1000 geographically distributed     *
+* nodes, connected by a diverse collection of links. Toward this end, we are   *
+* putting PlanetLab nodes into edge sites, co-location and routing centers,    *
+* and homes (i.e., at the end of DSL lines and cable modems). PlanetLab is     *
+* designed to support both short-term experiments and long-running services.   *
+* Currently running services include network weather maps, network-embedded    *
+* storage, peer-to-peer networks, and content distribution networks.           *
+*                                                                              *
+* Information on joining PlanetLab available at planet-lab.org/consortium/     *
+*                                                                              *
+********************************************************************************
+
+WARNING : Installing PlanetLab will remove any existing operating system and 
+          data from this computer.
+"""
+
+
+def Run( vars, log ):
+    """
+    Ask the user if we really want to wipe this machine.
+
+    Return 1 if the user accepts, 0 if the user declines; raise
+    a BootManagerException if anything unexpected occurs.
+    """
+
+    log.write( "\n\nStep: Confirming install with user.\n" )
+    
+    try:
+        confirmation= ""
+        install= 0
+        print welcome_message
+        
+        while confirmation not in ("yes","no"):
+            confirmation= \
+                raw_input("Are you sure you wish to continue (yes/no):")
+        install= confirmation=="yes"
+    except EOFError, e:
+        pass
+    except KeyboardInterrupt, e:
+        pass
+    
+    if install:
+        log.write( "\nUser accepted install.\n" )
+    else:
+        log.write( "\nUser canceled install.\n" )
+        
+    return install
diff --git a/source/steps/GetAndUpdateNodeDetails.py b/source/steps/GetAndUpdateNodeDetails.py
new file mode 100644 (file)
index 0000000..849e25b
--- /dev/null
@@ -0,0 +1,108 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2003 Intel Corporation
+# All rights reserved.
+#
+# Copyright (c) 2004-2006 The Trustees of Princeton University
+# All rights reserved.
+
+import string
+
+from Exceptions import *
+import BootAPI
+import ModelOptions
+
+def Run( vars, log ):
+    """
+
+    Contact PLC and get the attributes for this node. Also, parse in
+    options from the node model string.
+
+    Also, update any node network settings at PLC, except the ip address:
+    upload the mac (if node_id was in the conf file), gateway, network,
+    broadcast, netmask, dns1/2, and the hostname/domainname.
+
+    Expect the following keys to be set:
+    SKIP_HARDWARE_REQUIREMENT_CHECK     Whether or not we should skip hardware
+                                        requirement checks
+                                        
+    The following keys are set/updated:
+    WAS_NODE_ID_IN_CONF                 Set to 1 if the node id was in the conf file
+    WAS_NODE_KEY_IN_CONF                Set to 1 if the node key was in the conf file
+    BOOT_STATE                          The current node boot state
+    NODE_MODEL                          The user specified model of this node
+    NODE_MODEL_OPTIONS                  The options extracted from the user specified
+                                                model of this node 
+    SKIP_HARDWARE_REQUIREMENT_CHECK     Whether or not we should skip hardware
+                                                requirement checks
+    NODE_SESSION                        The session value returned from BootGetNodeDetails
+    INTERFACES                          The network interfaces associated with this node
+    INTERFACE_SETTINGS                  A dictionary of the values of the interface settings
+    
+    Return 1 if able to contact PLC and get node info.
+    Raise a BootManagerException if anything fails.
+    """
+
+    log.write( "\n\nStep: Retrieving details of node from PLC.\n" )
+
+    # make sure we have the variables we need
+    try:
+        SKIP_HARDWARE_REQUIREMENT_CHECK= vars["SKIP_HARDWARE_REQUIREMENT_CHECK"]
+        if SKIP_HARDWARE_REQUIREMENT_CHECK == "":
+            raise ValueError, "SKIP_HARDWARE_REQUIREMENT_CHECK"
+
+        INTERFACE_SETTINGS= vars["INTERFACE_SETTINGS"]
+        if INTERFACE_SETTINGS == "":
+            raise ValueError, "INTERFACE_SETTINGS"
+
+        WAS_NODE_ID_IN_CONF= vars["WAS_NODE_ID_IN_CONF"]
+        if WAS_NODE_ID_IN_CONF == "":
+            raise ValueError, "WAS_NODE_ID_IN_CONF"
+
+        WAS_NODE_KEY_IN_CONF= vars["WAS_NODE_KEY_IN_CONF"]
+        if WAS_NODE_KEY_IN_CONF == "":
+            raise ValueError, "WAS_NODE_KEY_IN_CONF"
+
+    except KeyError, var:
+        raise BootManagerException, "Missing variable in vars: %s\n" % var
+    except ValueError, var:
+        raise BootManagerException, "Variable in vars, shouldn't be: %s\n" % var
+
+    node_details= BootAPI.call_api_function( vars, "GetNodes", 
+                                             (vars['NODE_ID'], 
+                                              ['boot_state', 'nodegroup_ids', 'interface_ids', 'model', 'site_id']))[0]
+
+    vars['BOOT_STATE']= node_details['boot_state']
+    vars['RUN_LEVEL']= node_details['boot_state']
+    vars['NODE_MODEL']= string.strip(node_details['model'])
+    vars['SITE_ID'] = node_details['site_id'] 
+    log.write( "Successfully retrieved node record.\n" )
+    log.write( "Current boot state: %s\n" % vars['BOOT_STATE'] )
+    log.write( "Node make/model: %s\n" % vars['NODE_MODEL'] )
+    
+    # parse in the model options from the node_model string
+    model= vars['NODE_MODEL']
+    options= ModelOptions.Get(model)
+    vars['NODE_MODEL_OPTIONS']=options
+
+    # Check if we should skip hardware requirement check
+    if options & ModelOptions.MINHW:
+        vars['SKIP_HARDWARE_REQUIREMENT_CHECK']=1
+        log.write( "node model indicates override to hardware requirements.\n" )
+
+    # this contains all the node networks, for now, we are only concerned
+    # in the primary network
+    interfaces= BootAPI.call_api_function( vars, "GetInterfaces", (node_details['interface_ids'],))
+    got_primary= 0
+    for network in interfaces:
+        if network['is_primary'] == 1:
+            log.write( "Primary network as returned from PLC: %s\n" % str(network) )
+            got_primary= 1
+            break
+
+    if not got_primary:
+        raise BootManagerException, "Node did not have a primary network."
+
+    vars['INTERFACES']= interfaces
+    
+    return 1
diff --git a/source/steps/InitializeBootManager.py b/source/steps/InitializeBootManager.py
new file mode 100644 (file)
index 0000000..c98f960
--- /dev/null
@@ -0,0 +1,206 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2003 Intel Corporation
+# All rights reserved.
+#
+# Copyright (c) 2004-2006 The Trustees of Princeton University
+# All rights reserved.
+
+import os
+import xmlrpclib
+import socket
+import string
+
+from Exceptions import *
+import utils
+
+
+# locations of boot os version files
+BOOT_VERSION_2X_FILE='/usr/bootme/ID'
+BOOT_VERSION_3X_FILE='/pl_version'
+
+# minimum version of the boot os we need to run, as a (major,minor) tuple
+MINIMUM_BOOT_VERSION= (3,0)
+
+# minimum version of python required to run the boot manager
+MINIMUM_PYTHON_VERSION= (2,2,0)
+
+
+def Run( vars, log ):
+    """
+    Setup the boot manager so it can run, do any extra necessary
+    hardware setup (to fix old cd problems)
+
+    Sets the following variables:
+    PARTITIONS        A dictionary of generic partition types and their
+                      associated devices.
+    BOOT_CD_VERSION   A two number tuple of the boot cd version
+    """
+
+    log.write( "\n\nStep: Initializing the BootManager.\n" )
+
+    # Default model option.  Required in case we go into debug mode
+    # before we successfully called GetAndUpdateNodeDetails().
+    vars["NODE_MODEL_OPTIONS"] = vars.get("NODE_MODEL_OPTIONS",0)
+
+    # define the basic partition paths
+    PARTITIONS= {}
+    PARTITIONS["root"]= "/dev/planetlab/root"
+    PARTITIONS["swap"]= "/dev/planetlab/swap"
+    PARTITIONS["vservers"]= "/dev/planetlab/vservers"
+    # Linux 2.6 mounts LVM with device mapper
+    PARTITIONS["mapper-root"]= "/dev/mapper/planetlab-root"
+    PARTITIONS["mapper-swap"]= "/dev/mapper/planetlab-swap"
+    PARTITIONS["mapper-vservers"]= "/dev/mapper/planetlab-vservers"
+    vars["PARTITIONS"]= PARTITIONS
+
+    log.write( "Opening connection to API server\n" )
+    try:
+        api_inst= xmlrpclib.Server( vars['BOOT_API_SERVER'], verbose=0 )
+    except KeyError, e:
+        raise BootManagerException, \
+              "configuration file does not specify API server URL"
+
+    vars['API_SERVER_INST']= api_inst
+
+    if not __check_boot_version( vars, log ):
+        raise BootManagerException, \
+              "Boot CD version insufficient to run the Boot Manager"
+    else:
+        log.write( "Running on boot cd version: %s\n" %
+                   str(vars['BOOT_CD_VERSION']) )
+
+    BOOT_CD_VERSION= vars['BOOT_CD_VERSION']
+    
+    # In case we are booted with a kernel that does not have the
+    # device mapper code compiled into the kernel.
+    if not os.path.exists("/dev/mapper"):
+        log.write( "Loading support for LVM\n" )
+        utils.sysexec_noerr( "modprobe dm_mod", log )
+
+    # for anything that needs to know we are running under the boot cd and
+    # not the runtime os
+    os.environ['PL_BOOTCD']= "1"
+        
+    return 1
+
+
+
+def __check_boot_version( vars, log ):
+    """
+    identify which version of the boot os we are running on, and whether
+    or not we can run at all on the given version. later, this will be
+    used to identify extra packages to download to enable the boot manager
+    to run on any supported version.
+
+    2.x cds have the version file in /usr/bootme/ID, which looked like:
+    'PlanetLab BootCD v2.0.3'
+
+    3.x cds have the version file in /pl_version, which looks like:
+    'PlanetLab BootCD 3.0-beta0.3'
+
+    All current known version strings that we support:
+    PlanetLab BootCD 3.0
+    PlanetLab BootCD 3.0-beta0.1
+    PlanetLab BootCD 3.0-beta0.3
+    PlanetLab BootCD v2.0
+    PlanetLab BootCD v2.0.1
+    PlanetLab BootCD v2.0.2
+    PlanetLab BootCD v2.0.3
+
+    Returns 1 if the boot os version is identified and will work
+    to run the boot manager. vars['BOOT_CD_VERSION'] is set to the tuple
+    (BOOT_OS_MAJOR_VERSION, BOOT_OS_MINOR_VERSION).
+    version strings with three parts to the version ignore the
+    middle number (so 2.0.3 is major 2, minor 3)
+
+    Returns 0 if the boot os is insufficient to run the boot manager
+    """
+
+    try:
+        # check for a 3.x version first
+        version_file= file(BOOT_VERSION_3X_FILE,'r')
+        full_version= string.strip(version_file.read())
+        version_file.close()
+
+        version_parts= string.split(full_version)
+        version= version_parts[-1]
+
+        version_numbers= string.split(version,".")
+        if len(version_numbers) == 2:
+            BOOT_OS_MAJOR_VERSION= int(version_numbers[0])
+            BOOT_OS_MINOR_VERSION= int(version_numbers[1])
+        else:
+            # for 3.x cds, if there are more than two parts
+            # separated by a ., it's one of the beta cds.
+            # hardcode as a 3.0 cd
+            BOOT_OS_MAJOR_VERSION= 3
+            BOOT_OS_MINOR_VERSION= 0
+
+        vars['BOOT_CD_VERSION']= (BOOT_OS_MAJOR_VERSION,BOOT_OS_MINOR_VERSION)
+        
+        if (BOOT_OS_MAJOR_VERSION,BOOT_OS_MINOR_VERSION) >= \
+               MINIMUM_BOOT_VERSION:
+            return 1
+
+    except IOError, e:
+        pass
+    except IndexError, e:
+        pass
+    except TypeError, e:
+        pass
+
+
+    try:
+        # fall back to checking for a 2.x version
+        version_file= file(BOOT_VERSION_2X_FILE,'r')
+        full_version= string.strip(version_file.read())
+        version_file.close()
+
+        version_parts= string.split(full_version)
+        version= version_parts[-1]
+        if version[0] == 'v':
+            version= version[1:]
+
+        version_numbers= string.split(version,".")
+        if len(version_numbers) == 2:
+            BOOT_OS_MAJOR_VERSION= int(version_numbers[0])
+            BOOT_OS_MINOR_VERSION= int(version_numbers[1])
+        else:
+            BOOT_OS_MAJOR_VERSION= int(version_numbers[0])
+            BOOT_OS_MINOR_VERSION= int(version_numbers[2])
+
+        vars['BOOT_CD_VERSION']= (BOOT_OS_MAJOR_VERSION,BOOT_OS_MINOR_VERSION)
+
+        if (BOOT_OS_MAJOR_VERSION,BOOT_OS_MINOR_VERSION) >= \
+           MINIMUM_BOOT_VERSION:
+            return 1
+
+    except IOError, e:
+        pass
+    except IndexError, e:
+        pass
+    except TypeError, e:
+        pass
+
+
+    return 0
+
+
+
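+# Create /dev/cciss/c0dX and c0dXpY block device nodes (major 104) for
+# HP Smart Array (cciss) controllers, presumably for boot environments
+# where they have not been created already.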
+def _create_cciss_dev_entries():
+    def mkccissnod(dev,node):
+        dev = dev + " b 104 %d" % (node)
+        cmd = "mknod /dev/cciss/%s" % dev
+        utils.sysexec_noerr(cmd)
+        node = node + 1
+        return node
+
+    node = 0
+    for i in range(0,16):
+        dev = "c0d%d" % i
+        node = mkccissnod(dev,node)
+        for j in range(1,16):
+            subdev = dev + "p%d" % j
+            node = mkccissnod(subdev,node)
diff --git a/source/steps/InstallBootstrapFS.py b/source/steps/InstallBootstrapFS.py
new file mode 100644 (file)
index 0000000..c157db4
--- /dev/null
@@ -0,0 +1,203 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2003 Intel Corporation
+# All rights reserved.
+#
+# Copyright (c) 2004-2007 The Trustees of Princeton University
+# All rights reserved.
+# expected /proc/partitions format
+
+import os, sys, string
+import popen2
+import shutil
+import traceback 
+import time
+
+from Exceptions import *
+import utils
+import BootServerRequest
+import BootAPI
+
+
+def Run( vars, log ):
+    """
+    Download core + extensions bootstrapfs tarballs and install on the hard drive
+    
+    Expect the following variables from the store:
+    SYSIMG_PATH          the path where the system image will be mounted
+    PARTITIONS           dictionary of generic part. types (root/swap)
+                         and their associated devices.
+    NODE_ID              the id of this machine
+    
+    Sets the following variables:
+    TEMP_BOOTCD_PATH     where the boot cd is remounted in the temp
+                         path
+    ROOT_MOUNTED         set to 1 when the base logical volumes
+                         are mounted.
+    """
+
+    log.write( "\n\nStep: Install: bootstrapfs tarball.\n" )
+
+    # make sure we have the variables we need
+    try:
+        SYSIMG_PATH= vars["SYSIMG_PATH"]
+        if SYSIMG_PATH == "":
+            raise ValueError, "SYSIMG_PATH"
+
+        PARTITIONS= vars["PARTITIONS"]
+        if PARTITIONS == None:
+            raise ValueError, "PARTITIONS"
+
+        NODE_ID= vars["NODE_ID"]
+        if NODE_ID == "":
+            raise ValueError, "NODE_ID"
+
+        VERSION=vars['VERSION'] or 'unknown'
+
+    except KeyError, var:
+        raise BootManagerException, "Missing variable in vars: %s\n" % var
+    except ValueError, var:
+        raise BootManagerException, "Variable in vars, shouldn't be: %s\n" % var
+
+
+    try:
+        # make sure the required partitions exist
+        val= PARTITIONS["root"]
+        val= PARTITIONS["swap"]
+        val= PARTITIONS["vservers"]
+    except KeyError, part:
+        log.write( "Missing partition in PARTITIONS: %s\n" % part )
+        return 0   
+
+    bs_request= BootServerRequest.BootServerRequest(vars)
+    
+    log.write( "turning on swap space\n" )
+    utils.sysexec( "swapon %s" % PARTITIONS["swap"], log )
+
+    # make sure the sysimg dir is present
+    utils.makedirs( SYSIMG_PATH )
+
+    log.write( "mounting root file system\n" )
+    utils.sysexec( "mount -t ext3 %s %s" % (PARTITIONS["root"],SYSIMG_PATH), log )
+
+    log.write( "mounting vserver partition in root file system\n" )
+    utils.makedirs( SYSIMG_PATH + "/vservers" )
+    utils.sysexec( "mount -t ext3 %s %s/vservers" % (PARTITIONS["vservers"],
+                                                     SYSIMG_PATH), log )
+
+    vars['ROOT_MOUNTED']= 1
+
+    # call getNodeFlavour
+    try:
+        node_flavour = BootAPI.call_api_function(vars, "GetNodeFlavour", (NODE_ID,) )
+        nodefamily = node_flavour['nodefamily']
+        extensions = node_flavour['extensions']
+        plain = node_flavour['plain']
+    except:
+        raise BootManagerException ("Could not call GetNodeFlavour - need PLCAPI-5.0")
+    
+    # the 'plain' option is for tests mostly
+    if plain:
+        download_suffix=".tar"
+        uncompress_option=""
+        log.write("Using plain bootstrapfs images\n")
+    else:
+        download_suffix=".tar.bz2"
+        uncompress_option="-j"
+        log.write("Using compressed bootstrapfs images\n")
+
+    log.write ("Using nodefamily=%s\n"%(nodefamily))
+    if not extensions:
+        log.write("Installing only core software\n")
+    else:
+        log.write("Requested extensions %r\n" % extensions)
+    
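+    # the core image for the node's family plus one tarball per requested
+    # extension (e.g. something like bootstrapfs-planetlab-f8-i386.tar.bz2)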
+    bootstrapfs_names = [ nodefamily ] + extensions
+
+    for name in bootstrapfs_names:
+        tarball = "bootstrapfs-%s%s"%(name,download_suffix)
+        source_file= "/boot/%s" % (tarball)
+        dest_file= "%s/%s" % (SYSIMG_PATH, tarball)
+
+        source_hash_file= "/boot/%s.sha1sum" % (tarball)
+        dest_hash_file= "%s/%s.sha1sum" % (SYSIMG_PATH, tarball)
+
+        time_beg=time.time()
+        log.write( "downloading %s\n" % source_file )
+        # 30 is the connect timeout, 14400 is the max transfer time in
+        # seconds (4 hours)
+        result = bs_request.DownloadFile( source_file, None, None,
+                                         1, 1, dest_file,
+                                         30, 14400)
+        time_end=time.time()
+        duration=int(time_end-time_beg)
+        log.write("Done downloading (%s seconds)\n"%duration)
+        if result:
+            # Download SHA1 checksum file
+            log.write( "downloading sha1sum for %s\n"%source_file)
+            result = bs_request.DownloadFile( source_hash_file, None, None,
+                                         1, 1, dest_hash_file,
+                                         30, 14400)
+            log.write( "verifying sha1sum for %s\n"%source_file)
+            if not utils.check_file_hash(dest_file, dest_hash_file):
+                raise BootManagerException, "FATAL: SHA1 checksum does not match between %s and %s" % (source_file, source_hash_file)
+                
+            
+            time_beg=time.time()
+            log.write( "extracting %s in %s\n" % (dest_file,SYSIMG_PATH) )
+            result = utils.sysexec( "tar -C %s -xpf %s %s" % (SYSIMG_PATH,dest_file,uncompress_option), log )
+            time_end=time.time()
+            duration=int(time_end-time_beg)
+            log.write( "Done extracting (%s seconds)\n"%duration)
+            utils.removefile( dest_file )
+        else:
+            # the main tarball is required
+            if name == nodefamily:
+                raise BootManagerException, "FATAL: Unable to download main tarball %s from server." % \
+                    source_file
+            # for extensions, just print a warning
+            else:
+                log.write("WARNING: tarball for extension %s not found\n"%(name))
+
+    # copy resolv.conf from the base system into our temp dir
+    # so DNS lookups work correctly while we are chrooted
+    log.write( "Copying resolv.conf to temp dir\n" )
+    utils.sysexec( "cp /etc/resolv.conf %s/etc/" % SYSIMG_PATH, log )
+
+    # Copy the boot server certificate(s) and GPG public key to
+    # /usr/boot in the temp dir.
+    log.write( "Copying boot server certificates and public key\n" )
+
+    if os.path.exists("/usr/boot"):
+        utils.makedirs(SYSIMG_PATH + "/usr")
+        shutil.copytree("/usr/boot", SYSIMG_PATH + "/usr/boot")
+    elif os.path.exists("/usr/bootme"):
+        utils.makedirs(SYSIMG_PATH + "/usr/boot")
+        boot_server = file("/usr/bootme/BOOTSERVER").readline().strip()
+        shutil.copy("/usr/bootme/cacert/" + boot_server + "/cacert.pem",
+                    SYSIMG_PATH + "/usr/boot/cacert.pem")
+        file(SYSIMG_PATH + "/usr/boot/boot_server", "w").write(boot_server)
+        shutil.copy("/usr/bootme/pubring.gpg", SYSIMG_PATH + "/usr/boot/pubring.gpg")
+        
+    # For backward compatibility
+    if os.path.exists("/usr/bootme"):
+        utils.makedirs(SYSIMG_PATH + "/mnt/cdrom")
+        shutil.copytree("/usr/bootme", SYSIMG_PATH + "/mnt/cdrom/bootme")
+
+    # Import the GPG key into the RPM database so that RPMS can be verified
+    utils.makedirs(SYSIMG_PATH + "/etc/pki/rpm-gpg")
+    utils.sysexec("gpg --homedir=/root --export --armor" \
+                  " --no-default-keyring --keyring %s/usr/boot/pubring.gpg" \
+                  " >%s/etc/pki/rpm-gpg/RPM-GPG-KEY-planetlab" % (SYSIMG_PATH, SYSIMG_PATH), log)
+    utils.sysexec_chroot(SYSIMG_PATH, "rpm --import /etc/pki/rpm-gpg/RPM-GPG-KEY-planetlab", log)
+
+    # keep a log on the installed hdd
+    stamp=file(SYSIMG_PATH + "/bm-install.txt",'w')
+    now=time.strftime("%Y-%b-%d @ %H:%M %Z", time.gmtime())
+    stamp.write("Hard drive installed by BootManager %s\n"%VERSION)
+    stamp.write("Finished extraction of bootstrapfs on %s\n"%now)
+    stamp.write("Using nodefamily %s\n"%nodefamily)
+    stamp.close()
+
+    return 1
diff --git a/source/steps/InstallInit.py b/source/steps/InstallInit.py
new file mode 100644 (file)
index 0000000..3456343
--- /dev/null
@@ -0,0 +1,93 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2003 Intel Corporation
+# All rights reserved.
+#
+# Copyright (c) 2004-2006 The Trustees of Princeton University
+# All rights reserved.
+# expected /proc/partitions format
+
+import os, sys, string
+
+import utils
+from Exceptions import *
+
+def Run( vars, log ):
+    """
+    Setup the install environment:
+    - unmount anything in the temp/sysimg path (possibly left over from
+      previous aborted installs)
+    - create temp directories
+    
+    Expect the following variables from the store:
+    TEMP_PATH         the path to download and store temp files to
+    SYSIMG_DIR        the directory name of the system image
+                      contained in TEMP_PATH
+    PLCONF_DIR        The directory to store the configuration file in
+    
+    Sets the following variables:
+    SYSIMG_PATH       the directory where the system image will be mounted,
+                      (= TEMP_PATH/SYSIMG_DIR)
+    """
+
+    log.write( "\n\nStep: Install: Initializing.\n" )
+    
+    # make sure we have the variables we need
+    try:
+        TEMP_PATH= vars["TEMP_PATH"]
+        if TEMP_PATH == "":
+            raise ValueError("TEMP_PATH")
+
+        SYSIMG_PATH= vars["SYSIMG_PATH"]
+        if SYSIMG_PATH == "":
+            raise ValueError("SYSIMG_PATH")
+
+        PLCONF_DIR= vars["PLCONF_DIR"]
+        if PLCONF_DIR == "":
+            raise ValueError, "PLCONF_DIR"
+
+    except KeyError, var:
+        raise BootManagerException, "Missing variable in vars: %s\n" % var
+    except ValueError, var:
+        raise BootManagerException, "Variable in vars, shouldn't be: %s\n" % var
+
+    # if this is a fresh install, then nothing should be
+    # here, but we support restarted installs without rebooting
+    # so who knows what the current state is
+
+    log.write( "Unmounting any previous mounts\n" )
+
+    try:
+        # backwards compat, though, we should never hit this case post PL 3.2
+        os.stat("%s/rcfs/taskclass"%SYSIMG_PATH)
+        utils.sysexec_chroot_noerr( SYSIMG_PATH, "umount /rcfs", log )
+    except OSError, e:
+        pass
+
+    # NOTE: added /sys and /dev b/c some nodes fail due to this when disk is
+    # nearly full.
+    utils.sysexec_noerr( "umount %s/proc" % SYSIMG_PATH , log )
+    utils.sysexec_noerr( "umount %s/mnt/cdrom" % SYSIMG_PATH , log )
+    utils.sysexec_noerr( "umount %s/vservers" % SYSIMG_PATH , log )
+    utils.sysexec_noerr( "umount %s/sys" % SYSIMG_PATH , log )
+    utils.sysexec_noerr( "umount %s/dev" % SYSIMG_PATH , log )
+    utils.sysexec_noerr( "umount %s" % SYSIMG_PATH , log )
+    vars['ROOT_MOUNTED']= 0
+
+#    log.write( "Removing any old files, directories\n" )
+#    utils.removedir( TEMP_PATH )
+    
+    log.write( "Cleaning up any existing PlanetLab config files\n" )
+    try:
+        flist = os.listdir( PLCONF_DIR )
+        for entry in flist:
+            utils.removedir( os.path.join( PLCONF_DIR, entry ) )
+    except OSError:
+        pass
+    
+    # create the temp path and sysimg path. since sysimg
+    # path is in temp path, both are created here
+    log.write( "Creating system image path\n" )
+    utils.makedirs( SYSIMG_PATH )
+
+    return 1
diff --git a/source/steps/InstallPartitionDisks.py b/source/steps/InstallPartitionDisks.py
new file mode 100644 (file)
index 0000000..b469330
--- /dev/null
@@ -0,0 +1,394 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2003 Intel Corporation
+# All rights reserved.
+#
+# Copyright (c) 2004-2006 The Trustees of Princeton University
+# All rights reserved.
+# expected /proc/partitions format
+
+import os, sys
+import string
+import popen2
+import time
+
+from Exceptions import *
+import utils
+import BootServerRequest
+import BootAPI
+import ModelOptions
+
+def Run( vars, log ):
+    """
+    Setup the block devices for install, partition them w/ LVM
+    
+    Expect the following variables from the store:
+    INSTALL_BLOCK_DEVICES    list of block devices to install onto
+    TEMP_PATH                somewhere to store what we need to run
+    ROOT_SIZE                the size of the root logical volume
+    SWAP_SIZE                the size of the swap partition
+    """
+
+    log.write( "\n\nStep: Install: partitioning disks.\n" )
+        
+    # make sure we have the variables we need
+    try:
+        TEMP_PATH= vars["TEMP_PATH"]
+        if TEMP_PATH == "":
+            raise ValueError, "TEMP_PATH"
+
+        INSTALL_BLOCK_DEVICES= vars["INSTALL_BLOCK_DEVICES"]
+        if( len(INSTALL_BLOCK_DEVICES) == 0 ):
+            raise ValueError, "INSTALL_BLOCK_DEVICES is empty"
+
+        ROOT_SIZE= vars["ROOT_SIZE"]
+        if ROOT_SIZE == "" or ROOT_SIZE == 0:
+            raise ValueError, "ROOT_SIZE invalid"
+
+        SWAP_SIZE= vars["SWAP_SIZE"]
+        if SWAP_SIZE == "" or SWAP_SIZE == 0:
+            raise ValueError, "SWAP_SIZE invalid"
+
+        NODE_MODEL_OPTIONS= vars["NODE_MODEL_OPTIONS"]
+
+        PARTITIONS= vars["PARTITIONS"]
+        if PARTITIONS == None:
+            raise ValueError, "PARTITIONS"
+
+        if NODE_MODEL_OPTIONS & ModelOptions.RAWDISK:
+            VSERVERS_SIZE= "-1"
+            if "VSERVERS_SIZE" in vars:
+                VSERVERS_SIZE= vars["VSERVERS_SIZE"]
+                if VSERVERS_SIZE == "" or VSERVERS_SIZE == 0:
+                    raise ValueError, "VSERVERS_SIZE"
+
+    except KeyError, var:
+        raise BootManagerException, "Missing variable in vars: %s\n" % var
+    except ValueError, var:
+        raise BootManagerException, "Variable in vars, shouldn't be: %s\n" % var
+
+    bs_request= BootServerRequest.BootServerRequest(vars)
+
+    
+    # disable swap if it's on
+    utils.sysexec_noerr( "swapoff %s" % PARTITIONS["swap"], log )
+
+    # shutdown and remove any lvm groups/volumes
+    utils.sysexec_noerr( "vgscan", log )
+    utils.sysexec_noerr( "vgchange -ay", log )        
+    utils.sysexec_noerr( "lvremove -f %s" % PARTITIONS["root"], log )
+    utils.sysexec_noerr( "lvremove -f %s" % PARTITIONS["swap"], log )
+    utils.sysexec_noerr( "lvremove -f %s" % PARTITIONS["vservers"], log )
+    utils.sysexec_noerr( "vgchange -an", log )
+    utils.sysexec_noerr( "vgremove -f planetlab", log )
+
+    log.write( "Running vgscan for devices\n" )
+    utils.sysexec_noerr( "vgscan", log )
+    
+    used_devices= []
+
+    INSTALL_BLOCK_DEVICES.sort()
+    for device in INSTALL_BLOCK_DEVICES:
+
+        if single_partition_device( device, vars, log ):
+            if (len(used_devices) > 0 and
+                (vars['NODE_MODEL_OPTIONS'] & ModelOptions.RAWDISK)):
+                log.write( "Running in raw disk mode, not using %s.\n" % device )
+            else:
+                used_devices.append( device )
+                log.write( "Successfully initialized %s\n" % device )
+        else:
+            log.write( "Unable to partition %s, not using it.\n" % device )
+            continue
+
+    # list of devices to be used with vgcreate
+    vg_device_list= ""
+
+    # get partitions
+    partitions = []
+    for device in used_devices:
+        part_path= get_partition_path_from_device( device, vars, log )
+        partitions.append(part_path)
+   
+    # create raid partition
+    raid_partition = create_raid_partition(partitions, vars, log)
+    if raid_partition != None:
+        partitions = [raid_partition]      
+    log.write("PARTITIONS %s\n" %  str(partitions)) 
+    # initialize the physical volumes
+    for part_path in partitions:
+        if not create_lvm_physical_volume( part_path, vars, log ):
+            raise BootManagerException, "Could not create lvm physical volume " \
+                  "on partition %s" % part_path
+        vg_device_list = vg_device_list + " " + part_path
+
+    # create an lvm volume group
+    utils.sysexec( "vgcreate -s32M planetlab %s" % vg_device_list, log)
+
+    # create swap logical volume
+    utils.sysexec( "lvcreate -L%s -nswap planetlab" % SWAP_SIZE, log )
+
+    # create root logical volume
+    utils.sysexec( "lvcreate -L%s -nroot planetlab" % ROOT_SIZE, log )
+
+    if vars['NODE_MODEL_OPTIONS'] & ModelOptions.RAWDISK and VSERVERS_SIZE != "-1":
+        utils.sysexec( "lvcreate -L%s -nvservers planetlab" % VSERVERS_SIZE, log )
+        remaining_extents= get_remaining_extents_on_vg( vars, log )
+        utils.sysexec( "lvcreate -l%s -nrawdisk planetlab" % remaining_extents, log )
+    else:
+        # create vservers logical volume with all remaining space
+        # first, we need to get the number of remaining extents we can use
+        remaining_extents= get_remaining_extents_on_vg( vars, log )
+        
+        utils.sysexec( "lvcreate -l%s -nvservers planetlab" % remaining_extents, log )
+
+    # activate volume group (should already be active)
+    #utils.sysexec( TEMP_PATH + "vgchange -ay planetlab", log )
+
+    # make swap
+    utils.sysexec( "mkswap -f %s" % PARTITIONS["swap"], log )
+
+    # check if badhd option has been set
+    option = ''
+    txt = ''
+    if NODE_MODEL_OPTIONS & ModelOptions.BADHD:
+        option = '-c'
+        txt = " with bad block search enabled, which may take a while"
+    
+    # filesystems partitions names and their corresponding
+    # reserved-blocks-percentages
+    filesystems = {"root":5,"vservers":0}
+
+    # make the file systems
+    for fs in filesystems.keys():
+        # get the reserved blocks percentage
+        rbp = filesystems[fs]
+        devname = PARTITIONS[fs]
+        log.write("formatting %s partition (%s)%s.\n" % (fs,devname,txt))
+        utils.sysexec( "mkfs.ext2 -q %s -m %d -j %s" % (option,rbp,devname), log )
+
+    # disable time/count based filesystems checks
+    for filesystem in ("root","vservers"):
+        utils.sysexec_noerr( "tune2fs -c -1 -i 0 %s" % PARTITIONS[filesystem], log)
+
+    # save the list of block devices in the log
+    log.write( "Block devices used (in lvm): %s\n" % repr(used_devices))
+
+    # list of block devices used may be updated
+    vars["INSTALL_BLOCK_DEVICES"]= used_devices
+
+    return 1
+
+
+import parted
+def single_partition_device( device, vars, log ):
+    """
+    initialize a disk by removing the old partition tables,
+    and creating a new single partition that fills the disk.
+
+    return 1 if successful, 0 otherwise
+    """
+
+    # two forms, depending on which version of pyparted we have
+    # v1 does not have a 'version' method
+    # v2 and above does, but to make it worse, 
+    # parted-3.4 on f14 has parted.version broken and raises SystemError
+    try:
+        parted.version()
+        return single_partition_device_2_x (device, vars, log)
+    except AttributeError:
+        # old parted does not have version at all
+        return single_partition_device_1_x (device, vars, log)
+    except SystemError:
+        # let's assume this is >=2
+        return single_partition_device_2_x (device, vars, log)
+    except:
+        raise
+
+def single_partition_device_1_x ( device, vars, log):
+    
+    lvm_flag= parted.partition_flag_get_by_name('lvm')
+    
+    try:
+        log.write("Using pyparted 1.x\n")
+        # wipe the old partition table
+        utils.sysexec( "dd if=/dev/zero of=%s bs=512 count=1" % device, log )
+
+        # get the device
+        dev= parted.PedDevice.get(device)
+
+        # create a new partition table
+        disk= dev.disk_new_fresh(parted.disk_type_get("msdos"))
+
+        # create one big partition on each block device
+        constraint= dev.constraint_any()
+
+        new_part= disk.partition_new(
+            parted.PARTITION_PRIMARY,
+            parted.file_system_type_get("ext2"),
+            0, 1 )
+
+        # make it an lvm partition
+        new_part.set_flag(lvm_flag,1)
+
+        # actually add the partition to the disk
+        disk.add_partition(new_part, constraint)
+
+        disk.maximize_partition(new_part,constraint)
+
+        disk.commit()
+        del disk
+            
+    except BootManagerException, e:
+        log.write( "BootManagerException while running: %s\n" % str(e) )
+        return 0
+
+    except parted.error, e:
+        log.write( "parted exception while running: %s\n" % str(e) )
+        return 0
+                   
+    return 1
+
+
+
+def single_partition_device_2_x ( device, vars, log):
+    try:
+        log.write("Using pyparted 2.x\n")
+
+        # Thierry june 2012 -- for disks larger than 2TB
+        # calling this with part_type='msdos' would fail at the maximizePartition stage
+        # create a new partition table
+        def partition_table (device, part_type, fs_type):
+            # wipe the old partition table
+            utils.sysexec( "dd if=/dev/zero of=%s bs=512 count=1" % device, log )
+            # get the device
+            dev= parted.Device(device)
+            disk = parted.freshDisk(dev,part_type)
+            # create one big partition on each block device
+            constraint= parted.constraint.Constraint (device=dev)
+            geometry = parted.geometry.Geometry (device=dev, start=0, end=1)
+            fs = parted.filesystem.FileSystem (type=fs_type,geometry=geometry)
+            new_part= parted.partition.Partition (disk, type=parted.PARTITION_NORMAL,
+                                                  fs=fs, geometry=geometry)
+            # make it an lvm partition
+            new_part.setFlag(parted.PARTITION_LVM)
+            # actually add the partition to the disk
+            disk.addPartition(new_part, constraint)
+            disk.maximizePartition(new_part,constraint)
+            disk.commit()
+            log.write ("Current disk for %s - partition type %s\n%s\n"%(device,part_type,disk))
+            log.write ("Current dev for %s\n%s\n"%(device,dev))
+            del disk
+
+        try:
+            partition_table (device, 'msdos', 'ext2')
+        except:
+            partition_table (device, 'gpt', 'ext2')
+
+    except Exception, e:
+        log.write( "Exception inside single_partition_device_2_x : %s\n" % str(e) )
+        import traceback
+        traceback.print_exc(file=log)
+        return 0
+                   
+    return 1
+
+
+
+def create_lvm_physical_volume( part_path, vars, log ):
+    """
+    make the specified partition an lvm physical volume.
+
+    return 1 if successful, 0 otherwise.
+    """
+
+    try:
+        # again, wipe any old data, this time on the partition
+        utils.sysexec( "dd if=/dev/zero of=%s bs=512 count=1" % part_path, log )
+        ### patch Thierry Parmentelat, required on some hardware
+        import time
+        time.sleep(1)
+        utils.sysexec( "pvcreate -ffy %s" % part_path, log )
+    except BootManagerException, e:
+        log.write( "create_lvm_physical_volume failed.\n" )
+        return 0
+
+    return 1
+
+
+def create_raid_partition(partitions, vars, log):
+    """
+    create raid array using specified partitions.  
+    """ 
+    raid_part = None
+    raid_enabled = False
+    node_tags = BootAPI.call_api_function( vars, "GetNodeTags",
+                                        ({'node_id': vars['NODE_ID']},))
+    for node_tag in node_tags:
+        if node_tag['tagname'] == 'raid_enabled' and \
+           node_tag['value'] == '1':
+            raid_enabled = True
+            break
+    if not raid_enabled:
+        return raid_part
+
+    try:
+        log.write( "Software raid enabled.\n" )
+        # wipe everything
+        utils.sysexec_noerr("mdadm --stop /dev/md0", log)
+        time.sleep(1)
+        for part_path in partitions:
+            utils.sysexec_noerr("mdadm --zero-superblock %s " % part_path, log)
+
+        # assume each partition is on a separate disk
+        num_parts = len(partitions)
+        if num_parts < 2:
+            log.write( "Not enough disks for raid. Found: %s\n" % partitions )
+            raise BootManagerException("Not enough disks for raid. Found: %s\n" % partitions)  
+        if num_parts == 2:
+            lvl = 1
+        else:
+            lvl = 5   
+        
+        # make the array
+        part_list = " ".join(partitions)
+        raid_part = "/dev/md0"
+        cmd = "mdadm --create %(raid_part)s --chunk=128 --level=raid%(lvl)s " % locals() + \
+              "--raid-devices=%(num_parts)s %(part_list)s" % locals()
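+        # illustrative expansion with 3 partitions (device names are examples):
+        #   mdadm --create /dev/md0 --chunk=128 --level=raid5 \
+        #         --raid-devices=3 /dev/sda1 /dev/sdb1 /dev/sdc1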
+        utils.sysexec(cmd, log)        
+
+    except BootManagerException, e:
+        log.write("create_raid_partition failed.\n")
+        raid_part = None
+
+    return raid_part  
+
+
+def get_partition_path_from_device( device, vars, log ):
+    """
+    given a device, return the path of the first partition on the device
+    """
+
+    # those who wrote the cciss driver just had to make it difficult
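+    # e.g. (illustrative) /dev/cciss/c0d0 -> /dev/cciss/c0d0p1, /dev/sda -> /dev/sda1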
+    cciss_test= "/dev/cciss"
+    if device[:len(cciss_test)] == cciss_test:
+        part_path= device + "p1"
+    else:
+        part_path= device + "1"
+
+    return part_path
+
+
+
+def get_remaining_extents_on_vg( vars, log ):
+    """
+    return the free amount of extents on the planetlab volume group
+    """
+    
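+    # "vgdisplay -c" emits one colon-separated record per volume group;
+    # counting from zero, field 15 holds the number of free physical
+    # extents, which is what this helper returns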
+    c_stdout, c_stdin = popen2.popen2("vgdisplay -c planetlab")
+    result= string.strip(c_stdout.readline())
+    c_stdout.close()
+    c_stdin.close()
+    remaining_extents= string.split(result,":")[15]
+    
+    return remaining_extents
diff --git a/source/steps/InstallUninitHardware.py b/source/steps/InstallUninitHardware.py
new file mode 100644 (file)
index 0000000..6497081
--- /dev/null
@@ -0,0 +1,76 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2003 Intel Corporation
+# All rights reserved.
+#
+# Copyright (c) 2004-2006 The Trustees of Princeton University
+# All rights reserved.
+# expected /proc/partitions format
+
+import os
+
+from Exceptions import *
+import utils
+
+
+
+def Run( vars, log ):
+    """
+    Uninitializes hardware:
+    - unmount everything mounted during install, except the
+    /dev/planetlab/root and /dev/planetlab/vservers. This includes
+    calling swapoff for /dev/planetlab/swap.
+
+    Expect the following variables from the store:
+    TEMP_PATH         the path to download and store temp files to
+    SYSIMG_PATH       the path where the system image will be mounted
+                      (always starts with TEMP_PATH)
+    PARTITIONS        dictionary of generic part. types (root/swap)
+                      and their associated devices.
+
+    Sets the following variables:
+    None
+    
+    """
+
+    log.write( "\n\nStep: Install: Shutting down installer.\n" )
+
+    # make sure we have the variables we need
+    try:
+        TEMP_PATH= vars["TEMP_PATH"]
+        if TEMP_PATH == "":
+            raise ValueError, "TEMP_PATH"
+
+        SYSIMG_PATH= vars["SYSIMG_PATH"]
+        if SYSIMG_PATH == "":
+            raise ValueError, "SYSIMG_PATH"
+
+        PARTITIONS= vars["PARTITIONS"]
+        if PARTITIONS == None:
+            raise ValueError, "PARTITIONS"
+
+    except KeyError, var:
+        raise BootManagerException, "Missing variable in vars: %s\n" % var
+    except ValueError, var:
+        raise BootManagerException, "Variable in vars, shouldn't be: %s\n" % var
+
+
+    try:
+        # make sure the required partitions exist
+        val= PARTITIONS["root"]
+        val= PARTITIONS["swap"]
+        val= PARTITIONS["vservers"]
+    except KeyError, part:
+        raise BootManagerException, "Missing partition in PARTITIONS: %s\n" % part
+
+    try:
+        # backwards compat, though, we should never hit this case post PL 3.2
+        os.stat("%s/rcfs/taskclass"%SYSIMG_PATH)
+        utils.sysexec_chroot_noerr( SYSIMG_PATH, "umount /rcfs", log )
+    except OSError, e:
+        pass
+            
+    log.write( "Shutting down swap\n" )
+    utils.sysexec( "swapoff %s" % PARTITIONS["swap"], log )
+
+    return 1
diff --git a/source/steps/InstallWriteConfig.py b/source/steps/InstallWriteConfig.py
new file mode 100644 (file)
index 0000000..d8b5a6e
--- /dev/null
@@ -0,0 +1,154 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2003 Intel Corporation
+# All rights reserved.
+#
+# Copyright (c) 2004-2006 The Trustees of Princeton University
+# All rights reserved.
+# expected /proc/partitions format
+
+import os, string
+
+from Exceptions import *
+import utils
+import systeminfo
+import BootAPI
+import ModelOptions
+
+def Run( vars, log ):
+
+    """
+    Writes out the following configuration files for the node:
+    /etc/fstab
+    /etc/resolv.conf (if applicable)
+    /etc/ssh/ssh_host_key
+    /etc/ssh/ssh_host_rsa_key
+    /etc/ssh/ssh_host_dsa_key
+    
+    Expect the following variables from the store:
+    VERSION                 the version of the install
+    SYSIMG_PATH             the path where the system image will be mounted
+                            (always starts with TEMP_PATH)
+    PARTITIONS              dictionary of generic part. types (root/swap)
+                            and their associated devices.
+    PLCONF_DIR              The directory to store the configuration file in
+    INTERFACE_SETTINGS      A dictionary of the values from the network
+                            configuration file
+    Sets the following variables:
+    None
+    
+    """
+
+    log.write( "\n\nStep: Install: Writing configuration files.\n" )
+    
+    # make sure we have the variables we need
+    try:
+        VERSION= vars["VERSION"]
+        if VERSION == "":
+            raise ValueError, "VERSION"
+
+        SYSIMG_PATH= vars["SYSIMG_PATH"]
+        if SYSIMG_PATH == "":
+            raise ValueError, "SYSIMG_PATH"
+
+        PARTITIONS= vars["PARTITIONS"]
+        if PARTITIONS == None:
+            raise ValueError, "PARTITIONS"
+
+        PLCONF_DIR= vars["PLCONF_DIR"]
+        if PLCONF_DIR == "":
+            raise ValueError, "PLCONF_DIR"
+
+        INTERFACE_SETTINGS= vars["INTERFACE_SETTINGS"]
+        if INTERFACE_SETTINGS == "":
+            raise ValueError, "INTERFACE_SETTINGS"
+
+    except KeyError, var:
+        raise BootManagerException, "Missing variable in vars: %s\n" % var
+    except ValueError, var:
+        raise BootManagerException, "Variable in vars, shouldn't be: %s\n" % var
+
+    log.write( "Setting local time to UTC\n" )
+    utils.sysexec_chroot( SYSIMG_PATH,
+        "ln -sf /usr/share/zoneinfo/UTC /etc/localtime", log )
+
+    log.write( "Enabling ntp at boot\n" )
+    utils.sysexec_chroot( SYSIMG_PATH, "chkconfig ntpd on", log )
+
+    log.write( "Creating system directory %s\n" % PLCONF_DIR )
+    if not utils.makedirs( "%s/%s" % (SYSIMG_PATH,PLCONF_DIR) ):
+        log.write( "Unable to create directory\n" )
+        return 0
+
+    log.write( "Writing system /etc/fstab\n" )
+    fstab= file( "%s/etc/fstab" % SYSIMG_PATH, "w" )
+    fstab.write( "%s           none        swap      sw        0 0\n" % \
+                 PARTITIONS["mapper-swap"] )
+    fstab.write( "%s           /           ext3      defaults  1 1\n" % \
+                 PARTITIONS["mapper-root"] )
+    fstab.write( "%s           /vservers   ext3      tagxid,defaults  1 2\n" % \
+                 PARTITIONS["mapper-vservers"] )
+    fstab.write( "none         /proc       proc      defaults  0 0\n" )
+    fstab.write( "none         /dev/shm    tmpfs     defaults  0 0\n" )
+    fstab.write( "none         /dev/pts    devpts    defaults  0 0\n" )
+    fstab.close()
+
+    log.write( "Writing system /etc/issue\n" )
+    issue= file( "%s/etc/issue" % SYSIMG_PATH, "w" )
+    issue.write( "PlanetLab Node: \\n\n" )
+    issue.write( "Kernel \\r on an \\m\n" )
+    issue.write( "http://www.planet-lab.org\n\n" )
+    issue.close()
+
+    log.write( "Setting up authentication (non-ssh)\n" )
+    utils.sysexec_chroot( SYSIMG_PATH, "authconfig --nostart --kickstart --enablemd5 " \
+                   "--enableshadow", log )
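+    # the sed below turns an empty root password field ("root::") into a
+    # locked one ("root:*:") so that password logins for root are disabled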
+    utils.sysexec( "sed -e 's/^root\:\:/root\:*\:/g' " \
+                   "%s/etc/shadow > %s/etc/shadow.new" % \
+                   (SYSIMG_PATH,SYSIMG_PATH), log )
+    utils.sysexec_chroot( SYSIMG_PATH, "mv " \
+                   "/etc/shadow.new /etc/shadow", log )
+    utils.sysexec_chroot( SYSIMG_PATH, "chmod 400 /etc/shadow", log )
+
+    # if we are setup with dhcp, copy the current /etc/resolv.conf into
+    # the system image so we can run programs inside that need network access
+    method= ""
+    try:
+        method= vars['INTERFACE_SETTINGS']['method']
+    except:
+        pass
+    
+    if method == "dhcp":
+        utils.sysexec( "cp /etc/resolv.conf %s/etc/" % SYSIMG_PATH, log )
+
+    log.write( "Writing node install version\n" )
+    utils.makedirs( "%s/etc/planetlab" % SYSIMG_PATH )
+    ver= file( "%s/etc/planetlab/install_version" % SYSIMG_PATH, "w" )
+    ver.write( "%s\n" % VERSION )
+    ver.close()
+
+    log.write( "Creating ssh host keys\n" )
+    key_gen_prog= "/usr/bin/ssh-keygen"
+
+    log.write( "Generating SSH1 RSA host key:\n" )
+    key_file= "/etc/ssh/ssh_host_key"
+    utils.sysexec_chroot( SYSIMG_PATH, "%s -q -t rsa1 -f %s -C '' -N ''" %
+                   (key_gen_prog,key_file), log )
+    utils.sysexec( "chmod 600 %s/%s" % (SYSIMG_PATH,key_file), log )
+    utils.sysexec( "chmod 644 %s/%s.pub" % (SYSIMG_PATH,key_file), log )
+    
+    log.write( "Generating SSH2 RSA host key:\n" )
+    key_file= "/etc/ssh/ssh_host_rsa_key"
+    utils.sysexec_chroot( SYSIMG_PATH, "%s -q -t rsa -f %s -C '' -N ''" %
+                   (key_gen_prog,key_file), log )
+    utils.sysexec( "chmod 600 %s/%s" % (SYSIMG_PATH,key_file), log )
+    utils.sysexec( "chmod 644 %s/%s.pub" % (SYSIMG_PATH,key_file), log )
+    
+    log.write( "Generating SSH2 DSA host key:\n" )
+    key_file= "/etc/ssh/ssh_host_dsa_key"
+    utils.sysexec_chroot( SYSIMG_PATH, "%s -q -t dsa -f %s -C '' -N ''" %
+                   (key_gen_prog,key_file), log )
+    utils.sysexec( "chmod 600 %s/%s" % (SYSIMG_PATH,key_file), log )
+    utils.sysexec( "chmod 644 %s/%s.pub" % (SYSIMG_PATH,key_file), log )
+
+    return 1
diff --git a/source/steps/MakeInitrd.py b/source/steps/MakeInitrd.py
new file mode 100644 (file)
index 0000000..4564dfe
--- /dev/null
@@ -0,0 +1,77 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2003 Intel Corporation
+# All rights reserved.
+#
+# Copyright (c) 2004-2006 The Trustees of Princeton University
+# All rights reserved.
+
+import os
+
+from Exceptions import *
+import utils
+import systeminfo
+
+# for centos5.3
+# 14:42:27(UTC) No module dm-mem-cache found for kernel 2.6.22.19-vs2.3.0.34.33.onelab, aborting.
+# http://kbase.redhat.com/faq/docs/DOC-16528;jsessionid=7E984A99DE8DB094D9FB08181C71717C.ab46478d
+def bypassRaidIfNeeded(sysimg_path, log):
+    try:
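+        # /etc/redhat-release is expected to hold a single line such as
+        # "CentOS release 5.3 (Final)" (illustrative); anything else just
+        # falls through to the bare except below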
+        [ a,b,c,d]=file('%s/etc/redhat-release'%sysimg_path).readlines()[0].strip().split()
+        if a !='CentOS': return
+        [major,minor]=[int(x) for x in c.split('.')]
+        if minor >= 3:
+            utils.sysexec_noerr('echo "DMRAID=no" >> %s/etc/sysconfig/mkinitrd/noraid' % sysimg_path , log )
+            utils.sysexec_noerr('chmod 755 %s/etc/sysconfig/mkinitrd/noraid' % sysimg_path, log )
+    except:
+        pass
+            
+        
+def Run( vars, log ):
+    """
+    Rebuilds the system initrd, on first install or in case the
+    hardware changed.
+    """
+
+    log.write( "\n\nStep: Rebuilding initrd\n" )
+    
+    # make sure we have the variables we need
+    try:
+        SYSIMG_PATH= vars["SYSIMG_PATH"]
+        if SYSIMG_PATH == "":
+            raise ValueError, "SYSIMG_PATH"
+
+        PARTITIONS= vars["PARTITIONS"]
+        if PARTITIONS == None:
+            raise ValueError, "PARTITIONS"
+
+    except KeyError, var:
+        raise BootManagerException, "Missing variable in vars: %s\n" % var
+    except ValueError, var:
+        raise BootManagerException, "Variable in vars, shouldn't be: %s\n" % var
+
+    # mkinitrd needs /dev and /proc to do the right thing.
+    # /proc is already mounted, so bind-mount /dev here
+    # xxx tmp - trying to work around the f14 case:
+    # check that /dev/ is mounted with devtmpfs
+    # tmp - sysexec_noerr not returning what one would expect
+    # if utils.sysexec_noerr ("grep devtmpfs /proc/mounts") != 0:
+    utils.sysexec_noerr("mount -t devtmpfs none /dev")
+    utils.sysexec("mount -o bind /dev %s/dev" % SYSIMG_PATH)
+    utils.sysexec("mount -t sysfs none %s/sys" % SYSIMG_PATH)
+
+    initrd, kernel_version= systeminfo.getKernelVersion(vars,log)
+    try:
+        utils.removefile( "%s/boot/%s" % (SYSIMG_PATH, initrd) )
+    except:
+        print "%s/boot/%s is already removed" % (SYSIMG_PATH, initrd)
+
+    # hack for CentOS 5.3
+    bypassRaidIfNeeded(SYSIMG_PATH , log )
+    # specify ext3 for fedora14 and above as their default fs is ext4
+    utils.sysexec_chroot( SYSIMG_PATH, "mkinitrd -v --with=ext3 --allow-missing /boot/initrd-%s.img %s" % \
+               (kernel_version, kernel_version), log )
+
+    utils.sysexec_noerr("umount %s/sys" % SYSIMG_PATH , log )
+    utils.sysexec_noerr("umount %s/dev" % SYSIMG_PATH , log)
+
diff --git a/source/steps/ReadNodeConfiguration.py b/source/steps/ReadNodeConfiguration.py
new file mode 100644 (file)
index 0000000..aadef79
--- /dev/null
@@ -0,0 +1,624 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2003 Intel Corporation
+# All rights reserved.
+#
+# Copyright (c) 2004-2006 The Trustees of Princeton University
+# All rights reserved.
+# expected /proc/partitions format
+
+import sys, os, traceback
+import string
+import socket
+import re
+
+import utils
+from Exceptions import *
+import BootServerRequest
+import BootAPI
+import notify_messages
+import UpdateRunLevelWithPLC
+
+
+# two possible names of the configuration files
+NEW_CONF_FILE_NAME= "plnode.txt"
+OLD_CONF_FILE_NAME= "planet.cnf"
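+
+# both files use simple name="value" lines; a minimal illustrative
+# plnode.txt (placeholder values) could look like:
+#   NODE_ID="123"
+#   NODE_KEY="0123456789abcdef"
+#   IP_METHOD="dhcp"
+#   HOST_NAME="node1"
+#   DOMAIN_NAME="example.org"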
+
+
+def Run( vars, log ):   
+    """
+    read the machines node configuration file, which contains
+    the node key and the node_id for this machine.
+    
+    these files can exist in several different locations with
+    several different names. Below is the search order:
+
+    filename      floppy   flash    ramdisk    cd
+    plnode.txt      1        2      4 (/)      5 (/usr/boot), 6 (/usr)
+    planet.cnf      3
+
+    The locations will be searched in the above order, plnode.txt
+    will be checked first, then planet.cnf. Flash devices will only
+    be searched on 3.0 cds.
+
+    Because some of the earlier
+    boot cds don't validate the configuration file (which results
+    in a file named /tmp/planet-clean.cnf), and some do, let's
+    bypass this, and mount and attempt to read in the conf
+    file ourselves. If it doesn't exist, we cannot continue, and a
+    BootManagerException will be raised. If the configuration file is found
+    and read, return 1.
+
+    Expect the following variables from the store:
+    
+    Sets the following variables from the configuration file:
+    WAS_NODE_ID_IN_CONF         Set to 1 if the node id was in the conf file
+    WAS_NODE_KEY_IN_CONF         Set to 1 if the node key was in the conf file
+    NODE_ID                     The db node_id for this machine
+    NODE_KEY                    The key for this node
+    INTERFACE_SETTINGS            A dictionary of the values from the network
+                                configuration file. keys set:
+                                   method               IP_METHOD
+                                   ip                   IP_ADDRESS
+                                   mac                  NET_DEVICE       
+                                   gateway              IP_GATEWAY
+                                   network              IP_NETADDR
+                                   broadcast            IP_BROADCASTADDR
+                                   netmask              IP_NETMASK
+                                   dns1                 IP_DNS1
+                                   dns2                 IP_DNS2
+                                   hostname             HOST_NAME
+                                   domainname           DOMAIN_NAME
+                                -- wlan oriented --
+                                   ssid                 WLAN_SSID
+                                   iwconfig             WLAN_IWCONFIG
+
+    the mac address is read from the machine unless it exists in the
+    configuration file.
+    """
+
+    log.write( "\n\nStep: Reading node configuration file.\n" )
+
+
+    # make sure we have the variables we need
+
+    INTERFACE_SETTINGS= {}
+    INTERFACE_SETTINGS['method']= "dhcp"
+    INTERFACE_SETTINGS['ip']= ""
+    INTERFACE_SETTINGS['mac']= ""
+    INTERFACE_SETTINGS['gateway']= ""
+    INTERFACE_SETTINGS['network']= ""
+    INTERFACE_SETTINGS['broadcast']= ""
+    INTERFACE_SETTINGS['netmask']= ""
+    INTERFACE_SETTINGS['dns1']= ""
+    INTERFACE_SETTINGS['dns2']= ""
+    INTERFACE_SETTINGS['hostname']= "localhost"
+    INTERFACE_SETTINGS['domainname']= "localdomain"
+    vars['INTERFACE_SETTINGS']= INTERFACE_SETTINGS
+
+    vars['NODE_ID']= 0
+    vars['NODE_KEY']= ""
+
+    vars['WAS_NODE_ID_IN_CONF']= 0
+    vars['WAS_NODE_KEY_IN_CONF']= 0
+
+    vars['DISCONNECTED_OPERATION']= ''
+
+    # for any devices that need to be mounted to get the configuration
+    # file, mount them here.
+    mount_point= "/tmp/conffilemount"
+    utils.makedirs( mount_point )
+
+    old_conf_file_contents= None
+    conf_file_contents= None
+    
+    
+    # 1. check the regular floppy device
+    log.write( "Checking standard floppy disk for plnode.txt file.\n" )
+
+    log.write( "Mounting /dev/fd0 on %s\n" % mount_point )
+    utils.sysexec_noerr( "mount -o ro -t ext2,msdos /dev/fd0 %s " \
+                         % mount_point, log )
+
+    conf_file_path= "%s/%s" % (mount_point,NEW_CONF_FILE_NAME)
+    
+#    log.write( "Checking for existence of %s\n" % conf_file_path )
+    if os.access( conf_file_path, os.R_OK ):
+        try:
+            conf_file= file(conf_file_path,"r")
+            conf_file_contents= conf_file.read()
+            conf_file.close()
+            log.write( "Read in contents of file %s\n" % conf_file_path )
+        except IOError, e:
+            log.write( "Unable to read file %s\n" % conf_file_path )
+            pass
+
+        utils.sysexec_noerr( "umount %s" % mount_point, log )
+        if __parse_configuration_file( vars, log, conf_file_contents):
+            log.write("ReadNodeConfiguration: [1] using %s from floppy /dev/fd0\n"%NEW_CONF_FILE_NAME)
+            return 1
+        else:
+            raise BootManagerException( "Found configuration file plnode.txt " \
+                                        "on floppy, but was unable to parse it." )
+
+
+    # try the old file name, same device. its actually number 3 on the search
+    # order, but do it now to save mounting/unmounting the disk twice.
+    # try to parse it later...
+    conf_file_path= "%s/%s" % (mount_point,OLD_CONF_FILE_NAME)
+
+# this message really does not convey any useful information
+#    log.write( "Checking for existence of %s (used later)\n" % conf_file_path )
+    if os.access( conf_file_path, os.R_OK ):
+        try:
+            old_conf_file= file(conf_file_path,"r")
+            old_conf_file_contents= old_conf_file.read()
+            old_conf_file.close()
+            log.write( "Read in contents of file %s\n" % conf_file_path )
+        except IOError, e:
+            log.write( "Unable to read file %s\n" % conf_file_path )
+            pass
+        
+    utils.sysexec_noerr( "umount %s" % mount_point, log )
+
+    # 2. check flash devices on 3.0 based cds
+    log.write( "Checking flash devices for plnode.txt file.\n" )
+
+    # this is done the same way the 3.0 cds do it, by attempting
+    # to mount any sd*1 devices that are removable
+    devices= os.listdir("/sys/block/")
+
+    for device in devices:
+        if device[:2] != "sd":
+            log.write( "Skipping non-scsi device %s\n" % device )
+            continue
+
+        # test removable
+        removable_file_path= "/sys/block/%s/removable" % device
+        try:
+            removable= int(file(removable_file_path,"r").read().strip())
+        except ValueError, e:
+            continue
+        except IOError, e:
+            continue
+
+        if not removable:
+            log.write( "Skipping non-removable device %s\n" % device )
+            continue
+
+        log.write( "Checking removable device %s\n" % device )
+
+        partitions= file("/proc/partitions", "r")
+        for line in partitions:
+            found_file= 0
+            parsed_file= 0
+            
+            if not re.search("%s[0-9]*$" % device, line):
+                continue
+
+            try:
+                # major minor  #blocks  name
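+                # an illustrative line: "   8        3  244198584 sda3",
+                # so parts[3] is the partition name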
+                parts= string.split(line)
+
+                # ok, try to mount it and see if we have a conf file.
+                full_device= "/dev/%s" % parts[3]
+            except IndexError, e:
+                log.write( "Incorrect /proc/partitions line:\n%s\n" % line )
+                continue
+
+            log.write( "Mounting %s on %s\n" % (full_device,mount_point) )
+            try:
+                utils.sysexec( "mount -o ro -t ext2,msdos %s %s" \
+                               % (full_device,mount_point), log )
+            except BootManagerException, e:
+                log.write( "Unable to mount, trying next partition\n" )
+                continue
+
+            conf_file_path= "%s/%s" % (mount_point,NEW_CONF_FILE_NAME)
+
+            log.write( "Checking for existence of %s\n" % conf_file_path )
+            if os.access( conf_file_path, os.R_OK ):
+                try:
+                    conf_file= file(conf_file_path,"r")
+                    conf_file_contents= conf_file.read()
+                    conf_file.close()
+                    found_file= 1
+                    log.write( "Read in contents of file %s\n" % \
+                               conf_file_path )
+
+                    if __parse_configuration_file( vars, log, conf_file_contents):
+                        parsed_file= 1
+                except IOError, e:
+                    log.write( "Unable to read file %s\n" % conf_file_path )
+
+            utils.sysexec_noerr( "umount %s" % mount_point, log )
+            if found_file:
+                if parsed_file:
+                    log.write("ReadNodeConfiguration: [2] using %s from partition %s\n"%\
+                                  (NEW_CONF_FILE_NAME,full_device))
+                    return 1
+                else:
+                    raise BootManagerException( \
+                        "Found configuration file on %s, but was unable to parse it."%full_device)
+
+
+            
+    # 3. check standard floppy disk for old file name planet.cnf
+    log.write( "Checking standard floppy disk for planet.cnf file (for legacy nodes).\n" )
+
+    if old_conf_file_contents:
+        if __parse_configuration_file( vars, log, old_conf_file_contents):
+            log.write("ReadNodeConfiguration: [3] using %s from floppy /dev/fd0\n"%OLD_CONF_FILE_NAME)
+            return 1
+        else:
+            raise BootManagerException( "Found configuration file planet.cnf " \
+                                        "on floppy, but was unable to parse it." )
+
+
+    # 4. check for plnode.txt in / (ramdisk)
+    log.write( "Checking / (ramdisk) for plnode.txt file.\n" )
+    
+    conf_file_path= "/%s" % NEW_CONF_FILE_NAME
+
+    log.write( "Checking for existence of %s\n" % conf_file_path )
+    if os.access(conf_file_path,os.R_OK):
+        try:
+            conf_file= file(conf_file_path,"r")
+            conf_file_contents= conf_file.read()
+            conf_file.close()
+            log.write( "Read in contents of file %s\n" % conf_file_path )
+        except IOError, e:
+            log.write( "Unable to read file %s\n" % conf_file_path )
+            pass
+    
+        if __parse_configuration_file( vars, log, conf_file_contents):            
+            log.write("ReadNodeConfiguration: [4] using %s from ramdisk\n"%NEW_CONF_FILE_NAME)
+            return 1
+        else:
+            raise BootManagerException( "Found configuration file plnode.txt " \
+                                        "in /, but was unable to parse it.")
+
+    
+    # 5. check for plnode.txt in /usr/boot (mounted already)
+    log.write( "Checking /usr/boot (cd) for plnode.txt file.\n" )
+    
+    conf_file_path= "/usr/boot/%s" % NEW_CONF_FILE_NAME
+
+    log.write( "Checking for existence of %s\n" % conf_file_path )
+    if os.access(conf_file_path,os.R_OK):
+        try:
+            conf_file= file(conf_file_path,"r")
+            conf_file_contents= conf_file.read()
+            conf_file.close()
+            log.write( "Read in contents of file %s\n" % conf_file_path )
+        except IOError, e:
+            log.write( "Unable to read file %s\n" % conf_file_path )
+            pass
+    
+        if __parse_configuration_file( vars, log, conf_file_contents):            
+            log.write("ReadNodeConfiguration: [5] using %s from CD in /usr/boot\n"%NEW_CONF_FILE_NAME)
+            return 1
+        else:
+            raise BootManagerException( "Found configuration file plnode.txt " \
+                                        "in /usr/boot, but was unable to parse it.")
+
+
+
+    # 6. check for plnode.txt in /usr (mounted already)
+    log.write( "Checking /usr (cd) for plnode.txt file.\n" )
+    
+    conf_file_path= "/usr/%s" % NEW_CONF_FILE_NAME
+
+    log.write( "Checking for existence of %s\n" % conf_file_path )
+    if os.access(conf_file_path,os.R_OK):
+        try:
+            conf_file= file(conf_file_path,"r")
+            conf_file_contents= conf_file.read()
+            conf_file.close()
+            log.write( "Read in contents of file %s\n" % conf_file_path )
+        except IOError, e:
+            log.write( "Unable to read file %s\n" % conf_file_path )
+            pass    
+    
+        if __parse_configuration_file( vars, log, conf_file_contents):            
+            log.write("ReadNodeConfiguration: [6] using %s from /usr\n"%NEW_CONF_FILE_NAME)
+            return 1
+        else:
+            raise BootManagerException( "Found configuration file plnode.txt " \
+                                        "in /usr, but was unable to parse it.")
+
+
+    raise BootManagerException, "Unable to find and read a node configuration file."
+    
+
+
+
+def __parse_configuration_file( vars, log, file_contents ):
+    """
+    parse a configuration file, set keys in var INTERFACE_SETTINGS
+    in vars (see comment for function ReadNodeConfiguration). this
+    also reads the mac address from the machine if successful parsing
+    of the configuration file is completed.
+    """
+
+    INTERFACE_SETTINGS= vars["INTERFACE_SETTINGS"]
+    
+    if file_contents is None:
+        log.write( "__parse_configuration_file called with no file contents\n" )
+        return 0
+    
+    try:
+        line_num= 0
+        for line in file_contents.split("\n"):
+
+            line_num = line_num + 1
+            
+            # if its a comment or a whitespace line, ignore
+            if line[:1] == "#" or string.strip(line) == "":
+                continue
+
+            # file is setup as name="value" pairs
+            parts= string.split(line, "=", 1)
+
+            name= string.strip(parts[0])
+            value= string.strip(parts[1])
+
+            # make sure value starts and ends with
+            # single or double quotes
+            quotes= value[0] + value[len(value)-1]
+            if quotes != "''" and quotes != '""':
+                log.write( "Invalid line %d in configuration file:\n" % line_num )
+                log.write( line + "\n" )
+                return 0
+
+            # get rid of the quotes around the value
+            value= string.strip(value[1:len(value)-1])
+
+            if name == "NODE_ID":
+                try:
+                    vars['NODE_ID']= int(value)
+                    vars['WAS_NODE_ID_IN_CONF']= 1
+                except ValueError, e:
+                    log.write( "Non-numeric node_id in configuration file.\n" )
+                    return 0
+
+            if name == "NODE_KEY":
+                vars['NODE_KEY']= value
+                vars['WAS_NODE_KEY_IN_CONF']= 1
+
+            if name == "IP_METHOD":
+                value= string.lower(value)
+                if value != "static" and value != "dhcp":
+                    log.write( "Invalid IP_METHOD in configuration file:\n" )
+                    log.write( line + "\n" )
+                    return 0
+                INTERFACE_SETTINGS['method']= value.strip()
+
+            if name == "IP_ADDRESS":
+                INTERFACE_SETTINGS['ip']= value.strip()
+
+            if name == "IP_GATEWAY":
+                INTERFACE_SETTINGS['gateway']= value.strip()
+
+            if name == "IP_NETMASK":
+                INTERFACE_SETTINGS['netmask']= value.strip()
+
+            if name == "IP_NETADDR":
+                INTERFACE_SETTINGS['network']= value.strip()
+
+            if name == "IP_BROADCASTADDR":
+                INTERFACE_SETTINGS['broadcast']= value.strip()
+
+            if name == "IP_DNS1":
+                INTERFACE_SETTINGS['dns1']= value.strip()
+
+            if name == "IP_DNS2":
+                INTERFACE_SETTINGS['dns2']= value.strip()
+
+            if name == "HOST_NAME":
+                INTERFACE_SETTINGS['hostname']= string.lower(value)
+
+            if name == "DOMAIN_NAME":
+                INTERFACE_SETTINGS['domainname']= string.lower(value)
+
+            if name == "NET_DEVICE":
+                INTERFACE_SETTINGS['mac']= string.upper(value)
+
+            if name == "DISCONNECTED_OPERATION":
+                vars['DISCONNECTED_OPERATION']= value.strip()
+
+    except IndexError, e:
+        log.write( "Unable to parse configuration file\n" )
+        return 0
+
+    # now if we are set to dhcp, clear out any fields
+    # that don't make sense
+    if INTERFACE_SETTINGS["method"] == "dhcp":
+        INTERFACE_SETTINGS["ip"]= ""
+        INTERFACE_SETTINGS["gateway"]= ""     
+        INTERFACE_SETTINGS["netmask"]= ""
+        INTERFACE_SETTINGS["network"]= ""
+        INTERFACE_SETTINGS["broadcast"]= ""
+        INTERFACE_SETTINGS["dns1"]= ""
+        INTERFACE_SETTINGS["dns2"]= ""
+
+    log.write("Successfully read and parsed node configuration file.\n" )
+
+    # if the mac wasn't specified, read it in from the system.
+    if INTERFACE_SETTINGS["mac"] == "":
+        device= "eth0"
+        mac_addr= utils.get_mac_from_interface(device)
+
+        if mac_addr is None:
+            log.write( "Could not get mac address for device eth0.\n" )
+            return 0
+
+        INTERFACE_SETTINGS["mac"]= string.upper(mac_addr)
+
+        log.write( "Got mac address %s for device %s\n" %
+                   (INTERFACE_SETTINGS["mac"],device) )
+        
+
+    # now, if the conf file didn't contain a node id, post the mac address
+    # to plc to get the node_id value
+    if vars['NODE_ID'] is None or vars['NODE_ID'] == 0:
+        log.write( "Configuration file does not contain the node_id value.\n" )
+        log.write( "Querying PLC for node_id.\n" )
+
+        bs_request= BootServerRequest.BootServerRequest(vars)
+        
+        postVars= {"mac_addr" : INTERFACE_SETTINGS["mac"]}
+        result= bs_request.DownloadFile( "/boot/getnodeid.php",
+                                         None, postVars, 1, 1,
+                                         "/tmp/node_id")
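+        # getnodeid.php is expected to answer with the node_id as a bare
+        # integer (-1 when the mac address is not known to PLC)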
+        if result == 0:
+            log.write( "Unable to make request to get node_id.\n" )
+            return 0
+
+        try:
+            node_id_file= file("/tmp/node_id","r")
+            node_id= string.strip(node_id_file.read())
+            node_id_file.close()
+        except IOError:
+            log.write( "Unable to read node_id from /tmp/node_id\n" )
+            return 0
+
+        try:
+            node_id= int(string.strip(node_id))
+        except ValueError:
+            log.write( "Got node_id from PLC, but not numeric: %s" % str(node_id) )
+            return 0
+
+        if node_id == -1:
+            log.write( "Got node_id, but it returned -1\n\n" )
+
+            log.write( "------------------------------------------------------\n" )
+            log.write( "This indicates that this node could not be identified\n" )
+            log.write( "by PLC. You will need to add the node to your site,\n" )
+            log.write( "and regenerate the network configuration file.\n" )
+            log.write( "See the Technical Contact guide for node setup\n" )
+            log.write( "procedures.\n\n" )
+            log.write( "Boot process canceled until this is completed.\n" )
+            log.write( "------------------------------------------------------\n" )
+            
+            cancel_boot_flag= "/tmp/CANCEL_BOOT"
+            # this will make the initial script stop requesting scripts from PLC
+            utils.sysexec( "touch %s" % cancel_boot_flag, log )
+
+            return 0
+
+        log.write( "Got node_id from PLC: %s\n" % str(node_id) )
+        vars['NODE_ID']= node_id
+
+
+
+    if vars['NODE_KEY'] is None or vars['NODE_KEY'] == "":
+        log.write( "Configuration file does not contain a node_key value.\n" )
+        log.write( "Using boot nonce instead.\n" )
+
+        # 3.x cds stored the file in /tmp/nonce in ascii form, so they
+        # can be read and used directly. 2.x cds stored in the same place
+        # but in binary form, so we need to convert it to ascii the same
+        # way the old boot scripts did so it matches what's in the db
+        # (php uses bin2hex)
+        read_mode= "r"
+            
+        try:
+            nonce_file= file("/tmp/nonce",read_mode)
+            nonce= nonce_file.read()
+            nonce_file.close()
+        except IOError:
+            log.write( "Unable to read nonce from /tmp/nonce\n" )
+            return 0
+
+        nonce= string.strip(nonce)
+
+        log.write( "Read nonce, using as key.\n" )
+        vars['NODE_KEY']= nonce
+        
+        
+    # at this point, we've read the network configuration file.
+    # if we were setup using dhcp, get this system's current ip
+    # address and update the vars key ip, because it
+    # is needed for future api calls.
+
+    # at the same time, we can check to make sure that the hostname
+    # in the configuration file matches the ip address. if it fails
+    # notify the owners
+
+    hostname= INTERFACE_SETTINGS['hostname'] + "." + \
+              INTERFACE_SETTINGS['domainname']
+
+    # set to 0 if any part of the hostname resolution check fails
+    hostname_resolve_ok= 1
+
+    # set to 0 if the above fails, and, we are using dhcp in which
+    # case we don't know the ip of this machine (without having to
+    # parse ifconfig or something). In that case, we won't be able
+    # to make api calls, so printing a message to the screen will
+    # have to suffice.
+    can_make_api_call= 1
+
+    log.write( "Checking that hostname %s resolves\n" % hostname )
+
+    # try a regular dns lookup first
+    try:
+        resolved_node_ip= socket.gethostbyname(hostname)
+    except socket.gaierror, e:
+        hostname_resolve_ok= 0
+        
+
+    if INTERFACE_SETTINGS['method'] == "dhcp":
+        if hostname_resolve_ok:
+            INTERFACE_SETTINGS['ip']= resolved_node_ip
+            node_ip= resolved_node_ip
+        else:
+            can_make_api_call= 0
+    else:
+        node_ip= INTERFACE_SETTINGS['ip']
+
+    # make sure the dns lookup matches what the configuration file says
+    if hostname_resolve_ok:
+        if node_ip != resolved_node_ip:
+            log.write( "Hostname %s does not resolve to %s, but %s:\n" % \
+                       (hostname,node_ip,resolved_node_ip) )
+            hostname_resolve_ok= 0
+        else:
+            log.write( "Hostname %s correctly resolves to %s:\n" %
+                       (hostname,node_ip) )
+
+        
+    vars["INTERFACE_SETTINGS"]= INTERFACE_SETTINGS
+
+    if (not hostname_resolve_ok and not vars['DISCONNECTED_OPERATION'] and
+        'NAT_MODE' not in vars):
+        log.write( "Hostname does not resolve correctly, will not continue.\n" )
+
+        if can_make_api_call:
+            log.write( "Notifying contacts of problem.\n" )
+
+            vars['RUN_LEVEL']= 'failboot'
+            vars['STATE_CHANGE_NOTIFY']= 1
+            vars['STATE_CHANGE_NOTIFY_MESSAGE']= \
+                                     notify_messages.MSG_HOSTNAME_NOT_RESOLVE
+            
+            UpdateRunLevelWithPLC.Run( vars, log )
+                    
+        log.write( "\n\n" )
+        log.write( "The hostname and/or ip in the network configuration\n" )
+        log.write( "file do not resolve and match.\n" )
+        log.write( "Please make sure the hostname set in the network\n" )
+        log.write( "configuration file resolves to the ip also specified\n" )
+        log.write( "there.\n\n" )
+        log.write( "Debug mode is being started on this cd. When the above\n" )
+        log.write( "is corrected, reboot the machine to try again.\n" )
+        
+        raise BootManagerException, \
+              "Configured node hostname does not resolve."
+    
+    try:
+        log.write("Using NODE_ID %d\n"%vars['NODE_ID'])
+    except:
+        log.write("Unknown NODE_ID")
+
+    return 1
diff --git a/source/steps/SendHardwareConfigToPLC.py b/source/steps/SendHardwareConfigToPLC.py
new file mode 100644 (file)
index 0000000..1f5dd4b
--- /dev/null
@@ -0,0 +1,19 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2003 Intel Corporation
+# All rights reserved.
+#
+# Copyright (c) 2004-2006 The Trustees of Princeton University
+# All rights reserved.
+
+
+from Exceptions import *
+
+
+def Run( vars, log ):
+
+    log.write( "\n\nStep: Sending hardware configuration to PLC.\n" )
+
+    log.write( "Not implemented, continuing.\n" )
+    
+    return
diff --git a/source/steps/StartDebug.py b/source/steps/StartDebug.py
new file mode 100644 (file)
index 0000000..5cdf8f7
--- /dev/null
@@ -0,0 +1,125 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2003 Intel Corporation
+# All rights reserved.
+#
+# Copyright (c) 2004-2006 The Trustees of Princeton University
+# All rights reserved.
+
+import os
+
+from Exceptions import *
+import utils
+
+
+warning_message= \
+"""
+---------------------------------------------------------
+This machine has entered a temporary debug state, so
+PlanetLab Support can log in and fix any problems that
+might have occurred.
+
+Please do not reboot this machine at this point, unless
+specifically asked to.
+
+Thank you.
+---------------------------------------------------------
+"""
+
+# this can be invoked
+# either at the end of the bm logic, because something failed (last_resort = True),
+# or as a fallback very early in the bootmanager logic,
+# so we can reach the node regardless of what happens (e.g. bm sometimes hangs)
+
+def Run( vars, log, last_resort = True):
+
+    """
+    Bring up sshd inside the boot cd environment for debug purposes.
+
+    Once it's running, touch the file /tmp/SSHD_RUNNING so future
+    calls to this function don't do anything.
+
+    Expect the following variables in vars to be set:
+    BM_SOURCE_DIR     The source dir for the boot manager sources that
+                        we are currently running from
+    """
+
+    if last_resort:
+        message="Starting debug mode"
+    else:
+        message="Starting fallback sshd"
+
+
+    log.write( "\n\nStep: %s.\n"%message )
+    
+    # make sure we have the variables we need
+    try:
+        BM_SOURCE_DIR= vars["BM_SOURCE_DIR"]
+        if BM_SOURCE_DIR == "":
+            raise ValueError, "BM_SOURCE_DIR"
+
+    except KeyError, var:
+        raise BootManagerException, "Missing variable in vars: %s\n" % var
+    except ValueError, var:
+        raise BootManagerException, "Variable in vars, shouldn't be: %s\n" % var
+
+    # constants
+    ssh_source_files= "%s/debug_files/" % BM_SOURCE_DIR    
+    ssh_dir= "/etc/ssh/"
+    ssh_home= "/root/.ssh"
+    cancel_boot_flag= "/tmp/CANCEL_BOOT"
+    sshd_started_flag= "/tmp/SSHD_RUNNING"
+
+    # pre-sshd
+    pre_sshd_script= os.path.join(ssh_source_files, "pre-sshd")
+    if os.path.exists(pre_sshd_script):
+        utils.sysexec_noerr( pre_sshd_script, log )
+    
+    # create host keys if needed
+    if not os.path.isdir (ssh_dir):
+        utils.makedirs (ssh_dir)
+    key=ssh_dir+"/ssh_host_key"
+    if not os.path.isfile (key):
+        log.write("Creating host rsa1 key %s\n"%key)
+        utils.sysexec( "ssh-keygen -t rsa1 -b 1024 -f %s -N ''" % key, log )
+    key=ssh_dir+"/ssh_host_rsa_key"
+    if not os.path.isfile (key):
+        log.write("Creating host rsa key %s\n"%key)
+        utils.sysexec( "ssh-keygen -t rsa -f %s -N ''" % key, log )
+    key=ssh_dir+"/ssh_host_dsa_key"
+    if not os.path.isfile (key):
+        log.write("Creating host dsa key %s\n"%key)
+        utils.sysexec( "ssh-keygen -d -f %s -N ''" % key, log )
+
+    # (over)write sshd config
+    utils.sysexec( "cp -f %s/sshd_config %s/sshd_config" % (ssh_source_files,ssh_dir), log )
+    
+    ### xxx ### xxx ### xxx ### xxx ### xxx 
+
+    # always update the key, may have changed in this instance of the bootmanager
+    log.write( "Installing debug ssh key for root user\n" )
+    if not os.path.isdir ( ssh_home):
+        utils.makedirs( ssh_home )
+    utils.sysexec( "cp -f %s/debug_root_ssh_key %s/authorized_keys" % (ssh_source_files,ssh_home), log )
+    utils.sysexec( "chmod 700 %s" % ssh_home, log )
+    utils.sysexec( "chmod 600 %s/authorized_keys" % ssh_home, log )
+
+    # start sshd
+    if not os.path.isfile(sshd_started_flag):
+        log.write( "Starting sshd\n" )
+        utils.sysexec( "service sshd start", log )
+        # flag that ssh is running
+        utils.sysexec( "touch %s" % sshd_started_flag, log )
+    else:
+        # it is expected that sshd is already running when last_resort==True
+        if not last_resort:
+            log.write( "sshd is already running\n" )
+
+    if last_resort:
+        # this will make the initial script stop requesting scripts from PLC
+        utils.sysexec( "touch %s" % cancel_boot_flag, log )
+
+    if last_resort:
+        print warning_message
+    
+    return
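
As the comment above notes, this step can run twice in one boot. A rough usage sketch, assuming vars and log are the objects the BootManager driver normally passes around (the BM_SOURCE_DIR value shown is only illustrative):

import sys
import StartDebug

log = sys.stderr                               # any object with a write() method
vars = {'BM_SOURCE_DIR': '/tmp/source'}        # illustrative value

# very early: bring up the fallback sshd without cancelling the boot
StartDebug.Run(vars, log, last_resort=False)

# later, after a fatal error: enter debug mode for good
StartDebug.Run(vars, log)                      # touches /tmp/CANCEL_BOOT and prints the warning
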
diff --git a/source/steps/StartRunlevelAgent.py b/source/steps/StartRunlevelAgent.py
new file mode 100644 (file)
index 0000000..d4ee55c
--- /dev/null
@@ -0,0 +1,38 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2003 Intel Corporation
+# All rights reserved.
+#
+# Copyright (c) 2004-2006 The Trustees of Princeton University
+# All rights reserved.
+
+
+import os
+
+from Exceptions import *
+import BootAPI
+
+
+def Run( vars, log ):
+    """
+        Start the RunlevelAgent.py script.  Should follow
+        AuthenticateWithPLC() in order to guarantee that
+        /etc/planetlab/session is present.
+    """
+
+    log.write( "\n\nStep: Starting RunlevelAgent.py\n" )
+
+    try:
+        cmd = "%s/RunlevelAgent.py" % vars['BM_SOURCE_DIR']
+        # raise error if script is not present.
+        os.stat(cmd)
+        # init script only starts RLA once.
+        os.system("/usr/bin/python %s start bootmanager &" % cmd)
+    except KeyError, var:
+        raise BootManagerException, "Missing variable in vars: %s\n" % var
+    except ValueError, var:
+        raise BootManagerException, "Variable in vars, shouldn't be: %s\n" % var
+
+    return 1
+    
+
diff --git a/source/steps/StopRunlevelAgent.py b/source/steps/StopRunlevelAgent.py
new file mode 100644 (file)
index 0000000..c88b35f
--- /dev/null
@@ -0,0 +1,43 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2003 Intel Corporation
+# All rights reserved.
+#
+# Copyright (c) 2004-2006 The Trustees of Princeton University
+# All rights reserved.
+
+
+import os
+
+from Exceptions import *
+import BootAPI
+
+
+def Run( vars, log ):
+    """
+        Stop the RunlevelAgent.py script.  Should precede
+        kexec, so that run_level is reset to 'boot' before the node chain-boots.
+    """
+
+    log.write( "\n\nStep: Stopping RunlevelAgent.py\n" )
+
+    try:
+        cmd = "%s/RunlevelAgent.py" % vars['BM_SOURCE_DIR']
+        # raise error if script is not present.
+        os.stat(cmd)
+        os.system("/usr/bin/python %s stop" % cmd)
+    except KeyError, var:
+        raise BootManagerException, "Missing variable in vars: %s\n" % var
+    except ValueError, var:
+        raise BootManagerException, "Variable in vars, shouldn't be: %s\n" % var
+
+    try:
+        update_vals= {}
+        update_vals['run_level']='boot'
+        BootAPI.call_api_function( vars, "ReportRunlevel", (update_vals,) )
+    except BootManagerException, e:
+        log.write( "Unable to update boot state for this node at PLC: %s.\n" % e )
+
+    return 1
+    
+
diff --git a/source/steps/UpdateBootStateWithPLC.py b/source/steps/UpdateBootStateWithPLC.py
new file mode 100644 (file)
index 0000000..9271322
--- /dev/null
@@ -0,0 +1,64 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2003 Intel Corporation
+# All rights reserved.
+#
+# Copyright (c) 2004-2006 The Trustees of Princeton University
+# All rights reserved.
+
+from Exceptions import *
+import BootAPI
+import notify_messages
+
+
+def Run( vars, log ):
+    """
+    Change this node's boot state at PLC.
+
+    The only valid transition is from reinstall to boot.  All other changes to
+    the boot state of a node should be performed by the Admin, Tech or PI
+    through the API or Web interface.
+
+    The current value of the BOOT_STATE key in vars is used.
+    Optionally, notify the contacts of the boot state change.
+    If this is the case, the following keys/values
+    should be set in vars before calling this step:
+    STATE_CHANGE_NOTIFY= 1
+    STATE_CHANGE_NOTIFY_MESSAGE= "<notify message>"
+    The second value is a message from notify_messages.py to send to the users.
+
+    Return 1 if successful; raise a BootManagerException otherwise.
+    """
+
+    log.write( "\n\nStep: Updating node boot state at PLC.\n" )
+
+    update_vals= {}
+    update_vals['boot_state']= vars['BOOT_STATE']
+    try:
+        BootAPI.call_api_function( vars, "BootUpdateNode", (update_vals,) )
+        log.write( "Successfully updated boot state for this node at PLC\n" )
+    except BootManagerException, e:
+        log.write( "Unable to update boot state for this node at PLC: %s.\n" % e )
+
+    notify = vars.get("STATE_CHANGE_NOTIFY",0)
+
+    if notify:
+        message= vars['STATE_CHANGE_NOTIFY_MESSAGE']
+        include_pis= 0
+        include_techs= 1
+        include_support= 0
+
+        sent= 0
+        try:
+            sent= BootAPI.call_api_function( vars, "BootNotifyOwners",
+                                             (message,
+                                              include_pis,
+                                              include_techs,
+                                              include_support) )
+        except BootManagerException, e:
+            log.write( "Call to BootNotifyOwners failed: %s.\n" % e )
+
+        if sent == 0:
+            log.write( "Unable to notify site contacts of state change.\n" )
+
+    return 1
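
A minimal sketch of how a caller sets the optional notification keys before invoking this step, mirroring the pattern used in ReadNodeConfiguration above (the BOOT_STATE value shown is illustrative; vars and log are supplied by the BootManager driver):

import notify_messages
import UpdateBootStateWithPLC

vars['BOOT_STATE'] = 'boot'          # illustrative; normally set by earlier steps
vars['STATE_CHANGE_NOTIFY'] = 1
vars['STATE_CHANGE_NOTIFY_MESSAGE'] = notify_messages.MSG_HOSTNAME_NOT_RESOLVE
UpdateBootStateWithPLC.Run(vars, log)
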
diff --git a/source/steps/UpdateLastBootOnce.py b/source/steps/UpdateLastBootOnce.py
new file mode 100644 (file)
index 0000000..5d689ac
--- /dev/null
@@ -0,0 +1,33 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2003 Intel Corporation
+# All rights reserved.
+#
+# Copyright (c) 2004-2006 The Trustees of Princeton University
+# All rights reserved.
+
+from Exceptions import *
+import BootAPI
+import notify_messages
+import os.path
+
+
+def Run( vars, log ):
+    """
+        UpdateLastBootOnce will update the last_* values for the node only
+        once per boot.  This helps calculate last_time_spent_online and
+        last_time_spent_offline for collecting run-time metrics.
+    """
+
+    log.write( "\n\nStep: Updating node last boot times at PLC.\n" )
+
+    update_vals= {}
+    try:
+        if not os.path.isfile("/tmp/UPDATE_LAST_BOOT_ONCE"):
+            BootAPI.call_api_function( vars, "BootUpdateNode", (update_vals,) )
+            log.write( "Successfully updated boot state for this node at PLC\n" )
+            os.system("touch /tmp/UPDATE_LAST_BOOT_ONCE")
+    except BootManagerException, e:
+        log.write( "Unable to update last boot times for this node at PLC: %s.\n" % e )
+
+    return 1
diff --git a/source/steps/UpdateNodeConfiguration.py b/source/steps/UpdateNodeConfiguration.py
new file mode 100644 (file)
index 0000000..3008341
--- /dev/null
@@ -0,0 +1,215 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2003 Intel Corporation
+# All rights reserved.
+#
+# Copyright (c) 2004-2006 The Trustees of Princeton University
+# All rights reserved.
+
+
+import os
+
+from Exceptions import *
+import utils
+
+
+# if this file is present in the vservers /etc directory,
+# the resolv.conf and hosts files will automatically be updated
+# by the bootmanager
+UPDATE_FILE_FLAG= "AUTO_UPDATE_NET_FILES"
+
+
+def Run( vars, log ):
+    """
+    Reconfigure a node if necessary, including rewriting any network init
+    scripts based on what PLC has. Also, update any slivers on the machine
+    in case their network files are out of date (primarily /etc/hosts).
+
+    Also write out /etc/planetlab/session, a random string that gets
+    a new value at every request of BootGetNodeDetails (ie, every boot)
+
+    This step expects the root to be already mounted on SYSIMG_PATH.
+    
+    Expect the following keys to be set:
+    SYSIMG_PATH              the path where the system image will be mounted
+                             (always starts with TEMP_PATH)
+    ROOT_MOUNTED             the node root file system is mounted
+    INTERFACE_SETTINGS  A dictionary of the values from the network
+                                configuration file
+    """
+    
+    log.write( "\n\nStep: Updating node configuration.\n" )
+
+    # make sure we have the variables we need
+    try:
+        INTERFACE_SETTINGS= vars["INTERFACE_SETTINGS"]
+        if INTERFACE_SETTINGS == "":
+            raise ValueError, "INTERFACE_SETTINGS"
+
+        SYSIMG_PATH= vars["SYSIMG_PATH"]
+        if SYSIMG_PATH == "":
+            raise ValueError, "SYSIMG_PATH"
+
+        ROOT_MOUNTED= vars["ROOT_MOUNTED"]
+        if ROOT_MOUNTED == "":
+            raise ValueError, "ROOT_MOUNTED"
+
+    except KeyError, var:
+        raise BootManagerException, "Missing variable in vars: %s\n" % var
+    except ValueError, var:
+        raise BootManagerException, "Variable in vars, shouldn't be: %s\n" % var
+
+    try:
+        ip= INTERFACE_SETTINGS['ip']
+        method= INTERFACE_SETTINGS['method']
+        hostname= INTERFACE_SETTINGS['hostname']
+        domainname= INTERFACE_SETTINGS['domainname']
+    except KeyError, var:
+        raise BootManagerException, \
+              "Missing network value %s in var INTERFACE_SETTINGS\n" % var
+
+    
+    if not ROOT_MOUNTED:
+        raise BootManagerException, "Root isn't mounted on SYSIMG_PATH\n"
+
+    log.write( "Updating vserver's /etc/hosts and /etc/resolv.conf files\n" )
+
+    # create a list of the full directory paths of all the vserver images that
+    # need to be updated.
+    update_path_list= []
+
+    for base_dir in ('/vservers','/vservers/.vref','/vservers/.vcache'):
+        try:
+            full_dir_path= "%s/%s" % (SYSIMG_PATH,base_dir)
+            slices= os.listdir( full_dir_path )
+
+            try:
+                slices.remove("lost+found")
+            except ValueError, e:
+                pass
+            
+            update_path_list= update_path_list + map(lambda x: \
+                                                     full_dir_path+"/"+x,
+                                                     slices)
+        except OSError, e:
+            continue
+
+
+    log.write( "Updating network configuration in:\n" )
+    if len(update_path_list) == 0:
+        log.write( "No vserver images found to update.\n" )
+    else:
+        for base_dir in update_path_list:
+            log.write( "%s\n" % base_dir )
+
+
+    # now, update /etc/hosts and /etc/resolv.conf in each dir if
+    # the update flag is there
+    for base_dir in update_path_list:
+        update_vserver_network_files(base_dir,vars,log)
+    
+    return
+
+
+
+def update_vserver_network_files( vserver_dir, vars, log ):
+    """
+    Update the /etc/resolv.conf and /etc/hosts files in the specified
+    vserver directory. If the files do not exist, write them out. If they
+    do exist, rewrite them with new values if the file UPDATE_FILE_FLAG
+    exists in /etc. If this is called with the vserver-reference directory,
+    always update the network config files and create the UPDATE_FILE_FLAG.
+
+    This is currently called when setting up the initial vserver reference,
+    and later when nodes boot to update existing vserver images.
+
+    Expect the following variables from the store:
+    SYSIMG_PATH        the path where the system image will be mounted
+                       (always starts with TEMP_PATH)
+    INTERFACE_SETTINGS   A dictionary of the values from the network
+                       configuration file
+    """
+
+    try:
+        SYSIMG_PATH= vars["SYSIMG_PATH"]
+        if SYSIMG_PATH == "":
+            raise ValueError, "SYSIMG_PATH"
+
+        INTERFACE_SETTINGS= vars["INTERFACE_SETTINGS"]
+        if INTERFACE_SETTINGS == "":
+            raise ValueError, "INTERFACE_SETTINGS"
+
+    except KeyError, var:
+        raise BootManagerException, "Missing variable in vars: %s\n" % var
+    except ValueError, var:
+        raise BootManagerException, "Variable in vars, shouldn't be: %s\n" % var
+
+    try:
+        ip= INTERFACE_SETTINGS['ip']
+        method= INTERFACE_SETTINGS['method']
+        hostname= INTERFACE_SETTINGS['hostname']
+        domainname= INTERFACE_SETTINGS['domainname']
+    except KeyError, var:
+        raise BootManagerException, \
+              "Missing network value %s in var INTERFACE_SETTINGS\n" % var
+
+    try:
+        os.listdir(vserver_dir)
+    except OSError:
+        log.write( "Directory %s does not exist to write network conf in.\n" %
+                   vserver_dir )
+        return
+
+    file_path= "%s/etc/%s" % (vserver_dir,UPDATE_FILE_FLAG)
+    update_files= 0
+    if os.access(file_path,os.F_OK):
+        update_files= 1
+
+        
+    # Thierry - 2012/03 - I'm renaming vserver-reference into sliceimage
+    # however I can't quite grasp the reason for this test below, very likely 
+    # compatibility with very old node images or something
+    if '/.vref/' in vserver_dir or \
+       '/.vcache/' in vserver_dir or \
+       '/vserver-reference' in vserver_dir:
+        log.write( "Forcing update on vserver reference directory:\n%s\n" %
+                   vserver_dir )
+        utils.sysexec_noerr( "echo '%s' > %s/etc/%s" %
+                             (UPDATE_FILE_FLAG,vserver_dir,UPDATE_FILE_FLAG),
+                             log )
+        update_files= 1
+        
+
+    if update_files:
+        log.write( "Updating network files in %s.\n" % vserver_dir )
+        try:
+            # NOTE: this works around a recurring problem on public pl,
+            # suspected to be due to mismatch between 2.6.12 bootcd and
+            # 2.6.22/f8 root environment.  files randomly show up with the
+            # immutable attribute set.  this clears it before trying to write
+            # the files below.
+            utils.sysexec( "chattr -i %s/etc/hosts" % vserver_dir , log )
+            utils.sysexec( "chattr -i %s/etc/resolv.conf" % vserver_dir , log )
+        except:
+            pass
+
+        
+        file_path= "%s/etc/hosts" % vserver_dir
+        hosts_file= file(file_path, "w" )
+        hosts_file.write( "127.0.0.1       localhost\n" )
+        if method == "static":
+            hosts_file.write( "%s %s.%s\n" % (ip, hostname, domainname) )
+        hosts_file.close()
+        hosts_file= None
+
+        file_path= "%s/etc/resolv.conf" % vserver_dir
+        if method == "dhcp":
+            # copy the resolv.conf from the boot cd env.
+            utils.sysexec( "cp /etc/resolv.conf %s/etc" % vserver_dir, log )
+        else:
+            # copy the generated resolv.conf from the system image, since
+            # we generated it via static settings
+            utils.sysexec( "cp %s/etc/resolv.conf %s/etc" % \
+                           (SYSIMG_PATH,vserver_dir), log )
+            
+    return 
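
In short, a sliver image opts in to automatic /etc/hosts and /etc/resolv.conf updates by carrying the flag file; a small sketch of that gating test, with an illustrative helper name:

import os

def wants_network_file_updates(vserver_dir):
    # reference images (.vref / .vcache / vserver-reference) are always refreshed
    # and get the flag created; other images are only touched if the flag exists
    flag = os.path.join(vserver_dir, "etc", "AUTO_UPDATE_NET_FILES")
    return os.access(flag, os.F_OK)
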
diff --git a/source/steps/UpdateRunLevelWithPLC.py b/source/steps/UpdateRunLevelWithPLC.py
new file mode 100644 (file)
index 0000000..5d083d3
--- /dev/null
@@ -0,0 +1,68 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2003 Intel Corporation
+# All rights reserved.
+#
+# Copyright (c) 2004-2006 The Trustees of Princeton University
+# All rights reserved.
+
+from Exceptions import *
+import BootAPI
+import notify_messages
+
+
+def Run( vars, log ):
+    """
+    Change this node's run level at PLC.
+
+    Replaces the behavior of UpdateBootStateWithPLC: where previously the
+    boot_state of a node would be altered by the BM, now the run_level is
+    updated and the boot_state is preserved as a record of the user's
+    preference.
+
+    The current value of the RUN_LEVEL key in vars is used.
+    Optionally, notify the contacts of the run level change.
+    If this is the case, the following keys/values
+    should be set in vars before calling this step:
+    STATE_CHANGE_NOTIFY= 1
+    STATE_CHANGE_NOTIFY_MESSAGE= "<notify message>"
+    The second value is a message from notify_messages.py to send to the users.
+
+    Return 1 if successful; raise a BootManagerException otherwise.
+    """
+
+    log.write( "\n\nStep: Updating node run level at PLC.\n" )
+
+    update_vals= {}
+    # translate boot_state values to run_level value
+    if vars['RUN_LEVEL'] in ['diag', 'diagnose', 'disabled', 'disable']:
+        vars['RUN_LEVEL']='safeboot'
+    update_vals['run_level']=vars['RUN_LEVEL']
+    try:
+        BootAPI.call_api_function( vars, "ReportRunlevel", (update_vals,) )
+        log.write( "Successfully updated run level for this node at PLC\n" )
+    except BootManagerException, e:
+        log.write( "Unable to update run level for this node at PLC: %s.\n" % e )
+
+    notify = vars.get("STATE_CHANGE_NOTIFY",0)
+
+    if notify:
+        message= vars['STATE_CHANGE_NOTIFY_MESSAGE']
+        include_pis= 0
+        include_techs= 1
+        include_support= 0
+
+        sent= 0
+        try:
+            sent= BootAPI.call_api_function( vars, "BootNotifyOwners",
+                                             (message,
+                                              include_pis,
+                                              include_techs,
+                                              include_support) )
+        except BootManagerException, e:
+            log.write( "Call to BootNotifyOwners failed: %s.\n" % e )
+
+        if sent == 0:
+            log.write( "Unable to notify site contacts of state change.\n" )
+
+    return 1
diff --git a/source/steps/ValidateNodeInstall.py b/source/steps/ValidateNodeInstall.py
new file mode 100644 (file)
index 0000000..c987170
--- /dev/null
@@ -0,0 +1,172 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2003 Intel Corporation
+# All rights reserved.
+#
+# Copyright (c) 2004-2006 The Trustees of Princeton University
+# All rights reserved.
+
+import os
+
+from Exceptions import *
+import utils
+import systeminfo
+import ModelOptions
+
+
+def Run( vars, log ):
+    """
+    See if a node installation is valid. More checks should certainly be
+    done in the future, but for now, make sure that the kernel-boot symlink
+    exists in /boot
+    
+    Expect the following variables to be set:
+    SYSIMG_PATH              the path where the system image will be mounted
+                             (always starts with TEMP_PATH)
+    ROOT_MOUNTED             the node root file system is mounted
+    NODE_ID                  The db node_id for this machine
+    PLCONF_DIR               The directory to store the configuration file in
+    
+    Set the following variables upon successfully running:
+    ROOT_MOUNTED             the node root file system is mounted
+    """
+
+    log.write( "\n\nStep: Validating node installation.\n" )
+
+    # make sure we have the variables we need
+    try:
+        SYSIMG_PATH= vars["SYSIMG_PATH"]
+        if SYSIMG_PATH == "":
+            raise ValueError, "SYSIMG_PATH"
+
+        NODE_ID= vars["NODE_ID"]
+        if NODE_ID == "":
+            raise ValueError, "NODE_ID"
+
+        PLCONF_DIR= vars["PLCONF_DIR"]
+        if PLCONF_DIR == "":
+            raise ValueError, "PLCONF_DIR"
+        
+        NODE_MODEL_OPTIONS= vars["NODE_MODEL_OPTIONS"]
+
+        PARTITIONS= vars["PARTITIONS"]
+        if PARTITIONS == None:
+            raise ValueError, "PARTITIONS"
+
+    except KeyError, var:
+        raise BootManagerException, "Missing variable in vars: %s\n" % var
+    except ValueError, var:
+        raise BootManagerException, "Variable in vars, shouldn't be: %s\n" % var
+
+
+    ROOT_MOUNTED= 0
+    if vars.has_key('ROOT_MOUNTED'):
+        ROOT_MOUNTED= vars['ROOT_MOUNTED']
+
+    # mount the root system image if we haven't already.
+    # capture BootManagerExceptions during the vgscan/change and mount
+    # calls, so we can return 0 instead
+    if ROOT_MOUNTED == 0:
+            
+        # simply creating an instance of this class and listing the system
+        # block devices will make them show up so vgscan can find the planetlab
+        # volume group
+        systeminfo.get_block_device_list(vars, log)
+
+        try:
+            utils.sysexec( "vgscan", log )
+            utils.sysexec( "vgchange -ay planetlab", log )
+        except BootManagerException, e:
+            log.write( "BootManagerException during vgscan/vgchange: %s\n" %
+                       str(e) )
+            return 0
+            
+        utils.makedirs( SYSIMG_PATH )
+
+        for filesystem in ("root","vservers"):
+            try:
+                # first run fsck to prevent fs corruption from hanging mount...
+                log.write( "fsck %s file system\n" % filesystem )
+                utils.sysexec("e2fsck -v -p %s" % (PARTITIONS[filesystem]), log, fsck=True)
+            except BootManagerException, e:
+                log.write( "BootManagerException during fsck of %s (%s) filesystem : %s\n" %
+                           (filesystem, PARTITIONS[filesystem], str(e)) )
+                try:
+                    log.write( "Trying to recover filesystem errors on %s\n" % filesystem )
+                    utils.sysexec("e2fsck -v -y %s" % (PARTITIONS[filesystem]),log, fsck=True)
+                except BootManagerException, e:
+                    log.write( "BootManagerException during trying to recover filesystem errors on %s (%s) filesystem : %s\n" %
+                           (filesystem, PARTITIONS[filesystem], str(e)) )
+                    return -1
+            else:
+                # disable time/count based filesystems checks
+                utils.sysexec_noerr( "tune2fs -c -1 -i 0 %s" % PARTITIONS[filesystem], log)
+
+        try:
+            # then attempt to mount them
+            log.write( "mounting root file system\n" )
+            utils.sysexec("mount -t ext3 %s %s" % (PARTITIONS["root"],SYSIMG_PATH),log)
+        except BootManagerException, e:
+            log.write( "BootManagerException during mount of /root: %s\n" % str(e) )
+            return -2
+            
+        try:
+            PROC_PATH = "%s/proc" % SYSIMG_PATH
+            utils.makedirs(PROC_PATH)
+            log.write( "mounting /proc\n" )
+            utils.sysexec( "mount -t proc none %s" % PROC_PATH, log )
+        except BootManagerException, e:
+            log.write( "BootManagerException during mount of /proc: %s\n" % str(e) )
+            return -2
+
+        try:
+            VSERVERS_PATH = "%s/vservers" % SYSIMG_PATH
+            utils.makedirs(VSERVERS_PATH)
+            log.write( "mounting vserver partition in root file system\n" )
+            utils.sysexec("mount -t ext3 %s %s" % (PARTITIONS["vservers"], VSERVERS_PATH), log)
+        except BootManagerException, e:
+            log.write( "BootManagerException during mount of /vservers: %s\n" % str(e) )
+            return -2
+
+        ROOT_MOUNTED= 1
+        vars['ROOT_MOUNTED']= 1
+        
+    # check if the base kernel is installed 
+    # these 2 links are created by our kernel's post-install scriptlet
+    log.write("Checking for a custom kernel\n")
+    try:
+        os.stat("%s/boot/kernel-boot" % SYSIMG_PATH)
+    except OSError, e:            
+        log.write( "Couldn't locate base kernel (you might be using the stock kernel).\n")
+        return -3
+
+    # check if the model specified kernel is installed
+    option = ''
+    if NODE_MODEL_OPTIONS & ModelOptions.SMP:
+        option = 'smp'
+        try:
+            os.stat("%s/boot/kernel-boot%s" % (SYSIMG_PATH,option))
+        except OSError, e:
+            # smp kernel is not there; remove option from modeloptions
+            # such that the rest of the code base thinks we are just
+            # using the base kernel.
+            NODE_MODEL_OPTIONS = NODE_MODEL_OPTIONS & ~ModelOptions.SMP
+            vars["NODE_MODEL_OPTIONS"] = NODE_MODEL_OPTIONS
+            log.write( "WARNING: Couldn't locate smp kernel.\n")
+            
+    # write out the node id to /etc/planetlab/node_id. if this fails, return
+    # 0, indicating the node isn't a valid install.
+    try:
+        node_id_file_path= "%s/%s/node_id" % (SYSIMG_PATH,PLCONF_DIR)
+        node_id_file= file( node_id_file_path, "w" )
+        node_id_file.write( str(NODE_ID) )
+        node_id_file.close()
+        node_id_file= None
+        log.write( "Updated /etc/planetlab/node_id\n" )
+    except IOError, e:
+        log.write( "Unable to write out /etc/planetlab/node_id\n" )
+        return 0
+
+    log.write( "Node installation appears to be ok\n" )
+    
+    return 1
diff --git a/source/steps/WriteModprobeConfig.py b/source/steps/WriteModprobeConfig.py
new file mode 100644 (file)
index 0000000..378802d
--- /dev/null
@@ -0,0 +1,110 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2003 Intel Corporation
+# All rights reserved.
+#
+# Copyright (c) 2004-2006 The Trustees of Princeton University
+# All rights reserved.
+
+import os, string
+
+from Exceptions import *
+import utils
+import systeminfo
+import BootAPI
+import ModelOptions
+import notify_messages
+import modprobe
+
+def Run( vars, log, filename = "/etc/modprobe.conf"):
+    """
+    write out the system file /etc/modprobe.conf with the current
+    set of modules.
+
+    returns a tuple of the number of network driver lines and storage
+    driver lines written as (networkcount,storagecount)
+    """
+
+    # write out the modprobe.conf file for the system. make sure
+    # the ethernet devices are listed in the same order
+    # as the boot cd loaded the modules. this is found in /tmp/loadedmodules
+    # ultimately, the order will only match the boot cd order if
+    # the kernel modules have the same name - which should be true for the later
+    # version boot cds because they use the same kernel version.
+    # older boot cds use a 2.4.19 kernel, and it's possible some of the network
+    # module names have changed, in which case the system might not boot
+    # if the network modules are activated in a different order than on the
+    # boot cd.
+
+    # make sure we have this class loaded
+    
+    try:
+        SYSIMG_PATH= vars["SYSIMG_PATH"]
+        if SYSIMG_PATH == "":
+            raise ValueError, "SYSIMG_PATH"
+
+    except KeyError, var:
+        raise BootManagerException, "Missing variable in vars: %s\n" % var
+    except ValueError, var:
+        raise BootManagerException, "Variable in vars, shouldn't be: %s\n" % var
+
+    sysmods= systeminfo.get_system_modules(vars, log)
+    if sysmods is None:
+        raise BootManagerException, "Unable to get list of system modules."
+        
+    # parse the existing modprobe.conf file, if one exists
+    mfile = "%s/%s" % (SYSIMG_PATH,filename)
+    m = modprobe.Modprobe()
+    if os.path.exists(mfile):
+        m.input(mfile)
+
+    blacklist = modprobe.Modprobe()
+    blacklistfiles = os.listdir("/etc/modprobe.d")
+    for blf in blacklistfiles:
+        if os.path.exists("/etc/modprobe.d/%s"%blf):
+            blacklist.input("/etc/modprobe.d/%s"%blf)
+        
+    # storage devices
+    m.optionsset("ata_generic","all_generic_ide=1")
+    scsi_count= 0
+    for a_mod in sysmods[systeminfo.MODULE_CLASS_SCSI]:
+        if m.blacklistget(a_mod) <> None or \
+               blacklist.blacklistget(a_mod) <> None:
+            continue
+        m.aliasset("scsi_hostadapter%d"%scsi_count,a_mod)
+        scsi_count= scsi_count + 1
+
+    # network devices
+    eth_count= 0
+    for a_mod in sysmods[systeminfo.MODULE_CLASS_NETWORK]:
+        if m.blacklistget(a_mod) <> None or \
+               blacklist.blacklistget(a_mod) <> None:
+            continue
+        m.aliasset("eth%d"%eth_count,a_mod)
+        eth_count= eth_count + 1
+    m.output(mfile, "BootManager")
+    m.output("%s.bak"%mfile, "BootManager") # write a backup version of this file
+
+    # dump the modprobe.conf file to the log (not to screen)
+    log.write( "Contents of new modprobe.conf file:\n" )
+    modulesconf_file= file("%s/%s" % (SYSIMG_PATH,filename), "r" )
+    contents= modulesconf_file.read()
+    log.write( contents + "\n" )
+    modulesconf_file.close()
+    modulesconf_file= None
+    log.write( "End contents of new modprobe.conf file.\n" )
+
+    # before we do the real kexec, check to see if we had any
+    # network drivers written to modprobe.conf. if not, raise an exception,
+    # which will cause this node to be switched to a debug state.
+    if eth_count == 0:
+        log.write( "\nIt appears we don't have any network drivers. Aborting.\n" )
+        
+        vars['RUN_LEVEL']= 'failboot'
+        vars['STATE_CHANGE_NOTIFY']= 1
+        vars['STATE_CHANGE_NOTIFY_MESSAGE']= \
+             notify_messages.MSG_NO_DETECTED_NETWORK
+        raise BootManagerException, \
+              notify_messages.MSG_NO_DETECTED_NETWORK
+
+
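
For illustration, the generated modprobe.conf ends up holding one options line for ata_generic plus alias lines for each detected driver, along these lines (the module names are examples, not output from a real node):

options ata_generic all_generic_ide=1
alias scsi_hostadapter0 ahci
alias eth0 e1000
alias eth1 tg3
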
diff --git a/source/steps/WriteNetworkConfig.py b/source/steps/WriteNetworkConfig.py
new file mode 100644 (file)
index 0000000..bfc29b4
--- /dev/null
@@ -0,0 +1,156 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2003 Intel Corporation
+# All rights reserved.
+#
+# Copyright (c) 2004-2006 The Trustees of Princeton University
+# All rights reserved.
+# expected /proc/partitions format
+
+import os, string
+import traceback
+
+import utils
+import urlparse
+import httplib
+
+from Exceptions import *
+import BootServerRequest
+import ModelOptions
+import BootAPI
+import plnet
+
+class BootAPIWrap:
+    def __init__(self, vars):
+        self.vars = vars
+    def call(self, func, *args):
+        return BootAPI.call_api_function(self.vars, func, args)
+    def __getattr__(self, func):
+        return lambda *args: self.call(func, *args)
+
+class logger:
+    def __init__(self, log):
+        self._log = log
+    def log(self, msg, level=3):
+        self._log.write(msg + "\n")
+    def verbose(self, msg):
+        self.log(msg, 0)
+
+def Run( vars, log ):
+    """
+    Write out the network configuration for this machine:
+    /etc/hosts
+    /etc/sysconfig/network-scripts/ifcfg-<ifname>
+    /etc/resolv.conf (if applicable)
+    /etc/sysconfig/network
+
+    The values to be used for the network settings are to be set in vars
+    in the variable 'INTERFACE_SETTINGS', which is a dictionary
+    with keys:
+
+     Key               Used by this function
+     -----------------------------------------------
+     node_id
+     node_key
+     method            x
+     ip                x
+     mac               x (optional)
+     gateway           x
+     network           x
+     broadcast         x
+     netmask           x
+     dns1              x
+     dns2              x (optional)
+     hostname          x
+     domainname        x
+
+    Expect the following variables from the store:
+    SYSIMG_PATH             the path where the system image will be mounted
+                                (always starts with TEMP_PATH)
+    INTERFACES              All the interfaces associated with this node
+    INTERFACE_SETTINGS      dictionary 
+    Sets the following variables:
+    None
+    """
+
+    log.write( "\n\nStep: Install: Writing Network Configuration files.\n" )
+
+    try:
+        SYSIMG_PATH= vars["SYSIMG_PATH"]
+        if SYSIMG_PATH == "":
+            raise ValueError, "SYSIMG_PATH"
+
+    except KeyError, var:
+        raise BootManagerException, "Missing variable in vars: %s\n" % var
+    except ValueError, var:
+        raise BootManagerException, "Variable in vars, shouldn't be: %s\n" % var
+
+
+    try:
+        INTERFACE_SETTINGS= vars['INTERFACE_SETTINGS']
+    except KeyError, e:
+        raise BootManagerException, "No interface settings found in vars."
+
+    try:
+        hostname= INTERFACE_SETTINGS['hostname']
+        domainname= INTERFACE_SETTINGS['domainname']
+        method= INTERFACE_SETTINGS['method']
+        ip= INTERFACE_SETTINGS['ip']
+        gateway= INTERFACE_SETTINGS['gateway']
+        network= INTERFACE_SETTINGS['network']
+        netmask= INTERFACE_SETTINGS['netmask']
+        dns1= INTERFACE_SETTINGS['dns1']
+        mac= INTERFACE_SETTINGS['mac']
+    except KeyError, e:
+        raise BootManagerException, "Missing value %s in interface settings." % str(e)
+
+    # dns2 is not required to be set
+    dns2 = INTERFACE_SETTINGS.get('dns2','')
+
+    # Node Manager needs at least PLC_API_HOST and PLC_BOOT_HOST
+    log.write("Writing /etc/planetlab/plc_config\n")
+    utils.makedirs("%s/etc/planetlab" % SYSIMG_PATH)
+    plc_config = file("%s/etc/planetlab/plc_config" % SYSIMG_PATH, "w")
+
+    api_url = vars['BOOT_API_SERVER']
+    (scheme, netloc, path, params, query, fragment) = urlparse.urlparse(api_url)
+    parts = netloc.split(':')
+    host = parts[0]
+    if len(parts) > 1:
+        port = parts[1]
+    else:
+        port = '80'
+    try:
+        log.write("getting via https://%s/PlanetLabConf/get_plc_config.php " % host)
+        bootserver = httplib.HTTPSConnection(host, int(port))
+        bootserver.connect()
+        bootserver.request("GET","https://%s/PlanetLabConf/get_plc_config.php" % host)
+        plc_config.write("%s" % bootserver.getresponse().read())
+        bootserver.close()
+        log.write("Done\n")
+    except :
+        log.write(" .. Failed.  Using old method. -- stack trace follows\n")
+        traceback.print_exc(file=log.OutputFile)
+        bs= BootServerRequest.BootServerRequest(vars)
+        if bs.BOOTSERVER_CERTS:
+            print >> plc_config, "PLC_BOOT_HOST='%s'" % bs.BOOTSERVER_CERTS.keys()[0]
+        print >> plc_config, "PLC_API_HOST='%s'" % host
+        print >> plc_config, "PLC_API_PORT='%s'" % port
+        print >> plc_config, "PLC_API_PATH='%s'" % path
+
+    plc_config.close()
+
+
+    log.write( "Writing /etc/hosts\n" )
+    hosts_file= file("%s/etc/hosts" % SYSIMG_PATH, "w" )    
+    hosts_file.write( "127.0.0.1       localhost\n" )
+    if method == "static":
+        hosts_file.write( "%s %s.%s\n" % (ip, hostname, domainname) )
+    hosts_file.close()
+    hosts_file= None
+    
+    data =  {'hostname': '%s.%s' % (hostname, domainname),
+             'networks': vars['INTERFACES']}
+    plnet.InitInterfaces(logger(log), BootAPIWrap(vars), data, SYSIMG_PATH,
+                         True, "BootManager")
+
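
A quick sketch of the host/port extraction performed on BOOT_API_SERVER above, using an illustrative URL rather than a real deployment value:

import urlparse

api_url = "https://plc.example.org:443/PLCAPI/"      # illustrative
scheme, netloc, path, params, query, fragment = urlparse.urlparse(api_url)
host, _, port = netloc.partition(':')
port = port or '80'                                  # same default as the step uses
# host == 'plc.example.org', port == '443', path == '/PLCAPI/'
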
diff --git a/source/steps/__init__.py b/source/steps/__init__.py
new file mode 100644 (file)
index 0000000..bfef9bc
--- /dev/null
@@ -0,0 +1,38 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2003 Intel Corporation
+# All rights reserved.
+#
+# Copyright (c) 2004-2006 The Trustees of Princeton University
+# All rights reserved.
+
+
+"""
+This directory contains individual step classes
+"""
+
+__all__ = ["ReadNodeConfiguration",
+           "AuthenticateWithPLC",
+           "GetAndUpdateNodeDetails",
+           "ConfirmInstallWithUser",
+           "UpdateBootStateWithPLC",
+           "UpdateLastBootOnce",
+           "UpdateRunLevelWithPLC",
+           "CheckHardwareRequirements",
+           "SendHardwareConfigToPLC",
+           "InitializeBootManager",
+           "UpdateNodeConfiguration",
+           "CheckForNewDisks",
+           "ChainBootNode",
+           "ValidateNodeInstall",
+           "StartDebug",
+           "StartRunlevelAgent",
+           "StopRunlevelAgent",
+           "InstallBootstrapFS",
+           "InstallInit",
+           "InstallPartitionDisks",
+           "InstallUninitHardware",
+           "InstallWriteConfig",
+           "MakeInitrd",
+           "WriteNetworkConfig",
+           "WriteModprobeConfig"]
diff --git a/source/systeminfo.py b/source/systeminfo.py
new file mode 100755 (executable)
index 0000000..921ede6
--- /dev/null
@@ -0,0 +1,384 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2003 Intel Corporation
+# All rights reserved.
+#
+# Copyright (c) 2004-2006 The Trustees of Princeton University
+# All rights reserved.
+# expected /proc/partitions format
+
+
+#----------------------------------------------------
+#major minor  #blocks  name
+#
+#3     0   40017915 hda
+#3     1     208813 hda1
+#3     2   20482875 hda2
+#3     3     522112 hda3
+#3     4   18804082 hda4
+#----------------------------------------------------
+
+
+import string
+import sys
+import os
+import popen2
+import re
+import errno
+import ModelOptions
+from pypci import *
+from Exceptions import *
+
+"""
+a utility class for finding and returning information about
+block devices, memory, and other hardware on the system
+"""
+
+PROC_MEMINFO_PATH= "/proc/meminfo"
+PROC_PARTITIONS_PATH= "/proc/partitions"
+
+# set when the sfdisk -l <dev> trick has been done to make
+# all devices show up
+DEVICES_SCANNED_FLAG= "/tmp/devices_scanned"
+
+# a /proc/partitions block is 1024 bytes
+# a GB to a HDD manufacturer is 10^9 bytes
+BLOCKS_PER_GB = pow(10, 9) / 1024.0;
+
+
+MODULE_CLASS_NETWORK= "network"
+MODULE_CLASS_SCSI= "scsi"
+
+#PCI_* is now defined in the pypci modules
+#PCI_BASE_CLASS_NETWORK=0x02L
+#PCI_BASE_CLASS_STORAGE=0x01L
+
+def get_total_phsyical_mem(vars = {}, log = sys.stderr):
+    """
+    return the total physical memory of the machine, in kilobytes.
+
+    Return None if /proc/meminfo not readable.
+    """
+
+    try:
+        meminfo_file= file(PROC_MEMINFO_PATH,"r")
+    except IOError, e:
+        return
+
+    total_memory= None
+
+    for line in meminfo_file:
+
+        try:
+            (fieldname,value)= string.split(line,":")
+        except ValueError, e:
+            # this will happen for lines that don't have two values
+            # (like the first line on 2.4 kernels)
+            continue
+
+        fieldname= string.strip(fieldname)
+        value= string.strip(value)
+        
+        if fieldname == "MemTotal":
+            try:
+                (total_memory,units)= string.split(value)
+            except ValueError, e:
+                return
+
+            if total_memory == "" or total_memory == None or \
+                   units == "" or units == None:
+                return
+
+            if string.lower(units) != "kb":
+                return
+
+            try:
+                total_memory= int(total_memory)
+            except ValueError, e:
+                return
+
+            break
+
+    meminfo_file.close()
+    return total_memory
+
+def get_block_device_list(vars = {}, log = sys.stderr):
+    """
+    get a list of block devices from this system.
+    return an associative array, where the device name
+    (full /dev/device path) is the key, and the value
+    is a tuple of (major,minor,numblocks,gb_size,readonly)
+    """
+
+    # make sure we can access the files/directories in /proc
+    if not os.access(PROC_PARTITIONS_PATH, os.F_OK):
+        return None
+
+    # table with valid scsi/sata/ide/raid block device names
+    valid_blk_names = {}
+    # add in valid sd and hd block device names
+    # also check for vd (virtio devices used with kvm)
+    for blk_prefix in ('sd','hd','vd'):
+        for blk_num in map ( \
+            lambda x: chr(x), range(ord('a'),ord('z')+1)):
+            devicename="%s%c" % (blk_prefix, blk_num)
+            valid_blk_names[devicename]=None
+
+    # add in valid scsi raid block device names
+    for M in range(0,1+1):
+        for N in range(0,7+1):
+            devicename = "cciss/c%dd%d" % (M,N)
+            valid_blk_names[devicename]=None
+
+    for devicename in valid_blk_names.keys():
+        # devfs under 2.4 (old boot cds) used to list partitions
+        # in a format such as scsi/host0/bus0/target0/lun0/disc
+        # and /dev/sda, etc. were just symlinks
+        try:
+            devfsname= os.readlink( "/dev/%s" % devicename )
+            valid_blk_names[devfsname]=None
+        except OSError:
+            pass
+
+    # only do this once every system boot
+    if not os.access(DEVICES_SCANNED_FLAG, os.R_OK):
+
+        # this is ugly. under devfs, device
+        # entries in /dev/scsi/.. and /dev/ide/...
+        # don't show up until you attempt to read
+        # from the associated device at /dev (/dev/sda).
+        # so, lets run sfdisk -l (list partitions) against
+        # most possible block devices, that way they show
+        # up when it comes time to do the install.
+
+        # 27.6.2012 - Using parted instead of sfdisk, assuming
+        # that doing so respects the behavior mentioned above.
+
+        devicenames = valid_blk_names.keys()
+        devicenames.sort()
+        for devicename in devicenames:
+            os.system( "parted --script --list /dev/%s > /dev/null 2>&1" % devicename )
+
+        # touch file
+        fb = open(DEVICES_SCANNED_FLAG,"w")
+        fb.close()
+
+    devicelist= {}
+
+    partitions_file= file(PROC_PARTITIONS_PATH,"r")
+    line_count= 0
+    for line in partitions_file:
+        line_count= line_count + 1
+
+        # skip the first two lines always
+        if line_count < 2:
+            continue
+
+        parts= string.split(line)
+
+        if len(parts) < 4:
+            continue
+
+        device= parts[3]
+
+        # skip and ignore any partitions
+        if not valid_blk_names.has_key(device):
+            continue
+
+        try:
+            major= int(parts[0])
+            minor= int(parts[1])
+            blocks= int(parts[2])
+        except ValueError, err:
+            continue
+
+        gb_size= blocks/BLOCKS_PER_GB
+
+        # check to see if the blk device is readonly
+        try:
+            # can we write to it?
+            dev_name= "/dev/%s" % device
+            fb = open(dev_name,"w")
+            fb.close()
+            readonly=False
+        except IOError, e:
+            # check if EROFS errno
+            if errno.errorcode.get(e.errno,None) == 'EROFS':
+                readonly=True
+            else:
+                # got some other errno, pretend device is readonly
+                readonly=True
+
+        devicelist[dev_name]= (major,minor,blocks,gb_size,readonly)
+
+    return devicelist
+
+
+def get_system_modules( vars = {}, log = sys.stderr):
+    """
+    Return a list of kernel modules that this system requires.
+    This requires access to the installed system's root
+    directory, as the following file must exist and is used:
+    <install_root>/lib/modules/(first entry if kernel_version unspecified)/modules.pcimap
+
+    If more than one kernel is installed, and the kernel
+    version is not specified, then only the first one in
+    /lib/modules is used.
+
+    Returns a dictionary, keys being the type of module:
+        - scsi       MODULE_CLASS_SCSI
+        - network    MODULE_CLASS_NETWORK
+    The value being the kernel module name to load.
+
+    Some sata devices show up under an IDE device class,
+    hence the reason for checking for ide devices as well.
+    If there actually is a match in the pci -> module lookup
+    table, and it's an ide device, it's most likely sata,
+    as ide modules are built into the kernel.
+    """
+
+    if not vars.has_key("SYSIMG_PATH"):
+        vars["SYSIMG_PATH"]="/"
+    SYSIMG_PATH=vars["SYSIMG_PATH"]
+
+    if not vars.has_key("NODE_MODEL_OPTIONS"):
+        vars["NODE_MODEL_OPTIONS"] = 0;
+
+    initrd, kernel_version = getKernelVersion(vars, log)
+
+    # get the kernel version we are assuming
+    if kernel_version is None:
+        try:
+            kernel_version= os.listdir( "%s/lib/modules/" % SYSIMG_PATH )
+        except OSError, e:
+            return
+
+        if len(kernel_version) == 0:
+            return
+
+        if len(kernel_version) > 1:
+            print( "WARNING: We may be returning modules for the wrong kernel." )
+
+        kernel_version= kernel_version[0]
+
+    print( "Using kernel version %s" % kernel_version )
+
+    # test to make sure the file we need is present
+    modules_pcimap_path = "%s/lib/modules/%s/modules.pcimap" % \
+                          (SYSIMG_PATH,kernel_version)
+    if not os.access(modules_pcimap_path,os.R_OK):
+        print( "WARNING: Unable to read %s" % modules_pcimap_path )
+        return
+
+    pcimap = pypcimap.PCIMap(modules_pcimap_path)
+
+    # this is the actual data structure we return
+    system_mods= {}
+
+    # these are the lists that will be in system_mods
+    network_mods= []
+    scsi_mods= []
+
+    # XXX: this is really similar to what BootCD/conf_files/pl_hwinit does. merge?
+    pcidevs = get_devices()
+
+    devlist=pcidevs.keys()
+    devlist.sort()
+    for slot in devlist:
+        dev = pcidevs[slot]
+        base = (dev[4] & 0xff0000) >> 16
+        modules = pcimap.get(dev)
+        if base not in (PCI_BASE_CLASS_STORAGE,
+                        PCI_BASE_CLASS_NETWORK):
+            # special exception for forcedeth NICs whose base id
+            # claims to be a Bridge, even though it is clearly a
+            # network device
+            if "forcedeth" in modules: 
+                base=PCI_BASE_CLASS_NETWORK
+            else:
+                continue
+
+        if len(modules) > 0:
+            if base == PCI_BASE_CLASS_NETWORK:
+                network_mods += modules
+            elif base == PCI_BASE_CLASS_STORAGE:
+                scsi_mods += modules
+
+    system_mods[MODULE_CLASS_SCSI]= scsi_mods
+    system_mods[MODULE_CLASS_NETWORK]= network_mods
+
+    return system_mods
+
+
+def getKernelVersion( vars = {} , log = sys.stderr):
+    # make sure we have the variables we need
+    try:
+        SYSIMG_PATH= vars["SYSIMG_PATH"]
+        if SYSIMG_PATH == "":
+            raise ValueError, "SYSIMG_PATH"
+
+        NODE_MODEL_OPTIONS=vars["NODE_MODEL_OPTIONS"]
+    except KeyError, var:
+        raise BootManagerException, "Missing variable in vars: %s\n" % var
+    except ValueError, var:
+        raise BootManagerException, "Variable in vars, shouldn't be: %s\n" % var
+
+    option = ''
+    if NODE_MODEL_OPTIONS & ModelOptions.SMP:
+        option = 'smp'
+        try:
+            os.stat("%s/boot/kernel-boot%s" % (SYSIMG_PATH,option))
+            os.stat("%s/boot/initrd-boot%s" % (SYSIMG_PATH,option))
+        except OSError, e:
+            # smp kernel is not there; remove option from modeloptions
+            # such that the rest of the code base thinks we are just
+            # using the base kernel.
+            NODE_MODEL_OPTIONS = NODE_MODEL_OPTIONS & ~ModelOptions.SMP
+            vars["NODE_MODEL_OPTIONS"] = NODE_MODEL_OPTIONS
+            log.write( "WARNING: Couldn't locate smp kernel.\n")
+            option = ''
+    try:
+        initrd= os.readlink( "%s/boot/initrd-boot%s" % (SYSIMG_PATH,option) )
+        kernel_version= initrd.replace("initrd-", "").replace(".img", "")    
+    except OSError, e:
+        initrd = None
+        kernel_version = None
+        
+    return (initrd, kernel_version)
+
+
+if __name__ == "__main__":
+    devices= get_block_device_list()
+    print "block devices detected:"
+    if not devices:
+        print "no devices found!"
+    else:
+        for dev in devices.keys():
+            print "%s %s" % (dev, repr(devices[dev]))
+            
+
+    print ""
+    memory= get_total_phsyical_mem()
+    if not memory:
+        print "unable to read /proc/meminfo for memory"
+    else:
+        print "total physical memory: %d kb" % memory
+        
+
+    print ""
+
+    kernel_version = None
+    if len(sys.argv) > 1:
+        kernel_version = sys.argv[1]
+        
+    modules= get_system_modules()
+    if not modules:
+        print "unable to list system modules"
+    else:
+        for module_class in (MODULE_CLASS_SCSI,MODULE_CLASS_NETWORK):
+            if len(modules[module_class]) > 0:
+                module_list = ""
+                for a_mod in modules[module_class]:
+                    module_list = module_list + "%s " % a_mod
+                print "all %s modules: %s" % (module_class, module_list)
+                
diff --git a/source/utils.py b/source/utils.py
new file mode 100644 (file)
index 0000000..3c6c225
--- /dev/null
@@ -0,0 +1,314 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2003 Intel Corporation
+# All rights reserved.
+#
+# Copyright (c) 2004-2006 The Trustees of Princeton University
+# All rights reserved.
+# expected /proc/partitions format
+
+import os, sys, shutil
+import subprocess
+import shlex
+import socket
+import fcntl
+import string
+import exceptions
+
+from Exceptions import *
+
+####################
+# the simplest way to debug is to let the node take off, 
+# ssh into it as root using the debug ssh key in /etc/planetlab
+# then go to /tmp/source 
+# edit this file locally to turn on breakpoints if needed, then run
+# ./BootManager.py
+####################
+
+### handling breakpoints in the startup process
+import select, sys, string
+
+### global debugging settings
+
+# enabling this will cause the node to ask for breakpoint-mode at startup
+# production code should read False/False
+PROMPT_MODE=False
+# default for when prompt is turned off, or it's on but the timeout triggers
+BREAKPOINT_MODE=False
+
+# verbose mode is just fine
+VERBOSE_MODE=True
+# in seconds : if no input, proceed
+PROMPT_TIMEOUT=5
+
+def prompt_for_breakpoint_mode ():
+
+    global BREAKPOINT_MODE
+    if PROMPT_MODE:
+        default_answer=BREAKPOINT_MODE
+        answer=''
+        if BREAKPOINT_MODE:
+            display="[y]/n"
+        else:
+            display="y/[n]"
+        sys.stdout.write ("Want to run in breakpoint mode ? %s "%display)
+        sys.stdout.flush()
+        r,w,e = select.select ([sys.stdin],[],[],PROMPT_TIMEOUT)
+        if r:
+            answer = string.strip(sys.stdin.readline())
+        else:
+            sys.stdout.write("\nTimed-out (%d s)"%PROMPT_TIMEOUT)
+        if answer:
+            BREAKPOINT_MODE = ( answer == "y" or answer == "Y")
+        else:
+            BREAKPOINT_MODE = default_answer
+    label="Off"
+    if BREAKPOINT_MODE:
+        label="On"
+    sys.stdout.write("\nCurrent BREAKPOINT_MODE is %s\n"%label)
+
+def breakpoint (message, cmd = None):
+
+    if BREAKPOINT_MODE:
+
+        if cmd is None:
+            cmd="/bin/sh"
+            message=message+" -- Entering bash - type ^D to proceed"
+
+        print message
+        os.system(cmd)
+
+
+########################################
+def makedirs( path ):
+    """
+    from python docs for os.makedirs:
+    Throws an error exception if the leaf directory
+    already exists or cannot be created.
+
+    That is real useful. Instead, we'll create the directory, then use a
+    separate function to test for its existence.
+
+    Return 1 if the directory exists and/or has been created; raise a BootManagerException
+    otherwise. Does not test the writability of said directory.
+    """
+    try:
+        os.makedirs( path )
+    except OSError:
+        pass
+    try:
+        os.listdir( path )
+    except OSError:
+        raise BootManagerException, "Unable to create directory tree: %s" % path
+    
+    return 1
+
+
+
+def removedir( path ):
+    """
+    remove a directory tree; return 1 if successful, raise a BootManagerException
+    on failure.
+    """
+    try:
+        os.listdir( path )
+    except OSError:
+        return 1
+
+    try:
+        shutil.rmtree( path )
+    except OSError, desc:
+        raise BootManagerException, "Unable to remove directory tree: %s" % path
+    
+    return 1
+
+
+def sysexec( cmd, log=None, fsck=False, shell=False ):
+    """
+    execute a system command, output the results to the logger
+    if log is not None
+
+    return 1 if the command completed with a zero return code (or zero/one
+    when fsck=True). A BootManagerException is raised if the command
+    fails, could not be executed, or was interrupted by the user with Ctrl+C
+    """
+    try:
+        # Thierry - Jan. 6 2011
+        # would probably make sense to look for | here as well
+        # however this is fragile and hard to test thoroughly
+        # let the caller set 'shell' when that is desirable
+        if shell or cmd.__contains__(">"):
+            prog = subprocess.Popen(cmd, shell=True)
+            if log is not None:
+                log.write("sysexec (shell mode) >>> %s" % cmd)
+            if VERBOSE_MODE:
+                print "sysexec (shell mode) >>> %s" % cmd
+        else:
+            prog = subprocess.Popen(shlex.split(cmd), stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+            if log is not None:
+                log.write("sysexec >>> %s\n" % cmd)
+            if VERBOSE_MODE:
+                print "sysexec >>> %s" % cmd
+    except OSError:
+        raise BootManagerException, \
+              "Unable to create instance of subprocess.Popen " \
+              "for command: %s" % cmd
+    try:
+        (stdoutdata, stderrdata) = prog.communicate()
+    except KeyboardInterrupt:
+        raise BootManagerException, "Interrupted by user"
+
+    # log stdout & stderr
+    if log is not None:
+        if stdoutdata:
+            log.write("==========stdout\n"+stdoutdata)
+        if stderrdata:
+            log.write("==========stderr\n"+stderrdata)
+
+    returncode = prog.wait()
+
+    if fsck:
+       # The exit code returned by fsck is the sum of the following conditions:
+       #      0    - No errors
+       #      1    - File system errors corrected
+       #      2    - System should be rebooted
+       #      4    - File system errors left uncorrected
+       #      8    - Operational error
+       #      16   - Usage or syntax error
+       #      32   - Fsck canceled by user request
+       #      128  - Shared library error
+       if returncode != 0 and returncode != 1:
+            raise BootManagerException, "Running %s failed (rc=%d)" % (cmd,returncode)
+    else:
+        if returncode != 0:
+            raise BootManagerException, "Running %s failed (rc=%d)" % (cmd,returncode)
+
+    prog = None
+    return 1
+
+
+def sysexec_chroot( path, cmd, log=None, shell=False):
+    """
+    same as sysexec, but inside a chroot
+    """
+    preload = ""
+    release = os.uname()[2]
+    # 2.6.12 kernels need this
+    if release[:5] == "2.6.1":
+        library = "%s/lib/libc-opendir-hack.so" % path
+        if not os.path.exists(library):
+            shutil.copy("./libc-opendir-hack.so", library)
+        preload = "/bin/env LD_PRELOAD=/lib/libc-opendir-hack.so"
+    sysexec("chroot %s %s %s" % (path, preload, cmd), log, shell=shell)
+
+
+def sysexec_chroot_noerr( path, cmd, log=None, shell=False ):
+    """
+    same as sysexec_chroot, but capture boot manager exceptions
+    """
+    try:
+        rc= 0
+        rc= sysexec_chroot( path, cmd, log, shell=shell )
+    except BootManagerException, e:
+        pass
+
+    return rc
+
+
+def sysexec_noerr( cmd, log=None, shell=False ):
+    """
+    same as sysexec, but capture boot manager exceptions
+    """
+    try:
+        rc= 0
+        rc= sysexec( cmd, log, shell=shell )
+    except BootManagerException, e:
+        pass
+
+    return rc
+
+
+
+def chdir( dir ):
+    """
+    change to a directory; return 1 if successful, raise a BootManagerException on failure
+    """
+    try:
+        os.chdir( dir )
+    except OSError:
+        raise BootManagerException, "Unable to change to directory: %s" % dir
+
+    return 1
+
+
+
+def removefile( filepath ):
+    """
+    removes a file; return 1 if successful, raise a BootManagerException on failure
+    """
+    try:
+        os.remove( filepath )
+    except OSError:
+        raise BootManagerException, "Unable to remove file: %s" % filepath
+
+    return 1
+
+
+
+# from: http://forums.devshed.com/archive/t-51149/
+#              Ethernet-card-address-Through-Python-or-C
+
+def hexy(n):
+    return "%02x" % (ord(n))
+
+def get_mac_from_interface(ifname):
+    """
+    given a device name, like eth0, return its mac_address.
+    return None if the device doesn't exist.
+    """
+    
+    SIOCGIFHWADDR = 0x8927 # magic number
+
+    s=socket.socket(socket.AF_INET,socket.SOCK_STREAM)
+    ifname = string.strip(ifname)
+    ifr = ifname + '\0'*(32-len(ifname))
+
+    try:
+        r= fcntl.ioctl(s.fileno(),SIOCGIFHWADDR,ifr)
+        addr = map(hexy,r[18:24])
+        ret = (':'.join(map(str, addr)))
+    except IOError, e:
+        ret = None
+        
+    return ret
+
+def check_file_hash(filename, hash_filename):
+    """Check the file's integrity with a given hash."""
+    return sha1_file(filename) == open(hash_filename).read().split()[0].strip()
+
+def sha1_file(filename):
+    """Calculate sha1 hash of file."""
+    try:
+        try:
+            import hashlib
+            m = hashlib.sha1()
+        except:
+            import sha
+            m=sha.new()
+        f = file(filename, 'rb')
+        while True:
+            # 256 KB seems ideal for speed/memory tradeoff
+            # It wont get much faster with bigger blocks, but
+            # heap peak grows
+            block = f.read(256 * 1024)
+            if len(block) == 0:
+                # end of file
+                break
+            m.update(block)
+            # Simple trick to keep total heap even lower
+            # Delete the previous block, so while next one is read
+            # we wont have two allocated blocks with same size
+            del block
+        return m.hexdigest()
+    except IOError:
+        raise BootManagerException, "Cannot calculate SHA1 hash of %s" % filename