Merge commit 'local_master/bootmanager-vender' into myplc-0_4-branch
author root <root@blitz.inria.fr>
Fri, 16 Jul 2010 04:32:09 +0000 (06:32 +0200)
committer root <root@blitz.inria.fr>
Fri, 16 Jul 2010 04:32:09 +0000 (06:32 +0200)
51 files changed:
bootmanager.spec [new file with mode: 0644]
build.sh [new file with mode: 0755]
documentation/boot-manager-pdn.pdf [new file with mode: 0644]
documentation/boot-manager-pdn.xml [new file with mode: 0644]
documentation/pdn-pdf-style.xsl [new file with mode: 0644]
dummy_bootloader/Makefile [new file with mode: 0644]
dummy_bootloader/dummy_bootloader [new file with mode: 0644]
dummy_bootloader/dummy_bootloader.S [new file with mode: 0644]
source/BootAPI.py [new file with mode: 0644]
source/BootManager.py [new file with mode: 0755]
source/BootServerRequest.py [new file with mode: 0644]
source/COPYRIGHT [new file with mode: 0644]
source/Exceptions.py [new file with mode: 0644]
source/ModelOptions.py [new file with mode: 0644]
source/compatibility.py [new file with mode: 0644]
source/configuration [new file with mode: 0644]
source/debug_files/debug_root_ssh_key [new file with mode: 0644]
source/debug_files/sshd_config_v2 [new file with mode: 0644]
source/debug_files/sshd_config_v3 [new file with mode: 0644]
source/merge_hw_tables.py [new file with mode: 0755]
source/notify_messages.py [new file with mode: 0644]
source/steps/AuthenticateWithPLC.py [new file with mode: 0644]
source/steps/ChainBootNode.py [new file with mode: 0644]
source/steps/CheckForNewDisks.py [new file with mode: 0644]
source/steps/CheckHardwareRequirements.py [new file with mode: 0644]
source/steps/ConfirmInstallWithUser.py [new file with mode: 0644]
source/steps/GetAndUpdateNodeDetails.py [new file with mode: 0644]
source/steps/InitializeBootManager.py [new file with mode: 0644]
source/steps/InstallBootstrapRPM.py [new file with mode: 0644]
source/steps/InstallBuildVServer.py [new file with mode: 0644]
source/steps/InstallInit.py [new file with mode: 0644]
source/steps/InstallNodeInit.py [new file with mode: 0644]
source/steps/InstallPartitionDisks.py [new file with mode: 0644]
source/steps/InstallUninitHardware.py [new file with mode: 0644]
source/steps/InstallWriteConfig.py [new file with mode: 0644]
source/steps/MakeInitrd.py [new file with mode: 0644]
source/steps/ReadNodeConfiguration.py [new file with mode: 0644]
source/steps/SendHardwareConfigToPLC.py [new file with mode: 0644]
source/steps/StartDebug.py [new file with mode: 0644]
source/steps/UpdateBootStateWithPLC.py [new file with mode: 0644]
source/steps/UpdateNodeConfiguration.py [new file with mode: 0644]
source/steps/ValidateNodeInstall.py [new file with mode: 0644]
source/steps/WriteModprobeConfig.py [new file with mode: 0644]
source/steps/WriteNetworkConfig.py [new file with mode: 0644]
source/steps/__init__.py [new file with mode: 0644]
source/systeminfo.py [new file with mode: 0755]
source/utils.py [new file with mode: 0644]
support-files/Makefile [new file with mode: 0644]
support-files/buildnode.sh [new file with mode: 0755]
support-files/desc [new file with mode: 0644]
support-files/uudecode.gz [new file with mode: 0755]

diff --git a/bootmanager.spec b/bootmanager.spec
new file mode 100644 (file)
index 0000000..8794128
--- /dev/null
+++ b/bootmanager.spec
@@ -0,0 +1,84 @@
+%define name bootmanager
+%define version 3.1.15
+%define release 2%{?pldistro:.%{pldistro}}%{?date:.%{date}}
+
+Vendor: PlanetLab
+Packager: PlanetLab Central <support@planet-lab.org>
+Distribution: PlanetLab 3.0
+URL: http://cvs.planet-lab.org/cvs/bootmanager
+
+Summary: The PlanetLab Boot Manager
+Name: bootmanager
+Version: %{version}
+Release: %{release}
+License: BSD
+Group: System Environment/Base
+Source0: %{name}-%{version}.tar.gz
+BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-root
+
+Requires: tar, gnupg, sharutils, bzip2
+
+AutoReqProv: no
+%define debug_package %{nil}
+
+%description
+The PlanetLab Boot Manager securely authenticates and boots PlanetLab
+nodes.
+
+%prep
+%setup -q
+
+%build
+pushd bootmanager
+
+./build.sh
+make -C support-files
+
+popd
+
+%install
+rm -rf $RPM_BUILD_ROOT
+
+pushd bootmanager
+
+# Install source so that it can be rebuilt
+find build.sh source | cpio -p -d -u $RPM_BUILD_ROOT/%{_datadir}/%{name}/
+
+install -D -m 755 bootmanager.sh $RPM_BUILD_ROOT/var/www/html/boot/bootmanager.sh
+for file in \
+    uudecode.gz \
+    PlanetLab-Bootstrap.tar.bz2 ; do
+  install -D -m 644 support-files/$file $RPM_BUILD_ROOT/var/www/html/boot/$file
+done
+
+popd
+
+%clean
+rm -rf $RPM_BUILD_ROOT
+
+# If run under sudo
+if [ -n "$SUDO_USER" ] ; then
+    # Allow user to delete the build directory
+    chown -R $SUDO_USER .
+    # Some temporary cdroot files like /var/empty/sshd and
+    # /usr/bin/sudo get created with non-readable permissions.
+    find . -not -perm +0600 -exec chmod u+rw {} \;
+    # Allow user to delete the built RPM(s)
+    chown -R $SUDO_USER %{_rpmdir}/%{_arch}
+fi
+
+%post
+cat <<EOF
+Remember to GPG sign /var/www/html/boot/bootmanager.sh with the
+PlanetLab private key.
+EOF
+
+%files
+%defattr(-,root,root,-)
+%{_datadir}/%{name}
+/var/www/html/boot/*
+
+%changelog
+* Fri Sep  2 2005 Mark Huang <mlhuang@cotton.CS.Princeton.EDU> - 
+- Initial build.
+
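For reference, once the Source0 tarball named by the spec (bootmanager-3.1.15.tar.gz) has been staged in the rpmbuild SOURCES directory, the package can be built in the usual way. A minimal sketch, assuming a stock rpmbuild setup rather than the full PlanetLab build system:

    # Stage the source tarball where rpmbuild expects it, then build the binary RPM.
    cp bootmanager-3.1.15.tar.gz "$(rpm --eval %_sourcedir)/"
    rpmbuild -bb bootmanager.spec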
diff --git a/build.sh b/build.sh
new file mode 100755 (executable)
index 0000000..9fe81cd
--- /dev/null
+++ b/build.sh
@@ -0,0 +1,96 @@
+#!/bin/bash
+#
+# Builds bootmanager.sh[.sgn], the PlanetLab Boot Manager script, and
+# PlanetLab-Bootstrap.tar.bz2, the initial root filesystem of a new
+# PlanetLab node. For backward compatibility with old version 2.0 Boot
+# CDs, additional utilities are also built and packaged as
+# alpina-BootLVM.tar.gz and alpina-PartDisks.tar.gz.
+#
+# The bootmanager.sh script contains in it a uuencoded tarball of the
+# Boot Manager, customized for this PLC installation.
+#
+# Aaron Klingaman <alk@absarokasoft.com>
+# Mark Huang <mlhuang@cs.princeton.edu>
+# Copyright (C) 2004-2006 The Trustees of Princeton University
+#
+# $Id: build.sh,v 1.5 2006/04/03 19:40:55 mlhuang Exp $
+#
+
+# Source PLC configuration
+if [ -f /etc/planetlab/plc_config ] ; then
+    . /etc/planetlab/plc_config
+else
+    PLC_BOOT_HOST=boot.planet-lab.org
+    PLC_API_HOST=www.planet-lab.org
+    PLC_API_PATH=PLCAPI
+fi
+
+# Do not tolerate errors
+set -e
+
+# Determine our source directory
+srcdir=$(cd $(dirname $0) && pwd -P)
+
+# Source bootmanager configuration
+. $srcdir/source/configuration
+
+# Write boot script. plc_www/boot/index.php writes this script out
+# after a nonce check.
+
+DEST_SCRIPT=bootmanager.sh
+
+cat > $DEST_SCRIPT <<EOF
+#!/bin/bash
+#
+# PlanetLab Boot Manager $VERSION
+#
+# DO NOT EDIT. Generated by $USER@$HOSTNAME at
+# $(date) 
+#
+
+# Do not tolerate errors
+set -e
+
+UUDECODE=/usr/bin/uudecode
+
+# once we get the beta cds out of use, this can be removed
+if [ ! -x \$UUDECODE ]; then
+  UUDECODE=/tmp/uudecode
+  curl -s http://$PLC_BOOT_HOST/boot/uudecode.gz | gzip -d -c > \$UUDECODE
+  chmod +x \$UUDECODE
+fi
+
+EOF
+
+echo '($UUDECODE | /bin/tar -C /tmp -xj) << _EOF_' >> $DEST_SCRIPT
+
+# XXX Currently, the value of PLC_API_PORT is set to 80 by default, so
+# that the portions of the web site that still use oldapi can continue
+# to work. However, the Boot Manager supports HTTPS access, which we
+# want to remain the default, so hard code 443 here.
+sed -i -e "s@^BOOT_API_SERVER.*@BOOT_API_SERVER=https://$PLC_API_HOST:443/$PLC_API_PATH/@" \
+    $srcdir/source/configuration
+
+# Replace the default debug SSH key
+if [ -f "$PLC_DEBUG_SSH_KEY_PUB" ] ; then
+    install -D -m 644 "$PLC_DEBUG_SSH_KEY_PUB" $srcdir/source/debug_files/debug_root_ssh_key
+fi
+
+# Embed the uuencoded tarball in the script
+tar -cj -C $srcdir source/ | uuencode -m - >> $DEST_SCRIPT
+
+echo '_EOF_' >> $DEST_SCRIPT
+echo 'cd /tmp/source' >> $DEST_SCRIPT
+echo 'chmod +x BootManager.py && ./BootManager.py' >> $DEST_SCRIPT
+
+# Sign the whole script, if the keyring is on this machine.
+if [ -f "$PLC_ROOT_GPG_KEY" -a -f "$PLC_ROOT_GPG_KEY_PUB" ] ; then
+    gpg --homedir=/root \
+       --no-default-keyring \
+       --keyring "$PLC_ROOT_GPG_KEY_PUB" \
+       --secret-keyring "$PLC_ROOT_GPG_KEY" \
+       --yes --sign --output $DEST_SCRIPT.sgn \
+       $DEST_SCRIPT
+else
+    echo "Warning: Remember to sign $PWD/$DEST_SCRIPT!" >&2
+fi
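The signing step above wraps bootmanager.sh in a standard (non-detached) GPG signature, so a consumer holding only the PlanetLab public keyring can verify and unwrap it in one step. A minimal sketch of the node-side check, assuming the public keyring has been copied to /tmp/pl_pubring.gpg (the actual location on the Boot CD may differ):

    # Verify the signature and recover the plain script; gpg exits non-zero on a bad signature.
    gpg --no-default-keyring --keyring /tmp/pl_pubring.gpg \
        --output /tmp/bootmanager.sh --decrypt bootmanager.sh.sgn \
      && /bin/bash /tmp/bootmanager.sh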
diff --git a/documentation/boot-manager-pdn.pdf b/documentation/boot-manager-pdn.pdf
new file mode 100644 (file)
index 0000000..19999d8
Binary files /dev/null and b/documentation/boot-manager-pdn.pdf differ
(PDF rendering of documentation/boot-manager-pdn.xml, produced with FOP 0.20.5; binary contents not shown)
diff --git a/documentation/boot-manager-pdn.xml b/documentation/boot-manager-pdn.xml
new file mode 100644 (file)
index 0000000..2f28c94
--- /dev/null
+++ b/documentation/boot-manager-pdn.xml
@@ -0,0 +1,829 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE article PUBLIC "-//OASIS//DTD DocBook XML V4.3//EN"
+"http://www.oasis-open.org/docbook/xml/4.3/docbookx.dtd">
+<article>
+  <articleinfo>
+    <title>The PlanetLab Boot Manager</title>
+
+    <author>
+      <firstname>Aaron</firstname>
+
+      <surname>Klingaman</surname>
+
+      <email>alk@cs.princeton.edu</email>
+    </author>
+
+    <affiliation>
+      <orgname>Princeton University</orgname>
+    </affiliation>
+
+    <abstract>
+      <para>This document outlines the design and policy decisions of a new
+      PlanetLab component called the Boot Manager. The Boot Manager
+      encompasses several systems and all policy regarding how new nodes are
+      brought into the system, how they are authenticated with PlanetLab
+      Central (PLC), what authenticated operations they can perform, and what
+      constitutes a node's identity.</para>
+    </abstract>
+
+    <revhistory>
+      <revision>
+        <revnumber>1.0</revnumber>
+
+        <date>January 14, 2005</date>
+
+        <authorinitials>AK</authorinitials>
+
+        <revdescription>
+          <para>Initial draft.</para>
+        </revdescription>
+      </revision>
+    </revhistory>
+  </articleinfo>
+
+  <section>
+    <title>Overview</title>
+
+    <para>This document describes the history of several previously separate,
+    undocumented components and policy decisions of the PlanetLab
+    infrastructure, and groups them into one logical component, which will be
+    called the <firstterm>Boot Manager</firstterm>. In addition, specific
+    recommendations are made for changes and additions to these parts to
+    support the new features and improved security outlined in detail later.
+    These include:</para>
+
+    <orderedlist>
+      <listitem>
+        <para>How new nodes are added to the PlanetLab system, and the chain
+        of trust that accompanies that addition</para>
+      </listitem>
+
+      <listitem>
+        <para>How to prevent unauthorized nodes from becoming part of the
+        system, and the consequences of that happening</para>
+      </listitem>
+
+      <listitem>
+        <para>How any existing node authenticates itself with PlanetLab
+        Central (PLC), and what operations it can perform</para>
+      </listitem>
+
+      <listitem>
+        <para>What constitutes node identity, and when this identity should
+        and should not change</para>
+      </listitem>
+    </orderedlist>
+
+    <para>Not covered by this document are topics such as node-to-node
+    authentication, or any service or system running after a node is fully
+    booted and the Boot Manager is no longer applicable.</para>
+  </section>
+
+  <section>
+    <title>Terminology</title>
+
+    <para>Before continuing, the terms used throughout this document will be
+    defined, including what a site is, what nodes are, and what PlanetLab
+    consists of. The current organizational structure consists of groups of
+    <firstterm>sites</firstterm>, usually a geographical location
+    corresponding one to one with a company or university. These sites have
+    any number of <firstterm>users</firstterm> or
+    <firstterm>researchers</firstterm>, including a <firstterm>principal
+    investigator</firstterm>, or <firstterm>PI</firstterm>, responsible for
+    the users, and one or more <firstterm>technical contacts</firstterm>.
+    Sites are usually composed of at least two machines running the PlanetLab
+    software, usually referred to as <firstterm>nodes</firstterm>. All user
+    and node management operations are done through a set of servers located
+    in one physical location, known as <firstterm>PlanetLab
+    Central</firstterm>, or <firstterm>PLC</firstterm>. There is also a set of
+    PlanetLab <firstterm>administrators</firstterm>, not necessarily
+    affiliated with a particular site. <firstterm>PlanetLab</firstterm> then
+    collectively refers to all sites and their nodes and users, and PlanetLab
+    Central.</para>
+  </section>
+
+  <section>
+    <title>Background</title>
+
+    <section>
+      <title>How Sites Become Part of PlanetLab</title>
+
+      <para>A full discussion and evaluation of the process and security
+      implications of sites becoming part of PlanetLab is outside the scope of
+      this document. It will be assumed that the process is relatively secure,
+      and that user and PI accounts at that site are legitimate. However, it
+      is necessary to provide some basic information about the process.</para>
+
+      <para>What does it mean for a site to be part of PlanetLab?
+      Primarily:</para>
+
+      <orderedlist>
+        <listitem>
+          <para>The site's record (e.g. name, url, geographical location,
+          contact information) is in the PLC database</para>
+        </listitem>
+
+        <listitem>
+          <para>There is a set of users (their email addresses, passwords, and
+          personal information) associated with the site in the PLC
+          database</para>
+        </listitem>
+
+        <listitem>
+          <para>The ability for those users and PIs to perform some operations
+          at PLC, and gain direct access to the nodes</para>
+        </listitem>
+      </orderedlist>
+
+      <para>The process for allowing new sites to become part of PlanetLab has
+      been continually evolving since the beginning of PlanetLab. Initially,
+      the first sites were selected and invited, and a record of their
+      existence was entered into PLC by hand by an administrator. Once a site
+      was part of PlanetLab, users and PIs at that site could then register for
+      accounts to perform operations at PLC. Privileged accounts, such as PI
+      accounts, were enabled by administrators. At the time, this
+      administrative overhead was not a problem given the relatively limited
+      number of total sites.</para>
+
+      <para>Over time, parts of these operations have been streamlined. Now, a
+      site can submit all of its relevant information on the PLC website for
+      review and approval by administrators. Sites also no longer require an
+      explicit invitation. With the creation of the PlanetLab Consortium,
+      there is now an additional paperwork step before a site becomes a member
+      of PlanetLab.</para>
+
+      <para>With the introduction of the additional consortium step, the
+      process is now as follows:</para>
+
+      <orderedlist>
+        <listitem>
+          <para>A site either requests to join PlanetLab by contacting
+          administrators over email, or through other external
+          communication</para>
+        </listitem>
+
+        <listitem>
+          <para>Necessary consortium paperwork is signed by all
+          parties</para>
+        </listitem>
+
+        <listitem>
+          <para>PI(s) submit connect (join) requests with remaining site and
+          personal information</para>
+        </listitem>
+
+        <listitem>
+          <para>Administrators verify that the PI is who they say they are,
+          and enable their site and accounts at PLC</para>
+        </listitem>
+      </orderedlist>
+    </section>
+
+    <section>
+      <title>How Nodes Become Part of PlanetLab</title>
+
+      <para>After a site has been approved and added to PLC, it is required
+      to install and make available to other users at least two nodes (as per
+      current policy).</para>
+
+      <para>In the first revisions of the PLC software, nodes were only added
+      to the system by hand. Usually a PI or technical contact would
+      communicate the network settings of the node, and it was then added to
+      PLC by an administrator. This prevented any nodes that weren't part of
+      PlanetLab from being recognized by PLC. No mechanisms existed to ensure that
+      the node's network (effectively its identity) was not hijacked by
+      another machine.</para>
+
+      <para>Since the beginning of PlanetLab, there have been few or no
+      restrictions on what machines the PlanetLab software can run on. This is
+      primarily because all source code is now available, and it is
+      technically feasible for anyone to bring up a machine that runs the
+      PlanetLab software, or something that closely resembles it. What is important,
+      however, is when these nodes become recognized by PLC, and then
+      available to the users via PLC. Otherwise, a user would have to go
+      through non-PLC channels in order to find these nodes. Even then, they
+      could not use PLC to run their experiments on the nodes, because PLC
+      does not know about those nodes.</para>
+
+      <para>When a node becomes part of PlanetLab:</para>
+
+      <orderedlist>
+        <listitem>
+          <para>It is recognized by PLC as being at the site by its existence
+          in the PLC database</para>
+        </listitem>
+
+        <listitem>
+          <para>The existing node boot mechanisms allow the machine to come
+          online after communicating its identity to PLC</para>
+        </listitem>
+
+        <listitem>
+          <para>Researchers can use the node for their experiments by using
+          administrative interfaces at PLC</para>
+        </listitem>
+      </orderedlist>
+
+      <para>Rather than adding each node by hand, the current system instead
+      allows for an entire network subnet to be authorized to contain nodes.
+      When a site joins, a PLC administrator authorizes the subnet the nodes
+      will be on, and any machines on that network are allowed to become
+      recognized by PLC automatically. This had an immediate advantage:
+      PLC administrators were no longer required to
+      add each node by hand as was done in the beginning. Given that a common
+      interest was to see PlanetLab grow in terms of number of nodes (as one
+      metric), the assumption was made that allowing any node to come online
+      on an authorized subnet without explicit approval from an administrator
+      or PI would benefit everyone.</para>
+    </section>
+
+    <section>
+      <title>Node Installation</title>
+
+      <para>To date, there have been three major revisions of the software
+      that installs a PlanetLab node. Not only have the mechanisms by which
+      the nodes get installed changed, but also the context in which the
+      installation runs.</para>
+
+      <para>The first revision of the installer was essentially nothing more
+      than a customized RedHat (version 7.3) boot disk, with a
+      PlanetLab-specific post-install script to perform final initialization
+      steps. The network settings and the list of packages to install were all
+      stored on the disk, so a custom disk was generated on demand for each
+      node. Anyone with one of
+      these disks could install a PlanetLab node.</para>
+
+      <para>The second revision of the installer was released in conjunction
+      with the release of the new PlanetLab boot cd. The intention was not
+      necessarily to have the node packages on the cd (as they would quickly
+      go out of date), but rather to provide a mechanism to allow administrators to
+      regain control of a machine, in the event that the node was compromised,
+      or the installed software was corrupted. The nodes were configured to
+      always start off the cd, and, rather than have a custom cd per node, the
+      network settings were stored on a floppy disk. Both the floppy disk and
+      the boot cd were to remain in the machine at all times. The RedHat
+      installer, Anaconda <citation>1</citation>, which was used prior to the
+      boot cd, was modified to run in the context of this boot cd. This allowed
+      us a great deal of flexibility, as the cd was built so that all it would
+      do was:</para>
+
+      <orderedlist>
+        <listitem>
+          <para>Bring a full Linux system online, running only off the
+          cd</para>
+        </listitem>
+
+        <listitem>
+          <para>Load any network and other drivers necessary, based on the
+          hardware of the node</para>
+        </listitem>
+
+        <listitem>
+          <para>Configure the network interface with the settings from the
+          floppy disk</para>
+        </listitem>
+
+        <listitem>
+          <para>Contact a special PLC boot server, and download and execute a
+          script.</para>
+        </listitem>
+      </orderedlist>
+
+      <para>The boot cd uses HTTPS to contact the boot server, and uses a
+      certification authority (CA) certificate to verify the identity of the
+      machine at PLC. This way, it can be assured that the installation of a
+      particular node is correct, at least in that all packages originated
+      from PLC. The script downloaded by the boot cd for a node depends on the
+      current state of that node in the PLC database. To accomplish this, PLC
+      must identify the node; this is covered below, in
+      Node Identity.</para>
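+
+      <para>As an illustration only (the actual boot cd implementation
+      differs), the following sketch in modern Python shows the essence of
+      step 4 above: fetch a script over HTTPS, accepting the server only if
+      its certificate chains to the CA certificate carried on the cd. The URL
+      and certificate path shown are hypothetical.</para>
+
+      <programlisting>import ssl
+import urllib.request
+
+# Hypothetical locations; the real paths live on the boot cd itself.
+CACERT = "/usr/bootme/cacert/boot.planet-lab.org/cacert.pem"
+URL = "https://boot.planet-lab.org/boot/index.php"
+
+# Only trust the server if its certificate chains to the CA on the cd.
+context = ssl.create_default_context(cafile=CACERT)
+with urllib.request.urlopen(URL, context=context) as response:
+    script = response.read()
+# The downloaded script would then be written to disk and executed.</programlisting>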
+
+      <para>The third and current version of the installer still runs in the
+      context of the boot cd, but was a complete rewrite to better handle
+      packages and to remove much of the unneeded complexity of the previous
+      installer.</para>
+    </section>
+
+    <section>
+      <title>Node Identity</title>
+
+      <para>In the first revisions of the PlanetLab software, nodes were
+      solely identified by their network settings, primarily the hostname and
+      the physical address of the network adapter (MAC address). This worked
+      well then, as this set of information was unique, and allowed for the
+      direct mapping of node identity to a physical machine. It was stored
+      this way in the PLC database as well.</para>
+
+      <para>As the design of the database progressed, the PlanetLab software
+      needed to identify nodes not by any one aspect of the physical machine,
+      but by a more generic identifier (as this identifier needed to be used
+      internally to refer to other aspects of a node, like which site it is
+      at) - what has been called a node id. Although better in some respects,
+      there are still drawbacks. For example, deleting a node entry from the
+      database and recreating a similar one could result in a new node id,
+      when nothing on the node itself has really changed. These problems are
+      primarily due to the policy never having been documented; instead, the
+      implementation details have defined the policy.</para>
+
+      <para>Currently, when a node requests a script from the boot server as
+      the last step of the boot cd operation, it sends to PLC the output of
+      the program 'ifconfig' (among other data), which contains the network
+      settings the machine was configured with. From the network settings, the
+      primary MAC address is extracted by PLC and used to check whether the
+      node exists in the database. Here, the MAC address is used to look up a
+      corresponding numeric node id, which is used internally. The MAC address
+      and the node id are tied - if a new MAC address is used, a new node id
+      will be generated. If the node does exist, an appropriate script is sent
+      in response, based on the current node state. Again, this was fine, as
+      long as a node was identified correctly.</para>
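+
+      <para>To make the lookup concrete, a simplified sketch (in modern
+      Python) of the kind of parsing PLC could perform on the submitted
+      'ifconfig' output is shown below; the exact server-side code is outside
+      the scope of this document.</para>
+
+      <programlisting>import re
+
+def primary_mac(ifconfig_output):
+    """Return the first hardware (MAC) address found in ifconfig output."""
+    # Typical output contains a line such as: "eth0 ... HWaddr 00:11:22:33:44:55"
+    match = re.search(r"HWaddr\s+([0-9A-Fa-f:]{17})", ifconfig_output)
+    return match.group(1).lower() if match else None
+
+# The resulting MAC address would then be used to look up the numeric
+# node id in the PLC database.</programlisting>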
+    </section>
+
+    <section>
+      <title>Node Authentication</title>
+
+      <para>What does a node (or PI, for that matter) have to do to prove that
+      it is one of the real, or legitimate, PlanetLab nodes? At first, this
+      was not an issue because the nodes were added to the system by
+      administrators, and all communication paths led only from PLC to the
+      nodes. Everything was downloaded from PLC, including information about
+      which experimenters can use the system and which packages to install for
+      updates. For this, a node only needed to send enough information in the
+      request to identify itself with PLC. From the PLC point of view, it did
+      not matter which node downloaded the packages for a node, so long as the
+      node was identified correctly and received the packages it was supposed
+      to. This was acceptable since the node was added to PLC by hand, thus it
+      was already 'authenticated'. During this period, a number of assumptions
+      were made:</para>
+
+      <orderedlist>
+        <listitem>
+          <para>That a rogue node with the same network settings would not be
+          a problem, as the site technical contacts could prevent or detect
+          that</para>
+        </listitem>
+
+        <listitem>
+          <para>No check was performed to ensure that a particular node was
+          already authenticated (aside from verifying that the host's
+          public ssh key fingerprint did not change from one login to the
+          next)</para>
+        </listitem>
+      </orderedlist>
+
+      <para>As more previously manual steps became automated, a number of
+      situations came up in which a node would need to initiate and perform
+      some operation at PLC. There is only a small set of these operations,
+      and they are limited to items such as adding a node to the system (under a
+      previously authorized subnet), changing the 'boot state' of a node (a
+      record of whether the machine is being installed or is in a debug mode),
+      or uploading the logs of an installation.</para>
+
+      <para>To handle this new node authentication, a 32-byte random nonce
+      value was generated and sent to PLC during node boot time (at the same
+      time the network settings are sent). The nonce value in the PLC database
+      for that particular node is updated if the node is identified correctly,
+      and is used for authenticating subsequent, node-initiated operations.
+      Then, for example, when a node install finished, the node could request
+      that its state be updated, and all it would need to do was resend its
+      network settings and the original nonce for authentication. If the
+      nonce in the database matched what was sent, then the requested
+      operation was performed.</para>
+
+      <para>The problem here is obvious: now, any node that can be identified
+      is essentially automatically authenticated. For a node to be identified,
+      it has to be in the database, and new nodes can be automatically added
+      on any authorized subnet without the intervention of an administrator or
+      tech contact. With this system, it is trivial to add a rogue node to the
+      system, even at a different site that was not originally authorized,
+      because the whole system is based on what a node sends PLC, which is
+      trivial to spoof.</para>
+    </section>
+  </section>
+
+  <section>
+    <title>Recommendations</title>
+
+    <section>
+      <title>How PLC Will Identify Nodes</title>
+
+      <para>Before any suggestions on what to change regarding the node
+      identity policy can be made, the question of what makes a node a node
+      should be answered. This primarily depends on who is asking. From an
+      administrator's point of view, a node could be tied to a particular
+      installation of the software. Reinstall the node, and it becomes a new
+      node with a new identity. However, from an end user's perspective, the
+      machine still has the same network address and hostname, and their
+      software simply was removed. For them, changing the node identity in
+      this situation does not make any sense, and usually causes them
+      unnecessary work, as they have to re-add that machine to their
+      experiment (because, as far as the PLC database is concerned, the node
+      never existed before then). This question is particularly important for
+      several reasons:</para>
+
+      <orderedlist>
+        <listitem>
+          <para>The node identity gives users a way to identify a machine, in
+          order to use it for their research</para>
+        </listitem>
+
+        <listitem>
+          <para>The node identity could be used by other external systems, as
+          a universal identifier</para>
+        </listitem>
+      </orderedlist>
+
+      <para>The following recommendation is made for a new node identity
+      policy. Rather than tie node identity to some attribute of the physical
+      machine, such as its hardware configuration as is currently the case,
+      PLC will assign an arbitrary, unused identity to the node upon its
+      creation, and that identity will be stored locally at the node (most
+      likely on an external medium like floppy disk). Then as long as that
+      identity is still on the node, any hardware or software changes will not
+      necessarily require a change of the node identity. This will then allow
+      PLC, if necessary in the future, to change the node identity policy as
+      needed.</para>
+
+      <para>The following policy will apply to this new node identity:</para>
+
+      <orderedlist>
+        <listitem>
+          <para>In the past, a tech contact was able to change the network
+          settings on a node automatically by updating the network
+          configuration floppy. Now, these changes will have to be done at PLC
+          (with the option of assigning a new node identity). Thus, the node's
+          network settings (excluding MAC address) are tied to the
+          identity.</para>
+        </listitem>
+
+        <listitem>
+          <para>Attempting to move the node identity to another machine will
+          prevent that machine from being used by researchers until the change is
+          dealt with by either a PLC administrator or a site technical
+          contact. If approved, the node would reconfigure itself
+          appropriately.</para>
+        </listitem>
+
+        <listitem>
+          <para>A node identity cannot be reused after the node has been
+          deleted from the PLC database.</para>
+        </listitem>
+
+        <listitem>
+          <para>The node identity will not change across software reinstalls,
+          changes of the hard disks or network adapters (as long as the network
+          settings remain), or any other hardware changes.</para>
+        </listitem>
+      </orderedlist>
+
+      <para>Given the current design of the PLC database, there is still a
+      need to use, at least internally, a numeric node identifier. Other
+      software and APIs available to researchers also use this identifier, so
+      the question becomes whether or not the above policy can be applied to
+      it without significantly changing either the PLC software or the
+      researchers' experiments. Answering this question is beyond the scope of
+      this document, and is left as an implementation decision.</para>
+    </section>
+
+    <section>
+      <title>Authenticating Node Identity</title>
+
+      <para>It is clear that the previous model for authentication, which
+      assumes that with identity comes authorization, will need to change to one where
+      a node can present its identity and then authenticate it as a separate step
+      in order to become authorized. During the boot process, a node can still
+      send sufficient information to identify itself, but a new system is
+      required to prove that what it sends in fact does come from the node,
+      and not someone attempting to impersonate the node. This is especially
+      important as node identities are made public knowledge.</para>
+
+      <para>Authentication in distributed systems is a fairly widely
+      researched problem, and the goal here is not to build a new mechanism
+      from scratch, but rather to identify an existing method that can be used
+      to fulfill our requirements. Our requirements are fairly simple, and
+      include:</para>
+
+      <orderedlist>
+        <listitem>
+          <para>The ability to trace the origin of a node added to PlanetLab,
+          including the party responsible for the addition.</para>
+        </listitem>
+
+        <listitem>
+          <para>Authenticating requests initiated by nodes to change
+          information at PLC. These requests involve little actual
+          communication between the nodes and PLC, and the overhead for
+          authenticating each request is small given the number and frequency
+          of them. This also means that opening an authenticated channel
+          for multiple requests will not be necessary.</para>
+        </listitem>
+      </orderedlist>
+
+      <para>Given the public nature of PlanetLab, encrypting data
+      during these system processes to prevent other parties from seeing it is
+      not necessary (also, simply hiding the details of the authentication
+      process is not a valid security model). Assuring the requests are not
+      modified during transmission is necessary, however. A public/private key
+      pair system could be used, where each site would be responsible for
+      generating a private key, and signing their node's identity. PLC could
+      then have a list of all public keys, and could validate the identities.
+      However, this is not recommended for several reasons:</para>
+
+      <orderedlist>
+        <listitem>
+          <para>It places an additional burden on the site to generate these
+          private keys and keep them secure. Having a private key for each node
+          would be unreasonable, so one key would be used for all nodes at a
+          particular site.</para>
+        </listitem>
+
+        <listitem>
+          <para>Using one key for all nodes not only increases the cost
+          of a compromised key (all identities would have to be re-signed),
+          but also makes it harder to detect use of the key to add
+          unauthorized nodes.</para>
+        </listitem>
+
+        <listitem>
+          <para>Differences in versions of the software used to generate keys
+          would have to be handled, increasing the complexity of supporting
+          such a system at PLC.</para>
+        </listitem>
+      </orderedlist>
+
+      <para>To fulfill the above requirements for node identity, the
+      recommendation is made to use a message authentication system based on
+      hash functions and shared secrets, such as in <citation>2</citation>. In
+      such a system, the shared secret (also referred to as a key, though not
+      in the public/private key pair sense) is as simple as a fixed-size,
+      randomly generated number. Of primary importance in such a system is the
+      control and distribution of the key.</para>
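+
+      <para>A minimal sketch (in modern Python) of how a node could compute
+      such a message authentication code is shown below. The serialization
+      mirrors the one used by the boot manager (sorted parameter values
+      wrapped in brackets); the key and parameters shown are illustrative
+      only.</para>
+
+      <programlisting>import hmac
+import hashlib
+
+def compute_mac(node_key, call_params):
+    # Canonical form shared by the node and PLC: sorted values in brackets.
+    message = "[" + "".join(sorted(str(p) for p in call_params)) + "]"
+    return hmac.new(node_key.encode(), message.encode(), hashlib.sha1).hexdigest()
+
+# Illustrative values; the real key is the per-node shared secret.
+mac = compute_mac("example-shared-secret", ["boot", 1234])</programlisting>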
+
+      <para>Securing a key at PLC is relatively straightforward. Only a
+      limited number of administrators have direct access to the PLC database,
+      so keys can be stored there with relative confidence, provided access to
+      the PLC machines is secure. Should any of these keys be compromised, all
+      keys would need to be regenerated and redistributed, so security here is
+      highly important.</para>
+
+      <para>However, securing the secret on the client side, at the node, is
+      more difficult. The key could be placed on some removable media that
+      will not be erased, such as a floppy disk or a small usb-based disk, but
+      mechanisms must be in place to prevent the key from being read by
+      anything except the boot manager and the boot cd processes, and in
+      particular not by any users of the machine. In a situation like this,
+      physical security is a
+      problem. Anyone who could get access to the machine can easily copy that
+      key and use it elsewhere. One possible solution to such a problem is to
+      instead make the key a combination of two different values, one stored
+      on the floppy disk, the other being a value that is only known to the
+      PI, and must be entered by hand for each message authentication. Then,
+      in order to compromise the entire key, not only must the attacker have
+      physical access to the machine, but they would also have to know the
+      other half of
+      the key, which would not be recorded anywhere except in the PLC
+      database. This ultimately cannot work because of the need for human
+      intervention each time a node needs to be authenticated.</para>
+
+      <para>Ultimately, the best solution for the circumstances here is to
+      leave the entire key on the disk; leave physical security to the
+      individual sites; and put checks in place to attempt to identify if the
+      key is being reused elsewhere. As before, the post-boot-manager system
+      (running the real PlanetLab kernel) can be configured to prevent the
+      floppy disk from being read by any logged-in user (local or not).</para>
+
+      <para>If the key was identified as being reused elsewhere, appropriate
+      actions would include deleting the key from the PLC database
+      (effectively halting any use of it), and notifying the technical
+      contacts and PIs at the site. If necessary, they could generate a new
+      key after corrective actions had been taken.</para>
+    </section>
+
+    <section>
+      <title>Adding New Nodes</title>
+
+      <para>It is important to have control over the process by which nodes
+      are added to the PlanetLab system, and to be able to determine which party
+      is responsible for that machine at any point in the future. This is
+      because several different parties come to PLC for the list of nodes, and
+      PLC needs to provide a list that only includes nodes that have been
+      authorized. First, researchers who are looking to run experiments
+      need to identify a set of PlanetLab machines. Second, people outside
+      PlanetLab who have traffic-related concerns or complaints may be
+      trying to track down who is responsible for a node and/or the
+      researcher's experiment.</para>
+
+      <para>It is possible to envision at least several scenarios where having
+      a non-authorized node in the PLC database would be a problem. One
+      would be a researcher inadvertently using a rogue node (those who
+      installed it could easily have root access) to run an experiment, and
+      that experiment being compromised across all of PlanetLab, or the
+      results from their research being tampered with. Another could include a
+      rogue node being used for malicious purposes, such as a spam relay, and
+      the (initial) blame being directed at PLC, simply because of the
+      association.</para>
+
+      <para>As shown previously, simply authorizing an entire network is
+      insufficient, as there is no way to identify who authorized an individual
+      node on that subnet. Having the PlanetLab administrators add
+      all nodes by hand involves too much overhead, given the number of
+      nodes and the current growth of PlanetLab. It also places the
+      administrators in a position where they may not have the contact
+      information for the responsible party. A reasonable compromise will be to
+      require either the PIs or technical contacts at each site to enter
+      their own nodes using the existing PLC interfaces. Given that one of the
+      existing steps for bringing a node online involves generating a
+      floppy-based network configuration file on the PlanetLab website, this
+      process can be extended to also add a record of the nodes with little
+      additional impact to PIs and tech contacts. At this point, the per-node
+      shared secret and a node identity necessary for node authentication
+      would be generated and saved at PLC as well.</para>
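+
+      <para>For illustration, the per-node values created at this point could
+      be generated with something as simple as the following modern Python
+      sketch; the configuration file layout shown is hypothetical.</para>
+
+      <programlisting>import secrets
+
+def new_node_credentials():
+    # An arbitrary, unused identity assigned by PLC plus a random shared secret.
+    node_id = secrets.randbelow(10**9)
+    node_key = secrets.token_hex(32)
+    return node_id, node_key
+
+node_id, node_key = new_node_credentials()
+# Hypothetical contents of the floppy configuration file:
+config = "NODE_ID=%d\nNODE_KEY=%s\nIP_ADDRESS=192.0.2.10\n" % (node_id, node_key)</programlisting>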
+    </section>
+
+    <section>
+      <title>How To Remove Nodes</title>
+
+      <para>There may be the need for an administrator, PI, or technical
+      contact to remove a node from the system. This can be done simply by
+      removing the node record from the PLC database, thereby preventing it
+      from successfully authenticating at boot time. In addition, a node could
+      be effectively disabled (but not removed) by deleting the per-node key
+      (the shared secret) for that node from the database. Once restarted, it
+      would not be able to come back online until a new key is generated.</para>
+    </section>
+
+    <section>
+      <title>Node Installation</title>
+
+      <para>The node installer shall be integrated into the Boot Manager,
+      rather than continue to be a standalone component. This will allow the
+      boot manager, when appropriate, to invoke the installer directly.</para>
+    </section>
+  </section>
+
+  <section>
+    <title>Conclusion</title>
+
+    <para>As outlined above, this new system effectively encapsulates a new
+    policy for node identity, and a new mechanism for verifying the node
+    identity and authenticating node-initiated PLC changes. In total, the boot
+    manager will consist of:</para>
+
+    <orderedlist>
+      <listitem>
+        <para>A set of interfaces at PLC that are used to perform
+        authenticated, node-initiated changes.</para>
+      </listitem>
+
+      <listitem>
+        <para>A set of interfaces at PLC that are used to add new nodes to the
+        system.</para>
+      </listitem>
+
+      <listitem>
+        <para>A package downloaded by the boot cd at every boot, which is used
+        to install nodes, update configurations, or boot nodes, using the
+        interfaces above.</para>
+      </listitem>
+
+      <listitem>
+        <para>The policy for identifying nodes, and when that identity should
+        change.</para>
+      </listitem>
+    </orderedlist>
+
+    <para>Given the above recommendations, the bootstrap process and the
+    chain of trust for adding a new node now exist as detailed below. A site,
+    a principal investigator, and a tech contact are assumed to be already
+    present and authorized.</para>
+
+    <orderedlist>
+      <listitem>
+        <para>The technical contact downloads a boot cd for the new node.
+        Since the HTTPS certificate for the public web server is signed by a
+        trusted third party, the image can be verified by either ensuring it
+        was downloaded via HTTPS, or by downloading the PlanetLab public key
+        and verifying a signed copy of the cd, also available on the
+        website.</para>
+      </listitem>
+
+      <listitem>
+        <para>The now-validated boot cd contains the CA certificate for the
+        boot server, so any host-initiated communication that is using this
+        certificate on the cd can be sure that the server is in fact the
+        PlanetLab boot server.</para>
+      </listitem>
+
+      <listitem>
+        <para>The PI logs into their account on the PlanetLab website, also
+        over HTTPS and verifying the SSL certificates. Once logged in, they
+        use a tool to generate a configuration file for the new node, which
+        includes the network settings and node identity. During this
+        configuration file generation, a record of the node's existence is
+        entered into PLC, and a random, shared secret is generated for this
+        machine. The shared secret is saved in the PLC database, and is also
+        included in this configuration file.</para>
+      </listitem>
+
+      <listitem>
+        <para>Both the cd and the new configuration file (on a floppy disk)
+        are inserted into the machine. The machine is configured such that it
+        always starts off the cd, and never the floppy disk or the machine's
+        hard disks.</para>
+      </listitem>
+
+      <listitem>
+        <para>After the boot cd finishes bringing the machine online, loading
+        all hardware drivers, and applying the network settings from the
+        floppy, it contacts the
+        boot server using HTTPS and the certificate on the cd, and downloads
+        and executes the boot manager.</para>
+      </listitem>
+
+      <listitem>
+        <para>The boot manager then contacts PLC to get the current state of
+        the node it is running on.</para>
+      </listitem>
+
+      <listitem>
+        <para>Based on this state, the boot manager can either continue
+        booting the node (if already installed), install the machine if
+        necessary, or take any other action as appropriate. Since this is a
+        new machine, the installation will be initiated.</para>
+      </listitem>
+
+      <listitem>
+        <para>After successful installation, the boot manager needs to change
+        the state of the node such that the next time it starts, it will
+        instead continue the normal boot process. The boot manager contacts
+        PLC and requests a change of node state. This request consists of the
+        node identity, data pertaining to the request itself, and a message
+        authentication code based on the shared secret from the floppy disk
+        and the request data.</para>
+      </listitem>
+
+      <listitem>
+        <para>PLC, in order to authenticate the request,
+        generates its own message authentication code based on the submitted
+        data and its own copy of the shared secret. If the message
+        authentication codes match, then the requested action is performed and
+        the boot manager is notified of success.</para>
+      </listitem>
+
+      <listitem>
+        <para>If the node is already installed, and no actions are necessary,
+        the machine is booted. To protect the shared secret on the floppy disk
+        from users of the machine, the kernel during runtime cannot access the
+        floppy disk. At this point, control of the system is removed from the
+        boot manager and run-time software takes control.</para>
+      </listitem>
+    </orderedlist>
+
+    <para>Any action the boot manager may need to take that requires some
+    value to be changed in PLC can use the steps outlined in 8 through 10. As
+    an extra precaution to prevent unauthorized nodes from booting, the
+    process in step 7 should also use the authentication steps in 8 through
+    10.</para>
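+
+    <para>For completeness, the check performed by PLC in step 9 amounts to
+    recomputing the code over the submitted data and comparing it with the
+    code the node sent. A minimal sketch in modern Python, with illustrative
+    names, follows.</para>
+
+    <programlisting>import hmac
+import hashlib
+
+def serialize(call_params):
+    # Same canonical form the node used when computing its code.
+    return "[" + "".join(sorted(str(p) for p in call_params)) + "]"
+
+def request_is_authentic(stored_node_key, call_params, submitted_mac):
+    expected = hmac.new(stored_node_key.encode(),
+                        serialize(call_params).encode(),
+                        hashlib.sha1).hexdigest()
+    # Constant-time comparison avoids leaking information through timing.
+    return hmac.compare_digest(expected, submitted_mac)</programlisting>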
+
+    <para>Given that the shared secret on the floppy disk can only be accessed
+    in the cd environment (when the boot manager is running and the boot cd
+    kernel provides floppy disk access), any operation that a node can perform
+    that results in a change in data at PLC must be performed during this
+    stage. During runtime, a node can still present its identity to PLC to
+    receive node-specific packages or configuration files, but all interfaces
+    that provide these packages or files cannot change any record or data at
+    PLC.</para>
+  </section>
+
+  <bibliography>
+    <biblioentry>
+      <abbrev>1</abbrev>
+
+      <title><ulink
+      url="http://rhlinux.redhat.com/anaconda">Anaconda</ulink></title>
+    </biblioentry>
+
+    <biblioentry>
+      <abbrev>2</abbrev>
+
+      <title>Message Authentication using Hash Functions - The HMAC
+      construction</title>
+
+      <authorgroup>
+        <author>
+          <firstname>Mihir</firstname>
+
+          <surname>Bellare</surname>
+        </author>
+
+        <author>
+          <firstname>Ran</firstname>
+
+          <surname>Canetti</surname>
+        </author>
+
+        <author>
+          <firstname>Hugo</firstname>
+
+          <surname>Krawczyk</surname>
+        </author>
+      </authorgroup>
+
+      <date>Spring 1996</date>
+    </biblioentry>
+  </bibliography>
+</article>
diff --git a/documentation/pdn-pdf-style.xsl b/documentation/pdn-pdf-style.xsl
new file mode 100644 (file)
index 0000000..ff5b631
--- /dev/null
@@ -0,0 +1,17 @@
+<?xml version="1.0" encoding="utf-8"?>
+<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform" version="1.0">
+
+<xsl:import href="http://docbook.sourceforge.net/release/xsl/current/fo/docbook.xsl"/>
+
+<xsl:param name="header.rule" select="0"></xsl:param>
+<xsl:param name="footer.rule" select="0"></xsl:param>
+<xsl:param name="section.autolabel" select="1"></xsl:param>
+
+<!-- more room for the titles at the top of each page -->
+<xsl:param name="header.column.widths" select="'1 2 1'"></xsl:param>
+
+<!-- remove revision history -->
+<xsl:template match="revhistory" mode="titlepage.mode">
+</xsl:template>
+
+</xsl:stylesheet>
diff --git a/dummy_bootloader/Makefile b/dummy_bootloader/Makefile
new file mode 100644 (file)
index 0000000..4eafd6a
--- /dev/null
@@ -0,0 +1,17 @@
+all: dummy_bootloader
+
+dummy_bootloader: dummy_bootloader.S
+       nasm $< -o $@
+
+testbootdisk: dummy_bootloader
+       dd if=/dev/zero of=testbootdisk bs=512 count=2880
+
+run: testbootdisk
+       dd if=dummy_bootloader of=testbootdisk conv=notrunc
+       qemu -fda testbootdisk -boot a
+
+clean:
+       rm -f dummy_bootloader
+       rm -f testbootdisk
+
+.PHONY: all run clean
diff --git a/dummy_bootloader/dummy_bootloader b/dummy_bootloader/dummy_bootloader
new file mode 100644 (file)
index 0000000..ea5d7e5
Binary files /dev/null and b/dummy_bootloader/dummy_bootloader differ
diff --git a/dummy_bootloader/dummy_bootloader.S b/dummy_bootloader/dummy_bootloader.S
new file mode 100644 (file)
index 0000000..c8f1512
--- /dev/null
@@ -0,0 +1,44 @@
+SCREEN_COLS     equ 80
+SCREEN_ROWS     equ 25
+STACK_SEGMENT   equ 09000h     ; top of memory
+STACK_SIZE      equ 00fffh     ; 4K - 1 bytes of stack
+       
+TEXT_COLOR     equ 0x07        ; white on black
+
+       jmp 07c0h:start
+
+message                db "PlanetLab nodes require a boot cd at all times to function.",0
+       
+start:
+       mov ax, cs
+       mov ds, ax
+       mov es, ax
+       
+       mov sp, STACK_SEGMENT   ; setup stack (not really used)
+       mov ss, sp
+       mov sp, STACK_SIZE
+
+       ;; clear out the screen, using the scroll down bios int.
+       mov ah, 0x07            ; for int 0x10, 0x07 is scroll down window
+       mov al, 0               ; entire window
+       mov cx, 0               ; upper left corner = (0,0)
+       mov dh, SCREEN_ROWS     ; row of bottom
+       mov dl, SCREEN_COLS     ; column of right
+       mov bh, 7
+       int 10h                 
+       
+       mov si, message
+
+strout: lodsb
+       cmp al, 0
+       je done
+       mov ah, 0x0E            ; for int 0x10, 0x0E is char out
+       mov bx, TEXT_COLOR
+       int 0x10
+       jmp strout
+
+done:  
+       jmp done
+       
+       times 510 - ($ - $$) db 0 ;  last two bytes are magic for x86 boot sectors
+       dw 0aa55h
diff --git a/source/BootAPI.py b/source/BootAPI.py
new file mode 100644 (file)
index 0000000..a27593b
--- /dev/null
@@ -0,0 +1,131 @@
+#!/usr/bin/python2
+
+# Copyright (c) 2003 Intel Corporation
+# All rights reserved.
+#
+# Copyright (c) 2004-2006 The Trustees of Princeton University
+# All rights reserved.
+
+
+import xmlrpclib
+import xml.parsers.expat
+import hmac
+import string
+import sha
+
+from Exceptions import *
+
+
+def create_auth_structure( vars, call_params ):
+    """
+    create and return an authentication structure for a Boot API
+    call. Vars contains the boot manager runtime variables, and
+    call_params is a tuple of the parameters that will be passed to the
+    API call. Return None if unable to (typically due to missing
+    keys in vars, such as node_id or node_key)
+    """
+    
+    auth= {}
+    auth['AuthMethod']= 'hmac'
+
+    try:
+        network= vars['NETWORK_SETTINGS']
+        
+        auth['node_id']= vars['NODE_ID']
+        auth['node_ip']= network['ip']
+        node_key= vars['NODE_KEY']
+    except KeyError, e:
+        return None
+
+    msg= serialize_params(call_params)
+    node_hmac= hmac.new(node_key,msg,sha).hexdigest()
+    auth['value']= node_hmac
+
+    return auth
+
+
+
+def serialize_params( call_params ):
+    """
+    convert a list of parameters into a format that will be used in the
+    hmac generation. both the boot manager and plc must have a common
+    format. full documentation is in the boot manager technical document,
+    but essentially we are going to take all the values (and keys for
+    dictionary objects), and put them into a list. sort them, and combine
+    them into one long string encased in a set of brackets.
+    """
+
+    # if there are no parameters, just return an empty bracket set
+    if len(call_params) == 0:
+        return "[]"
+
+    values= []
+    
+    for param in call_params:
+        if isinstance(param,list) or isinstance(param,tuple):
+            values= values + map(str,param)
+        elif isinstance(param,dict):
+            values= values + collapse_dict(param)        
+        else:
+            values.append( str(param) )
+                
+    values.sort()
+    values= "[" + string.join(values,"") + "]"
+    return values
+
+    
+def collapse_dict( value ):
+    """
+    given a dictionary, return a list of all the keys and values as strings,
+    in no particular order
+    """
+
+    item_list= []
+    
+    if not isinstance(value,dict):
+        return item_list
+    
+    for key in value.keys():
+        key_value= value[key]
+        if isinstance(key_value,list) or isinstance(key_value,tuple):
+            item_list= item_list + map(str,key_value)
+        elif isinstance(key_value,dict):
+            item_list= item_list + collapse_dict(key_value)
+        else:
+            item_list.append( str(key_value) )
+
+    return item_list
+            
+    
+    
+def call_api_function( vars, function, user_params ):
+    """
+    call the named api function with params, and return the
+    value to the caller. the authentication structure is handled
+    automatically, and doesn't need to be passed in with params.
+
+    If the call fails, a BootManagerException is raised.
+    """
+    
+    try:
+        api_server= vars['API_SERVER_INST']
+    except KeyError, e:
+        raise BootManagerException, "No connection to the API server exists."
+
+    auth= create_auth_structure(vars,user_params)
+    if auth is None:
+        raise BootManagerException, \
+              "Could not create auth structure, missing values."
+    
+    params= (auth,)
+    params= params + user_params
+
+    try:
+        exec( "rc= api_server.%s(*params)" % function )
+        return rc
+    except xmlrpclib.Fault, fault:
+        raise BootManagerException, "API Fault: %s" % fault
+    except xmlrpclib.ProtocolError, err:
+        raise BootManagerException,"XML RPC protocol error: %s" % err
+    except xml.parsers.expat.ExpatError, err:
+        raise BootManagerException,"XML parsing error: %s" % err
diff --git a/source/BootManager.py b/source/BootManager.py
new file mode 100755 (executable)
index 0000000..c4eea37
--- /dev/null
@@ -0,0 +1,361 @@
+#!/usr/bin/python2 -u
+
+# Copyright (c) 2003 Intel Corporation
+# All rights reserved.
+#
+# Copyright (c) 2004-2006 The Trustees of Princeton University
+# All rights reserved.
+
+import string
+import sys, os, traceback
+from time import gmtime, strftime
+from gzip import GzipFile
+
+from steps import *
+from Exceptions import *
+import notify_messages
+import BootServerRequest
+
+# all output is written to this file
+LOG_FILE= "/tmp/bm.log"
+UPLOAD_LOG_PATH = "/alpina-logs/upload.php"
+
+# the new contents of PATH when the boot manager is running
+BIN_PATH= ('/usr/local/bin',
+           '/usr/local/sbin',
+           '/bin',
+           '/sbin',
+           '/usr/bin',
+           '/usr/sbin',
+           '/usr/local/planetlab/bin')
+           
+
+# the set of valid node run states
+NodeRunStates = {}
+
+class log:
+
+    def __init__( self, OutputFilePath= None ):
+        if OutputFilePath:
+            try:
+                self.OutputFilePath= OutputFilePath
+                self.OutputFile= GzipFile( OutputFilePath, "w", 9 )
+            except:
+                print( "Unable to open output file for log, continuing" )
+                self.OutputFile= None
+
+    
+    def LogEntry( self, str, inc_newline= 1, display_screen= 1 ):
+        if self.OutputFile:
+            self.OutputFile.write( str )
+        if display_screen:
+            sys.stdout.write( str )
+            
+        if inc_newline:
+            if display_screen:
+                sys.stdout.write( "\n" )
+            if self.OutputFile:
+                self.OutputFile.write( "\n" )
+
+        if self.OutputFile:
+            self.OutputFile.flush()
+
+            
+
+    def write( self, str ):
+        """
+        make log behave like a writable file object (for traceback
+        prints)
+        """
+        self.LogEntry( str, 0, 1 )
+
+
+    
+    def Upload( self ):
+        """
+        upload the contents of the log to the server
+        """
+
+        if self.OutputFile is not None:
+            self.LogEntry( "Uploading logs to %s" % UPLOAD_LOG_PATH )
+            
+            self.OutputFile.close()
+            self.OutputFile= None
+
+            bs_request = BootServerRequest.BootServerRequest()
+            bs_request.MakeRequest(PartialPath = UPLOAD_LOG_PATH,
+                                   GetVars = None, PostVars = None,
+                                   FormData = ["log=@" + self.OutputFilePath],
+                                   DoSSL = True, DoCertCheck = True)
+        
+    
+
+        
+
+
+class BootManager:
+
+    # file containing initial variables/constants
+    VARS_FILE = "configuration"
+
+    
+    def __init__(self, log, forceState):
+        # override machine's current state from the command line
+        self.forceState = forceState
+
+        # the main logging point
+        self.LOG= log
+
+        # set to 1 if we can run after initialization
+        self.CAN_RUN = 0
+             
+        # read in and store all variables from VARS_FILE. each line
+        # is in the format name=val (any whitespace around the = is
+        # removed); everything after the = to the end of the line is
+        # the value
+        vars = {}
+        vars_file= file(self.VARS_FILE,'r')
+        validConfFile = True
+        for line in vars_file:
+            # if its a comment or a whitespace line, ignore
+            if line[:1] == "#" or string.strip(line) == "":
+                continue
+
+            parts= string.split(line,"=")
+            if len(parts) != 2:
+                self.LOG.LogEntry( "Invalid line in vars file: %s" % line )
+                validConfFile = False
+                break
+
+            name= string.strip(parts[0])
+            value= string.strip(parts[1])
+            vars[name]= value
+
+        vars_file.close()
+        if not validConfFile:
+            self.LOG.LogEntry( "Unable to read configuration vars." )
+            return
+
+        # find out which directory we are running in, and set a variable
+        # for that. future steps may need to get files out of the bootmanager
+        # directory
+        current_dir= os.getcwd()
+        vars['BM_SOURCE_DIR']= current_dir
+
+        # not sure what the current PATH is set to, replace it with what
+        # we know will work with all the boot cds
+        os.environ['PATH']= string.join(BIN_PATH,":")
+                   
+        # this contains a set of information used and updated
+        # by each step
+        self.VARS= vars
+
+        self.CAN_RUN= 1
+
+    def Run(self):
+        """
+        core boot manager logic.
+
+        the way errors are handled is as such: if any particular step
+        cannot continue or unexpectedly fails, an exception is thrown.
+        in this case, the boot manager cannot continue running.
+
+        these step functions can also return 0/1 depending on whether
+        or not they succeeded. In the case of steps like ConfirmInstallWithUser,
+        a 0 is returned and no exception is thrown if the user chose not
+        to confirm the install. The same goes for CheckHardwareRequirements:
+        if the requirements are not met, but the tests were successful, 0 is returned.
+
+        for steps that run within the installer, they are expected to either
+        complete successfully and return 1, or throw an exception.
+
+        For exact return values and expected operations, see the comments
+        at the top of each of the individual step functions.
+        """
+
+        def _nodeNotInstalled():
+            # called by the _xxxState() functions below upon failure
+            self.VARS['BOOT_STATE']= 'dbg'
+            self.VARS['STATE_CHANGE_NOTIFY']= 1
+            self.VARS['STATE_CHANGE_NOTIFY_MESSAGE']= \
+                      notify_messages.MSG_NODE_NOT_INSTALLED
+            raise BootManagerException, \
+                  notify_messages.MSG_NODE_NOT_INSTALLED
+
+        def _bootRun():
+            # implements the boot logic, which consists of first
+            # double checking that the node was properly installed,
+            # checking whether someone added or changed disks, and
+            # then finally chain boots.
+
+            InstallInit.Run( self.VARS, self.LOG )                    
+            if ValidateNodeInstall.Run( self.VARS, self.LOG ):
+                WriteModprobeConfig.Run( self.VARS, self.LOG )
+                MakeInitrd.Run( self.VARS, self.LOG )
+                WriteNetworkConfig.Run( self.VARS, self.LOG )
+                # the following step should be done by NM
+                UpdateNodeConfiguration.Run( self.VARS, self.LOG )
+                CheckForNewDisks.Run( self.VARS, self.LOG )
+                SendHardwareConfigToPLC.Run( self.VARS, self.LOG )
+                ChainBootNode.Run( self.VARS, self.LOG )
+            else:
+                _nodeNotInstalled()
+
+        def _rinsRun():
+            # implements the reinstall logic, which will check whether
+            # the min. hardware requirements are met, install the
+            # software, and upon correct installation will switch to
+            # 'boot' state and chainboot into the production system
+            if not CheckHardwareRequirements.Run( self.VARS, self.LOG ):
+                self.VARS['BOOT_STATE']= 'dbg'
+                raise BootManagerException, "Hardware requirements not met."
+
+            # runinstaller
+            InstallInit.Run( self.VARS, self.LOG )                    
+            InstallPartitionDisks.Run( self.VARS, self.LOG )            
+            InstallBootstrapRPM.Run( self.VARS, self.LOG )            
+            InstallWriteConfig.Run( self.VARS, self.LOG )
+            InstallBuildVServer.Run( self.VARS, self.LOG )
+            InstallNodeInit.Run( self.VARS, self.LOG )
+            InstallUninitHardware.Run( self.VARS, self.LOG )
+            self.VARS['BOOT_STATE']= 'boot'
+            self.VARS['STATE_CHANGE_NOTIFY']= 1
+            self.VARS['STATE_CHANGE_NOTIFY_MESSAGE']= \
+                 notify_messages.MSG_INSTALL_FINISHED
+            UpdateBootStateWithPLC.Run( self.VARS, self.LOG )
+            _bootRun()
+            
+        def _newRun():
+            # implements the new install logic, which will first check
+            # with the user whether it is ok to install on this
+            # machine, switch to 'rins' state and then invoke the rins
+            # logic.  See rinsState logic comments for further
+            # details.
+            if not ConfirmInstallWithUser.Run( self.VARS, self.LOG ):
+                return 0
+            self.VARS['BOOT_STATE']= 'rins'
+            UpdateBootStateWithPLC.Run( self.VARS, self.LOG )
+            _rinsRun()
+
+        def _debugRun():
+            # implements debug logic, which just starts the sshd
+            # and just waits around
+            self.VARS['BOOT_STATE']='dbg'
+            UpdateBootStateWithPLC.Run( self.VARS, self.LOG )
+            StartDebug.Run( self.VARS, self.LOG )
+
+        def _badRun():
+            # should never happen; log event
+            self.LOG.write( "\nInvalid BOOT_STATE = %s\n" % self.VARS['BOOT_STATE'])
+            _debugRun()
+
+        global NodeRunStates
+        # setup state -> function hash table
+        NodeRunStates['new']  = _newRun
+        NodeRunStates['inst'] = _newRun
+        NodeRunStates['rins'] = _rinsRun
+        NodeRunStates['boot'] = _bootRun
+        NodeRunStates['dbg']  = _debugRun
+
+        success = 0
+        try:
+            InitializeBootManager.Run( self.VARS, self.LOG )
+            ReadNodeConfiguration.Run( self.VARS, self.LOG )
+            AuthenticateWithPLC.Run( self.VARS, self.LOG )
+            GetAndUpdateNodeDetails.Run( self.VARS, self.LOG )
+
+            # override machine's current state from the command line
+            if self.forceState is not None:
+                self.VARS['BOOT_STATE']= self.forceState
+                UpdateBootStateWithPLC.Run( self.VARS, self.LOG )
+
+            stateRun = NodeRunStates.get(self.VARS['BOOT_STATE'],_badRun)
+            stateRun()
+            success = 1
+
+        except KeyError, e:
+            self.LOG.write( "\n\nKeyError while running: %s\n" % str(e) )
+        except BootManagerException, e:
+            self.LOG.write( "\n\nException while running: %s\n" % str(e) )
+        except:
+            self.LOG.write( "\n\nImplementation Error\n")
+            traceback.print_exc(file=self.LOG.OutputFile)
+            traceback.print_exc()
+
+        if not success:
+            try:
+                _debugRun()
+            except BootManagerException, e:
+                self.LOG.write( "\n\nException while running: %s\n" % str(e) )
+            except:
+                self.LOG.write( "\n\nImplementation Error\n")
+                traceback.print_exc(file=self.LOG.OutputFile)
+                traceback.print_exc()
+
+        return success
+            
+            
+def main(argv):
+    global NodeRunStates
+    NodeRunStates = {'new':None,
+                     'inst':None,
+                     'rins':None,
+                     'boot':None,
+                     'dbg':None}
+
+    # set to 1 if error occurred
+    error= 0
+    
+    # all output goes through this class so we can save it and post
+    # the data back to PlanetLab central
+    LOG= log( LOG_FILE )
+
+    LOG.LogEntry( "BootManager started at: %s" % \
+                  strftime("%a, %d %b %Y %H:%M:%S +0000", gmtime()) )
+
+    try:
+        forceState = None
+        if len(argv) == 2:
+            fState = argv[1]
+            if NodeRunStates.has_key(fState):
+                forceState = fState
+            else:
+                LOG.LogEntry("FATAL: cannot force node run state to=%s" % fState)
+                error = 1
+    except:
+        traceback.print_exc(file=LOG.OutputFile)
+        traceback.print_exc()
+        
+    if error:
+        LOG.LogEntry( "BootManager finished at: %s" % \
+                      strftime("%a, %d %b %Y %H:%M:%S +0000", gmtime()) )
+        LOG.Upload()
+        return error
+
+    try:
+        bm= BootManager(LOG,forceState)
+        if bm.CAN_RUN == 0:
+            LOG.LogEntry( "Unable to initialize BootManager." )
+        else:
+            LOG.LogEntry( "Running version %s of BootManager." %
+                          bm.VARS['VERSION'] )
+            success= bm.Run()
+            if success:
+                LOG.LogEntry( "\nDone!" );
+            else:
+                LOG.LogEntry( "\nError occurred!" );
+                error = 1
+    except:
+        traceback.print_exc(file=LOG.OutputFile)
+        traceback.print_exc()
+
+    LOG.LogEntry( "BootManager finished at: %s" % \
+                  strftime("%a, %d %b %Y %H:%M:%S +0000", gmtime()) )
+    LOG.Upload()
+
+    return error
+
+    
+if __name__ == "__main__":
+    error = main(sys.argv)
+    sys.exit(error)
diff --git a/source/BootServerRequest.py b/source/BootServerRequest.py
new file mode 100644 (file)
index 0000000..75fad3c
--- /dev/null
@@ -0,0 +1,468 @@
+#!/usr/bin/python2
+
+# Copyright (c) 2003 Intel Corporation
+# All rights reserved.
+#
+# Copyright (c) 2004-2006 The Trustees of Princeton University
+# All rights reserved.
+
+import os, sys
+import re
+import string
+import urllib
+import tempfile
+
+# try to load pycurl
+try:
+    import pycurl
+    PYCURL_LOADED= 1
+except:
+    PYCURL_LOADED= 0
+
+
+# if there is no cStringIO, fall back to the original
+try:
+    from cStringIO import StringIO
+except:
+    from StringIO import StringIO
+
+
+
+class BootServerRequest:
+
+    VERBOSE = 0
+
+    # all possible places to check the cdrom mount point.
+    # /mnt/cdrom is typically after the machine has come up,
+    # and /usr is when the boot cd is running
+    CDROM_MOUNT_PATH = ("/mnt/cdrom/","/usr/")
+
+    # this is the server to contact if we don't have a bootcd
+    DEFAULT_BOOT_SERVER = "boot.planet-lab.org"
+
+    BOOTCD_VERSION_FILE = "bootme/ID"
+    BOOTCD_SERVER_FILE = "bootme/BOOTSERVER"
+    BOOTCD_SERVER_CERT_DIR = "bootme/cacert"
+    CACERT_NAME = "cacert.pem"
+    
+    # location of file containing http/https proxy info, if needed
+    PROXY_FILE = '/etc/planetlab/http_proxy'
+
+    # location of curl executable, if pycurl isn't available
+    # and the DownloadFile method is called (a backup, only
+    # really needed for the boot cd environment where pycurl
+    # doesn't exist)
+    CURL_CMD = 'curl'
+
+    # in seconds, maximum time allowed for connect
+    DEFAULT_CURL_CONNECT_TIMEOUT = 30
+
+    # in seconds, maximum time allowed for any transfer
+    DEFAULT_CURL_MAX_TRANSFER_TIME = 3600
+
+    CURL_SSL_VERSION = 3
+
+    HTTP_SUCCESS = 200
+
+    # proxy variables
+    USE_PROXY = 0
+    PROXY = 0
+
+    # bootcd variables
+    HAS_BOOTCD = 0
+    BOOTCD_VERSION = ""
+    BOOTSERVER_CERTS= {}
+
+    def __init__(self, verbose=0):
+
+        self.VERBOSE= verbose
+            
+        # see if we have a boot cd mounted by checking for the version file
+        # if HAS_BOOTCD == 0 then either the machine doesn't have
+        # a boot cd, or something else is mounted
+        self.HAS_BOOTCD = 0
+
+        for path in self.CDROM_MOUNT_PATH:
+            self.Message( "Checking existance of boot cd on %s" % path )
+
+            os.system("/bin/mount %s > /dev/null 2>&1" % path )
+                
+            version_file= path + self.BOOTCD_VERSION_FILE
+            self.Message( "Looking for version file %s" % version_file )
+
+            if os.access(version_file, os.R_OK) == 0:
+                self.Message( "No boot cd found." );
+            else:
+                self.Message( "Found boot cd." )
+                self.HAS_BOOTCD=1
+                break
+
+
+        if self.HAS_BOOTCD:
+
+            # check the version of the boot cd, and locate the certs
+            self.Message( "Getting boot cd version." )
+        
+            versionRegExp= re.compile(r"PlanetLab BootCD v(\S+)")
+                
+            bootcd_version_f= file(version_file,"r")
+            line= string.strip(bootcd_version_f.readline())
+            bootcd_version_f.close()
+            
+            match= versionRegExp.findall(line)
+            if match:
+                (self.BOOTCD_VERSION)= match[0]
+            
+            # right now, all the versions of the bootcd are supported,
+            # so no need to check it
+            
+            # create a list of the servers we should
+            # attempt to contact, and the certs for each
+            server_list= path + self.BOOTCD_SERVER_FILE
+            self.Message( "Getting list of servers off of cd from %s." %
+                          server_list )
+            
+            bootservers_f= file(server_list,"r")
+            bootservers= bootservers_f.readlines()
+            bootservers_f.close()
+            
+            for bootserver in bootservers:
+                bootserver = string.strip(bootserver)
+                cacert_path= "%s/%s/%s/%s" % \
+                             (path,self.BOOTCD_SERVER_CERT_DIR,
+                              bootserver,self.CACERT_NAME)
+                if os.access(cacert_path, os.R_OK):
+                    self.BOOTSERVER_CERTS[bootserver]= cacert_path
+
+            self.Message( "Set of servers to contact: %s" %
+                          str(self.BOOTSERVER_CERTS) )
+        else:
+            self.Message( "Using default boot server address." )
+            self.BOOTSERVER_CERTS[self.DEFAULT_BOOT_SERVER]= ""
+
+
+    def CheckProxy( self ):
+        # see if we have any proxy info from the machine
+        self.USE_PROXY= 0
+        self.Message( "Checking existance of proxy config file..." )
+        
+        if os.access(self.PROXY_FILE, os.R_OK) and \
+               os.path.isfile(self.PROXY_FILE):
+            self.PROXY= string.strip(file(self.PROXY_FILE,'r').readline())
+            self.USE_PROXY= 1
+            self.Message( "Using proxy %s." % self.PROXY )
+        else:
+            self.Message( "Not using any proxy." )
+
+
+
+    def Message( self, Msg ):
+        if( self.VERBOSE ):
+            print( Msg )
+
+
+
+    def Error( self, Msg ):
+        sys.stderr.write( Msg + "\n" )
+
+
+
+    def Warning( self, Msg ):
+        self.Error(Msg)
+
+
+
+    def MakeRequest( self, PartialPath, GetVars,
+                     PostVars, DoSSL, DoCertCheck,
+                     ConnectTimeout= DEFAULT_CURL_CONNECT_TIMEOUT,
+                     MaxTransferTime= DEFAULT_CURL_MAX_TRANSFER_TIME,
+                     FormData= None):
+
+        if hasattr(tempfile, "NamedTemporaryFile"):
+            buffer = tempfile.NamedTemporaryFile()
+            buffer_name = buffer.name
+        else:
+            buffer_name = tempfile.mktemp("MakeRequest")
+            buffer = open(buffer_name, "w+")
+
+        ok = self.DownloadFile(PartialPath, GetVars, PostVars,
+                               DoSSL, DoCertCheck, buffer_name,
+                               ConnectTimeout,
+                               MaxTransferTime,
+                               FormData)
+
+        # check the code, return the string only if it was successful
+        if ok:
+            buffer.seek(0)
+            return buffer.read()
+        else:
+            return None
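+    # Illustrative use of MakeRequest (a sketch; the partial path below is
+    # a hypothetical example, not a path shipped with the bootmanager):
+    #   req = BootServerRequest(verbose=1)
+    #   body = req.MakeRequest( "boot/index.html", None, None, 1, 1 )
+    # which returns the response body as a string, or None on failure.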
+
+    def DownloadFile(self, PartialPath, GetVars, PostVars,
+                     DoSSL, DoCertCheck, DestFilePath,
+                     ConnectTimeout= DEFAULT_CURL_CONNECT_TIMEOUT,
+                     MaxTransferTime= DEFAULT_CURL_MAX_TRANSFER_TIME,
+                     FormData= None):
+
+        self.Message( "Attempting to retrieve %s" % PartialPath )
+
+        # we can't do ssl and check the cert if we don't have a bootcd
+        if DoSSL and DoCertCheck and not self.HAS_BOOTCD:
+            self.Error( "No boot cd exists (needed to use -c and -s).\n" )
+            return 0
+
+        if DoSSL and not PYCURL_LOADED:
+            self.Warning( "Using SSL without pycurl will by default " \
+                          "check at least standard certs." )
+
+        # ConnectTimeout has to be greater than 0
+        if ConnectTimeout <= 0:
+            self.Error( "Connect timeout must be greater than zero.\n" )
+            return 0
+
+
+        self.CheckProxy()
+
+        dopostdata= 0
+
+        # setup the post and get vars for the request
+        if PostVars:
+            dopostdata= 1
+            postdata = urllib.urlencode(PostVars)
+            self.Message( "Posting data:\n%s\n" % postdata )
+            
+        getstr= ""
+        if GetVars:
+            getstr= "?" + urllib.urlencode(GetVars)
+            self.Message( "Get data:\n%s\n" % getstr )
+
+        # now, attempt to make the request, starting at the first
+        # server in the list
+        
+        for server in self.BOOTSERVER_CERTS:
+            self.Message( "Contacting server %s." % server )
+                        
+            certpath = self.BOOTSERVER_CERTS[server]
+
+            
+            # output what we are going to be doing
+            self.Message( "Connect timeout is %s seconds" % \
+                          ConnectTimeout )
+
+            self.Message( "Max transfer time is %s seconds" % \
+                          MaxTransferTime )
+
+            if DoSSL:
+                url = "https://%s/%s%s" % (server,PartialPath,getstr)
+                
+                if DoCertCheck and PYCURL_LOADED:
+                    self.Message( "Using SSL version %d and verifying peer." %
+                             self.CURL_SSL_VERSION )
+                else:
+                    self.Message( "Using SSL version %d." %
+                             self.CURL_SSL_VERSION )
+            else:
+                url = "http://%s/%s%s" % (server,PartialPath,getstr)
+                
+            self.Message( "URL: %s" % url )
+            
+            # setup a new pycurl instance, or a curl command line string
+            # if we don't have pycurl
+            
+            if PYCURL_LOADED:
+                curl= pycurl.Curl()
+
+                # don't want curl sending any signals
+                curl.setopt(pycurl.NOSIGNAL, 1)
+            
+                curl.setopt(pycurl.CONNECTTIMEOUT, ConnectTimeout)
+                curl.setopt(pycurl.TIMEOUT, MaxTransferTime)
+
+                # do not follow location when attempting to download a file
+                curl.setopt(pycurl.FOLLOWLOCATION, 0)
+
+                if self.USE_PROXY:
+                    curl.setopt(pycurl.PROXY, self.PROXY )
+
+                if DoSSL:
+                    curl.setopt(pycurl.SSLVERSION, self.CURL_SSL_VERSION)
+                
+                    if DoCertCheck:
+                        curl.setopt(pycurl.CAINFO, certpath)
+                        curl.setopt(pycurl.SSL_VERIFYPEER, 2)
+                        
+                    else:
+                        curl.setopt(pycurl.SSL_VERIFYPEER, 0)
+                
+                if dopostdata:
+                    curl.setopt(pycurl.POSTFIELDS, postdata)
+
+                # setup multipart/form-data upload
+                if FormData:
+                    curl.setopt(pycurl.HTTPPOST, FormData)
+
+                curl.setopt(pycurl.URL, url)
+            else:
+
+                cmdline = "%s " \
+                          "--connect-timeout %d " \
+                          "--max-time %d " \
+                          "--header Pragma: " \
+                          "--output %s " \
+                          "--fail " % \
+                          (self.CURL_CMD, ConnectTimeout,
+                           MaxTransferTime, DestFilePath)
+
+                if dopostdata:
+                    cmdline = cmdline + "--data '" + postdata + "' "
+
+                if FormData:
+                    cmdline = cmdline + "".join(["--form '" + field + "' " for field in FormData])
+
+                if not self.VERBOSE:
+                    cmdline = cmdline + "--silent "
+                    
+                if self.USE_PROXY:
+                    cmdline = cmdline + "--proxy %s " % self.PROXY
+
+                if DoSSL:
+                    cmdline = cmdline + "--sslv%d " % self.CURL_SSL_VERSION
+
+                    if DoCertCheck:
+                        cmdline = cmdline + "--cacert %s " % certpath
+                 
+                cmdline = cmdline + url
+
+                self.Message( "curl command: %s" % cmdline )
+                
+                
+            if PYCURL_LOADED:
+                try:
+                    # setup the output file
+                    outfile = open(DestFilePath,"wb")
+                    
+                    self.Message( "Opened output file %s" % DestFilePath )
+                
+                    curl.setopt(pycurl.WRITEDATA, outfile)
+                
+                    self.Message( "Fetching..." )
+                    curl.perform()
+                    self.Message( "Done." )
+                
+                    http_result= curl.getinfo(pycurl.HTTP_CODE)
+                    curl.close()
+                
+                    outfile.close()
+                    self.Message( "Results saved in %s" % DestFilePath )
+
+                    # check the code, return 1 if successful
+                    if http_result == self.HTTP_SUCCESS:
+                        self.Message( "Successful!" )
+                        return 1
+                    else:
+                        self.Message( "Failure, resultant http code: %d" % \
+                                      http_result )
+
+                except pycurl.error, err:
+                    errno, errstr= err
+                    self.Error( "connect to %s failed; curl error %d: '%s'\n" %
+                       (server,errno,errstr) )
+        
+                if not outfile.closed:
+                    try:
+                        os.unlink(DestFilePath)
+                        outfile.close()
+                    except OSError:
+                        pass
+
+            else:
+                self.Message( "Fetching..." )
+                rc = os.system(cmdline)
+                self.Message( "Done." )
+                
+                if rc != 0:
+                    try:
+                        os.unlink( DestFilePath )
+                    except OSError:
+                        pass
+                    self.Message( "Failure, resultant curl code: %d" % rc )
+                    self.Message( "Removed %s" % DestFilePath )
+                else:
+                    self.Message( "Successful!" )
+                    return 1
+            
+        self.Error( "Unable to successfully contact any boot servers.\n" )
+        return 0
+
+
+
+
+def usage():
+    print(
+    """
+Usage: BootServerRequest.py [options] <partialpath>
+Options:
+ -c/--checkcert        Check SSL certs. Ignored if -s/--ssl missing.
+ -h/--help             This help text
+ -o/--output <file>    Write result to file
+ -s/--ssl              Make the request over HTTPS
+ -v                    Makes the operation more talkative
+""");  
+
+
+
+if __name__ == "__main__":
+    import getopt
+    
+    # check out our command line options
+    try:
+        opt_list, arg_list = getopt.getopt(sys.argv[1:],
+                                           "o:vhsc",
+                                           [ "output=", "verbose", \
+                                             "help","ssl","checkcert"])
+
+        ssl= 0
+        checkcert= 0
+        output_file= None
+        verbose= 0
+        
+        for opt, arg in opt_list:
+            if opt in ("-h","--help"):
+                usage()
+                sys.exit()
+            
+            if opt in ("-c","--checkcert"):
+                checkcert= 1
+            
+            if opt in ("-s","--ssl"):
+                ssl= 1
+
+            if opt in ("-o","--output"):
+                output_file= arg
+
+            if opt == "-v":
+                verbose= 1
+    
+        if len(arg_list) != 1:
+            raise Exception
+
+        partialpath= arg_list[0]
+        if string.lower(partialpath[:4]) == "http":
+            raise Exception
+
+    except:
+        usage()
+        sys.exit(2)
+
+    # got the command line args straightened out
+    requestor= BootServerRequest(verbose)
+        
+    if output_file:
+        requestor.DownloadFile( partialpath, None, None, ssl,
+                                checkcert, output_file)
+    else:
+        result= requestor.MakeRequest( partialpath, None, None, ssl, checkcert)
+        if result:
+            print result
+        else:
+            sys.exit(1)
diff --git a/source/COPYRIGHT b/source/COPYRIGHT
new file mode 100644 (file)
index 0000000..6bf1167
--- /dev/null
@@ -0,0 +1,55 @@
+The BootManager source code was initially developed by Intel
+Corporation and subsequently rewritten by Princeton University.  The
+copyright for the BootManager source code is as follows:
+
+Copyright (c) 2003 Intel Corporation
+All rights reserved.
+
+Copyright (c) 2004-2006 The Trustees of Princeton University
+All rights reserved.
+
+The License from both Intel and Princeton for this software is as
+follows:
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions
+are met: 
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+
+    * Redistributions in binary form must reproduce the above
+      copyright notice, this list of conditions and the following
+      disclaimer in the documentation and/or other materials provided
+      with the distribution.
+
+    * Neither the name of the copyright holder nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL PRINCETON
+UNIVERSITY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY
+WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE. 
+
+Intel Corporation places the following export restrictions on the
+software:
+
+EXPORT LAWS: THIS LICENSE ADDS NO RESTRICTIONS TO THE EXPORT LAWS OF
+YOUR JURISDICTION. It is licensee's responsibility to comply with any
+export regulations applicable in licensee's jurisdiction. Under
+CURRENT (May 2000) U.S. export regulations this software is eligible
+for export from the U.S. and can be downloaded by or otherwise
+exported or reexported worldwide EXCEPT to U.S. embargoed destinations
+which include Cuba, Iraq, Libya, North Korea, Iran, Syria, Sudan,
+Afghanistan and any other country to which the U.S. has embargoed
+goods and services.
+
diff --git a/source/Exceptions.py b/source/Exceptions.py
new file mode 100644 (file)
index 0000000..915f120
--- /dev/null
@@ -0,0 +1,15 @@
+#!/usr/bin/python2
+
+# Copyright (c) 2003 Intel Corporation
+# All rights reserved.
+#
+# Copyright (c) 2004-2006 The Trustees of Princeton University
+# All rights reserved.
+
+class BootManagerException(Exception):
+    def __init__( self, err ):
+        self.__fault= err
+
+    def __str__( self ):
+        return self.__fault
+    
diff --git a/source/ModelOptions.py b/source/ModelOptions.py
new file mode 100644 (file)
index 0000000..697b701
--- /dev/null
@@ -0,0 +1,42 @@
+#!/usr/bin/python2
+
+# Copyright (c) 2003 Intel Corporation
+# All rights reserved.
+#
+# Copyright (c) 2004-2006 The Trustees of Princeton University
+# All rights reserved.
+
+
+import string
+
+MINHW   = 0x001
+SMP     = 0x002
+X86_64  = 0x004
+INTEL   = 0x008
+AMD     = 0x010
+NUMA    = 0x020
+GEODE   = 0x040
+BADHD   = 0x080
+LAST    = 0x100
+
+modeloptions = {'smp':SMP,
+                'x64':X86_64,
+                'i64':X86_64|INTEL,
+                'a64':X86_64|AMD,
+                'i32':INTEL,
+                'a32':AMD,
+                'numa':NUMA,
+                'geode':GEODE,
+                'badhd':BADHD,
+                'minhw':MINHW}
+
+def Get(model):
+    modelinfo = string.split(model,'/')
+    options= 0
+    for mi in modelinfo:
+        info = string.strip(mi)
+        info = info.lower()
+        options = options | modeloptions.get(info,0)
+
+    return options
+
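+# Illustrative usage (a sketch; the model string below is hypothetical):
+#   Get("Dell 1650/smp/badhd") returns SMP | BADHD, since unrecognized
+#   tokens such as "Dell 1650" contribute nothing to the option mask.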
diff --git a/source/compatibility.py b/source/compatibility.py
new file mode 100644 (file)
index 0000000..ab2ab4a
--- /dev/null
@@ -0,0 +1,226 @@
+#!/usr/bin/python2
+
+# Copyright (c) 2003 Intel Corporation
+# All rights reserved.
+#
+# Copyright (c) 2004-2006 The Trustees of Princeton University
+# All rights reserved.
+
+
+"""
+Various functions that allow the boot manager to run on different
+generations of boot cds are included here.
+"""
+
+import string
+import os, sys
+
+from Exceptions import *
+import utils
+import BootServerRequest
+
+
+def setup_lvm_2x_cd( vars, log ):
+    """
+    make available a set of lvm utilities for 2.x cds that don't have them
+    on the cd.
+
+    Expect the following variables to be set:
+    TEMP_PATH                somewhere to store what we need to run
+    BOOT_CD_VERSION          A tuple of the current bootcd version
+    SUPPORT_FILE_DIR         directory on the boot servers containing
+                             scripts and support files
+    LVM_SETUP_2X_CD          indicates if lvm is downloaded and setup for 2.x cds
+    
+    Set the following variables upon successfully running:
+    LVM_SETUP_2X_CD          indicates if lvm is downloaded and setup for 2.x cds
+    """
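+    # Illustrative sketch of the expected inputs (values are examples only,
+    # mirroring the defaults from the configuration file):
+    #   vars = { "TEMP_PATH": "/mnt/tmp",
+    #            "BOOT_CD_VERSION": (2, 0),
+    #            "SUPPORT_FILE_DIR": "/boot" }
+    #   log  = any object exposing a write() method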
+    
+    # make sure we have the variables we need
+    try:
+        TEMP_PATH= vars["TEMP_PATH"]
+        if TEMP_PATH == "":
+            raise ValueError, "TEMP_PATH"
+
+        BOOT_CD_VERSION= vars["BOOT_CD_VERSION"]
+        if BOOT_CD_VERSION == "":
+            raise ValueError, "BOOT_CD_VERSION"
+
+        SUPPORT_FILE_DIR= vars["SUPPORT_FILE_DIR"]
+        if SUPPORT_FILE_DIR == None:
+            raise ValueError, "SUPPORT_FILE_DIR"
+        
+    except KeyError, var:
+        raise BootManagerException, "Missing variable in vars: %s\n" % var
+    except ValueError, var:
+        raise BootManagerException, "Variable in vars, shouldn't be: %s\n" % var
+
+
+    if BOOT_CD_VERSION[0] != 2:
+        log.write( "Only 2.x boot cds need lvm setup manually.\n" )
+        return 1
+    
+    LVM_SETUP_2X_CD= 0
+    if 'LVM_SETUP_2X_CD' in vars.keys():
+        LVM_SETUP_2X_CD= vars['LVM_SETUP_2X_CD']
+        
+    if LVM_SETUP_2X_CD:
+        log.write( "LVM already downloaded and setup\n" )
+        return 1
+
+    log.write( "Downloading additional libraries for lvm\n" )
+
+    bs_request= BootServerRequest.BootServerRequest()
+        
+    utils.makedirs(TEMP_PATH)
+
+    # download and extract support tarball for this step,
+    # which has everything we need to successfully run
+    step_support_file= "alpina-BootLVM.tar.gz"
+    source_file= "%s/%s" % (SUPPORT_FILE_DIR,step_support_file)
+    dest_file= "%s/%s" % (TEMP_PATH, step_support_file)
+
+    log.write( "Downloading support file for this step\n" )
+    result= bs_request.DownloadFile( source_file, None, None,
+                                     1, 1, dest_file )
+    if not result:
+        raise BootManagerException, "Download failed."
+
+    log.write( "Extracting support files\n" )
+    old_wd= os.getcwd()
+    utils.chdir( TEMP_PATH )
+    utils.sysexec( "tar -C / -xzf %s" % step_support_file, log )
+    utils.removefile( dest_file )
+    utils.chdir( old_wd )
+
+    utils.sysexec( "ldconfig", log )
+
+    # load lvm-mod
+    log.write( "Loading lvm module\n" )
+    utils.sysexec( "modprobe lvm-mod", log )
+
+    # take note that we have lvm setup
+    LVM_SETUP_2X_CD= 1
+    vars['LVM_SETUP_2X_CD']= LVM_SETUP_2X_CD
+
+    return 1
+
+
+
+def setup_partdisks_2x_cd( vars, log ):
+    """
+    download necessary files to handle partitioning disks on 2.x cds
+
+    Expect the following variables to be set:
+    TEMP_PATH                somewhere to store what we need to run
+    BOOT_CD_VERSION          A tuple of the current bootcd version
+    SUPPORT_FILE_DIR         directory on the boot servers containing
+                             scripts and support files
+    PARTDISKS_SETUP_2X_CD    indicates if the partitioning tools are downloaded and setup for 2.x cds
+    
+    Set the following variables upon successfully running:
+    PARTDISKS_SETUP_2X_CD    indicates if the partitioning tools are downloaded and setup for 2.x cds
+    """
+
+    # make sure we have the variables we need
+    try:
+        TEMP_PATH= vars["TEMP_PATH"]
+        if TEMP_PATH == "":
+            raise ValueError, "TEMP_PATH"
+
+        BOOT_CD_VERSION= vars["BOOT_CD_VERSION"]
+        if BOOT_CD_VERSION == "":
+            raise ValueError, "BOOT_CD_VERSION"
+
+        SUPPORT_FILE_DIR= vars["SUPPORT_FILE_DIR"]
+        if SUPPORT_FILE_DIR == None:
+            raise ValueError, "SUPPORT_FILE_DIR"
+        
+    except KeyError, var:
+        raise BootManagerException, "Missing variable in vars: %s\n" % var
+    except ValueError, var:
+        raise BootManagerException, "Variable in vars, shouldn't be: %s\n" % var
+
+
+    if BOOT_CD_VERSION[0] != 2:
+        log.write( "Only 2.x boot cds need partition disk tools setup manually.\n" )
+        return 1
+
+    PARTDISKS_SETUP_2X_CD= 0
+    if 'PARTDISKS_SETUP_2X_CD' in vars.keys():
+        PARTDISKS_SETUP_2X_CD= vars['PARTDISKS_SETUP_2X_CD']
+
+    if PARTDISKS_SETUP_2X_CD:
+        log.write( "Partition disk tools already downloaded and setup\n" )
+        return 1
+
+    log.write( "Downloading additional libraries for partitioning disks\n" )
+
+    bs_request= BootServerRequest.BootServerRequest()
+
+    # download and extract support tarball for this step,
+    # which has everything we need to successfully run
+    step_support_file= "alpina-PartDisks.tar.gz"
+    source_file= "%s/%s" % (SUPPORT_FILE_DIR,step_support_file)
+    dest_file= "%s/%s" % (TEMP_PATH, step_support_file)
+
+    log.write( "Downloading support file for this step\n" )
+    result= bs_request.DownloadFile( source_file, None, None,
+                                     1, 1, dest_file )
+    if not result:
+        raise BootManagerException, "Download failed."
+
+    log.write( "Extracting support files\n" )
+    old_wd= os.getcwd()
+    utils.chdir( TEMP_PATH )
+    utils.sysexec( "tar -xzf %s" % step_support_file, log )
+    utils.removefile( dest_file )
+    utils.chdir( old_wd )
+
+    # also included in the support package was a list of extra
+    # paths (lib-paths) for /etc/ld.so.conf.
+    # so add those, and rerun ldconfig
+    # so we can make our newly downloaded libraries available
+
+    ldconf_file= file("/etc/ld.so.conf","a+")
+    lib_paths_file= file( TEMP_PATH + "/lib-paths","r")
+
+    for line in lib_paths_file:
+        path= string.strip(line)
+        if path != "":
+            ldconf_file.write( "%s/%s\n" % (TEMP_PATH,path) )
+    ldconf_file.close()
+    lib_paths_file.close()
+
+    utils.sysexec( "ldconfig", log )
+
+    # update the PYTHONPATH to include the python modules in
+    # the support package
+    sys.path.append( TEMP_PATH + "/usr/lib/python2.2" )
+    sys.path.append( TEMP_PATH + "/usr/lib/python2.2/site-packages" )
+
+    # update the environment variable PATH to include
+    # TEMP_PATH/sbin and others there
+    new_paths= ('%s/sbin'% TEMP_PATH,
+                '%s/bin'% TEMP_PATH,
+                '%s/user/sbin'% TEMP_PATH,
+                '%s/user/bin'% TEMP_PATH)
+
+    old_path= os.environ['PATH']
+    os.environ['PATH']= old_path + ":" + string.join(new_paths,":")
+
+    # everything should be setup to import parted. this 
+    # import is just to make sure it'll work when this step
+    # is being run
+    log.write( "Verifying that parted can be imported\n" )
+    try:
+        import parted
+    except ImportError:
+        raise BootManagerException, "Unable to import parted."
+
+    # take note that we have part disks setup
+    PARTDISKS_SETUP_2X_CD= 1
+    vars['PARTDISKS_SETUP_2X_CD']= PARTDISKS_SETUP_2X_CD
+
+    return 1
+
diff --git a/source/configuration b/source/configuration
new file mode 100644 (file)
index 0000000..5684fcf
--- /dev/null
@@ -0,0 +1,77 @@
+# this file contains a list of variables
+# to import to the INSTALL_STORE before
+# any of the steps run.
+
+
+# the current version of the bootmanager
+VERSION=3.1.15
+
+
+# full url to which api server to contact
+BOOT_API_SERVER=https://www.planet-lab.org:443/PLCAPI/
+
+
+# path to store temporary files during the install,
+# do not include trailing slashes
+TEMP_PATH=/mnt/tmp
+
+
+# path to the system mount point
+SYSIMG_PATH=/mnt/tmp/sysimg
+
+
+# where the cacerts for the boot cd can be found
+# currently, this must start with /mnt/cdrom
+# which is hardcoded in the installer
+CACERT_PATH=/mnt/cdrom/bootme/cacert
+
+
+# the nonce the boot cd created, needed to authenticate
+# requests that are made to the boot server
+NONCE_FILE=/tmp/nonce
+
+
+# directory containing planetlab specific configuration
+# files, like the http_proxy file
+PLCONF_DIR=/etc/planetlab
+
+
+# directory on the boot server containing
+# support files and scripts
+SUPPORT_FILE_DIR=/boot
+
+
+# this sets the size of the root logical volume;
+# after the root and swap have been created, the remaining
+# space goes to the vserver partition
+ROOT_SIZE=3G
+
+
+# override the swap size
+SWAP_SIZE=1G
+
+
+# whether or not to skip hardware requirement check
+SKIP_HARDWARE_REQUIREMENT_CHECK=0
+
+
+# minimum amount of memory needed for installer, in kb
+MINIMUM_MEMORY=511000
+
+
+# minimum block disk size in GB to be added to lvm.
+# if any block devices are smaller than this, they are ignored.
+MINIMUM_DISK_SIZE=5
+
+
+# total minimum disk size in GB; if all usable disks are below this
+# size, the node cannot be installed
+TOTAL_MINIMUM_DISK_SIZE=50
+
+
+# set of languages for install (used in /etc/rpm/macros)
+INSTALL_LANGS=en_US
+
+
+# number of auth failures before starting debug mode
+NUM_AUTH_FAILURES_BEFORE_DEBUG=2
diff --git a/source/debug_files/debug_root_ssh_key b/source/debug_files/debug_root_ssh_key
new file mode 100644 (file)
index 0000000..0f4105e
--- /dev/null
@@ -0,0 +1 @@
+ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAs3jl1PRq97O4WKngafKUe4LTkQrKqgaHUj6sUKfC9KT40ek19jlzU2YWnuoaxEpSLks+Z0KPnSAIyZW5fnFYasIh9mrLSbY06d2Mor5919sCv9fIm/6QHq6gBiFjs50HITx53jWjeu/nmZeLOBsBtioLkNW2vBMQKHz6+q+wea2nh+YX3X5ZRpSp6znPR5fjaWzm0TEfA6oStUfsOIBds98XswghfT0GtWehG5FpPT/X9g7EObQKN/fzSSe1SdMSEMLPl+e0+KQ0+jB/pCULfSm9Qlw6I5cYQXwxKeT2tEPIcmLPe/U1hhoqGyaADo+a0OmCQ84yJ3obMNMWGH0uIQ== debug@planet-lab.org
diff --git a/source/debug_files/sshd_config_v2 b/source/debug_files/sshd_config_v2
new file mode 100644 (file)
index 0000000..76e0092
--- /dev/null
@@ -0,0 +1,93 @@
+# boot cd version 2.x sshd configuration file for debug mode
+
+#Port 22
+Protocol 2
+#ListenAddress 0.0.0.0
+#ListenAddress ::
+
+# HostKey for protocol version 1
+#HostKey /etc/ssh/ssh_host_key
+# HostKeys for protocol version 2
+#HostKey /etc/ssh/ssh_host_rsa_key
+#HostKey /etc/ssh/ssh_host_dsa_key
+
+# Lifetime and size of ephemeral version 1 server key
+#KeyRegenerationInterval 1h
+#ServerKeyBits 768
+
+# Logging
+#obsoletes QuietMode and FascistLogging
+#SyslogFacility AUTH
+SyslogFacility AUTHPRIV
+#LogLevel INFO
+
+# Authentication:
+
+#LoginGraceTime 2m
+#PermitRootLogin yes
+#StrictModes yes
+#MaxAuthTries 6
+
+#RSAAuthentication yes
+#PubkeyAuthentication yes
+#AuthorizedKeysFile    .ssh/authorized_keys
+
+# For this to work you will also need host keys in /etc/ssh/ssh_known_hosts
+#RhostsRSAAuthentication no
+# similar for protocol version 2
+#HostbasedAuthentication no
+# Change to yes if you don't trust ~/.ssh/known_hosts for
+# RhostsRSAAuthentication and HostbasedAuthentication
+#IgnoreUserKnownHosts no
+# Don't read the user's ~/.rhosts and ~/.shosts files
+#IgnoreRhosts yes
+
+# To disable tunneled clear text passwords, change to no here!
+#PermitEmptyPasswords no
+PasswordAuthentication no
+
+# Change to no to disable s/key passwords
+#ChallengeResponseAuthentication yes
+ChallengeResponseAuthentication no
+
+# Kerberos options
+#KerberosAuthentication no
+#KerberosOrLocalPasswd yes
+#KerberosTicketCleanup yes
+#KerberosGetAFSToken no
+
+# Set this to 'yes' to enable PAM authentication, account processing, 
+# and session processing. If this is enabled, PAM authentication will 
+# be allowed through the ChallengeResponseAuthentication mechanism. 
+# Depending on your PAM configuration, this may bypass the setting of 
+# PasswordAuthentication, PermitEmptyPasswords, and 
+# "PermitRootLogin without-password". If you just want the PAM account and 
+# session checks to run without PAM authentication, then enable this but set 
+# ChallengeResponseAuthentication=no
+PAMAuthenticationViaKbdInt no
+
+#AllowTcpForwarding yes
+#GatewayPorts no
+#X11Forwarding no
+X11Forwarding yes
+#X11DisplayOffset 10
+#X11UseLocalhost yes
+#PrintMotd yes
+#PrintLastLog yes
+#TCPKeepAlive yes
+#UseLogin no
+#UsePrivilegeSeparation yes
+#PermitUserEnvironment no
+#Compression yes
+#ClientAliveInterval 0
+#ClientAliveCountMax 3
+#UseDNS yes
+#PidFile /var/run/sshd.pid
+#MaxStartups 10
+#ShowPatchLevel no
+
+# no default banner path
+#Banner /some/path
+
+# override default of no subsystems
+Subsystem      sftp    /usr/libexec/openssh/sftp-server
diff --git a/source/debug_files/sshd_config_v3 b/source/debug_files/sshd_config_v3
new file mode 100644 (file)
index 0000000..2a8f428
--- /dev/null
@@ -0,0 +1,92 @@
+# boot cd version 3.x sshd configuration file for debug mode
+
+#Port 22
+Protocol 2
+#ListenAddress 0.0.0.0
+#ListenAddress ::
+
+# HostKey for protocol version 1
+#HostKey /etc/ssh/ssh_host_key
+# HostKeys for protocol version 2
+#HostKey /etc/ssh/ssh_host_rsa_key
+#HostKey /etc/ssh/ssh_host_dsa_key
+
+# Lifetime and size of ephemeral version 1 server key
+#KeyRegenerationInterval 1h
+#ServerKeyBits 768
+
+# Logging
+#obsoletes QuietMode and FascistLogging
+#SyslogFacility AUTH
+SyslogFacility AUTHPRIV
+#LogLevel INFO
+
+# Authentication:
+
+#LoginGraceTime 2m
+#PermitRootLogin yes
+#StrictModes yes
+#MaxAuthTries 6
+
+#RSAAuthentication yes
+#PubkeyAuthentication yes
+#AuthorizedKeysFile    .ssh/authorized_keys
+
+# For this to work you will also need host keys in /etc/ssh/ssh_known_hosts
+#RhostsRSAAuthentication no
+# similar for protocol version 2
+#HostbasedAuthentication no
+# Change to yes if you don't trust ~/.ssh/known_hosts for
+# RhostsRSAAuthentication and HostbasedAuthentication
+#IgnoreUserKnownHosts no
+# Don't read the user's ~/.rhosts and ~/.shosts files
+#IgnoreRhosts yes
+
+# To disable tunneled clear text passwords, change to no here!
+#PermitEmptyPasswords no
+PasswordAuthentication no
+
+# Change to no to disable s/key passwords
+#ChallengeResponseAuthentication yes
+ChallengeResponseAuthentication no
+
+# Kerberos options
+#KerberosAuthentication no
+#KerberosOrLocalPasswd yes
+#KerberosTicketCleanup yes
+#KerberosGetAFSToken no
+
+# Set this to 'yes' to enable PAM authentication, account processing, 
+# and session processing. If this is enabled, PAM authentication will 
+# be allowed through the ChallengeResponseAuthentication mechanism. 
+# Depending on your PAM configuration, this may bypass the setting of 
+# PasswordAuthentication, PermitEmptyPasswords, and 
+# "PermitRootLogin without-password". If you just want the PAM account and 
+# session checks to run without PAM authentication, then enable this but set 
+# ChallengeResponseAuthentication=no
+
+#AllowTcpForwarding yes
+#GatewayPorts no
+#X11Forwarding no
+X11Forwarding yes
+#X11DisplayOffset 10
+#X11UseLocalhost yes
+#PrintMotd yes
+#PrintLastLog yes
+#TCPKeepAlive yes
+#UseLogin no
+#UsePrivilegeSeparation yes
+#PermitUserEnvironment no
+#Compression yes
+#ClientAliveInterval 0
+#ClientAliveCountMax 3
+#UseDNS yes
+#PidFile /var/run/sshd.pid
+#MaxStartups 10
+#ShowPatchLevel no
+
+# no default banner path
+#Banner /some/path
+
+# override default of no subsystems
+Subsystem      sftp    /usr/libexec/openssh/sftp-server
diff --git a/source/merge_hw_tables.py b/source/merge_hw_tables.py
new file mode 100755 (executable)
index 0000000..036d0e6
--- /dev/null
@@ -0,0 +1,331 @@
+#!/usr/bin/python2
+
+# Copyright (c) 2003 Intel Corporation
+# All rights reserved.
+#
+# Copyright (c) 2004-2006 The Trustees of Princeton University
+# All rights reserved.
+
+
+"""
+The point of this small utility is to take a file in the format
+of /lib/modules/`uname -r`/modules.pcimap and /usr/share/hwdata/pcitable
+and output a condensed, more easily used format for module detection. This is
+done by first getting a list of all the built modules, then loading the
+pci ids for each of those modules from modules.pcimap, then finally merging
+in the contents of pcitable (for built modules only). The result should be
+a file with a pretty comprehensive mapping of pci ids to module names.
+
+The output is used by the PlanetLab boot cd (3.0+) and the pl_hwinit script
+to load all the applicable modules by scanning lspci output.
+
+
+
+Expected format of file modules.dep includes lines of:
+
+/full/module/path/mod.ko: <dependencies>
+
+Expected format of file modules.pcimap includes lines of:
+
+# pci_module vendor device subvendor subdevice class class_mask driver_data
+cciss 0x00000e11 0x0000b060 0x00000e11 0x00004070 0x00000000 0x00000000 0x0
+cciss 0x00000e11 0x0000b178 0x00000e11 0x00004070 0x00000000 0x00000000 0x0
+
+Expected format of file pcitable includes lines of:
+
+# ("%d\t%d\t%s\t"%s"\n", vendid, devid, moduleName, cardDescription)
+# or ("%d\t%d\t%d\t%d\t%s\t"%s"\n", vendid, devid, subvendid, subdevid,
+#     moduleName, cardDescription)
+0x0e11  0x0508  "tmspci"        "Compaq|Netelligent 4/16 Token Ring"
+0x1000  0x0407  0x8086  0x0532  "megaraid"      "Storage RAID Controller SRCU42X"
+
+Lines listing a module name of ignore or unknown from pcitable are skipped
+
+
+
+Output format, for each module that matches the above lines
+(ids are written as vendor:device:subvendor:subdevice, unpadded hex):
+cciss e11:b060:e11:4070 e11:b178:e11:4070
+
+"""
+
+import os, sys
+import string
+
+PCI_ANY = 0xffffffffL
+
+def merge_files(modules_dep_path, modules_pcimap_path, pcitable_path):
+    """
+    merge the three files as described above, and return a dictionary.
+    keys are module names, value is a list of pci ids for that module,
+    in the form "0e11:b178"
+    """
+
+    try:
+        modulesdep_file= file(modules_dep_path,"r")
+    except IOError:
+        sys.stderr.write( "Unable to open modules.dep: %s\n" %
+                          modules_dep_path )
+        return
+
+    try:
+        pcimap_file= file(modules_pcimap_path,"r")
+    except IOError:
+        sys.stderr.write( "Unable to open modules.pcimap: %s\n" %
+                          modules_pcimap_path )
+        return
+
+    try:
+        pcitable_file= file(pcitable_path,"r")
+    except IOError:
+        sys.stderr.write( "Unable to open pcitable: %s\n" %
+                          pcitable_path )
+        return
+
+    # associative array to store all matches of module -> ['vendor:device',..]
+    # entries
+    all_modules= {}
+    all_pci_ids= {}
+
+    # first step, create an associative array of all the built modules
+    for line in modulesdep_file:
+        parts= string.split(line,":")
+        if len(parts) < 2:
+            continue
+
+        full_mod_path= parts[0]
+        parts= string.split(full_mod_path,"/")
+        module= parts[len(parts)-1]
+        module_len= len(module)
+        if module[module_len-3:] == ".ko":
+            module= module[:-3]
+            all_modules[module]= []
+
+    modulesdep_file.close()
+
+    # now, parse the pcimap and add devices
+    line_num= 0
+    for line in pcimap_file:
+        line_num= line_num+1
+
+        # skip blank lines, or lines that begin with # (comments)
+        line= string.strip(line)
+        if len(line) == 0:
+            continue
+
+        if line[0] == "#":
+            continue
+
+        line_parts= string.split(line)
+        if line_parts is None or len(line_parts) != 8:
+            sys.stderr.write( "Skipping line %d in pcimap " \
+                              "(incorrect format %s)\n" % (line_num,line) )
+            continue
+
+        # first two parts are always vendor / device id
+        module= line_parts[0]
+
+        # XXX In kernel versions <2.6.14, mptscsih is the actual
+        # module that should be loaded instead of mptbase.
+        if module == "mptbase":
+            module= "mptscsih"
+
+        try:
+            vendor_id= long(line_parts[1],16)
+        except ValueError, e:
+            sys.stderr.write( "Skipping line %d in %s " \
+                              "(incorrect vendor id format %s)\n" % (line_num,modules_pcimap_path,line_parts[1]))
+            continue
+
+        try:
+            device_id= long(line_parts[2],16)
+        except ValueError, e:
+            sys.stderr.write( "Skipping line %d in %s " \
+                              "(incorrect device id format %s)\n" % (line_num,modules_pcimap_path,line_parts[2]))
+            continue
+
+        try:
+            subvendor_id= long(line_parts[3],16)
+        except ValueError, e:
+            sys.stderr.write( "Skipping line %d in %s " \
+                              "(incorrect subvendor id format %s)\n" % (line_num,modules_pcimap_path,line_parts[3]))
+            continue
+
+        try:
+            subdevice_id= long(line_parts[4],16)
+        except ValueError, e:
+            sys.stderr.write( "Skipping line %d in %s " \
+                              "(incorrect subdevice id format %s)\n" % (line_num,modules_pcimap_path,line_parts[4]))
+            continue
+
+        full_id= (vendor_id, device_id, subvendor_id, subdevice_id)
+        if not all_modules.has_key(module):
+            # normally shouldn't get here, as the list is
+            # prepopulated with all the built modules
+
+            # XXX we probably shouldn't be doing this at all
+            all_modules[module] = [full_id]
+        else:
+            all_modules[module].append(full_id)
+            
+        if all_pci_ids.has_key(full_id):
+            # conflict as there are multiple modules that support
+            # particular pci device
+            all_pci_ids[full_id].append(module)
+        else:
+            all_pci_ids[full_id]= [module]
+
+    pcimap_file.close()
+
+    # parse pcitable, add any more ids for the devices
+    # We make the (potentially risky) assumption that pcitable contains
+    # only unique (vendor,device,subvendor,subdevice) entries.
+    line_num= 0
+    for line in pcitable_file:
+        line_num= line_num+1
+
+        # skip blank lines, or lines that begin with # (comments)
+        line= string.strip(line)
+        if len(line) == 0:
+            continue
+
+        if line[0] == "#":
+            continue
+
+        line_parts= string.split(line)
+        if line_parts is None or len(line_parts) <= 2:
+            sys.stderr.write( "Skipping line %d in pcitable " \
+                              "(incorrect format 1)\n" % line_num )
+            continue
+
+        # vendor id is always the first field, device the second. also,
+        # strip off first two chars (the 0x)
+        try:
+            vendor_id= long(line_parts[0],16)
+        except ValueError, e:
+            sys.stderr.write( "Skipping vendor_id %s in %s on line %d\n" \
+                              % (line_parts[0],pcitable_path,line_num))
+            continue
+
+        try:
+            device_id= long(line_parts[1],16)
+        except ValueError, e:
+            sys.stderr.write( "Skipping device %s in %s on line %d\n" \
+                              % (line_parts[1],pcitable_path,line_num))
+            continue
+
+        # if the first char of the third field is a double
+        # quote, the third field is a module, else if the first
+        # char of the third field is a 0 (zero), the fifth field is
+        # the module name. it would be nice if there were an easy way
+        # to split a string on spaces while recognizing quoted strings,
+        # so they wouldn't be split up. that is the reason for this weird check
+        if line_parts[2][0] == '"':
+            module= line_parts[2]
+
+            subvendor_id=PCI_ANY
+            subdevice_id=PCI_ANY
+        elif line_parts[2][0] == '0':
+            try:
+                module= line_parts[4]
+            except IndexError, e:
+                sys.stderr.write( "Skipping line %d in pcitable " \
+                                  "(incorrect format 2)\n" % line_num )
+                continue
+            try:
+                subvendor_id= long(line_parts[2],16)
+            except ValueError, e:
+                sys.stderr.write( "Skipping line %d in pcitable " \
+                                  "(incorrect format 2a)\n" % line_num )
+                continue
+            
+            try:
+                subdevice_id= long(line_parts[3],16)
+            except ValueError, e:
+                sys.stderr.write( "Skipping line %d in pcitable " \
+                                  "(incorrect format 2b)\n" % line_num )
+                continue
+
+        else:
+            sys.stderr.write( "Skipping line %d in pcitable " \
+                              "(incorrect format 3)\n" % line_num )
+            continue
+
+        # remove the first and last char of module (quote marks)
+        module= module[1:]
+        module= module[:len(module)-1]
+
+        full_id= (vendor_id, device_id, subvendor_id, subdevice_id)
+
+        if not all_modules.has_key(module):
+            # Do not process any modules listed in pcitable for which
+            # we do not have a prebuilt module.
+            continue
+
+        if not full_id in all_modules[module]:
+            all_modules[module].append(full_id)
+
+        if all_pci_ids.has_key(full_id):
+            if not module in all_pci_ids[full_id]:
+                all_pci_ids[full_id].append(module)
+            
+            # check if there are duplicate mappings between modules
+            # and full_ids
+            if len(all_pci_ids[full_id])>1:
+                # collect the set of modules that are different than what
+                # is listed in the pcitables file
+                other_modules = []
+                for other_module in all_pci_ids[full_id]:
+                    if other_module != module:
+                        other_modules.append(other_module)
+
+                # remove full_id from the set of other modules in all_modules {}
+                for other_module in other_modules:
+                    all_modules[other_module].remove(full_id)
+
+                # ensure that there is only one full_id -> module 
+                all_pci_ids[full_id] = [module]
+
+        else:
+            all_pci_ids[full_id] = [module]
+                
+    pcitable_file.close()
+
+    return (all_pci_ids,all_modules)
+
+if __name__ == "__main__":
+    def usage():
+        print( "\nUsage:" )
+        print( "%s <modules.dep> <modules.pcimap> " \
+               "<pcitable> [<output>]" % sys.argv[0] )
+        print( "" )
+        
+    if len(sys.argv) < 4:
+        usage()
+        sys.exit(1)
+
+
+    if len(sys.argv) > 4:
+        output_file_name= sys.argv[4]
+        try:
+            output_file= file(output_file_name,"w")
+        except IOError:
+            sys.stderr.write( "Unable to open %s for writing.\n" % output_file_name )
+            sys.exit(1)
+    else:
+        output_file= sys.stdout
+
+
+    (all_pci_ids,all_modules)=merge_files( sys.argv[1],
+                                           sys.argv[2],
+                                           sys.argv[3] )
+    if all_modules is not None:
+        for module in all_modules.keys():
+            pci_ids = all_modules[module]
+            if len(pci_ids)>0:
+                output_file.write("%s" % module)
+                for pci_id in pci_ids:
+                    output_file.write(" %x:%x:%x:%x" % pci_id)
+                output_file.write(" \n")
+    else:
+        sys.stderr.write( "Unable to list modules.\n" )
+
+    output_file.close()
diff --git a/source/notify_messages.py b/source/notify_messages.py
new file mode 100644 (file)
index 0000000..6186fa0
--- /dev/null
@@ -0,0 +1,21 @@
+#!/usr/bin/python2
+
+# Copyright (c) 2003 Intel Corporation
+# All rights reserved.
+#
+# Copyright (c) 2004-2006 The Trustees of Princeton University
+# All rights reserved.
+
+"""
+this file contains the ids of messages that we can send the contacts
+at a site, through the BootNotifyOwners call
+"""
+
+MSG_INSTALL_FINISHED= "installfinished"
+MSG_INSUFFICIENT_DISK= "insufficientdisk"
+MSG_INSUFFICIENT_MEMORY= "insufficientmemory"
+MSG_NO_NODE_CONFIG_FILE= "noconfig"
+MSG_AUTH_FAIL= "authfail"
+MSG_NODE_NOT_INSTALLED= "notinstalled"
+MSG_HOSTNAME_NOT_RESOLVE= "hostnamenotresolve"
+MSG_NO_DETECTED_NETWORK= "nodetectednetwork"
diff --git a/source/steps/AuthenticateWithPLC.py b/source/steps/AuthenticateWithPLC.py
new file mode 100644 (file)
index 0000000..55511dc
--- /dev/null
@@ -0,0 +1,84 @@
+#!/usr/bin/python2
+
+# Copyright (c) 2003 Intel Corporation
+# All rights reserved.
+#
+# Copyright (c) 2004-2006 The Trustees of Princeton University
+# All rights reserved.
+
+
+import os
+
+from Exceptions import *
+import BootAPI
+
+
+AUTH_FAILURE_COUNT_FILE= "/tmp/authfailurecount"
+
+
+def Run( vars, log ):
+    """
+    Authenticate this node with PLC. This ensures that the node can operate
+    as normal, and that our management authority has authorized it.
+
+    For this, just call the PLC api function BootCheckAuthentication
+
+    Return 1 if authorized; raise a BootManagerException if not, or if
+    the call fails entirely.
+
+    If there are NUM_AUTH_FAILURES_BEFORE_DEBUG consecutive authentication
+    failures, put the node into debug mode and exit the bootmanager.
+
+    Expect the following variables from the store:
+    NUM_AUTH_FAILURES_BEFORE_DEBUG    How many failures before debug
+    """
+
+    log.write( "\n\nStep: Authenticating node with PLC.\n" )
+
+    # make sure we have the variables we need
+    try:
+        NUM_AUTH_FAILURES_BEFORE_DEBUG= int(vars["NUM_AUTH_FAILURES_BEFORE_DEBUG"])
+    except KeyError, var:
+        raise BootManagerException, "Missing variable in vars: %s\n" % var
+    except ValueError, var:
+        raise BootManagerException, "Variable in vars, shouldn't be: %s\n" % var
+
+    try:
+        authorized= BootAPI.call_api_function( vars, "BootCheckAuthentication", () )
+        if authorized == 1:
+            log.write( "Authentication successful.\n" )
+
+            try:
+                os.unlink( AUTH_FAILURE_COUNT_FILE )
+            except OSError, e:
+                pass
+            
+            return 1
+    except BootManagerException, e:
+        log.write( "Authentication failed: %s.\n" % e )
+
+    # increment auth failure
+    auth_failure_count= 0
+    try:
+        auth_failure_count= int(file(AUTH_FAILURE_COUNT_FILE,"r").read().strip())
+    except IOError:
+        pass
+    except ValueError:
+        pass
+
+    auth_failure_count += 1
+
+    try:
+        fail_file= file(AUTH_FAILURE_COUNT_FILE,"w")
+        fail_file.write( str(auth_failure_count) )
+        fail_file.close()
+    except IOError:
+        pass
+
+    if auth_failure_count >= NUM_AUTH_FAILURES_BEFORE_DEBUG:
+        log.write( "Maximum number of authentication failures reached.\n" )
+        log.write( "Canceling boot process and going into debug mode.\n" )
+
+    raise BootManagerException, "Unable to authenticate node."
+    
+
diff --git a/source/steps/ChainBootNode.py b/source/steps/ChainBootNode.py
new file mode 100644 (file)
index 0000000..c96ac38
--- /dev/null
@@ -0,0 +1,271 @@
+#!/usr/bin/python2
+
+# Copyright (c) 2003 Intel Corporation
+# All rights reserved.
+#
+# Copyright (c) 2004-2006 The Trustees of Princeton University
+# All rights reserved.
+
+
+import string
+import re
+import os
+
+import UpdateBootStateWithPLC
+from Exceptions import *
+import utils
+import compatibility
+import systeminfo
+import BootAPI
+import notify_messages
+
+import ModelOptions
+
+def Run( vars, log ):
+    """
+    Load the kernel off of the node's disk and boot into it.
+    This step assumes the disks are mounted on SYSIMG_PATH.
+    If successful, this function will not return. If it returns, no chain
+    booting has occurred.
+    
+    Expect the following variables:
+    BOOT_CD_VERSION       A tuple of the current bootcd version
+    SYSIMG_PATH           the path where the system image will be mounted
+                          (always starts with TEMP_PATH)
+    ROOT_MOUNTED          the node root file system is mounted
+    NODE_SESSION             the unique session val set when we requested
+                             the current boot state
+    PLCONF_DIR               The directory to store PL configuration files in
+    
+    Sets the following variables:
+    ROOT_MOUNTED          the node root file system is mounted
+    """
+
+    log.write( "\n\nStep: Chain booting node.\n" )
+
+    # make sure we have the variables we need
+    try:
+        BOOT_CD_VERSION= vars["BOOT_CD_VERSION"]
+        if BOOT_CD_VERSION == "":
+            raise ValueError, "BOOT_CD_VERSION"
+
+        SYSIMG_PATH= vars["SYSIMG_PATH"]
+        if SYSIMG_PATH == "":
+            raise ValueError, "SYSIMG_PATH"
+
+        PLCONF_DIR= vars["PLCONF_DIR"]
+        if PLCONF_DIR == "":
+            raise ValueError, "PLCONF_DIR"
+
+        # it's ok if this is blank
+        NODE_SESSION= vars["NODE_SESSION"]
+
+        NODE_MODEL_OPTIONS= vars["NODE_MODEL_OPTIONS"]
+
+        PARTITIONS= vars["PARTITIONS"]
+        if PARTITIONS == None:
+            raise ValueError, "PARTITIONS"
+
+    except KeyError, var:
+        raise BootManagerException, "Missing variable in vars: %s\n" % var
+    except ValueError, var:
+        raise BootManagerException, "Variable in vars, shouldn't be: %s\n" % var
+
+    ROOT_MOUNTED= 0
+    if 'ROOT_MOUNTED' in vars.keys():
+        ROOT_MOUNTED= vars['ROOT_MOUNTED']
+    
+    if ROOT_MOUNTED == 0:
+        log.write( "Mounting node partitions\n" )
+
+        # old cds need extra utilities to run lvm
+        if BOOT_CD_VERSION[0] == 2:
+            compatibility.setup_lvm_2x_cd( vars, log )
+            
+        # simply creating an instance of this class and listing the system
+        # block devices will make them show up so vgscan can find the planetlab
+        # volume group
+        systeminfo.get_block_device_list(vars, log)
+        
+        utils.sysexec( "vgscan", log )
+        utils.sysexec( "vgchange -ay planetlab", log )
+
+        utils.makedirs( SYSIMG_PATH )
+
+        cmd = "mount %s %s" % (PARTITIONS["root"],SYSIMG_PATH)
+        utils.sysexec( cmd, log )
+        cmd = "mount %s %s/vservers" % (PARTITIONS["vservers"],SYSIMG_PATH)
+        utils.sysexec( cmd, log )
+        cmd = "mount -t proc none %s/proc" % SYSIMG_PATH
+        utils.sysexec( cmd, log )
+
+        ROOT_MOUNTED= 1
+        vars['ROOT_MOUNTED']= 1
+        
+
+    log.write( "Running node update.\n" )
+    cmd = "chroot %s /usr/local/planetlab/bin/NodeUpdate.py start noreboot" \
+          % SYSIMG_PATH
+    utils.sysexec( cmd, log )
+
+    log.write( "Updating ssh public host key with PLC.\n" )
+    ssh_host_key= ""
+    try:
+        ssh_host_key_file= file("%s/etc/ssh/ssh_host_rsa_key.pub"%SYSIMG_PATH,"r")
+        ssh_host_key= ssh_host_key_file.read().strip()
+        ssh_host_key_file.close()
+        ssh_host_key_file= None
+    except IOError, e:
+        pass
+
+    # write out the session value /etc/planetlab/session
+    try:
+        session_file_path= "%s/%s/session" % (SYSIMG_PATH,PLCONF_DIR)
+        session_file= file( session_file_path, "w" )
+        session_file.write( str(NODE_SESSION) )
+        session_file.close()
+        session_file= None
+        log.write( "Updated /etc/planetlab/session\n" )
+    except IOError, e:
+        log.write( "Unable to write out /etc/planetlab/session, continuing anyway\n" )
+
+    update_vals= {}
+    update_vals['ssh_host_key']= ssh_host_key
+    BootAPI.call_api_function( vars, "BootUpdateNode", (update_vals,) )
+
+    # get the kernel version
+    option = ''
+    if NODE_MODEL_OPTIONS & ModelOptions.SMP:
+        option = 'smp'
+
+    log.write( "Copying kernel and initrd for booting.\n" )
+    utils.sysexec( "cp %s/boot/kernel-boot%s /tmp/kernel" % (SYSIMG_PATH,option), log )
+    utils.sysexec( "cp %s/boot/initrd-boot%s /tmp/initrd" % (SYSIMG_PATH,option), log )
+
+    log.write( "Unmounting disks.\n" )
+    try:
+        # backwards compat, though, we should never hit this case post PL 3.2
+        os.stat("%s/rcfs/taskclass"%SYSIMG_PATH)
+        utils.sysexec_noerr( "chroot %s umount /rcfs" % SYSIMG_PATH, log )
+    except OSError, e:
+        pass
+
+    utils.sysexec_noerr( "umount %s/proc" % SYSIMG_PATH, log )
+    utils.sysexec_noerr( "umount -r %s/vservers" % SYSIMG_PATH, log )
+    utils.sysexec_noerr( "umount -r %s" % SYSIMG_PATH, log )
+    utils.sysexec_noerr( "vgchange -an", log )
+
+    ROOT_MOUNTED= 0
+    vars['ROOT_MOUNTED']= 0
+
+    log.write( "Unloading modules and chain booting to new kernel.\n" )
+
+    # further use of log after Upload will only output to screen
+    log.Upload()
+
+    # regardless of whether kexec works or not, we need to stop trying to
+    # run anything
+    cancel_boot_flag= "/tmp/CANCEL_BOOT"
+    utils.sysexec( "touch %s" % cancel_boot_flag, log )
+
+    # on 2.x cds (2.4 kernel) for sure, we need to shutdown everything
+    # to get kexec to work correctly. Even on 3.x cds (2.6 kernel),
+    # there are a few buggy drivers that don't disable their hardware
+    # correctly unless they are first unloaded.
+    
+    utils.sysexec_noerr( "ifconfig eth0 down", log )
+
+    if BOOT_CD_VERSION[0] == 2:
+        utils.sysexec_noerr( "killall dhcpcd", log )
+    elif BOOT_CD_VERSION[0] == 3:
+        utils.sysexec_noerr( "killall dhclient", log )
+        
+    utils.sysexec_noerr( "umount -a -r -t ext2,ext3", log )
+    utils.sysexec_noerr( "modprobe -r lvm-mod", log )
+    
+    try:
+        modules= file("/tmp/loadedmodules","r")
+        
+        for line in modules:
+            module= string.strip(line)
+            if module != "":
+                log.write( "Unloading %s\n" % module )
+                utils.sysexec_noerr( "modprobe -r %s" % module, log )
+
+        modules.close()
+    except IOError:
+        log.write( "Couldn't read /tmp/loadedmodules, continuing.\n" )
+
+    try:
+        modules= file("/proc/modules", "r")
+
+        # Get usage count for USB
+        usb_usage = 0
+        for line in modules:
+            try:
+                # Module Size UsageCount UsedBy State LoadAddress
+                parts= string.split(line)
+
+                if parts[0] == "usb_storage":
+                    usb_usage += int(parts[2])
+            except IndexError, e:
+                log.write( "Couldn't parse /proc/modules, continuing.\n" )
+
+        modules.seek(0)
+
+        for line in modules:
+            try:
+                # Module Size UsageCount UsedBy State LoadAddress
+                parts= string.split(line)
+
+                # While we would like to remove all "unused" modules,
+                # you can't trust usage count, especially for things
+                # like network drivers or RAID array drivers. Just try
+                # and unload a few specific modules that we know cause
+                # problems during chain boot, such as USB host
+                # controller drivers (HCDs) (PL6577).
+                # if int(parts[2]) == 0:
+                if re.search('_hcd$', parts[0]):
+                    if usb_usage > 0:
+                        log.write( "NOT unloading %s since USB may be in use\n" % parts[0] )
+                    else:
+                        log.write( "Unloading %s\n" % parts[0] )
+                        utils.sysexec_noerr( "modprobe -r %s" % parts[0], log )
+            except IndexError, e:
+                log.write( "Couldn't parse /proc/modules, continuing.\n" )
+    except IOError:
+        log.write( "Couldn't read /proc/modules, continuing.\n" )
+
+
+    kargs = "root=%s ramdisk_size=8192" % PARTITIONS["mapper-root"]
+    if NODE_MODEL_OPTIONS & ModelOptions.SMP:
+        kargs = kargs + " " + "acpi=off"
+    try:
+        kargsfb = open("/kargs.txt","r")
+        moreargs = kargsfb.readline()
+        kargsfb.close()
+        moreargs = moreargs.strip()
+        log.write( 'Parsed in "%s" kexec args from /kargs.txt\n' % moreargs )
+        kargs = kargs + " " + moreargs
+    except IOError:
+        # /kargs.txt does not exist, which is fine. Just kexec with the
+        # default kargs built above (root= plus ramdisk_size=8192)
+        pass 
+
+    try:
+        utils.sysexec( 'kexec --force --initrd=/tmp/initrd ' \
+                       '--append="%s" /tmp/kernel' % kargs)
+    except BootManagerException, e:
+        # if kexec fails, we've shut the machine down to a point where nothing
+        # can run usefully anymore (network down, all modules unloaded, file
+        # systems unmounted). write out the error, and cancel the boot process
+
+        log.write( "\n\n" )
+        log.write( "-------------------------------------------------------\n" )
+        log.write( "kexec failed with the following error. Please report\n" )
+        log.write( "this problem to support@planet-lab.org.\n\n" )
+        log.write( str(e) + "\n\n" )
+        log.write( "The boot process has been canceled.\n" )
+        log.write( "-------------------------------------------------------\n\n" )
+
+    return
diff --git a/source/steps/CheckForNewDisks.py b/source/steps/CheckForNewDisks.py
new file mode 100644 (file)
index 0000000..0d17cff
--- /dev/null
@@ -0,0 +1,207 @@
+#!/usr/bin/python2
+
+# Copyright (c) 2003 Intel Corporation
+# All rights reserved.
+#
+# Copyright (c) 2004-2006 The Trustees of Princeton University
+# All rights reserved.
+
+import string
+
+import InstallPartitionDisks
+from Exceptions import *
+import systeminfo
+import compatibility
+import utils
+import os
+
+
+def Run( vars, log ):
+    """
+    Find any new large block devices we can add to the vservers volume group
+    
+    Expect the following variables to be set:
+    SYSIMG_PATH          the path where the system image will be mounted
+    BOOT_CD_VERSION          A tuple of the current bootcd version
+    MINIMUM_DISK_SIZE       any disks smaller than this size, in GB, are not used
+    
+    Set the following variables upon successfully running:
+    ROOT_MOUNTED             the node root file system is mounted
+    """
+
+    log.write( "\n\nStep: Checking for unused disks to add to LVM.\n" )
+
+    # make sure we have the variables we need
+    try:
+        BOOT_CD_VERSION= vars["BOOT_CD_VERSION"]
+        if BOOT_CD_VERSION == "":
+            raise ValueError, "BOOT_CD_VERSION"
+
+        SYSIMG_PATH= vars["SYSIMG_PATH"]
+        if SYSIMG_PATH == "":
+            raise ValueError, "SYSIMG_PATH"
+
+        MINIMUM_DISK_SIZE= int(vars["MINIMUM_DISK_SIZE"])
+
+        PARTITIONS= vars["PARTITIONS"]
+        if PARTITIONS == None:
+            raise ValueError, "PARTITIONS"
+        
+    except KeyError, var:
+        raise BootManagerException, "Missing variable in vars: %s\n" % var
+    except ValueError, var:
+        raise BootManagerException, "Variable in vars, shouldn't be: %s\n" % var
+
+    all_devices= systeminfo.get_block_device_list(vars, log)
+    
+    # find out if there are unused disks in all_devices that are greater
+    # than MINIMUM_DISK_SIZE and not already in the volume group
+
+    # old cds need extra utilities to run lvm
+    if BOOT_CD_VERSION[0] == 2:
+        compatibility.setup_lvm_2x_cd( vars, log )
+        
+    # will contain the new devices to add to the volume group
+    new_devices= []
+
+    # total amount of new space in gb
+    extended_gb_size= 0
+    
+    for device in all_devices.keys():
+
+        (major,minor,blocks,gb_size,readonly)= all_devices[device]
+
+        if device[:14] == "/dev/planetlab":
+            log.write( "Skipping device %s in volume group.\n" % device )
+            continue
+
+        if readonly:
+            log.write( "Skipping read only device %s\n" % device )
+            continue
+
+        if gb_size < MINIMUM_DISK_SIZE:
+            log.write( "Skipping too small device %s (%4.2f)\n" %
+                       (device,gb_size) )
+            continue
+
+        log.write( "Checking device %s to see if it is part " \
+                   "of the volume group.\n" % device )
+
+        # this is the lvm partition, if it exists on that device
+        lvm_partition= "%s1" % device
+        cmd = "pvdisplay %s | grep -q 'planetlab'" % lvm_partition
+        already_added= utils.sysexec_noerr(cmd, log)
+        
+        if already_added:
+            log.write( "It appears %s is part of the volume group, continuing.\n" %
+                       device )
+            continue
+
+        # just to be extra paranoid, ignore the device if it already has
+        # an lvm partition on it (new disks won't have this, and that is
+        # what this code is for, so it should be ok).
+        cmd = "sfdisk -l %s | grep -q 'Linux LVM'" % device 
+        has_lvm= utils.sysexec_noerr(cmd, log)
+        if has_lvm:
+            log.write( "It appears %s has lvm already setup on it.\n" % device)
+            paranoid = False
+            if paranoid:
+                log.write("Too paranoid to add %s to vservers lvm.\n" % device)
+                continue
+        
+        log.write( "Attempting to add %s to the volume group\n" % device )
+
+        if not InstallPartitionDisks.single_partition_device( device, vars, log ):
+            log.write( "Unable to partition %s, not using it.\n" % device )
+            continue
+
+        log.write( "Successfully initialized %s\n" % device )
+
+        part_path= InstallPartitionDisks.get_partition_path_from_device( device,
+                                                                         vars, log )
+        if not InstallPartitionDisks.create_lvm_physical_volume( part_path,
+                                                                 vars, log ):
+            log.write( "Unable to create lvm physical volume %s, not using it.\n" %
+                       part_path )
+            continue
+
+        log.write( "Adding %s to list of devices to add to " \
+                   "planetlab volume group.\n" % device )
+
+        extended_gb_size= extended_gb_size + gb_size
+        new_devices.append( part_path )
+        
+
+    if len(new_devices) > 0:
+
+        log.write( "Extending planetlab volume group.\n" )
+        
+        log.write( "Unmounting disks.\n" )
+        try:
+            # backwards compat, though, we should never hit this case post PL 3.2
+            os.stat("%s/rcfs/taskclass"%SYSIMG_PATH)
+            utils.sysexec_noerr( "chroot %s umount /rcfs" % SYSIMG_PATH, log )
+        except OSError, e:
+            pass
+
+        # umount in order to extend disk size
+        utils.sysexec_noerr( "umount %s/proc" % SYSIMG_PATH, log )
+        utils.sysexec_noerr( "umount %s/vservers" % SYSIMG_PATH, log )
+        utils.sysexec_noerr( "umount %s" % SYSIMG_PATH, log )
+        utils.sysexec( "vgchange -an", log )
+        
+        vars['ROOT_MOUNTED']= 0
+
+        while True:
+            cmd = "vgextend planetlab %s" % string.join(new_devices," ")
+            if not utils.sysexec_noerr( cmd, log ):
+                log.write( "Failed to add physical volumes %s to " \
+                           "volume group, continuing.\n" % string.join(new_devices," "))
+                res = 1
+                break
+            
+            # now, get the number of unused extents, and extend the vserver
+            # logical volume by that much.
+            remaining_extents= \
+               InstallPartitionDisks.get_remaining_extents_on_vg( vars, log )
+
+            log.write( "Extending vservers logical volume.\n" )
+            utils.sysexec( "vgchange -ay", log )
+            cmd = "lvextend -l +%s %s" % (remaining_extents, PARTITIONS["vservers"])
+            if not utils.sysexec_noerr(cmd, log):
+                log.write( "Failed to extend vservers logical volume, continuing\n" )
+                res = 1
+                break
+
+            log.write( "making the ext3 filesystem match new logical volume size.\n" )
+            if BOOT_CD_VERSION[0] == 2:
+                cmd = "resize2fs %s" % PARTITIONS["vservers"]
+                resize = utils.sysexec_noerr(cmd,log)
+            elif BOOT_CD_VERSION[0] == 3:
+                vars['ROOT_MOUNTED']= 1
+                cmd = "mount %s %s" % (PARTITIONS["root"],SYSIMG_PATH)
+                utils.sysexec_noerr( cmd, log )
+                cmd = "mount %s %s/vservers" % \
+                      (PARTITIONS["vservers"],SYSIMG_PATH)
+                utils.sysexec_noerr( cmd, log )
+                cmd = "ext2online %s/vservers" % SYSIMG_PATH
+                resize = utils.sysexec_noerr(cmd,log)
+                utils.sysexec_noerr( "umount %s/vservers" % SYSIMG_PATH, log )
+                utils.sysexec_noerr( "umount %s" % SYSIMG_PATH, log )
+                vars['ROOT_MOUNTED']= 0
+
+            utils.sysexec( "vgchange -an", log )
+
+            if not resize:
+                log.write( "Failed to resize vservers partition, continuing.\n" )
+                res = 1
+                break
+            else:
+                log.write( "Extended vservers partition by %4.2f GB\n" %
+                           extended_gb_size )
+                res = 1
+                break
+
+    else:
+        log.write( "No new disk devices to add to volume group.\n" )
+        res = 1
+
+    return res
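Purely illustrative (not in the patch): the per-device filter applied above, condensed into one predicate. It takes the (major, minor, blocks, gb_size, readonly) tuples returned by systeminfo.get_block_device_list() and mirrors the three skip conditions: devices already named under /dev/planetlab, read-only devices, and devices below MINIMUM_DISK_SIZE.

    def device_is_candidate(device, info, minimum_gb):
        # info is the (major, minor, blocks, gb_size, readonly) tuple
        (major, minor, blocks, gb_size, readonly)= info
        if device[:14] == "/dev/planetlab":
            return 0    # already carved up for the volume group
        if readonly:
            return 0
        if gb_size < minimum_gb:
            return 0
        return 1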
diff --git a/source/steps/CheckHardwareRequirements.py b/source/steps/CheckHardwareRequirements.py
new file mode 100644 (file)
index 0000000..7739f73
--- /dev/null
@@ -0,0 +1,258 @@
+#!/usr/bin/python2
+
+# Copyright (c) 2003 Intel Corporation
+# All rights reserved.
+#
+# Copyright (c) 2004-2006 The Trustees of Princeton University
+# All rights reserved.
+# expected /proc/partitions format
+
+import os
+import popen2
+import string
+
+import systeminfo
+from Exceptions import *
+import utils
+import notify_messages
+import BootAPI
+
+
+def Run( vars, log ):
+    """
+    Make sure the hardware we are running on is sufficient for
+    the PlanetLab OS to be installed on. In the process, identify
+    the list of block devices that may be used for a node installation,
+    and identify the cdrom device that we booted off of.
+
+    Return 1 if requirements are met, 0 if not. Raise
+    BootManagerException if any problems occur that prevent the requirements
+    from being checked.
+
+    Expect the following variables from the store:
+
+    MINIMUM_MEMORY          minimum amount of memory in kb required
+                            for install
+    NODE_ID                 the node_id from the database for this node
+    MINIMUM_DISK_SIZE       any disks smaller than this size, in GB, are not used
+    TOTAL_MINIMUM_DISK_SIZE total disk size in GB; if the total size of all
+                            usable disks is below this number, there isn't
+                            enough disk space for this node to be usable
+                            after install
+    SKIP_HARDWARE_REQUIREMENT_CHECK
+                            If set, don't check if minimum requirements are met
+    BOOT_CD_VERSION         A tuple of the current bootcd version
+
+    Sets the following variables:
+    INSTALL_BLOCK_DEVICES    list of block devices to install onto
+    """
+
+    log.write( "\n\nStep: Checking if hardware requirements met.\n" )        
+        
+    try:
+        MINIMUM_MEMORY= int(vars["MINIMUM_MEMORY"])
+        if MINIMUM_MEMORY == "":
+            raise ValueError, "MINIMUM_MEMORY"
+
+        NODE_ID= vars["NODE_ID"]
+        if NODE_ID == "":
+            raise ValueError("NODE_ID")
+
+        MINIMUM_DISK_SIZE= int(vars["MINIMUM_DISK_SIZE"])
+
+        TOTAL_MINIMUM_DISK_SIZE= \
+                   int(vars["TOTAL_MINIMUM_DISK_SIZE"])
+
+        SKIP_HARDWARE_REQUIREMENT_CHECK= \
+                   int(vars["SKIP_HARDWARE_REQUIREMENT_CHECK"])
+        
+        BOOT_CD_VERSION= vars["BOOT_CD_VERSION"]
+        if BOOT_CD_VERSION == "":
+            raise ValueError, "BOOT_CD_VERSION"
+
+    except KeyError, var:
+        raise BootManagerException, \
+              "Missing variable in install store: %s" % var
+    except ValueError, var:
+        raise BootManagerException, \
+              "Variable in install store blank, shouldn't be: %s" % var
+
+    # lets see if we have enough memory to run
+    log.write( "Checking for available memory.\n" )
+
+    total_mem= systeminfo.get_total_phsyical_mem(vars, log)
+    if total_mem is None:
+        raise BootManagerException, "Unable to read total physical memory"
+        
+    if total_mem < MINIMUM_MEMORY:
+        if not SKIP_HARDWARE_REQUIREMENT_CHECK:
+            log.write( "Insufficient memory to run node: %s kb\n" % total_mem )
+            log.write( "Required memory: %s kb\n" % MINIMUM_MEMORY )
+
+            include_pis= 0
+            include_techs= 1
+            include_support= 0
+            
+            sent= 0
+            try:
+                sent= BootAPI.call_api_function( vars, "BootNotifyOwners",
+                                         (notify_messages.MSG_INSUFFICIENT_MEMORY,
+                                          include_pis,
+                                          include_techs,
+                                          include_support) )
+            except BootManagerException, e:
+                log.write( "Call to BootNotifyOwners failed: %s.\n" % e )
+                
+            if sent == 0:
+                log.write( "Unable to notify site contacts of problem.\n" )
+            else:
+                log.write( "Notified contacts of problem.\n" )
+                
+            return 0
+        else:
+            log.write( "Memory requirements not met, but running anyway: %s kb\n"
+                       % total_mem )
+    else:
+        log.write( "Looks like we have enough memory: %s kb\n" % total_mem )
+
+
+
+    # get a list of block devices to attempt to install on
+    # (may include cdrom devices)
+    install_devices= systeminfo.get_block_device_list(vars, log)
+
+    # save the list of block devices in the log
+    log.write( "Detected block devices:\n" )
+    log.write( repr(install_devices) + "\n" )
+
+    if not install_devices or len(install_devices) == 0:
+        log.write( "No block devices detected.\n" )
+        
+        include_pis= 0
+        include_techs= 1
+        include_support= 0
+        
+        sent= 0
+        try:
+            sent= BootAPI.call_api_function( vars, "BootNotifyOwners",
+                                       (notify_messages.MSG_INSUFFICIENT_DISK,
+                                        include_pis,
+                                        include_techs,
+                                        include_support) )
+        except BootManagerException, e:
+            log.write( "Call to BootNotifyOwners failed: %s.\n" % e )
+            
+        if sent == 0:
+            log.write( "Unable to notify site contacts of problem.\n" )
+
+        return 0
+
+    # now, let's remove any block devices we know won't work (readonly, cdroms)
+    # or that could be other writable removable disks (usb keychains, zip
+    # disks, etc). there is no reliable test for the latter, so we simply
+    # skip any block device below a size threshold (set in the install store)
+
+    # also, keep track of the total size for all devices that appear usable
+    total_size= 0
+
+    for device in install_devices.keys():
+
+        (major,minor,blocks,gb_size,readonly)= install_devices[device]
+        
+        # if the device string starts with
+        # planetlab or dm- (device mapper), ignore it (could be old lvm setup)
+        if device[:14] == "/dev/planetlab" or device[:8] == "/dev/dm-":
+            del install_devices[device]
+            continue
+
+        if gb_size < MINIMUM_DISK_SIZE:
+            log.write( "Device is too small to use: %s \n(appears" \
+                           " to be %4.2f GB)\n" % (device,gb_size) )
+            try:
+                del install_devices[device]
+            except KeyError, e:
+                pass
+            continue
+
+        if readonly:
+            log.write( "Device is readonly, not using: %s\n" % device )
+            try:
+                del install_devices[device]
+            except KeyError, e:
+                pass
+            continue
+            
+        # add this device's size to the total amount of usable
+        # disk space we've found.
+        total_size= total_size + gb_size
+
+
+    if len(install_devices) == 0:
+        log.write( "No suitable block devices found for install.\n" )
+
+        include_pis= 0
+        include_techs= 1
+        include_support= 0
+        
+        sent= 0
+        try:
+            sent= BootAPI.call_api_function( vars, "BootNotifyOwners",
+                                       (notify_messages.MSG_INSUFFICIENT_DISK,
+                                        include_pis,
+                                        include_techs,
+                                        include_support) )
+        except BootManagerException, e:
+            log.write( "Call to BootNotifyOwners failed: %s.\n" % e )
+            
+        if sent == 0:
+            log.write( "Unable to notify site contacts of problem.\n" )
+
+        return 0
+
+
+    # show the devices we found that are usable
+    log.write( "Usable block devices:\n" )
+    log.write( repr(install_devices.keys()) + "\n" )
+
+    # save the list of devices for the following steps
+    vars["INSTALL_BLOCK_DEVICES"]= install_devices.keys()
+
+
+    # ensure the total disk size is large enough. if
+    # not, we need to email the tech contacts the problem, and
+    # put the node into debug mode.
+    if total_size < TOTAL_MINIMUM_DISK_SIZE:
+        if not SKIP_HARDWARE_REQUIREMENT_CHECK:
+            log.write( "The total usable disk size of all disks is " \
+                       "insufficient to be usable as a PlanetLab node.\n" )
+            include_pis= 0
+            include_techs= 1
+            include_support= 0
+            
+            sent= 0
+            try:
+                sent= BootAPI.call_api_function( vars, "BootNotifyOwners",
+                                            (notify_messages.MSG_INSUFFICIENT_DISK,
+                                             include_pis,
+                                             include_techs,
+                                             include_support) )
+            except BootManagerException, e:
+                log.write( "Call to BootNotifyOwners failed: %s.\n" % e )
+            
+            if sent == 0:
+                log.write( "Unable to notify site contacts of problem.\n" )
+
+            return 0
+        
+        else:
+            log.write( "The total usable disk size of all disks is " \
+                       "insufficient, but running anyway.\n" )
+            
+    log.write( "Total size for all usable block devices: %4.2f GB\n" % total_size )
+
+    # turn off UDMA for all block devices on 2.x cds (2.4 kernel)
+    if BOOT_CD_VERSION[0] == 2:
+        for device in install_devices:
+            log.write( "Disabling UDMA on %s\n" % device )
+            utils.sysexec_noerr( "/sbin/hdparm -d0 %s" % device, log )
+
+    return 1
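The step above repeats the same BootNotifyOwners call three times with identical include flags. Below is a hypothetical helper (notify_techs is not part of the patch) showing that pattern; it assumes the BootAPI and Exceptions modules from this source tree and a message constant from notify_messages, as used above.

    import BootAPI
    from Exceptions import *

    def notify_techs(vars, log, message):
        # notify only the technical contacts, as the step above does
        include_pis= 0
        include_techs= 1
        include_support= 0
        sent= 0
        try:
            sent= BootAPI.call_api_function( vars, "BootNotifyOwners",
                                             (message, include_pis,
                                              include_techs, include_support) )
        except BootManagerException, e:
            log.write( "Call to BootNotifyOwners failed: %s.\n" % e )
        if sent == 0:
            log.write( "Unable to notify site contacts of problem.\n" )
        return sent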
diff --git a/source/steps/ConfirmInstallWithUser.py b/source/steps/ConfirmInstallWithUser.py
new file mode 100644 (file)
index 0000000..7d2a6e3
--- /dev/null
@@ -0,0 +1,68 @@
+#!/usr/bin/python2
+
+# Copyright (c) 2003 Intel Corporation
+# All rights reserved.
+#
+# Copyright (c) 2004-2006 The Trustees of Princeton University
+# All rights reserved.
+
+from Exceptions import *
+
+welcome_message= \
+"""
+********************************************************************************
+*                                                                              *
+*                             Welcome to PlanetLab                             *
+*                             ~~~~~~~~~~~~~~~~~~~~                             *
+*                                                                              *
+* The PlanetLab boot CD allows you to automatically install this machine as a  *
+* node within the PlanetLab overlay network.                                   *
+*                                                                              *
+* PlanetLab is a global overlay network for developing and accessing new       *
+* network services. Our goal is to grow to 1000 geographically distributed     *
+* nodes, connected by a diverse collection of links. Toward this end, we are   *
+* putting PlanetLab nodes into edge sites, co-location and routing centers,    *
+* and homes (i.e., at the end of DSL lines and cable modems). PlanetLab is     *
+* designed to support both short-term experiments and long-running services.   *
+* Currently running services include network weather maps, network-embedded    *
+* storage, peer-to-peer networks, and content distribution networks.           *
+*                                                                              *
+* Information on joining PlanetLab available at planet-lab.org/consortium/     *
+*                                                                              *
+********************************************************************************
+
+WARNING : Installing PlanetLab will remove any existing operating system and 
+          data from this computer.
+"""
+
+
+def Run( vars, log ):
+    """
+    Ask the user if we really want to wipe this machine.
+
+    Return 1 if the user accepts, 0 if the user declines; raise
+    a BootManagerException if anything unexpected occurs.
+    """
+
+    log.write( "\n\nStep: Confirming install with user.\n" )
+    
+    try:
+        confirmation= ""
+        install= 0
+        print welcome_message
+        
+        while confirmation not in ("yes","no"):
+            confirmation= \
+                raw_input("Are you sure you wish to continue (yes/no):")
+        install= confirmation=="yes"
+    except EOFError, e:
+        pass
+    except KeyboardInterrupt, e:
+        pass
+    
+    if install:
+        log.write( "\nUser accepted install.\n" )
+    else:
+        log.write( "\nUser canceled install.\n" )
+        
+    return install
diff --git a/source/steps/GetAndUpdateNodeDetails.py b/source/steps/GetAndUpdateNodeDetails.py
new file mode 100644 (file)
index 0000000..3db047d
--- /dev/null
@@ -0,0 +1,131 @@
+#!/usr/bin/python2
+
+# Copyright (c) 2003 Intel Corporation
+# All rights reserved.
+#
+# Copyright (c) 2004-2006 The Trustees of Princeton University
+# All rights reserved.
+
+import string
+
+from Exceptions import *
+import BootAPI
+import ModelOptions
+
+def Run( vars, log ):
+    """
+
+    Contact PLC and get the attributes for this node. Also, parse in
+    options from the node model string.
+
+    Also, update any node network settings at PLC, minus the ip address,
+    so, upload the mac (if node_id was in conf file), gateway, network,
+    broadcast, netmask, dns1/2, and the hostname/domainname.
+
+    Expect the following keys to be set:
+    BOOT_CD_VERSION          A tuple of the current bootcd version
+    SKIP_HARDWARE_REQUIREMENT_CHECK     Whether or not we should skip hardware
+                                        requirement checks
+                                        
+    The following keys are set/updated:
+    WAS_NODE_ID_IN_CONF      Set to 1 if the node id was in the conf file
+    WAS_NODE_KEY_IN_CONF     Set to 1 if the node key was in the conf file
+    BOOT_STATE               The current node boot state
+    NODE_MODEL               The user specified model of this node
+    NODE_MODEL_OPTIONS       The options extracted from the user specified
+                             model of this node 
+    NETWORK_SETTINGS         A dictionary of the values of the network settings
+    SKIP_HARDWARE_REQUIREMENT_CHECK     Whether or not we should skip hardware
+                                        requirement checks
+    NODE_SESSION             The session value returned from BootGetNodeDetails
+    
+    Return 1 if able to contact PLC and get node info.
+    Raise a BootManagerException if anything fails.
+    """
+
+    log.write( "\n\nStep: Retrieving details of node from PLC.\n" )
+
+    # make sure we have the variables we need
+    try:
+        BOOT_CD_VERSION= vars["BOOT_CD_VERSION"]
+        if BOOT_CD_VERSION == "":
+            raise ValueError, "BOOT_CD_VERSION"
+
+        SKIP_HARDWARE_REQUIREMENT_CHECK= vars["SKIP_HARDWARE_REQUIREMENT_CHECK"]
+        if SKIP_HARDWARE_REQUIREMENT_CHECK == "":
+            raise ValueError, "SKIP_HARDWARE_REQUIREMENT_CHECK"
+
+        NETWORK_SETTINGS= vars["NETWORK_SETTINGS"]
+        if NETWORK_SETTINGS == "":
+            raise ValueError, "NETWORK_SETTINGS"
+
+        WAS_NODE_ID_IN_CONF= vars["WAS_NODE_ID_IN_CONF"]
+        if WAS_NODE_ID_IN_CONF == "":
+            raise ValueError, "WAS_NODE_ID_IN_CONF"
+
+        WAS_NODE_KEY_IN_CONF= vars["WAS_NODE_KEY_IN_CONF"]
+        if WAS_NODE_KEY_IN_CONF == "":
+            raise ValueError, "WAS_NODE_KEY_IN_CONF"
+
+    except KeyError, var:
+        raise BootManagerException, "Missing variable in vars: %s\n" % var
+    except ValueError, var:
+        raise BootManagerException, "Variable in vars, shouldn't be: %s\n" % var
+
+    details= BootAPI.call_api_function( vars, "BootGetNodeDetails", () )
+
+    vars['BOOT_STATE']= details['boot_state']
+    vars['NODE_MODEL']= string.strip(details['model'])
+    vars['NODE_SESSION']= details['session']
+    
+    log.write( "Successfully retrieved node record.\n" )
+    log.write( "Current boot state: %s\n" % vars['BOOT_STATE'] )
+    log.write( "Node make/model: %s\n" % vars['NODE_MODEL'] )
+    
+    # parse in the model options from the node_model string
+    model= vars['NODE_MODEL']
+    options= ModelOptions.Get(model)
+    vars['NODE_MODEL_OPTIONS']=options
+
+    # Check if we should skip hardware requirement check
+    if options & ModelOptions.MINHW:
+        vars['SKIP_HARDWARE_REQUIREMENT_CHECK']=1
+        log.write( "node model indicates override to hardware requirements.\n" )
+
+    # this contains all the node networks; for now, we are only concerned
+    # with the primary network
+    node_networks= details['networks']
+    got_primary= 0
+    for network in node_networks:
+        if network['is_primary'] == 1:
+            got_primary= 1
+            break
+
+    if not got_primary:
+        raise BootManagerException, "Node did not have a primary network."
+    
+    log.write( "Primary network as returned from PLC: %s\n" % str(network) )
+
+    # if we got this far, the ip on the floppy and the ip in plc match,
+    # make the rest of the PLC information match what's on the floppy
+    network['method']= NETWORK_SETTINGS['method']
+
+    # only nodes that have the node_id specified directly in the configuration
+    # file can change their mac address
+    if WAS_NODE_ID_IN_CONF == 1:
+        network['mac']= NETWORK_SETTINGS['mac']
+        
+    network['gateway']= NETWORK_SETTINGS['gateway']
+    network['network']= NETWORK_SETTINGS['network']
+    network['broadcast']= NETWORK_SETTINGS['broadcast']
+    network['netmask']= NETWORK_SETTINGS['netmask']
+    network['dns1']= NETWORK_SETTINGS['dns1']
+    network['dns2']= NETWORK_SETTINGS['dns2']
+    
+    log.write( "Updating network settings at PLC to match floppy " \
+               "(except for node ip).\n" )
+    update_vals= {}
+    update_vals['primary_network']= network
+    BootAPI.call_api_function( vars, "BootUpdateNode", (update_vals,) )
+    
+    return 1
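Illustrative only (not in the patch): how the bitmask returned by ModelOptions.Get() is consumed across this patch. The flag names are the ones referenced elsewhere in these steps (MINHW here, SMP in ChainBootNode, BADHD in InstallPartitionDisks); their numeric values live in source/ModelOptions.py.

    import ModelOptions

    def summarize_model_options(model):
        # map the model-string options onto the behaviours they trigger
        options= ModelOptions.Get(model)
        summary= []
        if options & ModelOptions.MINHW:
            summary.append("skip hardware requirement checks")
        if options & ModelOptions.SMP:
            summary.append("append acpi=off to the chain-boot kernel args")
        if options & ModelOptions.BADHD:
            summary.append("run mkfs with bad-block scanning (-c)")
        return summary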
diff --git a/source/steps/InitializeBootManager.py b/source/steps/InitializeBootManager.py
new file mode 100644 (file)
index 0000000..c916cfd
--- /dev/null
@@ -0,0 +1,214 @@
+#!/usr/bin/python2
+
+# Copyright (c) 2003 Intel Corporation
+# All rights reserved.
+#
+# Copyright (c) 2004-2006 The Trustees of Princeton University
+# All rights reserved.
+
+import os
+import xmlrpclib
+import socket
+import string
+
+from Exceptions import *
+import utils
+
+
+# locations of boot os version files
+BOOT_VERSION_2X_FILE='/usr/bootme/ID'
+BOOT_VERSION_3X_FILE='/pl_version'
+
+# minimum version of the boot os we need to run, as a (major,minor) tuple
+MINIMUM_BOOT_VERSION= (2,0)
+
+# minimum version of python required to run the boot manager
+MINIMUM_PYTHON_VERSION= (2,2,0)
+
+
+def Run( vars, log ):
+    """
+    Setup the boot manager so it can run, do any extra necessary
+    hardware setup (to fix old cd problems)
+
+    Sets the following variables:
+    PARTITIONS        A dictionary of generic partition types and their
+                      associated devices.
+    BOOT_CD_VERSION   A two number tuple of the boot cd version
+    """
+
+    log.write( "\n\nStep: Initializing the BootManager.\n" )
+
+    # define the basic partition paths
+    PARTITIONS= {}
+    PARTITIONS["root"]= "/dev/planetlab/root"
+    PARTITIONS["swap"]= "/dev/planetlab/swap"
+    PARTITIONS["vservers"]= "/dev/planetlab/vservers"
+    # Linux 2.6 mounts LVM with device mapper
+    PARTITIONS["mapper-root"]= "/dev/mapper/planetlab-root"
+    PARTITIONS["mapper-swap"]= "/dev/mapper/planetlab-swap"
+    PARTITIONS["mapper-vservers"]= "/dev/mapper/planetlab-vservers"
+    vars["PARTITIONS"]= PARTITIONS
+
+    log.write( "Opening connection to API server\n" )
+    try:
+        api_inst= xmlrpclib.Server( vars['BOOT_API_SERVER'], verbose=0 )
+    except KeyError, e:
+        raise BootManagerException, \
+              "configuration file does not specify API server URL"
+
+    vars['API_SERVER_INST']= api_inst
+
+    if not __check_boot_version( vars, log ):
+        raise BootManagerException, \
+              "Boot CD version insufficient to run the Boot Manager"
+    else:
+        log.write( "Running on boot cd version: %s\n" %
+                   str(vars['BOOT_CD_VERSION']) )
+
+    BOOT_CD_VERSION= vars['BOOT_CD_VERSION']
+    
+    # old cds need extra modules loaded for compaq smart array
+    if BOOT_CD_VERSION[0] == 2:
+
+        has_smartarray= utils.sysexec_noerr(
+            'lspci | egrep "0e11:b178|0e11:4070|0e11:4080|0e11:4082|0e11:4083"')
+        
+        if has_smartarray:
+            log.write( "Loading support for Compaq smart array\n" )
+            utils.sysexec_noerr( "modprobe cciss", log )
+            _create_cciss_dev_entries()
+            
+
+        has_fusion= utils.sysexec_noerr('lspci | egrep "1000:0030"')
+        
+        if has_fusion:
+            log.write( "Loading support for Fusion MPT SCSI controllers\n" )
+            utils.sysexec_noerr( "modprobe mptscsih", log )
+
+    # for anything that needs to know we are running under the boot cd and
+    # not the runtime os
+    os.environ['PL_BOOTCD']= "1"
+        
+    return 1
+
+
+
+def __check_boot_version( vars, log ):
+    """
+    identify which version of the boot os we are running on, and whether
+    or not we can run at all on the given version. later, this will be
+    used to identify extra packages to download to enable the boot manager
+    to run on any supported version.
+
+    2.x cds have the version file in /usr/bootme/ID, which looked like:
+    'PlanetLab BootCD v2.0.3'
+
+    3.x cds have the version file in /pl_version, which looks like:
+    'PlanetLab BootCD 3.0-beta0.3'
+
+    All current known version strings that we support:
+    PlanetLab BootCD 3.0
+    PlanetLab BootCD 3.0-beta0.1
+    PlanetLab BootCD 3.0-beta0.3
+    PlanetLab BootCD v2.0
+    PlanetLab BootCD v2.0.1
+    PlanetLab BootCD v2.0.2
+    PlanetLab BootCD v2.0.3
+
+    Returns 1 if the boot os version is identified and will work
+    to run the boot manager. Two class variables are set:
+    BOOT_OS_MAJOR_VERSION
+    BOOT_OS_MINOR_VERSION
+    version strings with three parts ignore the
+    middle number (so 2.0.3 is major 2, minor 3)
+
+    Returns 0 if the boot os is insufficient to run the boot manager
+    """
+
+    try:
+        # check for a 3.x version first
+        version_file= file(BOOT_VERSION_3X_FILE,'r')
+        full_version= string.strip(version_file.read())
+        version_file.close()
+
+        version_parts= string.split(full_version)
+        version= version_parts[-1]
+
+        version_numbers= string.split(version,".")
+        if len(version_numbers) == 2:
+            BOOT_OS_MAJOR_VERSION= int(version_numbers[0])
+            BOOT_OS_MINOR_VERSION= int(version_numbers[1])
+        else:
+            # for 3.x cds, if there are more than two parts
+            # separated by a ., its one of the beta cds.
+            # hardcode as a 3.0 cd
+            BOOT_OS_MAJOR_VERSION= 3
+            BOOT_OS_MINOR_VERSION= 0
+
+        vars['BOOT_CD_VERSION']= (BOOT_OS_MAJOR_VERSION,BOOT_OS_MINOR_VERSION)
+        
+        if (BOOT_OS_MAJOR_VERSION,BOOT_OS_MINOR_VERSION) >= \
+               MINIMUM_BOOT_VERSION:
+            return 1
+
+    except IOError, e:
+        pass
+    except IndexError, e:
+        pass
+    except TypeError, e:
+        pass
+
+
+    try:
+        # now check for a 2.x version
+        version_file= file(BOOT_VERSION_2X_FILE,'r')
+        full_version= string.strip(version_file.read())
+        version_file.close()
+
+        version_parts= string.split(full_version)
+        version= version_parts[-1]
+        if version[0] == 'v':
+            version= version[1:]
+
+        version_numbers= string.split(version,".")
+        if len(version_numbers) == 2:
+            BOOT_OS_MAJOR_VERSION= int(version_numbers[0])
+            BOOT_OS_MINOR_VERSION= int(version_numbers[1])
+        else:
+            BOOT_OS_MAJOR_VERSION= int(version_numbers[0])
+            BOOT_OS_MINOR_VERSION= int(version_numbers[2])
+
+        vars['BOOT_CD_VERSION']= (BOOT_OS_MAJOR_VERSION,BOOT_OS_MINOR_VERSION)
+
+        if (BOOT_OS_MAJOR_VERSION,BOOT_OS_MINOR_VERSION) >= \
+           MINIMUM_BOOT_VERSION:
+            return 1
+
+    except IOError, e:
+        pass
+    except IndexError, e:
+        pass
+    except TypeError, e:
+        pass
+
+
+    return 0
+
+
+
+def _create_cciss_dev_entries():
+    def mkccissnod(dev,node):
+        dev = dev + " b 104 %d" % (node)
+        cmd = "mknod /dev/cciss/%s" % dev
+        utils.sysexec_noerr(cmd)
+        node = node + 1
+        return node
+
+    node = 0
+    for i in range(0,16):
+        dev = "c0d%d" % i
+        node = mkccissnod(dev,node)
+        for j in range(1,16):
+            subdev = dev + "p%d" % j
+            node = mkccissnod(subdev,node)
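A standalone sketch (not part of the patch) of the version-string rules __check_boot_version() describes above, collapsed into one helper for the known version strings listed in its docstring: the last whitespace-separated token is the version, a leading "v" is dropped, two-part versions map directly to (major, minor), 3.x beta strings are pinned to (3, 0), and other three-part versions keep the first and last numbers (so "v2.0.3" becomes (2, 3)).

    import string

    def parse_bootcd_version(full_version):
        # e.g. "PlanetLab BootCD 3.0-beta0.3" -> (3, 0)
        #      "PlanetLab BootCD v2.0.3"      -> (2, 3)
        version= string.split(string.strip(full_version))[-1]
        if version[0] == 'v':
            version= version[1:]
        numbers= string.split(version, ".")
        if len(numbers) == 2:
            return (int(numbers[0]), int(numbers[1]))
        if numbers[0] == "3":
            return (3, 0)
        return (int(numbers[0]), int(numbers[2]))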
diff --git a/source/steps/InstallBootstrapRPM.py b/source/steps/InstallBootstrapRPM.py
new file mode 100644 (file)
index 0000000..5937f4d
--- /dev/null
@@ -0,0 +1,154 @@
+#!/usr/bin/python2
+
+# Copyright (c) 2003 Intel Corporation
+# All rights reserved.
+#
+# Copyright (c) 2004-2006 The Trustees of Princeton University
+# All rights reserved.
+# expected /proc/partitions format
+
+import os, sys, string
+import popen2
+import shutil
+
+from Exceptions import *
+import utils
+import BootServerRequest
+
+
+def Run( vars, log ):
+    """
+    Download enough files to run rpm and yum from a chroot in
+    the system image directory
+    
+    Expect the following variables from the store:
+    SYSIMG_PATH          the path where the system image will be mounted
+    PARTITIONS           dictionary of generic part. types (root/swap)
+                         and their associated devices.
+    SUPPORT_FILE_DIR     directory on the boot servers containing
+                         scripts and support files
+    NODE_ID              the id of this machine
+    
+    Sets the following variables:
+    TEMP_BOOTCD_PATH     where the boot cd is remounted in the temp
+                         path
+    ROOT_MOUNTED         set to 1 when the base logical volumes
+                         are mounted.
+    """
+
+    log.write( "\n\nStep: Install: Bootstrapping RPM.\n" )
+
+    # make sure we have the variables we need
+    try:
+        SYSIMG_PATH= vars["SYSIMG_PATH"]
+        if SYSIMG_PATH == "":
+            raise ValueError, "SYSIMG_PATH"
+
+        PARTITIONS= vars["PARTITIONS"]
+        if PARTITIONS == None:
+            raise ValueError, "PARTITIONS"
+
+        SUPPORT_FILE_DIR= vars["SUPPORT_FILE_DIR"]
+        if SUPPORT_FILE_DIR == None:
+            raise ValueError, "SUPPORT_FILE_DIR"
+
+        NODE_ID= vars["NODE_ID"]
+        if NODE_ID == "":
+            raise ValueError, "NODE_ID"
+
+    except KeyError, var:
+        raise BootManagerException, "Missing variable in vars: %s\n" % var
+    except ValueError, var:
+        raise BootManagerException, "Variable in vars, shouldn't be: %s\n" % var
+
+
+    try:
+        # make sure the required partitions exist
+        val= PARTITIONS["root"]
+        val= PARTITIONS["swap"]
+        val= PARTITIONS["vservers"]
+    except KeyError, part:
+        log.write( "Missing partition in PARTITIONS: %s\n" % part )
+        return 0   
+
+    bs_request= BootServerRequest.BootServerRequest()
+    
+    log.write( "turning on swap space\n" )
+    utils.sysexec( "swapon %s" % PARTITIONS["swap"], log )
+
+    # make sure the sysimg dir is present
+    utils.makedirs( SYSIMG_PATH )
+
+    log.write( "mounting root file system\n" )
+    utils.sysexec( "mount -t ext3 %s %s" % (PARTITIONS["root"],SYSIMG_PATH), log )
+
+    log.write( "mounting vserver partition in root file system\n" )
+    utils.makedirs( SYSIMG_PATH + "/vservers" )
+    utils.sysexec( "mount -t ext3 %s %s/vservers" % (PARTITIONS["vservers"],
+                                                     SYSIMG_PATH), log )
+
+    vars['ROOT_MOUNTED']= 1
+    
+
+    # download and extract support tarball for
+    # this step, which has everything
+    # we need to successfully run
+    for step_support_file in [ "PlanetLab-Bootstrap.tar.bz2",
+                               "alpina-BootstrapRPM.tar.bz2" ]: 
+        source_file= "%s/%s" % (SUPPORT_FILE_DIR,step_support_file)
+        dest_file= "%s/%s" % (SYSIMG_PATH, step_support_file)
+
+        # 30 is the connect timeout, 7200 is the max transfer time
+        # in seconds (2 hours)
+        log.write( "downloading %s\n" % step_support_file )
+        result= bs_request.DownloadFile( source_file, None, None,
+                                         1, 1, dest_file,
+                                         30, 7200)
+        if result:
+            # New bootstrap tarball contains everything necessary to
+            # boot, no need to bootstrap further.
+            vars['SKIP_INSTALL_BASE']= (step_support_file == "PlanetLab-Bootstrap.tar.bz2")
+            break
+
+    if not result:
+        raise BootManagerException, "Unable to download %s from server." % \
+              source_file
+
+    log.write( "extracting %s in %s\n" % (dest_file,SYSIMG_PATH) )
+    result= utils.sysexec( "tar -C %s -xpjf %s" % (SYSIMG_PATH,dest_file), log )
+    utils.removefile( dest_file )
+
+    # copy resolv.conf from the base system into our temp dir
+    # so DNS lookups work correctly while we are chrooted
+    log.write( "Copying resolv.conf to temp dir\n" )
+    utils.sysexec( "cp /etc/resolv.conf %s/etc/" % SYSIMG_PATH, log )
+
+    # Copy the boot server certificate(s) and GPG public key to
+    # /usr/boot in the temp dir.
+    log.write( "Copying boot server certificates and public key\n" )
+
+    if os.path.exists("/usr/boot"):
+        utils.makedirs(SYSIMG_PATH + "/usr")
+        shutil.copytree("/usr/boot", SYSIMG_PATH + "/usr/boot")
+    elif os.path.exists("/usr/bootme"):
+        utils.makedirs(SYSIMG_PATH + "/usr/boot")
+        boot_server = file("/usr/bootme/BOOTSERVER").readline().strip()
+        shutil.copy("/usr/bootme/cacert/" + boot_server + "/cacert.pem",
+                    SYSIMG_PATH + "/usr/boot/cacert.pem")
+        file(SYSIMG_PATH + "/usr/boot/boot_server", "w").write(boot_server)
+        shutil.copy("/usr/bootme/pubring.gpg", SYSIMG_PATH + "/usr/boot/pubring.gpg")
+        
+    # For backward compatibility
+    if os.path.exists("/usr/bootme"):
+        utils.makedirs(SYSIMG_PATH + "/mnt/cdrom")
+        shutil.copytree("/usr/bootme", SYSIMG_PATH + "/mnt/cdrom/bootme")
+
+    # Import the GPG key into the RPM database so that RPMS can be verified
+    utils.makedirs(SYSIMG_PATH + "/etc/pki/rpm-gpg")
+    utils.sysexec("gpg --homedir=/root --export --armor" \
+                  " --no-default-keyring --keyring %s/usr/boot/pubring.gpg" \
+                  " >%s/etc/pki/rpm-gpg/RPM-GPG-KEY-planetlab" % (SYSIMG_PATH, SYSIMG_PATH))
+    utils.sysexec("chroot %s rpm --import /etc/pki/rpm-gpg/RPM-GPG-KEY-planetlab" % \
+                  SYSIMG_PATH)
+
+    return 1
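Not part of the patch: the download-with-fallback pattern used above, factored into a hypothetical helper. bs_request is a BootServerRequest.BootServerRequest() instance, and the DownloadFile() arguments (30-second connect timeout, 7200-second transfer limit) are the ones used in the step.

    def download_first_available(bs_request, support_dir, dest_dir, names, log):
        # try each candidate tarball in order; return the first that downloads
        for name in names:
            source_file= "%s/%s" % (support_dir, name)
            dest_file= "%s/%s" % (dest_dir, name)
            log.write( "downloading %s\n" % name )
            if bs_request.DownloadFile( source_file, None, None,
                                        1, 1, dest_file, 30, 7200 ):
                return (name, dest_file)
        return (None, None)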
diff --git a/source/steps/InstallBuildVServer.py b/source/steps/InstallBuildVServer.py
new file mode 100644 (file)
index 0000000..169b0fb
--- /dev/null
@@ -0,0 +1,157 @@
+#!/usr/bin/python2
+
+# Copyright (c) 2003 Intel Corporation
+# All rights reserved.
+#
+# Copyright (c) 2004-2006 The Trustees of Princeton University
+# All rights reserved.
+# expected /proc/partitions format
+
+
+import os
+import string
+
+from Exceptions import *
+import utils
+
+
+# if this file is present in the vservers /etc directory,
+# the resolv.conf and hosts files will automatically be updated
+# by the bootmanager
+UPDATE_FILE_FLAG= "AUTO_UPDATE_NET_FILES"
+
+# the name of the vserver-reference directory
+VSERVER_REFERENCE_DIR_NAME='vserver-reference'
+
+
+def Run( vars, log ):
+    """
+    Setup directories for building vserver reference image.
+
+    Expect the following variables from the store:
+    SYSIMG_PATH        the path where the system image will be mounted
+                       (always starts with TEMP_PATH)
+    NETWORK_SETTINGS   A dictionary of the values from the network
+                       configuration file
+    
+    Sets the following variables:
+    None
+    
+    """
+
+    log.write( "\n\nStep: Install: Setting up VServer image.\n" )
+
+    # make sure we have the variables we need
+    try:
+        SYSIMG_PATH= vars["SYSIMG_PATH"]
+        if SYSIMG_PATH == "":
+            raise ValueError, "SYSIMG_PATH"
+
+        NETWORK_SETTINGS= vars["NETWORK_SETTINGS"]
+        if NETWORK_SETTINGS == "":
+            raise ValueError, "NETWORK_SETTINGS"
+
+    except KeyError, var:
+        raise BootManagerException, "Missing variable in vars: %s\n" % var
+    except ValueError, var:
+        raise BootManagerException, "Variable in vars, shouldn't be: %s\n" % var    
+
+    vserver_ref_dir= "/vservers/vserver-reference"        
+    full_vserver_ref_path= "%s/%s" % (SYSIMG_PATH,vserver_ref_dir)
+
+    utils.makedirs( full_vserver_ref_path )
+    utils.makedirs( "%s/etc" % full_vserver_ref_path )
+    
+    log.write( "Setting permissions on directories\n" )
+    utils.sysexec( "chmod 0000 %s/vservers/" % SYSIMG_PATH, log )
+
+    return 1
+
+
+
+def update_vserver_network_files( vserver_dir, vars, log ):
+    """
+    Update the /etc/resolv.conf and /etc/hosts files in the specified
+    vserver directory. If the files do not exist, write them out. If they
+    do exist, rewrite them with new values if the file UPDATE_FILE_FLAG
+    exists in /etc. If this is called with the vserver-reference directory,
+    always update the network config files and create the UPDATE_FILE_FLAG.
+
+    This is currently called when setting up the initial vserver reference,
+    and later when nodes boot to update existing vserver images.
+
+    Expect the following variables from the store:
+    SYSIMG_PATH        the path where the system image will be mounted
+                       (always starts with TEMP_PATH)
+    NETWORK_SETTINGS   A dictionary of the values from the network
+                       configuration file
+    """
+
+    try:
+        SYSIMG_PATH= vars["SYSIMG_PATH"]
+        if SYSIMG_PATH == "":
+            raise ValueError, "SYSIMG_PATH"
+
+        NETWORK_SETTINGS= vars["NETWORK_SETTINGS"]
+        if NETWORK_SETTINGS == "":
+            raise ValueError, "NETWORK_SETTINGS"
+
+    except KeyError, var:
+        raise BootManagerException, "Missing variable in vars: %s\n" % var
+    except ValueError, var:
+        raise BootManagerException, "Variable in vars, shouldn't be: %s\n" % var
+
+    try:
+        ip= NETWORK_SETTINGS['ip']
+        method= NETWORK_SETTINGS['method']
+        hostname= NETWORK_SETTINGS['hostname']
+        domainname= NETWORK_SETTINGS['domainname']
+    except KeyError, var:
+        raise BootManagerException, \
+              "Missing network value %s in var NETWORK_SETTINGS\n" % var
+
+    try:
+        os.listdir(vserver_dir)
+    except OSError:
+        log.write( "Directory %s does not exist to write network conf in.\n" %
+                   vserver_dir )
+        return
+
+    file_path= "%s/etc/%s" % (vserver_dir,UPDATE_FILE_FLAG)
+    update_files= 0
+    if os.access(file_path,os.F_OK):
+        update_files= 1
+
+        
+    if vserver_dir.find(VSERVER_REFERENCE_DIR_NAME) != -1:
+        log.write( "Forcing update on vserver-reference directory:\n%s\n" %
+                   vserver_dir )
+        utils.sysexec_noerr( "echo '%s' > %s/etc/%s" %
+                             (UPDATE_FILE_FLAG,vserver_dir,UPDATE_FILE_FLAG),
+                             log )
+        update_files= 1
+        
+
+    if update_files:
+        log.write( "Updating network files in %s.\n" % vserver_dir )
+        
+        file_path= "%s/etc/hosts" % vserver_dir
+        hosts_file= file(file_path, "w" )
+        hosts_file.write( "127.0.0.1       localhost\n" )
+        if method == "static":
+            hosts_file.write( "%s %s.%s\n" % (ip, hostname, domainname) )
+            hosts_file.close()
+            hosts_file= None
+
+
+        file_path= "%s/etc/resolv.conf" % vserver_dir
+        if method == "dhcp":
+            # copy the resolv.conf from the boot cd env.
+            utils.sysexec( "cp /etc/resolv.conf %s/etc" % vserver_dir, log )
+        else:
+            # copy the generated resolv.conf from the system image, since
+            # we generated it via static settings
+            utils.sysexec( "cp %s/etc/resolv.conf %s/etc" % \
+                           (SYSIMG_PATH,vserver_dir), log )
+            
+    return 
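Illustrative usage only (not in the patch): update_vserver_network_files() is also meant to be called at boot for existing vserver images; a hypothetical caller might walk the vservers directory like this. Each vserver is skipped unless its /etc contains the AUTO_UPDATE_NET_FILES flag file, while the vserver-reference directory is always rewritten.

    import os
    import InstallBuildVServer

    def refresh_all_vservers(vars, log):
        # refresh /etc/hosts and /etc/resolv.conf in every vserver image
        vservers_root= "%s/vservers" % vars["SYSIMG_PATH"]
        for name in os.listdir(vservers_root):
            vserver_dir= os.path.join(vservers_root, name)
            if os.path.isdir(vserver_dir):
                InstallBuildVServer.update_vserver_network_files(vserver_dir, vars, log)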
diff --git a/source/steps/InstallInit.py b/source/steps/InstallInit.py
new file mode 100644 (file)
index 0000000..70aa994
--- /dev/null
@@ -0,0 +1,85 @@
+#!/usr/bin/python2
+
+# Copyright (c) 2003 Intel Corporation
+# All rights reserved.
+#
+# Copyright (c) 2004-2006 The Trustees of Princeton University
+# All rights reserved.
+# expected /proc/partitions format
+
+import os, sys, shutil
+import string
+
+import utils
+
+
+def Run( vars, log ):
+    """
+    Setup the install environment:
+    - unmount anything in the temp/sysimg path (possibly left over from
+      previous aborted installs)
+    - create temp directories
+    
+    Expect the following variables from the store:
+    TEMP_PATH         the path to download and store temp files to
+    SYSIMG_DIR        the directory name of the system image
+                      contained in TEMP_PATH
+    PLCONF_DIR        The directory to store the configuration file in
+    
+    Sets the following variables:
+    SYSIMG_PATH       the directory where the system image will be mounted,
+                      (= TEMP_PATH/SYSIMG_DIR)
+    """
+
+    log.write( "\n\nStep: Install: Initializing.\n" )
+    
+    # make sure we have the variables we need
+    try:
+        TEMP_PATH= vars["TEMP_PATH"]
+        if TEMP_PATH == "":
+            raise ValueError("TEMP_PATH")
+
+        SYSIMG_PATH= vars["SYSIMG_PATH"]
+        if SYSIMG_PATH == "":
+            raise ValueError("SYSIMG_PATH")
+
+        PLCONF_DIR= vars["PLCONF_DIR"]
+        if PLCONF_DIR == "":
+            raise ValueError, "PLCONF_DIR"
+
+    except KeyError, var:
+        raise BootManagerException, "Missing variable in vars: %s\n" % var
+    except ValueError, var:
+        raise BootManagerException, "Variable in vars, shouldn't be: %s\n" % var
+
+    # if this is a fresh install, then nothing should be
+    # here, but we support restarted installs without rebooting
+    # so who knows what the current state is
+
+    log.write( "Unmounting any previous mounts\n" )
+
+    try:
+        # backwards compat, though, we should never hit this case post PL 3.2
+        os.stat("%s/rcfs/taskclass"%SYSIMG_PATH)
+        utils.sysexec_noerr( "chroot %s umount /rcfs" % SYSIMG_PATH, log )
+    except OSError, e:
+        pass
+
+    utils.sysexec_noerr( "umount %s/proc" % SYSIMG_PATH )
+    utils.sysexec_noerr( "umount %s/mnt/cdrom" % SYSIMG_PATH )
+    utils.sysexec_noerr( "umount %s/vservers" % SYSIMG_PATH )
+    utils.sysexec_noerr( "umount %s" % SYSIMG_PATH )
+    vars['ROOT_MOUNTED']= 0
+
+    log.write( "Removing any old files, directories\n" )
+    utils.removedir( TEMP_PATH )
+    
+    log.write( "Cleaning up any existing PlanetLab config files\n" )
+    utils.removedir( PLCONF_DIR )
+    
+    # create the temp path and sysimg path. since sysimg
+    # path is in temp path, both are created here
+    log.write( "Creating system image path\n" )
+    utils.makedirs( SYSIMG_PATH )
+
+    return 1
diff --git a/source/steps/InstallNodeInit.py b/source/steps/InstallNodeInit.py
new file mode 100644 (file)
index 0000000..126288c
--- /dev/null
@@ -0,0 +1,80 @@
+#!/usr/bin/python2
+
+# Copyright (c) 2003 Intel Corporation
+# All rights reserved.
+#
+# Copyright (c) 2004-2006 The Trustees of Princeton University
+# All rights reserved.
+# expected /proc/partitions format
+
+from Exceptions import *
+import utils
+import os
+
+
+def Run( vars, log ):
+    """
+    Initialize the node:
+    - runs planetlabconf
+
+    Expect the following variables from the store:
+    SYSIMG_PATH             the path where the system image will be mounted
+    (always starts with TEMP_PATH)
+    NODE_ID                  The db node_id for this machine
+    PLCONF_DIR               The directory to store the configuration file in
+    
+    Sets the following variables:
+    None
+    
+    """
+
+    log.write( "\n\nStep: Install: Final node initialization.\n" )
+
+    # make sure we have the variables we need
+    try:
+        SYSIMG_PATH= vars["SYSIMG_PATH"]
+        if SYSIMG_PATH == "":
+            raise ValueError, "SYSIMG_PATH"
+        
+        NODE_ID= vars["NODE_ID"]
+        if NODE_ID == "":
+            raise ValueError, "NODE_ID"
+
+        PLCONF_DIR= vars["PLCONF_DIR"]
+        if PLCONF_DIR == "":
+            raise ValueError, "PLCONF_DIR"
+        
+    except KeyError, var:
+        raise BootManagerException, "Missing variable in vars: %s\n" % var
+    except ValueError, var:
+        raise BootManagerException, "Variable in vars, shouldn't be: %s\n" % var
+
+
+    log.write( "Running PlanetLabConf to update any configuration files\n" )
+
+    # PlanetLabConf requires /etc/planetlab/node_id, which is normally
+    # maintained in ValidateNodeInstall, so write out the node_id here
+    # so PlanetLabConf can run.
+    try:
+        node_id_file_path= "%s/%s/node_id" % (SYSIMG_PATH,PLCONF_DIR)
+        node_id_file= file( node_id_file_path, "w" )
+        node_id_file.write( str(NODE_ID) )
+        node_id_file.close()
+        node_id_file= None
+    except IOError, e:
+        raise BootManagerException, \
+                  "Unable to write out /etc/planetlab/node_id for PlanetLabConf"
+
+    if not utils.sysexec( "chroot %s PlanetLabConf.py noscripts" %
+                          SYSIMG_PATH, log ):
+        log.write( "PlanetLabConf failed, install incomplete.\n" )
+        return 0
+                
+    services= [ "netfs", "rawdevices", "cpuspeed", "smartd" ]
+    for service in services:
+        if os.path.exists("%s/etc/init.d/%s" % (SYSIMG_PATH,service)):
+            log.write( "Disabling unneeded service: %s\n" % service )
+            utils.sysexec( "chroot %s chkconfig --level 12345 %s off" %
+                           (SYSIMG_PATH,service), log )
+            
+    return 1
diff --git a/source/steps/InstallPartitionDisks.py b/source/steps/InstallPartitionDisks.py
new file mode 100644 (file)
index 0000000..d7c6468
--- /dev/null
@@ -0,0 +1,317 @@
+#!/usr/bin/python2
+
+# Copyright (c) 2003 Intel Corporation
+# All rights reserved.
+#
+# Copyright (c) 2004-2006 The Trustees of Princeton University
+# All rights reserved.
+# expected /proc/partitions format
+
+import os, sys
+import string
+import popen2
+
+
+from Exceptions import *
+import utils
+import BootServerRequest
+import compatibility
+
+import ModelOptions
+
+def Run( vars, log ):
+    """
+    Setup the block devices for install, partition them w/ LVM
+    
+    Expect the following variables from the store:
+    INSTALL_BLOCK_DEVICES    list of block devices to install onto
+    TEMP_PATH                somewhere to store what we need to run
+    ROOT_SIZE                the size of the root logical volume
+    SWAP_SIZE                the size of the swap partition
+    BOOT_CD_VERSION          A tuple of the current bootcd version
+    """
+
+    log.write( "\n\nStep: Install: partitioning disks.\n" )
+        
+    # make sure we have the variables we need
+    try:
+        TEMP_PATH= vars["TEMP_PATH"]
+        if TEMP_PATH == "":
+            raise ValueError, "TEMP_PATH"
+
+        INSTALL_BLOCK_DEVICES= vars["INSTALL_BLOCK_DEVICES"]
+        if( len(INSTALL_BLOCK_DEVICES) == 0 ):
+            raise ValueError, "INSTALL_BLOCK_DEVICES is empty"
+
+        ROOT_SIZE= vars["ROOT_SIZE"]
+        if ROOT_SIZE == "" or ROOT_SIZE == 0:
+            raise ValueError, "ROOT_SIZE invalid"
+
+        SWAP_SIZE= vars["SWAP_SIZE"]
+        if SWAP_SIZE == "" or SWAP_SIZE == 0:
+            raise ValueError, "SWAP_SIZE invalid"
+
+        BOOT_CD_VERSION= vars["BOOT_CD_VERSION"]
+        if BOOT_CD_VERSION == "":
+            raise ValueError, "BOOT_CD_VERSION"
+
+        NODE_MODEL_OPTIONS= vars["NODE_MODEL_OPTIONS"]
+
+        PARTITIONS= vars["PARTITIONS"]
+        if PARTITIONS == None:
+            raise ValueError, "PARTITIONS"
+
+    except KeyError, var:
+        raise BootManagerException, "Missing variable in vars: %s\n" % var
+    except ValueError, var:
+        raise BootManagerException, "Variable in vars, shouldn't be: %s\n" % var
+
+    bs_request= BootServerRequest.BootServerRequest()
+
+    
+    # old cds need extra utilities to partition disks and setup lvm
+    if BOOT_CD_VERSION[0] == 2:
+        compatibility.setup_partdisks_2x_cd( vars, log )
+
+    # disable swap if its on
+    utils.sysexec_noerr( "swapoff %s" % PARTITIONS["swap"], log )
+
+    # shutdown and remove any lvm groups/volumes
+    utils.sysexec_noerr( "vgscan", log )
+    utils.sysexec_noerr( "vgchange -ay", log )        
+    utils.sysexec_noerr( "lvremove -f %s" % PARTITIONS["root"], log )
+    utils.sysexec_noerr( "lvremove -f %s" % PARTITIONS["swap"], log )
+    utils.sysexec_noerr( "lvremove -f %s" % PARTITIONS["vservers"], log )
+    utils.sysexec_noerr( "vgchange -an", log )
+    utils.sysexec_noerr( "vgremove planetlab", log )
+
+    log.write( "Running vgscan for devices\n" )
+    utils.sysexec_noerr( "vgscan", log )
+    
+    used_devices= []
+
+    for device in INSTALL_BLOCK_DEVICES:
+
+        if single_partition_device( device, vars, log ):
+            used_devices.append( device )
+            log.write( "Successfully initialized %s\n" % device )
+        else:
+            log.write( "Unable to partition %s, not using it.\n" % device )
+            continue
+
+    # list of devices to be used with vgcreate
+    vg_device_list= ""
+
+    # initialize the physical volumes
+    for device in used_devices:
+
+        part_path= get_partition_path_from_device( device, vars, log )
+        
+        if not create_lvm_physical_volume( part_path, vars, log ):
+            raise BootManagerException, "Could not create lvm physical volume " \
+                  "on partition %s" % part_path
+        
+        vg_device_list = vg_device_list + " " + part_path
+
+    # create an lvm volume group
+    utils.sysexec( "vgcreate -s32M planetlab %s" % vg_device_list, log)
+
+    # create swap logical volume
+    utils.sysexec( "lvcreate -L%s -nswap planetlab" % SWAP_SIZE, log )
+
+    # create root logical volume
+    utils.sysexec( "lvcreate -L%s -nroot planetlab" % ROOT_SIZE, log )
+
+    # create vservers logical volume with all remaining space
+    # first, we need to get the number of remaining extents we can use
+    remaining_extents= get_remaining_extents_on_vg( vars, log )
+    
+    utils.sysexec( "lvcreate -l%s -nvservers planetlab" % remaining_extents, log )
+
+    # activate volume group (should already be active)
+    #utils.sysexec( TEMP_PATH + "vgchange -ay planetlab", log )
+
+    # make swap
+    utils.sysexec( "mkswap %s" % PARTITIONS["swap"], log )
+
+    # check if badhd option has been set
+    option = ''
+    txt = ''
+    if NODE_MODEL_OPTIONS & ModelOptions.BADHD:
+        option = '-c'
+        txt = " with bad block search enabled, which may take a while"
+    
+    # filesystems partitions names and their corresponding
+    # reserved-blocks-percentages
+    filesystems = {"root":5,"vservers":0}
+
+    # make the file systems
+    for fs in filesystems.keys():
+        # get the reserved blocks percentage
+        rbp = filesystems[fs]
+        devname = PARTITIONS[fs]
+        log.write("formatting %s partition (%s)%s.\n" % (fs,devname,txt))
+        utils.sysexec( "mkfs.ext2 -q %s -m %d -j %s" % (option,rbp,devname), log )
+
+    # save the list of block devices in the log
+    log.write( "Block devices used (in lvm): %s\n" % repr(used_devices))
+
+    # list of block devices used may be updated
+    vars["INSTALL_BLOCK_DEVICES"]= used_devices
+
+    return 1
+
+
+
+def single_partition_device( device, vars, log ):
+    """
+    initialize a disk by removing the old partition tables,
+    and creating a new single partition that fills the disk.
+
+    return 1 if successful, 0 otherwise
+    """
+
+    BOOT_CD_VERSION= vars["BOOT_CD_VERSION"]
+    if BOOT_CD_VERSION[0] == 2:
+        compatibility.setup_partdisks_2x_cd( vars, log )
+
+    import parted
+    
+    lvm_flag= parted.partition_flag_get_by_name('lvm')
+    
+    try:
+        # wipe the old partition table
+        utils.sysexec( "dd if=/dev/zero of=%s bs=512 count=1" % device, log )
+
+        # get the device
+        dev= parted.PedDevice.get(device)
+
+        # 2.x cds have different libparted than 3.x cds, and they have
+        # different interfaces
+        if BOOT_CD_VERSION[0] == 3:
+
+            # create a new partition table
+            disk= dev.disk_new_fresh(parted.disk_type_get("msdos"))
+
+            # create one big partition on each block device
+            constraint= dev.constraint_any()
+
+            new_part= disk.partition_new(
+                parted.PARTITION_PRIMARY,
+                parted.file_system_type_get("ext2"),
+                0, 1 )
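+            # the (0, 1) start/end geometry is only a placeholder here;
+            # maximize_partition below grows the partition to fill the
+            # disk within the given constraint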
+
+            # make it an lvm partition
+            new_part.set_flag(lvm_flag,1)
+
+            # actually add the partition to the disk
+            disk.add_partition(new_part, constraint)
+
+            disk.maximize_partition(new_part,constraint)
+
+            disk.commit()
+            del disk
+        else:
+            # create a new partition table
+            dev.disk_create(parted.disk_type_get("msdos"))
+
+            # get the disk
+            disk= parted.PedDisk.open(dev)
+
+            # create one big partition on each block device
+            part= disk.next_partition()
+            while part:
+                if part.type == parted.PARTITION_FREESPACE:
+                    new_part= disk.partition_new(
+                        parted.PARTITION_PRIMARY,
+                        parted.file_system_type_get("ext2"),
+                        part.geom.start,
+                        part.geom.end )
+
+                    constraint = disk.constraint_any()
+
+                    # make it an lvm partition
+                    new_part.set_flag(lvm_flag,1)
+
+                    # actually add the partition to the disk
+                    disk.add_partition(new_part, constraint)
+
+                    break
+
+                part= disk.next_partition(part)
+
+            disk.write()
+            disk.close()
+            del disk
+            
+    except BootManagerException, e:
+        log.write( "BootManagerException while running: %s\n" % str(e) )
+        return 0
+
+    except parted.error, e:
+        log.write( "parted exception while running: %s\n" % str(e) )
+        return 0
+                   
+    return 1
+
+
+
+def create_lvm_physical_volume( part_path, vars, log ):
+    """
+    make the specified partition an lvm physical volume.
+
+    return 1 if successful, 0 otherwise.
+    """
+
+    try:
+        # again, wipe any old data, this time on the partition
+        utils.sysexec( "dd if=/dev/zero of=%s bs=512 count=1" % part_path, log )
+        ### patch Thierry Parmentelat, required on some hardware
+        import time
+        time.sleep(1)
+        utils.sysexec( "pvcreate -ffy %s" % part_path, log )
+    except BootManagerException, e:
+        log.write( "create_lvm_physical_volume failed.\n" )
+        return 0
+
+    return 1
+
+
+
+def get_partition_path_from_device( device, vars, log ):
+    """
+    given a device, return the path of the first partition on the device
+    """
+
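+    # examples of the mapping done below (hypothetical device names):
+    #   /dev/sda            -> /dev/sda1
+    #   /dev/cciss/c0d0     -> /dev/cciss/c0d0p1
+    #   /dev/.../disc       -> /dev/.../part1     (devfs-based 2.x cds)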
+    BOOT_CD_VERSION= vars["BOOT_CD_VERSION"]
+        
+    # those who wrote the cciss driver just had to make it difficult
+    if BOOT_CD_VERSION[0] == 3:
+        cciss_test= "/dev/cciss"
+        if device[:len(cciss_test)] == cciss_test:
+            part_path= device + "p1"
+        else:
+            part_path= device + "1"
+    else:
+        # since device ends in /disc, we need to make it end in
+        # /part1 to indicate the first partition (for devfs based 2.x cds)
+        dev_parts= string.split(device,"/")
+        dev_parts[len(dev_parts)-1]= "part1"
+        part_path= string.join(dev_parts,"/")
+
+    return part_path
+
+
+
+def get_remaining_extents_on_vg( vars, log ):
+    """
+    return the free amount of extents on the planetlab volume group
+    """
+    
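+    # "vgdisplay -c" prints one colon-separated record per volume group;
+    # with the lvm tools on these boot cds, field index 15 is the number
+    # of free physical extents, e.g. (hypothetical output):
+    #   planetlab:r/w:772:-1:0:2:2:-1:0:1:1:243793920:32768:7440:7296:144:<uuid>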
+    c_stdout, c_stdin = popen2.popen2("vgdisplay -c planetlab")
+    result= string.strip(c_stdout.readline())
+    c_stdout.close()
+    c_stdin.close()
+    remaining_extents= string.split(result,":")[15]
+    
+    return remaining_extents
diff --git a/source/steps/InstallUninitHardware.py b/source/steps/InstallUninitHardware.py
new file mode 100644 (file)
index 0000000..df99515
--- /dev/null
@@ -0,0 +1,76 @@
+#!/usr/bin/python2
+
+# Copyright (c) 2003 Intel Corporation
+# All rights reserved.
+#
+# Copyright (c) 2004-2006 The Trustees of Princeton University
+# All rights reserved.
+# expected /proc/partitions format
+
+import os
+
+from Exceptions import *
+import utils
+
+
+
+def Run( vars, log ):
+    """
+    Uninitializes hardware:
+    - unmount everything mounted during install, except the
+    /dev/planetlab/root and /dev/planetlab/vservers. This includes
+    calling swapoff for /dev/planetlab/swap.
+
+    Expect the following variables from the store:
+    TEMP_PATH         the path to download and store temp files to
+    SYSIMG_PATH       the path where the system image will be mounted
+                      (always starts with TEMP_PATH)
+    PARTITIONS        dictionary of generic part. types (root/swap)
+                      and their associated devices.
+
+    Sets the following variables:
+    None
+    
+    """
+
+    log.write( "\n\nStep: Install: Shutting down installer.\n" )
+
+    # make sure we have the variables we need
+    try:
+        TEMP_PATH= vars["TEMP_PATH"]
+        if TEMP_PATH == "":
+            raise ValueError, "TEMP_PATH"
+
+        SYSIMG_PATH= vars["SYSIMG_PATH"]
+        if SYSIMG_PATH == "":
+            raise ValueError, "SYSIMG_PATH"
+
+        PARTITIONS= vars["PARTITIONS"]
+        if PARTITIONS == None:
+            raise ValueError, "PARTITIONS"
+
+    except KeyError, var:
+        raise BootManagerException, "Missing variable in vars: %s\n" % var
+    except ValueError, var:
+        raise BootManagerException, "Variable in vars, shouldn't be: %s\n" % var
+
+
+    try:
+        # make sure the required partitions exist
+        val= PARTITIONS["root"]
+        val= PARTITIONS["swap"]
+        val= PARTITIONS["vservers"]
+    except KeyError, part:
+        raise BootManagerException, "Missing partition in PARTITIONS: %s\n" % part
+
+    try:
+        # backwards compat, though, we should never hit this case post PL 3.2
+        os.stat("%s/rcfs/taskclass"%SYSIMG_PATH)
+        utils.sysexec_noerr( "chroot %s umount /rcfs" % SYSIMG_PATH, log )
+    except OSError, e:
+        pass
+            
+    log.write( "Shutting down swap\n" )
+    utils.sysexec( "swapoff %s" % PARTITIONS["swap"], log )
+
+    return 1
diff --git a/source/steps/InstallWriteConfig.py b/source/steps/InstallWriteConfig.py
new file mode 100644 (file)
index 0000000..ee5a0cc
--- /dev/null
@@ -0,0 +1,163 @@
+#!/usr/bin/python2
+
+# Copyright (c) 2003 Intel Corporation
+# All rights reserved.
+#
+# Copyright (c) 2004-2006 The Trustees of Princeton University
+# All rights reserved.
+# expected /proc/partitions format
+
+import os, string
+
+from Exceptions import *
+import utils
+import systeminfo
+import BootAPI
+import ModelOptions
+
+def Run( vars, log ):
+
+    """
+    Writes out the following configuration files for the node:
+    /etc/fstab
+    /etc/resolv.conf (if applicable)
+    /etc/ssh/ssh_host_key
+    /etc/ssh/ssh_host_rsa_key
+    /etc/ssh/ssh_host_dsa_key
+    
+    Expect the following variables from the store:
+    VERSION                 the version of the install
+    SYSIMG_PATH             the path where the system image will be mounted
+                            (always starts with TEMP_PATH)
+    PARTITIONS              dictionary of generic part. types (root/swap)
+                            and their associated devices.
+    PLCONF_DIR              The directory to store the configuration file in
+    NETWORK_SETTINGS        A dictionary of the values from the network
+                            configuration file
+    BOOT_CD_VERSION          A tuple of the current bootcd version
+    
+    Sets the following variables:
+    None
+    
+    """
+
+    log.write( "\n\nStep: Install: Writing configuration files.\n" )
+    
+    # make sure we have the variables we need
+    try:
+        VERSION= vars["VERSION"]
+        if VERSION == "":
+            raise ValueError, "VERSION"
+
+        SYSIMG_PATH= vars["SYSIMG_PATH"]
+        if SYSIMG_PATH == "":
+            raise ValueError, "SYSIMG_PATH"
+
+        PARTITIONS= vars["PARTITIONS"]
+        if PARTITIONS == None:
+            raise ValueError, "PARTITIONS"
+
+        PLCONF_DIR= vars["PLCONF_DIR"]
+        if PLCONF_DIR == "":
+            raise ValueError, "PLCONF_DIR"
+
+        NETWORK_SETTINGS= vars["NETWORK_SETTINGS"]
+        if NETWORK_SETTINGS == "":
+            raise ValueError, "NETWORK_SETTINGS"
+
+        BOOT_CD_VERSION= vars["BOOT_CD_VERSION"]
+        if BOOT_CD_VERSION == "":
+            raise ValueError, "BOOT_CD_VERSION"
+
+    except KeyError, var:
+        raise BootManagerException, "Missing variable in vars: %s\n" % var
+    except ValueError, var:
+        raise BootManagerException, "Variable in vars, shouldn't be: %s\n" % var
+
+    log.write( "Setting local time to UTC\n" )
+    utils.sysexec( "chroot %s ln -sf /usr/share/zoneinfo/UTC /etc/localtime" % \
+                   SYSIMG_PATH, log )
+
+    log.write( "Enabling ntp at boot\n" )
+    utils.sysexec( "chroot %s chkconfig ntpd on" % SYSIMG_PATH, log )
+
+    log.write( "Creating system directory %s\n" % PLCONF_DIR )
+    if not utils.makedirs( "%s/%s" % (SYSIMG_PATH,PLCONF_DIR) ):
+        log.write( "Unable to create directory\n" )
+        return 0
+
+    log.write( "Writing system /etc/fstab\n" )
+    fstab= file( "%s/etc/fstab" % SYSIMG_PATH, "w" )
+    fstab.write( "%s           none        swap      sw        0 0\n" % \
+                 PARTITIONS["mapper-swap"] )
+    fstab.write( "%s           /           ext3      defaults  0 0\n" % \
+                 PARTITIONS["mapper-root"] )
+    fstab.write( "%s           /vservers   ext3      tagxid,defaults  0 0\n" % \
+                 PARTITIONS["mapper-vservers"] )
+    fstab.write( "none         /proc       proc      defaults  0 0\n" )
+    fstab.write( "none         /dev/shm    tmpfs     defaults  0 0\n" )
+    fstab.write( "none         /dev/pts    devpts    defaults  0 0\n" )
+    # no longer needed
+    # fstab.write( "none         /rcfs       rcfs      defaults  0 0\n" )
+    fstab.close()
+
+
+    log.write( "Writing system /etc/issue\n" )
+    issue= file( "%s/etc/issue" % SYSIMG_PATH, "w" )
+    issue.write( "PlanetLab Node: \\n\n" )
+    issue.write( "Kernel \\r on an \\m\n" )
+    issue.write( "http://www.planet-lab.org\n\n" )
+    issue.close()
+
+    log.write( "Setting up authentication (non-ssh)\n" )
+    utils.sysexec( "chroot %s authconfig --nostart --kickstart --enablemd5 " \
+                   "--enableshadow" % SYSIMG_PATH, log )
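+    # the sed below locks the root account: an empty password field in
+    # /etc/shadow ("root::") becomes "root:*:", which disables password
+    # logins for root without affecting key-based ssh access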
+    utils.sysexec( "sed -e 's/^root\:\:/root\:*\:/g' " \
+                   "%s/etc/shadow > %s/etc/shadow.new" % \
+                   (SYSIMG_PATH,SYSIMG_PATH), log )
+    utils.sysexec( "chroot %s mv " \
+                   "/etc/shadow.new /etc/shadow" % SYSIMG_PATH, log )
+    utils.sysexec( "chroot %s chmod 400 /etc/shadow" % SYSIMG_PATH, log )
+
+    # if we are set up with dhcp, copy the current /etc/resolv.conf into
+    # the system image so we can run programs inside that need network access
+    method= ""
+    try:
+        method= vars['NETWORK_SETTINGS']['method']
+    except:
+        pass
+    
+    if method == "dhcp":
+        utils.sysexec( "cp /etc/resolv.conf %s/etc/" % SYSIMG_PATH, log )
+
+    log.write( "Writing node install version\n" )
+    utils.makedirs( "%s/etc/planetlab" % SYSIMG_PATH )
+    ver= file( "%s/etc/planetlab/install_version" % SYSIMG_PATH, "w" )
+    ver.write( "%s\n" % VERSION )
+    ver.close()
+
+    log.write( "Creating ssh host keys\n" )
+    key_gen_prog= "/usr/bin/ssh-keygen"
+
+    log.write( "Generating SSH1 RSA host key:\n" )
+    key_file= "/etc/ssh/ssh_host_key"
+    utils.sysexec( "chroot %s %s -q -t rsa1 -f %s -C '' -N ''" %
+                   (SYSIMG_PATH,key_gen_prog,key_file), log )
+    utils.sysexec( "chmod 600 %s/%s" % (SYSIMG_PATH,key_file), log )
+    utils.sysexec( "chmod 644 %s/%s.pub" % (SYSIMG_PATH,key_file), log )
+    
+    log.write( "Generating SSH2 RSA host key:\n" )
+    key_file= "/etc/ssh/ssh_host_rsa_key"
+    utils.sysexec( "chroot %s %s -q -t rsa -f %s -C '' -N ''" %
+                   (SYSIMG_PATH,key_gen_prog,key_file), log )
+    utils.sysexec( "chmod 600 %s/%s" % (SYSIMG_PATH,key_file), log )
+    utils.sysexec( "chmod 644 %s/%s.pub" % (SYSIMG_PATH,key_file), log )
+    
+    log.write( "Generating SSH2 DSA host key:\n" )
+    key_file= "/etc/ssh/ssh_host_dsa_key"
+    utils.sysexec( "chroot %s %s -q -t dsa -f %s -C '' -N ''" %
+                   (SYSIMG_PATH,key_gen_prog,key_file), log )
+    utils.sysexec( "chmod 600 %s/%s" % (SYSIMG_PATH,key_file), log )
+    utils.sysexec( "chmod 644 %s/%s.pub" % (SYSIMG_PATH,key_file), log )
+
+    return 1
diff --git a/source/steps/MakeInitrd.py b/source/steps/MakeInitrd.py
new file mode 100644 (file)
index 0000000..4854da7
--- /dev/null
@@ -0,0 +1,57 @@
+#!/usr/bin/python2 -u
+
+# Copyright (c) 2003 Intel Corporation
+# All rights reserved.
+#
+# Copyright (c) 2004-2006 The Trustees of Princeton University
+# All rights reserved.
+
+import os, string
+
+from Exceptions import *
+import utils
+import systeminfo
+
+def Run( vars, log ):
+    """
+    Rebuilds the system initrd, on first install or in case the
+    hardware changed.
+    """
+
+    log.write( "\n\nStep: Rebuilding initrd\n" )
+    
+    # make sure we have the variables we need
+    try:
+        SYSIMG_PATH= vars["SYSIMG_PATH"]
+        if SYSIMG_PATH == "":
+            raise ValueError, "SYSIMG_PATH"
+
+        PARTITIONS= vars["PARTITIONS"]
+        if PARTITIONS == None:
+            raise ValueError, "PARTITIONS"
+
+    except KeyError, var:
+        raise BootManagerException, "Missing variable in vars: %s\n" % var
+    except ValueError, var:
+        raise BootManagerException, "Variable in vars, shouldn't be: %s\n" % var
+
+    # mkinitrd attempts to determine if the root fs is on a logical
+    # volume by checking whether the root device path contains /dev/mapper.
+    # The device node must exist for the check to succeed, but since it is
+    # usually managed by devfs or udev it is probably not present, so we
+    # just create a dummy file.
+    
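+    # PARTITIONS["mapper-root"] holds the device-mapper path of the root
+    # logical volume; for the planetlab volume group this would be
+    # something like /dev/mapper/planetlab-root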
+    fake_root_lvm= False
+    if not os.path.exists( "%s/%s" % (SYSIMG_PATH,PARTITIONS["mapper-root"]) ):
+        fake_root_lvm= True
+        utils.makedirs( "%s/dev/mapper" % SYSIMG_PATH )
+        rootdev= file( "%s/%s" % (SYSIMG_PATH,PARTITIONS["mapper-root"]), "w" )
+        rootdev.close()
+
+    initrd, kernel_version= systeminfo.getKernelVersion(vars,log)
+    utils.removefile( "%s/boot/%s" % (SYSIMG_PATH, initrd) )
+    utils.sysexec( "chroot %s mkinitrd -v /boot/initrd-%s.img %s" % \
+                   (SYSIMG_PATH, kernel_version, kernel_version), log )
+
+    if fake_root_lvm == True:
+        utils.removefile( "%s/%s" % (SYSIMG_PATH,PARTITIONS["mapper-root"]) )
diff --git a/source/steps/ReadNodeConfiguration.py b/source/steps/ReadNodeConfiguration.py
new file mode 100644 (file)
index 0000000..c04cadf
--- /dev/null
@@ -0,0 +1,650 @@
+#!/usr/bin/python2
+
+# Copyright (c) 2003 Intel Corporation
+# All rights reserved.
+#
+# Copyright (c) 2004-2006 The Trustees of Princeton University
+# All rights reserved.
+# expected /proc/partitions format
+
+import sys, os, traceback
+import string
+import socket
+import re
+
+import utils
+from Exceptions import *
+import BootServerRequest
+import BootAPI
+import notify_messages
+import UpdateBootStateWithPLC
+
+
+# two possible names of the configuration files
+NEW_CONF_FILE_NAME= "plnode.txt"
+OLD_CONF_FILE_NAME= "planet.cnf"
+
+
+def Run( vars, log ):   
+    """
+    read the machines node configuration file, which contains
+    the node key and the node_id for this machine.
+    
+    these files can exist in several different locations with
+    several different names. Below is the search order:
+
+    filename      floppy   flash    ramdisk    cd
+    plnode.txt      1        2      4 (/)      5 (/usr/boot), 6 (/usr)
+    planet.cnf      3
+
+    The locations will be searched in the above order, plnode.txt
+    will be checked first, then planet.cnf. Flash devices will only
+    be searched on 3.0 cds.
+
+    Because some of the earlier boot cds don't validate the
+    configuration file and some do (validation results in a file named
+    /tmp/planet-clean.cnf), we bypass this and mount and read the
+    conf file ourselves. If it doesn't exist, we cannot continue, and a
+    BootManagerException will be raised. If the configuration file is
+    found and read, return 1.
+
+    Expect the following variables from the store:
+    BOOT_CD_VERSION          A tuple of the current bootcd version
+    SUPPORT_FILE_DIR         directory on the boot servers containing
+                             scripts and support files
+    
+    Sets the following variables from the configuration file:
+    WAS_NODE_ID_IN_CONF         Set to 1 if the node id was in the conf file
+    WAS_NODE_KEY_IN_CONF        Set to 1 if the node key was in the conf file
+    NODE_ID                     The db node_id for this machine
+    NODE_KEY                    The key for this node
+    NETWORK_SETTINGS            A dictionary of the values from the network
+                                configuration file. keys set:
+                                   method
+                                   ip        
+                                   mac       
+                                   gateway   
+                                   network   
+                                   broadcast 
+                                   netmask   
+                                   dns1      
+                                   dns2      
+                                   hostname  
+                                   domainname
+
+    the mac address is read from the machine unless it exists in the
+    configuration file.
+    """
+
+    log.write( "\n\nStep: Reading node configuration file.\n" )
+
+
+    # make sure we have the variables we need
+    try:
+        BOOT_CD_VERSION= vars["BOOT_CD_VERSION"]
+        if BOOT_CD_VERSION == "":
+            raise ValueError, "BOOT_CD_VERSION"
+
+        SUPPORT_FILE_DIR= vars["SUPPORT_FILE_DIR"]
+        if SUPPORT_FILE_DIR == None:
+            raise ValueError, "SUPPORT_FILE_DIR"
+
+    except KeyError, var:
+        raise BootManagerException, "Missing variable in vars: %s\n" % var
+    except ValueError, var:
+        raise BootManagerException, "Variable in vars, shouldn't be: %s\n" % var
+
+
+    NETWORK_SETTINGS= {}
+    NETWORK_SETTINGS['method']= "dhcp"
+    NETWORK_SETTINGS['ip']= ""
+    NETWORK_SETTINGS['mac']= ""
+    NETWORK_SETTINGS['gateway']= ""
+    NETWORK_SETTINGS['network']= ""
+    NETWORK_SETTINGS['broadcast']= ""
+    NETWORK_SETTINGS['netmask']= ""
+    NETWORK_SETTINGS['dns1']= ""
+    NETWORK_SETTINGS['dns2']= ""
+    NETWORK_SETTINGS['hostname']= "localhost"
+    NETWORK_SETTINGS['domainname']= "localdomain"
+    vars['NETWORK_SETTINGS']= NETWORK_SETTINGS
+
+    vars['NODE_ID']= 0
+    vars['NODE_KEY']= ""
+
+    vars['WAS_NODE_ID_IN_CONF']= 0
+    vars['WAS_NODE_KEY_IN_CONF']= 0
+
+    # for any devices that need to be mounted to get the configuration
+    # file, mount them here.
+    mount_point= "/tmp/conffilemount"
+    utils.makedirs( mount_point )
+
+    old_conf_file_contents= None
+    conf_file_contents= None
+    
+    
+    # 1. check the regular floppy device
+    log.write( "Checking standard floppy disk for plnode.txt file.\n" )
+
+    log.write( "Mounting /dev/fd0 on %s\n" % mount_point )
+    utils.sysexec_noerr( "mount -o ro -t ext2,msdos /dev/fd0 %s " \
+                         % mount_point, log )
+
+    conf_file_path= "%s/%s" % (mount_point,NEW_CONF_FILE_NAME)
+    
+    log.write( "Checking for existence of %s\n" % conf_file_path )
+    if os.access( conf_file_path, os.R_OK ):
+        try:
+            conf_file= file(conf_file_path,"r")
+            conf_file_contents= conf_file.read()
+            conf_file.close()
+            log.write( "Read in contents of file %s\n" % conf_file_path )
+        except IOError, e:
+            log.write( "Unable to read file %s\n" % conf_file_path )
+            pass
+
+        utils.sysexec_noerr( "umount %s" % mount_point, log )
+        if __parse_configuration_file( vars, log, conf_file_contents):
+            return 1
+        else:
+            raise BootManagerException( "Found configuration file plnode.txt " \
+                                        "on floppy, but was unable to parse it." )
+
+
+    # try the old file name on the same device. it's actually number 3 in the
+    # search order, but do it now to save mounting/unmounting the disk twice.
+    # try to parse it later...
+    conf_file_path= "%s/%s" % (mount_point,OLD_CONF_FILE_NAME)
+
+    log.write( "Checking for existence of %s (used later)\n" % conf_file_path )
+    if os.access( conf_file_path, os.R_OK ):
+        try:
+            old_conf_file= file(conf_file_path,"r")
+            old_conf_file_contents= old_conf_file.read()
+            old_conf_file.close()
+            log.write( "Read in contents of file %s\n" % conf_file_path )
+        except IOError, e:
+            log.write( "Unable to read file %s\n" % conf_file_path )
+            pass
+        
+    utils.sysexec_noerr( "umount %s" % mount_point, log )
+
+
+
+    if BOOT_CD_VERSION[0] == 3:
+        # 2. check flash devices on 3.0 based cds
+        log.write( "Checking flash devices for plnode.txt file.\n" )
+
+        # this is done the same way the 3.0 cds do it, by attempting
+        # to mount any sd* devices that are removable
+        devices= os.listdir("/sys/block/")
+
+        for device in devices:
+            if device[:2] != "sd":
+                log.write( "Skipping non-scsi device %s\n" % device )
+                continue
+
+            # test removable
+            removable_file_path= "/sys/block/%s/removable" % device
+            try:
+                removable= int(file(removable_file_path,"r").read().strip())
+            except ValueError, e:
+                continue
+            except IOError, e:
+                continue
+
+            if not removable:
+                log.write( "Skipping non-removable device %s\n" % device )
+                continue
+
+            log.write( "Checking removable device %s\n" % device )
+
+            partitions= file("/proc/partitions", "r")
+            for line in partitions:
+                found_file= 0
+                parsed_file= 0
+                
+                if not re.search("%s[0-9]*$" % device, line):
+                    continue
+
+                try:
+                    # major minor  #blocks  name
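+                    # e.g. (hypothetical): "   8     1   524288 sda1"
+                    # so parts[3] would be "sda1"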
+                    parts= string.split(line)
+
+                    # ok, try to mount it and see if we have a conf file.
+                    full_device= "/dev/%s" % parts[3]
+                except IndexError, e:
+                    log.write( "Incorrect /proc/partitions line:\n%s\n" % line )
+                    continue
+
+                log.write( "Mounting %s on %s\n" % (full_device,mount_point) )
+                try:
+                    utils.sysexec( "mount -o ro -t ext2,msdos %s %s" \
+                                   % (full_device,mount_point), log )
+                except BootManagerException, e:
+                    log.write( "Unable to mount, trying next partition\n" )
+                    continue
+
+                conf_file_path= "%s/%s" % (mount_point,NEW_CONF_FILE_NAME)
+
+                log.write( "Checking for existence of %s\n" % conf_file_path )
+                if os.access( conf_file_path, os.R_OK ):
+                    try:
+                        conf_file= file(conf_file_path,"r")
+                        conf_file_contents= conf_file.read()
+                        conf_file.close()
+                        found_file= 1
+                        log.write( "Read in contents of file %s\n" % \
+                                   conf_file_path )
+
+                        if __parse_configuration_file( vars, log, \
+                                                       conf_file_contents):
+                            parsed_file= 1
+                    except IOError, e:
+                        log.write( "Unable to read file %s\n" % conf_file_path )
+
+                utils.sysexec_noerr( "umount %s" % mount_point, log )
+                if found_file:
+                    if parsed_file:
+                        return 1
+                    else:
+                        raise BootManagerException( \
+                            "Found configuration file plnode.txt " \
+                            "on floppy, but was unable to parse it.")
+
+
+            
+    # 3. check standard floppy disk for old file name planet.cnf
+    log.write( "Checking standard floppy disk for planet.cnf file " \
+               "(read earlier).\n" )
+
+    if old_conf_file_contents:
+        if __parse_configuration_file( vars, log, old_conf_file_contents):
+            return 1
+        else:
+            raise BootManagerException( "Found configuration file planet.cnf " \
+                                        "on floppy, but was unable to parse it." )
+
+
+    # 4. check for plnode.txt in / (ramdisk)
+    log.write( "Checking / (ramdisk) for plnode.txt file.\n" )
+    
+    conf_file_path= "/%s" % NEW_CONF_FILE_NAME
+
+    log.write( "Checking for existence of %s\n" % conf_file_path )
+    if os.access(conf_file_path,os.R_OK):
+        try:
+            conf_file= file(conf_file_path,"r")
+            conf_file_contents= conf_file.read()
+            conf_file.close()
+            log.write( "Read in contents of file %s\n" % conf_file_path )
+        except IOError, e:
+            log.write( "Unable to read file %s\n" % conf_file_path )
+            pass
+    
+        if __parse_configuration_file( vars, log, conf_file_contents):            
+            return 1
+        else:
+            raise BootManagerException( "Found configuration file plnode.txt " \
+                                        "in /, but was unable to parse it.")
+
+    
+    # 5. check for plnode.txt in /usr/boot (mounted already)
+    log.write( "Checking /usr/boot (cd) for plnode.txt file.\n" )
+    
+    conf_file_path= "/usr/boot/%s" % NEW_CONF_FILE_NAME
+
+    log.write( "Checking for existence of %s\n" % conf_file_path )
+    if os.access(conf_file_path,os.R_OK):
+        try:
+            conf_file= file(conf_file_path,"r")
+            conf_file_contents= conf_file.read()
+            conf_file.close()
+            log.write( "Read in contents of file %s\n" % conf_file_path )
+        except IOError, e:
+            log.write( "Unable to read file %s\n" % conf_file_path )
+            pass
+    
+        if __parse_configuration_file( vars, log, conf_file_contents):            
+            return 1
+        else:
+            raise BootManagerException( "Found configuration file plnode.txt " \
+                                        "in /usr/boot, but was unable to parse it.")
+
+
+
+    # 6. check for plnode.txt in /usr (mounted already)
+    log.write( "Checking /usr (cd) for plnode.txt file.\n" )
+    
+    conf_file_path= "/usr/%s" % NEW_CONF_FILE_NAME
+
+    log.write( "Checking for existence of %s\n" % conf_file_path )
+    if os.access(conf_file_path,os.R_OK):
+        try:
+            conf_file= file(conf_file_path,"r")
+            conf_file_contents= conf_file.read()
+            conf_file.close()
+            log.write( "Read in contents of file %s\n" % conf_file_path )
+        except IOError, e:
+            log.write( "Unable to read file %s\n" % conf_file_path )
+            pass    
+    
+        if __parse_configuration_file( vars, log, conf_file_contents):            
+            return 1
+        else:
+            raise BootManagerException( "Found configuration file plnode.txt " \
+                                        "in /usr, but was unable to parse it.")
+
+
+    raise BootManagerException, "Unable to find and read a node configuration file."
+    
+
+
+
+def __parse_configuration_file( vars, log, file_contents ):
+    """
+    parse a configuration file, set keys in var NETWORK_SETTINGS
+    in vars (see comment for function ReadNodeConfiguration). this
+    also reads the mac address from the machine if successful parsing
+    of the configuration file is completed.
+    """
+
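+    # the file is a set of name="value" lines; a minimal example with
+    # hypothetical values would look like:
+    #
+    #   NODE_ID="1234"
+    #   NODE_KEY="<key string>"
+    #   IP_METHOD="dhcp"
+    #   HOST_NAME="node1"
+    #   DOMAIN_NAME="example.org"
+    #
+    # static configurations additionally set IP_ADDRESS, IP_GATEWAY,
+    # IP_NETMASK, IP_NETADDR, IP_BROADCASTADDR, IP_DNS1 and IP_DNS2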
+    BOOT_CD_VERSION= vars["BOOT_CD_VERSION"]
+    SUPPORT_FILE_DIR= vars["SUPPORT_FILE_DIR"]
+    NETWORK_SETTINGS= vars["NETWORK_SETTINGS"]
+    
+    if file_contents is None:
+        log.write( "__parse_configuration_file called with no file contents\n" )
+        return 0
+    
+    try:
+        line_num= 0
+        for line in file_contents.split("\n"):
+
+            line_num = line_num + 1
+            
+            # if it's a comment or a whitespace line, ignore it
+            if line[:1] == "#" or string.strip(line) == "":
+                continue
+
+            # file is setup as name="value" pairs
+            parts= string.split(line,"=")
+            if len(parts) != 2:
+                log.write( "Invalid line %d in configuration file:\n" % line_num )
+                log.write( line + "\n" )
+                return 0
+
+            name= string.strip(parts[0])
+            value= string.strip(parts[1])
+
+            # make sure value starts and ends with
+            # single or double quotes
+            quotes= value[0] + value[len(value)-1]
+            if quotes != "''" and quotes != '""':
+                log.write( "Invalid line %d in configuration file:\n" % line_num )
+                log.write( line + "\n" )
+                return 0
+
+            # get rid of the quotes around the value
+            value= string.strip(value[1:len(value)-1])
+
+            if name == "NODE_ID":
+                try:
+                    vars['NODE_ID']= int(value)
+                    vars['WAS_NODE_ID_IN_CONF']= 1
+                except ValueError, e:
+                    log.write( "Non-numeric node_id in configuration file.\n" )
+                    return 0
+
+            if name == "NODE_KEY":
+                vars['NODE_KEY']= value
+                vars['WAS_NODE_KEY_IN_CONF']= 1
+
+            if name == "IP_METHOD":
+                value= string.lower(value)
+                if value != "static" and value != "dhcp":
+                    log.write( "Invalid IP_METHOD in configuration file:\n" )
+                    log.write( line + "\n" )
+                    return 0
+                NETWORK_SETTINGS['method']= value.strip()
+
+            if name == "IP_ADDRESS":
+                NETWORK_SETTINGS['ip']= value.strip()
+
+            if name == "IP_GATEWAY":
+                NETWORK_SETTINGS['gateway']= value.strip()
+
+            if name == "IP_NETMASK":
+                NETWORK_SETTINGS['netmask']= value.strip()
+
+            if name == "IP_NETADDR":
+                NETWORK_SETTINGS['network']= value.strip()
+
+            if name == "IP_BROADCASTADDR":
+                NETWORK_SETTINGS['broadcast']= value.strip()
+
+            if name == "IP_DNS1":
+                NETWORK_SETTINGS['dns1']= value.strip()
+
+            if name == "IP_DNS2":
+                NETWORK_SETTINGS['dns2']= value.strip()
+
+            if name == "HOST_NAME":
+                NETWORK_SETTINGS['hostname']= string.lower(value)
+
+            if name == "DOMAIN_NAME":
+                NETWORK_SETTINGS['domainname']= string.lower(value)
+
+            if name == "NET_DEVICE":
+                NETWORK_SETTINGS['mac']= string.upper(value)
+                
+
+    except IndexError, e:
+        log.write( "Unable to parse configuration file\n" )
+        return 0
+
+    # now if we are set to dhcp, clear out any fields
+    # that don't make sense
+    if NETWORK_SETTINGS["method"] == "dhcp":
+        NETWORK_SETTINGS["ip"]= ""
+        NETWORK_SETTINGS["gateway"]= ""     
+        NETWORK_SETTINGS["netmask"]= ""
+        NETWORK_SETTINGS["network"]= ""
+        NETWORK_SETTINGS["broadcast"]= ""
+        NETWORK_SETTINGS["dns1"]= ""
+        NETWORK_SETTINGS["dns2"]= ""
+
+    log.write("Successfully read and parsed node configuration file.\n" )
+
+    # if the mac wasn't specified, read it in from the system.
+    if NETWORK_SETTINGS["mac"] == "":
+        device= "eth0"
+        mac_addr= utils.get_mac_from_interface(device)
+
+        if mac_addr is None:
+            log.write( "Could not get mac address for device eth0.\n" )
+            return 0
+
+        NETWORK_SETTINGS["mac"]= string.upper(mac_addr)
+
+        log.write( "Got mac address %s for device %s\n" %
+                   (NETWORK_SETTINGS["mac"],device) )
+        
+
+    # now, if the conf file didn't contain a node id, post the mac address
+    # to plc to get the node_id value
+    if vars['NODE_ID'] is None or vars['NODE_ID'] == 0:
+        log.write( "Configuration file does not contain the node_id value.\n" )
+        log.write( "Querying PLC for node_id.\n" )
+
+        bs_request= BootServerRequest.BootServerRequest()
+        
+        postVars= {"mac_addr" : NETWORK_SETTINGS["mac"]}
+        result= bs_request.DownloadFile( "%s/getnodeid.php" %
+                                         SUPPORT_FILE_DIR,
+                                         None, postVars, 1, 1,
+                                         "/tmp/node_id")
+        if result == 0:
+            log.write( "Unable to make request to get node_id.\n" )
+            return 0
+
+        try:
+            node_id_file= file("/tmp/node_id","r")
+            node_id= string.strip(node_id_file.read())
+            node_id_file.close()
+        except IOError:
+            log.write( "Unable to read node_id from /tmp/node_id\n" )
+            return 0
+
+        try:
+            node_id= int(string.strip(node_id))
+        except ValueError:
+            log.write( "Got node_id from PLC, but not numeric: %s" % str(node_id) )
+            return 0
+
+        if node_id == -1:
+            log.write( "Got node_id, but it returned -1\n\n" )
+
+            log.write( "------------------------------------------------------\n" )
+            log.write( "This indicates that this node could not be identified\n" )
+            log.write( "by PLC. You will need to add the node to your site,\n" )
+            log.write( "and regenerate the network configuration file.\n" )
+            log.write( "See the Technical Contact guide for node setup\n" )
+            log.write( "procedures.\n\n" )
+            log.write( "Boot process canceled until this is completed.\n" )
+            log.write( "------------------------------------------------------\n" )
+            
+            cancel_boot_flag= "/tmp/CANCEL_BOOT"
+            # this will make the initial script stop requesting scripts from PLC
+            utils.sysexec( "touch %s" % cancel_boot_flag, log )
+
+            return 0
+
+        log.write( "Got node_id from PLC: %s\n" % str(node_id) )
+        vars['NODE_ID']= node_id
+
+
+
+    if vars['NODE_KEY'] is None or vars['NODE_KEY'] == "":
+        log.write( "Configuration file does not contain a node_key value.\n" )
+        log.write( "Using boot nonce instead.\n" )
+
+        # 3.x cds stored the file in /tmp/nonce in ascii form, so it
+        # can be read and used directly. 2.x cds stored it in the same place
+        # but in binary form, so we need to convert it to ascii the same
+        # way the old boot scripts did so it matches what's in the db
+        # (php uses bin2hex).
+        if BOOT_CD_VERSION[0] == 2:
+            read_mode= "rb"
+        else:
+            read_mode= "r"
+            
+        try:
+            nonce_file= file("/tmp/nonce",read_mode)
+            nonce= nonce_file.read()
+            nonce_file.close()
+        except IOError:
+            log.write( "Unable to read nonce from /tmp/nonce\n" )
+            return 0
+
+        if BOOT_CD_VERSION[0] == 2:
+            nonce= nonce.encode('hex')
+
+            # there is a bug in the php that currently accepts the nonce for
+            # the old scripts: if the nonce contains null chars (2.x cds sent
+            # it as binary), the nonce gets truncated. so, do the same here and
+            # truncate the nonce at the first null byte ('00'). This could
+            # leave us with an empty string.
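+            # for example (hypothetical), a binary nonce of "ab\x00cd"
+            # hex-encodes to "6162006364"; truncating at the first '00'
+            # byte pair leaves just "6162"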
+            nonce_len= len(nonce)
+            for byte_index in range(0,nonce_len,2):
+                if nonce[byte_index:byte_index+2] == '00':
+                    nonce= nonce[:byte_index]
+                    break
+        else:
+            nonce= string.strip(nonce)
+
+        log.write( "Read nonce, using as key.\n" )
+        vars['NODE_KEY']= nonce
+        
+        
+    # at this point, we've read the network configuration file.
+    # if we were set up using dhcp, get this system's current ip
+    # address and update the vars key ip, because it
+    # is needed for future api calls.
+
+    # at the same time, we can check to make sure that the hostname
+    # in the configuration file matches the ip address. if it fails
+    # notify the owners
+
+    hostname= NETWORK_SETTINGS['hostname'] + "." + \
+              NETWORK_SETTINGS['domainname']
+
+    # set to 0 if any part of the hostname resolution check fails
+    hostname_resolve_ok= 1
+
+    # set to 0 if the above fails and we are using dhcp, in which
+    # case we don't know the ip of this machine (without having to
+    # parse ifconfig or something). In that case, we won't be able
+    # to make api calls, so printing a message to the screen will
+    # have to suffice.
+    can_make_api_call= 1
+
+    log.write( "Checking that hostname %s resolves\n" % hostname )
+
+    # try a regular dns lookup first
+    try:
+        resolved_node_ip= socket.gethostbyname(hostname)
+    except socket.gaierror, e:
+        hostname_resolve_ok= 0
+        
+
+    if NETWORK_SETTINGS['method'] == "dhcp":
+        if hostname_resolve_ok:
+            NETWORK_SETTINGS['ip']= resolved_node_ip
+            node_ip= resolved_node_ip
+        else:
+            can_make_api_call= 0
+    else:
+        node_ip= NETWORK_SETTINGS['ip']
+
+    # make sure the dns lookup matches what the configuration file says
+    if hostname_resolve_ok:
+        if node_ip != resolved_node_ip:
+            log.write( "Hostname %s does not resolve to %s, but %s:\n" % \
+                       (hostname,node_ip,resolved_node_ip) )
+            hostname_resolve_ok= 0
+        else:
+            log.write( "Hostname %s correctly resolves to %s:\n" %
+                       (hostname,node_ip) )
+
+        
+    vars["NETWORK_SETTINGS"]= NETWORK_SETTINGS
+
+    if not hostname_resolve_ok:
+        log.write( "Hostname does not resolve correctly, will not continue.\n" )
+
+        if can_make_api_call:
+            log.write( "Notifying contacts of problem.\n" )
+
+            vars['BOOT_STATE']= 'dbg'
+            vars['STATE_CHANGE_NOTIFY']= 1
+            vars['STATE_CHANGE_NOTIFY_MESSAGE']= \
+                                     notify_messages.MSG_HOSTNAME_NOT_RESOLVE
+            
+            UpdateBootStateWithPLC.Run( vars, log )
+                    
+        log.write( "\n\n" )
+        log.write( "The hostname and/or ip in the network configuration\n" )
+        log.write( "file do not resolve and match.\n" )
+        log.write( "Please make sure the hostname set in the network\n" )
+        log.write( "configuration file resolves to the ip also specified\n" )
+        log.write( "there.\n\n" )
+        log.write( "Debug mode is being started on this cd. When the above\n" )
+        log.write( "is corrected, reboot the machine to try again.\n" )
+        
+        raise BootManagerException, \
+              "Configured node hostname does not resolve."
+    
+    return 1
diff --git a/source/steps/SendHardwareConfigToPLC.py b/source/steps/SendHardwareConfigToPLC.py
new file mode 100644 (file)
index 0000000..3f9fb9b
--- /dev/null
@@ -0,0 +1,19 @@
+#!/usr/bin/python2
+
+# Copyright (c) 2003 Intel Corporation
+# All rights reserved.
+#
+# Copyright (c) 2004-2006 The Trustees of Princeton University
+# All rights reserved.
+
+
+from Exceptions import *
+
+
+def Run( vars, log ):
+
+    log.write( "\n\nStep: Sending hardware configuration to PLC.\n" )
+
+    log.write( "Not implemented, continuing.\n" )
+    
+    return
diff --git a/source/steps/StartDebug.py b/source/steps/StartDebug.py
new file mode 100644 (file)
index 0000000..6220704
--- /dev/null
@@ -0,0 +1,132 @@
+#!/usr/bin/python2
+
+# Copyright (c) 2003 Intel Corporation
+# All rights reserved.
+#
+# Copyright (c) 2004-2006 The Trustees of Princeton University
+# All rights reserved.
+
+import os
+
+from Exceptions import *
+import utils
+import compatibility
+
+
+message= \
+"""
+---------------------------------------------------------
+This machine has entered a temporary debug state, so
+PlanetLab Support can log in and fix any problems that
+might have occurred.
+
+Please do not reboot this machine at this point, unless
+specifically asked to.
+
+Thank you.
+---------------------------------------------------------
+"""
+
+
+def Run( vars, log ):
+    """
+    Bring up sshd inside the boot cd environment for debug purposes.
+
+    Once it's running, touch the file /tmp/SSHD_RUNNING so future
+    calls to this function don't do anything.
+
+    Expect the following variables in vars to be set:
+    BM_SOURCE_DIR     The source dir for the boot manager sources that
+                      we are currently running from
+    BOOT_CD_VERSION          A tuple of the current bootcd version
+    """
+
+    log.write( "\n\nStep: Starting debug mode.\n" )
+    
+    # make sure we have the variables we need
+    try:
+        BM_SOURCE_DIR= vars["BM_SOURCE_DIR"]
+        if BM_SOURCE_DIR == "":
+            raise ValueError, "BM_SOURCE_DIR"
+
+        BOOT_CD_VERSION= vars["BOOT_CD_VERSION"]
+        if BOOT_CD_VERSION == "":
+            raise ValueError, "BOOT_CD_VERSION"
+
+    except KeyError, var:
+        raise BootManagerException, "Missing variable in vars: %s\n" % var
+    except ValueError, var:
+        raise BootManagerException, "Variable in vars, shouldn't be: %s\n" % var
+
+
+    log.write( "Starting debug environment\n" )
+
+    ssh_source_files= "%s/debug_files/" % BM_SOURCE_DIR    
+    ssh_dir= "/etc/ssh/"
+    ssh_home= "/root/.ssh"
+    cancel_boot_flag= "/tmp/CANCEL_BOOT"
+    sshd_started_flag= "/tmp/SSHD_RUNNING"
+    
+    sshd_started= 0
+    try:
+        os.stat(sshd_started_flag)
+        sshd_started= 1
+    except OSError, e:
+        pass
+
+    if not sshd_started:
+        log.write( "Creating ssh host keys\n" )
+        
+        utils.makedirs( ssh_dir )
+        utils.sysexec( "ssh-keygen -t rsa1 -b 1024 -f %s/ssh_host_key -N ''" %
+                       ssh_dir, log )
+        utils.sysexec( "ssh-keygen -t rsa -f %s/ssh_host_rsa_key -N ''" %
+                       ssh_dir, log )
+        utils.sysexec( "ssh-keygen -d -f %s/ssh_host_dsa_key -N ''" %
+                       ssh_dir, log )
+
+        if BOOT_CD_VERSION[0] == 3:
+            utils.sysexec( "cp -f %s/sshd_config_v3 %s/sshd_config" %
+                           (ssh_source_files,ssh_dir), log )
+        else:
+            utils.sysexec( "cp -f %s/sshd_config_v2 %s/sshd_config" %
+                           (ssh_source_files,ssh_dir), log )
+    else:
+        log.write( "ssh host keys already created\n" )
+
+
+    # always update the key, it may have changed in this instance of the bootmanager
+    log.write( "Installing debug ssh key for root user\n" )
+    
+    utils.makedirs( ssh_home )
+    utils.sysexec( "cp -f %s/debug_root_ssh_key %s/authorized_keys" %
+                   (ssh_source_files,ssh_home), log )
+    utils.sysexec( "chmod 700 %s" % ssh_home, log )
+    utils.sysexec( "chmod 600 %s/authorized_keys" % ssh_home, log )
+
+    if not sshd_started:
+        log.write( "Starting sshd\n" )
+        
+        if BOOT_CD_VERSION[0] == 2:
+            utils.sysexec( "/usr/sbin/sshd", log )
+        else:
+            utils.sysexec( "service sshd start", log )
+        
+        # flag that ssh is running
+        utils.sysexec( "touch %s" % sshd_started_flag, log )
+    else:
+        log.write( "sshd already running\n" )
+
+
+    # for ease of use, set up lvm on 2.x cds
+    if BOOT_CD_VERSION[0] == 2:
+        compatibility.setup_lvm_2x_cd(vars,log)
+
+    
+    # this will make the initial script stop requesting scripts from PLC
+    utils.sysexec( "touch %s" % cancel_boot_flag, log )
+
+    print message
+    
+    return
+
diff --git a/source/steps/UpdateBootStateWithPLC.py b/source/steps/UpdateBootStateWithPLC.py
new file mode 100644 (file)
index 0000000..001dcd9
--- /dev/null
@@ -0,0 +1,59 @@
+#!/usr/bin/python2 -u
+
+# Copyright (c) 2003 Intel Corporation
+# All rights reserved.
+#
+# Copyright (c) 2004-2006 The Trustees of Princeton University
+# All rights reserved.
+
+from Exceptions import *
+import BootAPI
+import notify_messages
+
+
+def Run( vars, log ):
+    """
+    Change this node's boot state at PLC.
+
+    The current value of the BOOT_STATE key in vars is used.
+    Optionally, notify the contacts of the boot state change.
+    If this is the case, the following keys/values
+    should be set in vars before calling this step:
+    STATE_CHANGE_NOTIFY= 1
+    STATE_CHANGE_NOTIFY_MESSAGE= "<notify message>"
+    The second value is a message to send the users from notify_messages.py
+
+    Return 1 if successful; raise a BootManagerException otherwise.
+    """
+
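+    # a typical invocation from another step (see ReadNodeConfiguration)
+    # looks like:
+    #   vars['BOOT_STATE']= 'dbg'
+    #   vars['STATE_CHANGE_NOTIFY']= 1
+    #   vars['STATE_CHANGE_NOTIFY_MESSAGE']= \
+    #       notify_messages.MSG_HOSTNAME_NOT_RESOLVE
+    #   UpdateBootStateWithPLC.Run( vars, log )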
+    log.write( "\n\nStep: Updating node boot state at PLC.\n" )
+
+    update_vals= {}
+    update_vals['boot_state']= vars['BOOT_STATE']
+    BootAPI.call_api_function( vars, "BootUpdateNode", (update_vals,) )
+
+    log.write( "Successfully updated boot state for this node at PLC\n" )
+
+
+    notify = vars.get("STATE_CHANGE_NOTIFY",0)
+
+    if notify:
+        message= vars['STATE_CHANGE_NOTIFY_MESSAGE']
+        include_pis= 0
+        include_techs= 1
+        include_support= 0
+
+        sent= 0
+        try:
+            sent= BootAPI.call_api_function( vars, "BootNotifyOwners",
+                                             (message,
+                                              include_pis,
+                                              include_techs,
+                                              include_support) )
+        except BootManagerException, e:
+            log.write( "Call to BootNotifyOwners failed: %s.\n" % e )
+
+        if sent == 0:
+            log.write( "Unable to notify site contacts of state change.\n" )
+
+    return 1
diff --git a/source/steps/UpdateNodeConfiguration.py b/source/steps/UpdateNodeConfiguration.py
new file mode 100644 (file)
index 0000000..ec74bc2
--- /dev/null
@@ -0,0 +1,107 @@
+#!/usr/bin/python2 -u
+
+# Copyright (c) 2003 Intel Corporation
+# All rights reserved.
+#
+# Copyright (c) 2004-2006 The Trustees of Princeton University
+# All rights reserved.
+
+
+import os
+
+import InstallBuildVServer
+from Exceptions import *
+import utils
+
+
+
+def Run( vars, log ):
+    """
+    Reconfigure a node if necessary, including rewriting any network init
+    scripts based on what PLC has. Also, update any slivers on the machine
+    in case their network files are out of date (primarily /etc/hosts).
+
+    Also write out /etc/planetlab/session, a random string that gets
+    a new value at every request of BootGetNodeDetails (i.e., every boot).
+
+    This step expects the root to be already mounted on SYSIMG_PATH.
+    
+    Expect the following keys to be set:
+    SYSIMG_PATH              the path where the system image will be mounted
+                             (always starts with TEMP_PATH)
+    ROOT_MOUNTED             the node root file system is mounted
+    NETWORK_SETTINGS         A dictionary of the values from the network
+                             configuration file
+    """
+    
+    log.write( "\n\nStep: Updating node configuration.\n" )
+
+    # make sure we have the variables we need
+    try:
+        NETWORK_SETTINGS= vars["NETWORK_SETTINGS"]
+        if NETWORK_SETTINGS == "":
+            raise ValueError, "NETWORK_SETTINGS"
+
+        SYSIMG_PATH= vars["SYSIMG_PATH"]
+        if SYSIMG_PATH == "":
+            raise ValueError, "SYSIMG_PATH"
+
+        ROOT_MOUNTED= vars["ROOT_MOUNTED"]
+        if ROOT_MOUNTED == "":
+            raise ValueError, "ROOT_MOUNTED"
+
+    except KeyError, var:
+        raise BootManagerException, "Missing variable in vars: %s\n" % var
+    except ValueError, var:
+        raise BootManagerException, "Variable in vars, shouldn't be: %s\n" % var
+
+    try:
+        ip= NETWORK_SETTINGS['ip']
+        method= NETWORK_SETTINGS['method']
+        hostname= NETWORK_SETTINGS['hostname']
+        domainname= NETWORK_SETTINGS['domainname']
+    except KeyError, var:
+        raise BootManagerException, \
+              "Missing network value %s in var NETWORK_SETTINGS\n" % var
+
+    
+    if not ROOT_MOUNTED:
+        raise BootManagerException, "Root isn't mounted on SYSIMG_PATH\n"
+
+    log.write( "Updating vserver's /etc/hosts and /etc/resolv.conf files\n" )
+
+    # create a list of the full directory paths of all the vserver images that
+    # need to be updated.
+    update_path_list= []
+
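+    # each entry added below is an absolute path of the form
+    # <SYSIMG_PATH>/vservers/<slicename> or <SYSIMG_PATH>/vservers/.vcache/<name>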
+    for base_dir in ('/vservers','/vservers/.vcache'):
+        try:
+            full_dir_path= "%s/%s" % (SYSIMG_PATH,base_dir)
+            slices= os.listdir( full_dir_path )
+
+            try:
+                slices.remove("lost+found")
+            except ValueError, e:
+                pass
+            
+            update_path_list= update_path_list + map(lambda x: \
+                                                     full_dir_path+"/"+x,
+                                                     slices)
+        except OSError, e:
+            continue
+
+
+    log.write( "Updating network configuration in:\n" )
+    if len(update_path_list) == 0:
+        log.write( "No vserver images found to update.\n" )
+    else:
+        for base_dir in update_path_list:
+            log.write( "%s\n" % base_dir )
+
+
+    # now, update /etc/hosts and /etc/resolv.conf in each dir if
+    # the update flag is there
+    for base_dir in update_path_list:
+        InstallBuildVServer.update_vserver_network_files(base_dir,vars,log)
+    
+    return
diff --git a/source/steps/ValidateNodeInstall.py b/source/steps/ValidateNodeInstall.py
new file mode 100644 (file)
index 0000000..30cb0af
--- /dev/null
@@ -0,0 +1,147 @@
+#!/usr/bin/python2 -u
+
+# Copyright (c) 2003 Intel Corporation
+# All rights reserved.
+#
+# Copyright (c) 2004-2006 The Trustees of Princeton University
+# All rights reserved.
+
+import os
+
+from Exceptions import *
+import utils
+import systeminfo
+import compatibility
+import ModelOptions
+
+
+def Run( vars, log ):
+    """
+    See if a node installation is valid. More checks should certainly be
+    done in the future, but for now, make sure that the symlinks kernel-boot
+    and initrd-boot exist in /boot
+    
+    Expect the following variables to be set:
+    SYSIMG_PATH              the path where the system image will be mounted
+                             (always starts with TEMP_PATH)
+    BOOT_CD_VERSION          A tuple of the current bootcd version
+    ROOT_MOUNTED             the node root file system is mounted
+    NODE_ID                  The db node_id for this machine
+    PLCONF_DIR               The directory to store the configuration file in
+    
+    Set the following variables upon successfully running:
+    ROOT_MOUNTED             the node root file system is mounted
+    """
+
+    log.write( "\n\nStep: Validating node installation.\n" )
+
+    # make sure we have the variables we need
+    try:
+        BOOT_CD_VERSION= vars["BOOT_CD_VERSION"]
+        if BOOT_CD_VERSION == "":
+            raise ValueError, "BOOT_CD_VERSION"
+
+        SYSIMG_PATH= vars["SYSIMG_PATH"]
+        if SYSIMG_PATH == "":
+            raise ValueError, "SYSIMG_PATH"
+
+        NODE_ID= vars["NODE_ID"]
+        if NODE_ID == "":
+            raise ValueError, "NODE_ID"
+
+        PLCONF_DIR= vars["PLCONF_DIR"]
+        if PLCONF_DIR == "":
+            raise ValueError, "PLCONF_DIR"
+        
+        NODE_MODEL_OPTIONS= vars["NODE_MODEL_OPTIONS"]
+
+        PARTITIONS= vars["PARTITIONS"]
+        if PARTITIONS == None:
+            raise ValueError, "PARTITIONS"
+
+    except KeyError, var:
+        raise BootManagerException, "Missing variable in vars: %s\n" % var
+    except ValueError, var:
+        raise BootManagerException, "Variable in vars, shouldn't be: %s\n" % var
+
+
+    ROOT_MOUNTED= 0
+    if 'ROOT_MOUNTED' in vars.keys():
+        ROOT_MOUNTED= vars['ROOT_MOUNTED']
+
+    # mount the root system image if we haven't already.
+    # capture BootManagerExceptions during the vgscan/change and mount
+    # calls, so we can return 0 instead
+    if ROOT_MOUNTED == 0:
+        # old cds need extra utilities to run lvm
+        if BOOT_CD_VERSION[0] == 2:
+            compatibility.setup_lvm_2x_cd( vars, log )
+            
+        # simply listing the system block devices (via systeminfo) makes
+        # them show up, so vgscan can find the planetlab volume group
+        systeminfo.get_block_device_list(vars, log)
+
+        try:
+            utils.sysexec( "vgscan", log )
+            utils.sysexec( "vgchange -ay planetlab", log )
+        except BootManagerException, e:
+            log.write( "BootManagerException during vgscan/vgchange: %s\n" %
+                       str(e) )
+            return 0
+            
+        utils.makedirs( SYSIMG_PATH )
+
+        try:
+            utils.sysexec("mount %s %s" % (PARTITIONS["root"],SYSIMG_PATH),log)
+            utils.sysexec("mount %s %s/vservers" % \
+                          (PARTITIONS["vservers"], SYSIMG_PATH), log)
+            utils.sysexec( "mount -t proc none %s/proc" % SYSIMG_PATH, log )
+        except BootManagerException, e:
+            log.write( "BootManagerException during vgscan/vgchange: %s\n" %
+                       str(e) )
+            return 0
+
+        ROOT_MOUNTED= 1
+        vars['ROOT_MOUNTED']= 1
+        
+    
+    # check if the base kernel is installed
+    try:
+        os.stat("%s/boot/kernel-boot" % SYSIMG_PATH)
+        os.stat("%s/boot/initrd-boot" % SYSIMG_PATH)
+    except OSError, e:            
+        log.write( "FATAL: Couldn't locate base kernel.\n")                
+        return 0
+
+    # check if the model specified kernel is installed
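+    # with the smp model option, the paths checked below become
+    # /boot/kernel-bootsmp and /boot/initrd-bootsmp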
+    option = ''
+    if NODE_MODEL_OPTIONS & ModelOptions.SMP:
+        option = 'smp'
+        try:
+            os.stat("%s/boot/kernel-boot%s" % (SYSIMG_PATH,option))
+            os.stat("%s/boot/initrd-boot%s" % (SYSIMG_PATH,option))
+        except OSError, e:
+            # smp kernel is not there; remove option from modeloptions
+            # such that the rest of the code base thinks we are just
+            # using the base kernel.
+            NODE_MODEL_OPTIONS = NODE_MODEL_OPTIONS & ~ModelOptions.SMP
+            vars["NODE_MODEL_OPTIONS"] = NODE_MODEL_OPTIONS
+            log.write( "WARNING: Couldn't locate smp kernel.\n")
+            
+    # write out the node id to /etc/planetlab/node_id. if this fails, return
+    # 0, indicating the node isn't a valid install.
+    try:
+        node_id_file_path= "%s/%s/node_id" % (SYSIMG_PATH,PLCONF_DIR)
+        node_id_file= file( node_id_file_path, "w" )
+        node_id_file.write( str(NODE_ID) )
+        node_id_file.close()
+        node_id_file= None
+        log.write( "Updated /etc/planetlab/node_id\n" )
+    except IOError, e:
+        log.write( "Unable to write out /etc/planetlab/node_id\n" )
+        return 0
+
+    log.write( "Everything appears to be ok\n" )
+    
+    return 1
diff --git a/source/steps/WriteModprobeConfig.py b/source/steps/WriteModprobeConfig.py
new file mode 100644 (file)
index 0000000..d50ef4d
--- /dev/null
@@ -0,0 +1,101 @@
+#!/usr/bin/python2 -u
+
+# Copyright (c) 2003 Intel Corporation
+# All rights reserved.
+#
+# Copyright (c) 2004-2006 The Trustees of Princeton University
+# All rights reserved.
+
+import os, string
+
+from Exceptions import *
+import utils
+import systeminfo
+import BootAPI
+import ModelOptions
+import notify_messages
+
+def Run( vars, log, filename = "/etc/modprobe.conf"):
+    """
+    write out the system file /etc/modprobe.conf with the current
+    set of modules.
+
+    if no network drivers end up being written, the node is switched to
+    a debug state and a BootManagerException is raised.
+    """
+
+    # write out the modprobe.conf file for the system. make sure
+    # the ethernet devices are listed in the same order as the boot cd
+    # loaded the modules (this is found in /tmp/loadedmodules).
+    # ultimately, the order will only match the boot cd order if
+    # the kernel modules have the same name - which should be true for the
+    # later version boot cds because they use the same kernel version.
+    # older boot cds use a 2.4.19 kernel, and it's possible some of the
+    # network module names have changed, in which case the system might not
+    # boot if the network modules are activated in a different order than on
+    # the boot cd.
+
+    # make sure we have the variables we need
+    
+    try:
+        SYSIMG_PATH= vars["SYSIMG_PATH"]
+        if SYSIMG_PATH == "":
+            raise ValueError, "SYSIMG_PATH"
+
+    except KeyError, var:
+        raise BootManagerException, "Missing variable in vars: %s\n" % var
+    except ValueError, var:
+        raise BootManagerException, "Variable in vars, shouldn't be: %s\n" % var
+
+    sysmods= systeminfo.get_system_modules(vars, log)
+    if sysmods is None:
+        raise BootManagerException, "Unable to get list of system modules."
+        
+    eth_count= 0
+    scsi_count= 0
+
+    modulesconf_file= file("%s/%s" % (SYSIMG_PATH,filename), "w" )
+
+    for type in sysmods:
+        if type == systeminfo.MODULE_CLASS_SCSI:
+            for a_mod in sysmods[type]:
+                if scsi_count == 0:
+                    modulesconf_file.write( "alias scsi_hostadapter %s\n" %
+                                            a_mod )
+                else:
+                    modulesconf_file.write( "alias scsi_hostadapter%d %s\n" %
+                                            (scsi_count,a_mod) )
+                scsi_count= scsi_count + 1
+
+        elif type == systeminfo.MODULE_CLASS_NETWORK:
+            for a_mod in sysmods[type]:
+                modulesconf_file.write( "alias eth%d %s\n" %
+                                        (eth_count,a_mod) )
+                eth_count= eth_count + 1
+
+    modulesconf_file.close()
+    modulesconf_file= None
+
+    # dump the modprobe.conf file to the log (not to screen)
+    log.write( "Contents of new modprobe.conf file:\n" )
+    modulesconf_file= file("%s/%s" % (SYSIMG_PATH,filename), "r" )
+    contents= modulesconf_file.read()
+    log.write( contents + "\n" )
+    modulesconf_file.close()
+    modulesconf_file= None
+    log.write( "End contents of new modprobe.conf file.\n" )
+
+    # check to see if we wrote any network driver aliases to modprobe.conf.
+    # if not, switch this node to the debug state and raise an exception,
+    # since a node without network drivers will be unreachable after reboot.
+    if eth_count == 0:
+        log.write( "\nIt appears we don't have any network drivers. Aborting.\n" )
+        
+        vars['BOOT_STATE']= 'dbg'
+        vars['STATE_CHANGE_NOTIFY']= 1
+        vars['STATE_CHANGE_NOTIFY_MESSAGE']= \
+             notify_messages.MSG_NO_DETECTED_NETWORK
+        raise BootManagerException, \
+              notify_messages.MSG_NO_DETECTED_NETWORK
+
+
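For reference, the alias generation above amounts to the following; the module names are invented for illustration and are not claimed to be what systeminfo would report:

    # hypothetical systeminfo.get_system_modules() result
    sysmods = {"scsi": ["ata_piix", "sym53c8xx"], "network": ["e1000", "tg3"]}

    lines = []
    scsi_count = 0
    for mod in sysmods["scsi"]:
        if scsi_count == 0:
            lines.append("alias scsi_hostadapter %s" % mod)
        else:
            lines.append("alias scsi_hostadapter%d %s" % (scsi_count, mod))
        scsi_count = scsi_count + 1
    eth_count = 0
    for mod in sysmods["network"]:
        lines.append("alias eth%d %s" % (eth_count, mod))
        eth_count = eth_count + 1

    # resulting modprobe.conf contents:
    #   alias scsi_hostadapter ata_piix
    #   alias scsi_hostadapter1 sym53c8xx
    #   alias eth0 e1000
    #   alias eth1 tg3
    print "\n".join(lines)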
diff --git a/source/steps/WriteNetworkConfig.py b/source/steps/WriteNetworkConfig.py
new file mode 100644 (file)
index 0000000..2e6867d
--- /dev/null
@@ -0,0 +1,141 @@
+#!/usr/bin/python2
+
+# Copyright (c) 2003 Intel Corporation
+# All rights reserved.
+#
+# Copyright (c) 2004-2006 The Trustees of Princeton University
+# All rights reserved.
+
+import os, string
+
+from Exceptions import *
+import utils
+import BootAPI
+import ModelOptions
+
+def Run( vars, log ):
+    """
+    Write out the network configuration for this machine:
+    /etc/hosts
+    /etc/sysconfig/network-scripts/ifcfg-eth0
+    /etc/resolv.conf (if applicable)
+    /etc/sysconfig/network
+
+    It is assumed the caller has already mounted the root and vserver
+    partitions at SYSIMG_PATH; this is not checked here.
+
+    The values to be used for the network settings are to be set in vars
+    in the variable 'NETWORK_SETTINGS', which is a dictionary
+    with keys:
+
+     Key               Used by this function
+     -----------------------------------------------
+     node_id
+     node_key
+     method            x
+     ip                x
+     mac               x (optional)
+     gateway           x
+     network           x
+     broadcast         x
+     netmask           x
+     dns1              x
+     dns2              x (optional)
+     hostname          x
+     domainname        x
+
+    Expect the following variables from the store:
+    SYSIMG_PATH             the path where the system image will be mounted
+                            (always starts with TEMP_PATH)
+    NETWORK_SETTINGS  A dictionary of the values from the network
+                                configuration file
+    Sets the following variables:
+    None
+    """
+
+    log.write( "\n\nStep: Install: Writing Network Configuration files.\n" )
+
+    try:
+        SYSIMG_PATH= vars["SYSIMG_PATH"]
+        if SYSIMG_PATH == "":
+            raise ValueError, "SYSIMG_PATH"
+
+    except KeyError, var:
+        raise BootManagerException, "Missing variable in vars: %s\n" % var
+    except ValueError, var:
+        raise BootManagerException, "Variable in vars, shouldn't be: %s\n" % var
+
+
+    try:
+        network_settings= vars['NETWORK_SETTINGS']
+    except KeyError, e:
+        raise BootManagerException, "No network settings found in vars."
+
+    try:
+        hostname= network_settings['hostname']
+        domainname= network_settings['domainname']
+        method= network_settings['method']
+        ip= network_settings['ip']
+        gateway= network_settings['gateway']
+        network= network_settings['network']
+        netmask= network_settings['netmask']
+        dns1= network_settings['dns1']
+        mac= network_settings['mac']
+    except KeyError, e:
+        raise BootManagerException, "Missing value %s in network settings." % str(e)
+
+    try:
+        dns2= ''
+        dns2= network_settings['dns2']
+    except KeyError, e:
+        pass
+
+        
+    log.write( "Writing /etc/hosts\n" )
+    hosts_file= file("%s/etc/hosts" % SYSIMG_PATH, "w" )    
+    hosts_file.write( "127.0.0.1       localhost\n" )
+    if method == "static":
+        hosts_file.write( "%s %s.%s\n" % (ip, hostname, domainname) )
+    hosts_file.close()
+    hosts_file= None
+    
+
+    log.write( "Writing /etc/sysconfig/network-scripts/ifcfg-eth0\n" )
+    eth0_file= file("%s/etc/sysconfig/network-scripts/ifcfg-eth0" %
+                    SYSIMG_PATH, "w" )
+    eth0_file.write( "DEVICE=eth0\n" )
+    if method == "static":
+        eth0_file.write( "BOOTPROTO=static\n" )
+        eth0_file.write( "IPADDR=%s\n" % ip )
+        eth0_file.write( "NETMASK=%s\n" % netmask )
+        eth0_file.write( "GATEWAY=%s\n" % gateway )
+    else:
+        eth0_file.write( "BOOTPROTO=dhcp\n" )
+        eth0_file.write( "DHCP_HOSTNAME=%s\n" % hostname )
+    if mac != "":
+        eth0_file.write( "HWADDR=%s\n" % mac )
+    eth0_file.write( "ONBOOT=yes\n" )
+    eth0_file.write( "USERCTL=no\n" )
+    eth0_file.close()
+    eth0_file= None
+
+    if method == "static":
+        log.write( "Writing /etc/resolv.conf\n" )
+        resolv_file= file("%s/etc/resolv.conf" % SYSIMG_PATH, "w" )
+        if dns1 != "":
+            resolv_file.write( "nameserver %s\n" % dns1 )
+        if dns2 != "":
+            resolv_file.write( "nameserver %s\n" % dns2 )
+        resolv_file.write( "search %s\n" % domainname )
+        resolv_file.close()
+        resolv_file= None
+
+    log.write( "Writing /etc/sysconfig/network\n" )
+    network_file= file("%s/etc/sysconfig/network" % SYSIMG_PATH, "w" )
+    network_file.write( "NETWORKING=yes\n" )
+    network_file.write( "HOSTNAME=%s.%s\n" % (hostname, domainname) )
+    if method == "static":
+        network_file.write( "GATEWAY=%s\n" % gateway )
+    network_file.close()
+    network_file= None
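As an illustration only, a static configuration with made-up values would yield an ifcfg-eth0 along these lines:

    settings = {"method": "static", "ip": "198.51.100.10",
                "netmask": "255.255.255.0", "gateway": "198.51.100.1",
                "mac": "00:11:22:33:44:55", "hostname": "node1"}

    lines = ["DEVICE=eth0"]
    if settings["method"] == "static":
        lines += ["BOOTPROTO=static",
                  "IPADDR=%s" % settings["ip"],
                  "NETMASK=%s" % settings["netmask"],
                  "GATEWAY=%s" % settings["gateway"]]
    else:
        lines += ["BOOTPROTO=dhcp", "DHCP_HOSTNAME=%s" % settings["hostname"]]
    if settings["mac"] != "":
        lines.append("HWADDR=%s" % settings["mac"])
    lines += ["ONBOOT=yes", "USERCTL=no"]
    print "\n".join(lines)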
diff --git a/source/steps/__init__.py b/source/steps/__init__.py
new file mode 100644 (file)
index 0000000..16550f3
--- /dev/null
@@ -0,0 +1,36 @@
+#!/usr/bin/python2
+
+# Copyright (c) 2003 Intel Corporation
+# All rights reserved.
+#
+# Copyright (c) 2004-2006 The Trustees of Princeton University
+# All rights reserved.
+
+
+"""
+This directory contains individual step classes
+"""
+
+__all__ = ["ReadNodeConfiguration",
+           "AuthenticateWithPLC",
+           "GetAndUpdateNodeDetails",
+           "ConfirmInstallWithUser",
+           "UpdateBootStateWithPLC",
+           "CheckHardwareRequirements",
+           "SendHardwareConfigToPLC",
+           "InitializeBootManager",
+           "UpdateNodeConfiguration",
+           "CheckForNewDisks",
+           "ChainBootNode",
+           "ValidateNodeInstall",
+           "StartDebug",
+           "InstallBootstrapRPM",
+           "InstallBuildVServer",
+           "InstallInit",
+           "InstallNodeInit",
+           "InstallPartitionDisks",
+           "InstallUninitHardware",
+           "InstallWriteConfig",
+           "MakeInitrd",
+           "WriteNetworkConfig",
+           "WriteModprobeConfig"]
diff --git a/source/systeminfo.py b/source/systeminfo.py
new file mode 100755 (executable)
index 0000000..966c149
--- /dev/null
@@ -0,0 +1,451 @@
+#!/usr/bin/python2
+
+# Copyright (c) 2003 Intel Corporation
+# All rights reserved.
+#
+# Copyright (c) 2004-2006 The Trustees of Princeton University
+# All rights reserved.
+# expected /proc/partitions format
+
+
+#----------------------------------------------------
+#major minor  #blocks  name
+#
+#3     0   40017915 hda
+#3     1     208813 hda1
+#3     2   20482875 hda2
+#3     3     522112 hda3
+#3     4   18804082 hda4
+#----------------------------------------------------
+
+
+import string
+import sys
+import os
+import popen2
+import merge_hw_tables
+import re
+import errno
+import ModelOptions
+from Exceptions import *
+
+hwdatapath = "usr/share/hwdata"
+"""
+a utility class for finding and returning information about
+block devices, memory, and other hardware on the system
+"""
+
+PROC_MEMINFO_PATH= "/proc/meminfo"
+PROC_PARTITIONS_PATH= "/proc/partitions"
+
+# set when the sfdisk -l <dev> trick has been done to make
+# all devices show up
+DEVICES_SCANNED_FLAG= "/tmp/devices_scanned"
+
+# a /proc/partitions block is 1024 bytes
+# a GB to a HDD manufacturer is 10^9 bytes
+BLOCKS_PER_GB = pow(10, 9) / 1024.0
+
+
+# -n is numeric ids (no lookup), -m is machine readable
+LSPCI_CMD= "/sbin/lspci -nm"
+
+MODULE_CLASS_NETWORK= "network"
+MODULE_CLASS_SCSI= "scsi"
+
+PCI_CLASS_NETWORK_ETHERNET=0x0200L
+PCI_CLASS_STORAGE_SCSI=0x0100L
+PCI_CLASS_STORAGE_IDE=0x0101L
+PCI_CLASS_STORAGE_FLOPPY=0x0102L
+PCI_CLASS_STORAGE_IPI=0x0103L
+PCI_CLASS_STORAGE_RAID=0x0104L
+PCI_CLASS_STORAGE_OTHER=0x0180L
+
+PCI_ANY=0xffffffffL
+
+def get_total_phsyical_mem(vars = {}, log = sys.stderr):
+    """
+    return the total physical memory of the machine, in kilobytes.
+
+    Return None if /proc/meminfo not readable.
+    """
+
+    try:
+        meminfo_file= file(PROC_MEMINFO_PATH,"r")
+    except IOError, e:
+        return
+
+    total_memory= None
+
+    for line in meminfo_file:
+
+        try:
+            (fieldname,value)= string.split(line,":")
+        except ValueError, e:
+            # this will happen for lines that don't have two values
+            # (like the first line on 2.4 kernels)
+            continue
+
+        fieldname= string.strip(fieldname)
+        value= string.strip(value)
+        
+        if fieldname == "MemTotal":
+            try:
+                (total_memory,units)= string.split(value)
+            except ValueError, e:
+                return
+
+            if total_memory == "" or total_memory == None or \
+                   units == "" or units == None:
+                return
+
+            if string.lower(units) != "kb":
+                return
+
+            try:
+                total_memory= int(total_memory)
+            except ValueError, e:
+                return
+
+            break
+
+    meminfo_file.close()
+    return total_memory
+
+def get_block_device_list(vars = {}, log = sys.stderr):
+    """
+    get a list of block devices from this system.
+    return an associative array, where the device name
+    (full /dev/device path) is the key, and the value
+    is a tuple of (major,minor,numblocks,gb_size,readonly)
+    """
+
+    # make sure we can access to the files/directories in /proc
+    if not os.access(PROC_PARTITIONS_PATH, os.F_OK):
+        return None
+
+    # table with valid scsi/sata/ide/raid block device names
+    valid_blk_names = {}
+    # add in valid sd and hd block device names
+    for blk_prefix in ('sd','hd'):
+        for blk_num in map ( \
+            lambda x: chr(x), range(ord('a'),ord('z')+1)):
+            devicename="%s%c" % (blk_prefix, blk_num)
+            valid_blk_names[devicename]=None
+
+    # add in valid scsi raid block device names
+    for M in range(0,1+1):
+        for N in range(0,7+1):
+            devicename = "cciss/c%dd%d" % (M,N)
+            valid_blk_names[devicename]=None
+
+    # only do this once every system boot
+    if not os.access(DEVICES_SCANNED_FLAG, os.R_OK):
+
+        # this is ugly. under devfs, device
+        # entries in /dev/scsi/.. and /dev/ide/...
+        # don't show up until you attempt to read
+        # from the associated device at /dev (/dev/sda).
+        # so, lets run sfdisk -l (list partitions) against
+        # most possible block devices, that way they show
+        # up when it comes time to do the install.
+        devicenames = valid_blk_names.keys()
+        devicenames.sort()
+        for devicename in devicenames:
+            os.system( "sfdisk -l /dev/%s > /dev/null 2>&1" % devicename )
+
+        # touch file
+        fb = open(DEVICES_SCANNED_FLAG,"w")
+        fb.close()
+
+    devicelist= {}
+
+    partitions_file= file(PROC_PARTITIONS_PATH,"r")
+    line_count= 0
+    for line in partitions_file:
+        line_count= line_count + 1
+
+        # skip the header line (the blank line after it fails the field-count check below)
+        if line_count < 2:
+            continue
+
+        parts= string.split(line)
+
+        if len(parts) < 4:
+            continue
+
+        device= parts[3]
+
+        # skip and ignore any partitions
+        if not valid_blk_names.has_key(device):
+            continue
+
+        try:
+            major= int(parts[0])
+            minor= int(parts[1])
+            blocks= int(parts[2])
+        except ValueError, err:
+            continue
+
+        gb_size= blocks/BLOCKS_PER_GB
+
+        # check to see if the blk device is readonly
+        try:
+            # can we write to it?
+            dev_name= "/dev/%s" % device
+            fb = open(dev_name,"w")
+            fb.close()
+            readonly=False
+        except IOError, e:
+            # check if EROFS errno
+            if errno.errorcode.get(e.errno,None) == 'EROFS':
+                readonly=True
+            else:
+                # got some other errno, pretend device is readonly
+                readonly=True
+
+        devicelist[dev_name]= (major,minor,blocks,gb_size,readonly)
+
+    return devicelist
+
+
+def get_system_modules( vars = {}, log = sys.stderr):
+    """
+    Return a list of kernel modules that this system requires.
+    This requires access to the installed system's root
+    directory, as the following files must exist and are used:
+    <install_root>/usr/share/hwdata/pcitable
+    <install_root>/lib/modules/(first entry if kernel_version unspecified)/modules.pcimap
+    <install_root>/lib/modules/(first entry if kernel version unspecified)/modules.dep
+
+    If more than one kernel is installed, and the kernel
+    version is not specified, then only the first one in
+    /lib/modules is used.
+
+    Returns a dictionary, keys being the type of module:
+        - scsi       MODULE_CLASS_SCSI
+        - network    MODULE_CLASS_NETWORK
+    The value being the kernel module name to load.
+
+    Some sata devices show up under an IDE device class,
+    hence the reason for checking ide devices as well.
+    If there actually is a match in the pci -> module lookup
+    table and it's an ide device, it is most likely sata,
+    as ide modules are built into the kernel.
+    """
+
+    if not vars.has_key("SYSIMG_PATH"):
+        vars["SYSIMG_PATH"]="/"
+    SYSIMG_PATH=vars["SYSIMG_PATH"]
+
+    if not vars.has_key("NODE_MODEL_OPTIONS"):
+        vars["NODE_MODEL_OPTIONS"] = 0;
+
+    initrd, kernel_version = getKernelVersion(vars, log)
+
+    # get the kernel version we are assuming
+    if kernel_version is None:
+        try:
+            kernel_version= os.listdir( "%s/lib/modules/" % SYSIMG_PATH )
+        except OSError, e:
+            return
+
+        if len(kernel_version) == 0:
+            return
+
+        if len(kernel_version) > 1:
+            print( "WARNING: We may be returning modules for the wrong kernel." )
+
+        kernel_version= kernel_version[0]
+
+    print( "Using kernel version %s" % kernel_version )
+
+    # test to make sure the three files we need are present
+    pcitable_path = "%s/%s/pcitable" % (SYSIMG_PATH,hwdatapath)
+    modules_pcimap_path = "%s/lib/modules/%s/modules.pcimap" % \
+                          (SYSIMG_PATH,kernel_version)
+    modules_dep_path = "%s/lib/modules/%s/modules.dep" % \
+                       (SYSIMG_PATH,kernel_version)
+
+    for path in (pcitable_path,modules_pcimap_path,modules_dep_path):
+        if not os.access(path,os.R_OK):
+            print( "Unable to read %s" % path )
+            return
+
+    # now, with those three files, merge them all into one easy to
+    # use lookup table
+    (all_pci_ids, all_modules) = merge_hw_tables.merge_files( modules_dep_path,
+                                                              modules_pcimap_path,
+                                                              pcitable_path )
+    if all_modules is None:
+        print( "Unable to merge pci id tables." )
+        return
+
+    # this is the actual data structure we return
+    system_mods= {}
+
+    # these are the lists that will be in system_mods
+    network_mods= []
+    scsi_mods= []
+
+
+    # get all the system devices from lspci
+    lspci_prog= popen2.Popen3( LSPCI_CMD, 1 )
+    if lspci_prog is None:
+        print( "Unable to run %s with popen2.Popen3" % LSPCI_CMD )
+        return
+
+    returncode= lspci_prog.wait()
+    if returncode != 0:
+        print( "Running %s failed" % LSPCI_CMD )
+        return
+    else:
+        print( "Successfully ran %s" % LSPCI_CMD )
+
+    # for every lspci line, parse in the four tuple PCI id and the
+    # search for the corresponding driver from the dictionary
+    # generated by merge_hw_tables
+    for line in lspci_prog.fromchild:
+        # A sample line:
+        #
+        # 00:1f.1 "Class 0101" "8086" "2411" -r02 -p80 "8086" "2411"
+        #
+        # Remove '"', 'Class ', and anything beginning with '-'
+        # (usually revisions and prog-if flags) so that we can
+        # split on whitespace:
+        #
+        # 00:1f.1 0101 8086 2411 8086 2411
+        #
+        line = line.strip()
+        line = line.replace('"', '')
+        line = line.replace('Class ', '')
+        line = re.sub('-[^ ]*', '', line)
+
+        parts = line.split()
+        try:
+            if len(parts) < 4:
+                raise
+            classid = long(parts[1], 16)
+            vendorid = long(parts[2], 16)
+            deviceid = long(parts[3], 16)
+        except:
+            print "Invalid line:", line
+            continue
+
+        if classid not in (PCI_CLASS_NETWORK_ETHERNET,
+                           PCI_CLASS_STORAGE_SCSI,
+                           PCI_CLASS_STORAGE_RAID,
+                           PCI_CLASS_STORAGE_OTHER,
+                           PCI_CLASS_STORAGE_IDE):
+            continue
+
+        # Device may have a subvendorid and subdeviceid
+        try:
+            subvendorid = long(parts[4], 16)
+            subdeviceid = long(parts[5], 16)
+        except:
+            subvendorid = PCI_ANY
+            subdeviceid = PCI_ANY
+
+        # search for driver that most closely matches the full_id
+        # to drivers that can handle any subvendor/subdevice
+        # version of the hardware.
+        full_ids = ((vendorid,deviceid,subvendorid,subdeviceid),
+                    (vendorid,deviceid,subvendorid,PCI_ANY),
+                    (vendorid,deviceid,PCI_ANY,PCI_ANY))
+
+        for full_id in full_ids:
+            module = all_pci_ids.get(full_id, None)
+            if module is not None:
+                if classid == PCI_CLASS_NETWORK_ETHERNET:
+                    network_mods.append(module[0])
+                elif classid in (PCI_CLASS_STORAGE_SCSI,
+                                 PCI_CLASS_STORAGE_RAID,
+                                 PCI_CLASS_STORAGE_OTHER,
+                                 PCI_CLASS_STORAGE_IDE):
+                    scsi_mods.append(module[0])
+                else:
+                    print "not network or scsi: 0x%x" % classid
+                break
+
+    system_mods[MODULE_CLASS_SCSI]= scsi_mods
+    system_mods[MODULE_CLASS_NETWORK]= network_mods
+
+    return system_mods
+
+
+def getKernelVersion( vars = {} , log = sys.stderr):
+    # make sure we have the variables we need
+    try:
+        SYSIMG_PATH= vars["SYSIMG_PATH"]
+        if SYSIMG_PATH == "":
+            raise ValueError, "SYSIMG_PATH"
+
+        NODE_MODEL_OPTIONS=vars["NODE_MODEL_OPTIONS"]
+    except KeyError, var:
+        raise BootManagerException, "Missing variable in vars: %s\n" % var
+    except ValueError, var:
+        raise BootManagerException, "Variable in vars, shouldn't be: %s\n" % var
+
+    option = ''
+    if NODE_MODEL_OPTIONS & ModelOptions.SMP:
+        option = 'smp'
+        try:
+            os.stat("%s/boot/kernel-boot%s" % (SYSIMG_PATH,option))
+            os.stat("%s/boot/initrd-boot%s" % (SYSIMG_PATH,option))
+        except OSError, e:
+            # smp kernel is not there; remove option from modeloptions
+            # such that the rest of the code base thinks we are just
+            # using the base kernel.
+            NODE_MODEL_OPTIONS = NODE_MODEL_OPTIONS & ~ModelOptions.SMP
+            vars["NODE_MODEL_OPTIONS"] = NODE_MODEL_OPTIONS
+            log.write( "WARNING: Couldn't locate smp kernel.\n")
+            option = ''
+    try:
+        initrd= os.readlink( "%s/boot/initrd-boot%s" % (SYSIMG_PATH,option) )
+        kernel_version= initrd.replace("initrd-", "").replace(".img", "")    
+    except OSError, e:
+        initrd = None
+        kernel_version = None
+        
+    return (initrd, kernel_version)
+
+
+if __name__ == "__main__":
+    devices= get_block_device_list()
+    print "block devices detected:"
+    if not devices:
+        print "no devices found!"
+    else:
+        for dev in devices.keys():
+            print "%s %s" % (dev, repr(devices[dev]))
+            
+
+    print ""
+    memory= get_total_phsyical_mem()
+    if not memory:
+        print "unable to read /proc/meminfo for memory"
+    else:
+        print "total physical memory: %d kb" % memory
+        
+
+    print ""
+
+    kernel_version = None
+    if len(sys.argv) > 1:
+        kernel_version = sys.argv[1]
+        
+    modules= get_system_modules()
+    if not modules:
+        print "unable to list system modules"
+    else:
+        for type in modules:
+            if type == MODULE_CLASS_SCSI:
+                print( "all scsi modules:" )
+                for a_mod in modules[type]:
+                    print a_mod
+            elif type == MODULE_CLASS_NETWORK:
+                print( "all network modules:" )
+                for a_mod in modules[type]:
+                    print a_mod
+                
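To make the /proc/partitions parsing concrete, here is how one sample line from the format block at the top of the file maps to a devicelist entry:

    line = "3     0   40017915 hda"
    major, minor, blocks, name = line.split()
    BLOCKS_PER_GB = pow(10, 9) / 1024.0
    entry = (int(major), int(minor), int(blocks),
             int(blocks) / BLOCKS_PER_GB, False)
    # devicelist["/dev/hda"] would then be roughly
    # (3, 0, 40017915, 40.98, False)
    print entry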
diff --git a/source/utils.py b/source/utils.py
new file mode 100644 (file)
index 0000000..9efea4d
--- /dev/null
@@ -0,0 +1,160 @@
+#!/usr/bin/python2
+
+# Copyright (c) 2003 Intel Corporation
+# All rights reserved.
+#
+# Copyright (c) 2004-2006 The Trustees of Princeton University
+# All rights reserved.
+
+import os, sys, shutil
+import popen2
+import socket
+import fcntl
+import string
+import exceptions
+
+from Exceptions import *
+
+
+def makedirs( path ):
+    """
+    the python docs for os.makedirs note that it throws an exception
+    if the leaf directory already exists or cannot be created.
+
+    That is not very useful here, so instead we create the directory and
+    then separately test for its existence.
+
+    Return 1 if the directory exists and/or has been created; raise a
+    BootManagerException otherwise. Does not test the writability of
+    said directory.
+    """
+    try:
+        os.makedirs( path )
+    except OSError:
+        pass
+    try:
+        os.listdir( path )
+    except OSError:
+        raise BootManagerException, "Unable to create directory tree: %s" % path
+    
+    return 1
+
+
+
+def removedir( path ):
+    """
+    remove a directory tree; return 1 if successful, raise a
+    BootManagerException on failure.
+    """
+    try:
+        os.listdir( path )
+    except OSError:
+        return 1
+
+    try:
+        shutil.rmtree( path )
+    except OSError, desc:
+        raise BootManagerException, "Unable to remove directory tree: %s" % path
+    
+    return 1
+
+
+
+def sysexec( cmd, log= None ):
+    """
+    execute a system command, writing its output to the logger
+    if log is not None.
+
+    return 1 if the command completed with a return code of zero.
+    A BootManagerException is raised if the command fails, cannot
+    be executed, or is interrupted by the user with Ctrl+C.
+    """
+    prog= popen2.Popen4( cmd, 0 )
+    if prog is None:
+        raise BootManagerException, \
+              "Unable to create instance of popen2.Popen3 " \
+              "for command: %s" % cmd
+
+    if log is not None:
+        try:
+            for line in prog.fromchild:
+                log.write( line )
+        except KeyboardInterrupt:
+            raise BootManagerException, "Interrupted by user"
+
+    returncode= prog.wait()
+    if returncode != 0:
+        raise BootManagerException, "Running %s failed (rc=%d)" % (cmd,returncode)
+
+    prog= None
+    return 1
+
+
+def sysexec_noerr( cmd, log= None ):
+    """
+    same as sysexec, but catches BootManagerException and returns 0 on failure instead of raising
+    """
+    try:
+        rc= 0
+        rc= sysexec( cmd, log )
+    except BootManagerException, e:
+        pass
+
+    return rc
+
+
+
+def chdir( dir ):
+    """
+    change to a directory; return 1 if successful, raise a BootManagerException on failure
+    """
+    try:
+        os.chdir( dir )
+    except OSError:
+        raise BootManagerException, "Unable to change to directory: %s" % dir
+
+    return 1
+
+
+
+def removefile( filepath ):
+    """
+    removes a file; return 1 if successful, raise a BootManagerException on failure
+    """
+    try:
+        os.remove( filepath )
+    except OSError:
+        raise BootManagerException, "Unable to remove file: %s" % filepath
+
+    return 1
+
+
+
+# from: http://forums.devshed.com/archive/t-51149/
+#              Ethernet-card-address-Through-Python-or-C
+
+def hexy(n):
+    return "%02x" % (ord(n))
+
+def get_mac_from_interface(ifname):
+    """
+    given a device name, like eth0, return its mac_address.
+    return None if the device doesn't exist.
+    """
+    
+    SIOCGIFHWADDR = 0x8927 # magic number
+
+    s=socket.socket(socket.AF_INET,socket.SOCK_STREAM)
+    ifname = string.strip(ifname)
+    ifr = ifname + '\0'*(32-len(ifname))
+
+    try:
+        r= fcntl.ioctl(s.fileno(),SIOCGIFHWADDR,ifr)
+        addr = map(hexy,r[18:24])
+        ret = (':'.join(map(str, addr)))
+    except IOError, e:
+        ret = None
+        
+    return ret
+
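A brief usage sketch for the helpers above; the mount command, device path, and interface name are examples only:

    import sys
    import utils
    from Exceptions import BootManagerException

    log = sys.stderr
    utils.makedirs("/tmp/sysimg")
    try:
        utils.sysexec("mount /dev/planetlab/root /tmp/sysimg", log)
    except BootManagerException, e:
        log.write("mount failed: %s\n" % str(e))
    utils.sysexec_noerr("umount /tmp/sysimg", log)   # exceptions are swallowed
    print utils.get_mac_from_interface("eth0")       # None if eth0 does not exist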
diff --git a/support-files/Makefile b/support-files/Makefile
new file mode 100644 (file)
index 0000000..43d6c3e
--- /dev/null
@@ -0,0 +1,19 @@
+#
+# Build bootstrap tarballs. The alpina-* files are deprecated and are
+# only included for historical purposes. Version 2 BootCDs still
+# require them; Version 3 BootCDs come with the tools pre-installed.
+#
+# Aaron Klingaman <alk@absarokasoft.com>
+# Mark Huang <mlhuang@cs.princeton.edu>
+# Copyright (C) 2005 The Trustees of Princeton University
+#
+# $Id: Makefile,v 1.4 2006/08/20 21:21:13 thierry Exp $
+# 
+
+all:   PlanetLab-Bootstrap.tar.bz2
+
+PlanetLab-Bootstrap.tar.bz2:
+       ./buildnode.sh
+
+clean:
+       rm -f PlanetLab-Bootstrap.tar.bz2
diff --git a/support-files/buildnode.sh b/support-files/buildnode.sh
new file mode 100755 (executable)
index 0000000..3a36962
--- /dev/null
@@ -0,0 +1,90 @@
+#!/bin/bash
+#
+# Build PlanetLab-Bootstrap.tar.bz2, the reference image for PlanetLab
+# nodes.
+#
+# Mark Huang <mlhuang@cs.princeton.edu>
+# Copyright (C) 2005-2006 The Trustees of Princeton University
+#
+# $Id: buildnode.sh,v 1.12 2006/08/11 13:04:03 thierry Exp $
+#
+
+PATH=/sbin:/bin:/usr/sbin:/usr/bin
+
+# In both a normal CVS environment and a PlanetLab RPM
+# build environment, all of our dependencies are checked out into
+# directories at the same level as us.
+if [ -d ../../build ] ; then
+    PATH=$PATH:../../build
+    srcdir=../..
+else
+    echo "Error: Could not find $(cd ../.. && pwd -P)/build/"
+    exit 1
+fi
+
+export PATH
+
+# Release and architecture to install
+releasever=4
+basearch=i386
+
+usage()
+{
+    echo "Usage: build.sh [OPTION]..."
+    echo "     -r release      Fedora release number (default: $releasever)"
+    echo "     -a arch         Fedora architecture (default: $basearch)"
+    echo "     -h              This message"
+    exit 1
+}
+
+# Get options
+while getopts "r:a:h" opt ; do
+    case $opt in
+       r)
+           releasever=$OPTARG
+           ;;
+       a)
+           basearch=$OPTARG
+           ;;
+       h|*)
+           usage
+           ;;
+    esac
+done
+
+# Do not tolerate errors
+set -e
+
+VROOT=$PWD/PlanetLab-Bootstrap
+install -d -m 755 $VROOT
+
+# Some of the PlanetLab RPMs attempt to (re)start themselves in %post,
+# unless the installation is running inside the BootCD environment. We
+# would like to pretend that we are.
+export PL_BOOTCD=1
+
+# Install the "PlanetLab" group. This requires that the PlanetLab
+# build system install the appropriate yumgroups.xml file (currently
+# build/groups/v3_yumgroups.xml) in $RPM_BUILD_DIR/../RPMS/ and that
+# mkfedora runs either yum-arch or createrepo on that directory. dev
+# is specified explicitly because of a stupid bug in its %post script
+# that causes its installation to fail; see the mkfedora script for a
+# full explanation. coreutils and python are specified explicitly
+# because groupinstall does not honor Requires(pre) dependencies
+# properly, most %pre scripts require coreutils to be installed first,
+# and some of our %post scripts require python.
+mkfedora -v -r $releasever -a $basearch -k -p udev -p coreutils -p python -g PlanetLab $VROOT
+
+# Disable unnecessary services
+echo "* Disabling unnecessary services"
+for service in netfs rawdevices cpuspeed smartd ; do
+    if [ -x $VROOT/etc/init.d/$service ] ; then
+       /usr/sbin/chroot $VROOT /sbin/chkconfig $service off
+    fi
+done
+
+# Build tarball
+echo "* Building bootstrap tarball"
+tar -cpjf PlanetLab-Bootstrap.tar.bz2 -C $VROOT .
+
+exit 0
diff --git a/support-files/desc b/support-files/desc
new file mode 100644 (file)
index 0000000..c19c2a3
--- /dev/null
@@ -0,0 +1 @@
+The BuildSupport directory is used to build tarballs of files needed during the install, including files to bootstrap RPM, handle booting off of lvm volumes, and partition disks.
diff --git a/support-files/uudecode.gz b/support-files/uudecode.gz
new file mode 100755 (executable)
index 0000000..040c34f
Binary files /dev/null and b/support-files/uudecode.gz differ