From: Mark Huang Date: Mon, 21 Aug 2006 21:21:12 +0000 (+0000) Subject: merge to HEAD as of 2006-08-21 X-Git-Tag: myplc-0_4-rc3~2 X-Git-Url: http://git.onelab.eu/?p=myplc.git;a=commitdiff_plain;h=13c17a98f3919a7fb214199db2b8cd0a1d87230b merge to HEAD as of 2006-08-21 --- diff --git a/build.functions b/build.functions index edc6b95..5b7ad07 100644 --- a/build.functions +++ b/build.functions @@ -6,7 +6,7 @@ # Mark Huang # Copyright (C) 2006 The Trustees of Princeton University # -# $Id: build.functions,v 1.2 2006/07/24 19:32:23 mlhuang Exp $ +# $Id: build.functions,v 1.4 2006/08/16 01:27:16 mlhuang Exp $ # PATH=/sbin:/bin:/usr/sbin:/usr/bin @@ -29,13 +29,16 @@ PLC_DEVEL_FEDORA_RELEASE=4 PLC_DEVEL_FEDORA_ARCH=i386 # Fedora Core mirror from which to install filesystems -PLC_DEVEL_FEDORA_URL=file:///usr/share/mirrors/fedora +PLC_DEVEL_FEDORA_URL=file:///data/fedora # Build myplc inside myplc-devel PLC_DEVEL_BOOTSTRAP=true # Source tag to use for initial import of sources into local CVS -TAG=planetlab-$(date +%Y-%m-%d) +IMPORT_TAG=planetlab-$(date +%Y-%m-%d) + +# Source tag to use for building from local CVS +BUILD_TAG=HEAD # We may be running inside a myplc-devel environment, which can # override these defaults. Specifically, whether to build myplc inside @@ -68,7 +71,8 @@ while getopts "l:r:a:t:h" opt ; do PLC_DEVEL_FEDORA_ARCH=$OPTARG ;; t) - TAG=$OPTARG + IMPORT_TAG=$OPTARG + BUILD_TAG=$OPTARG ;; h|*) usage diff --git a/build.sh b/build.sh index 5520aee..3635abf 100755 --- a/build.sh +++ b/build.sh @@ -10,11 +10,12 @@ # root/ (mount point) # data/ (various data files) # data/etc/planetlab/ (configuration files) +# data/root (root's homedir) # # Mark Huang # Copyright (C) 2006 The Trustees of Princeton University # -# $Id: build.sh,v 1.27 2006/07/24 19:32:23 mlhuang Exp $ +# $Id: build.sh,v 1.33 2006/08/18 14:35:52 thierry Exp $ # . 
build.functions @@ -34,14 +35,14 @@ if [ "$PLC_DEVEL_BOOTSTRAP" = "true" ] ; then # If we used a local mirror, bind mount it into the chroot so that # we can use it again. if [ "${PLC_DEVEL_FEDORA_URL:0:7}" = "file://" ] ; then - mkdir -p devel/root/usr/share/mirrors/fedora - mount -o bind,ro ${PLC_DEVEL_FEDORA_URL#file://} devel/root/usr/share/mirrors/fedora + mkdir -p devel/root/data/fedora + mount -o bind,ro ${PLC_DEVEL_FEDORA_URL#file://} devel/root/data/fedora fi # Clean up before exiting if anything goes wrong - trap "umount $PWD/devel/root/data; - umount $PWD/devel/root/proc; - umount $PWD/devel/root/usr/share/mirrors/fedora" ERR INT + trap "umount $PWD/devel/root/data/fedora; + umount $PWD/devel/root/data; + umount $PWD/devel/root/proc" ERR INT # Build myplc inside myplc-devel. Make sure PLC_DEVEL_BOOTSTRAP is # false to avoid infinite recursion. @@ -51,17 +52,17 @@ service plc start plc-config --category=plc_devel --variable=bootstrap --value="false" --save service plc reload cd / -cvs -d /cvs checkout -r $TAG build -make TAG=$TAG -C /build myplc +cvs -d /cvs checkout -r $BUILD_TAG build +make TAG=$BUILD_TAG -C /build myplc EOF # Yoink the image that was just built mv devel/data/build/BUILD/myplc-*/myplc/root{,.img} devel/data/build/BUILD/myplc-*/myplc/data . 
# Clean up + umount devel/root/data/fedora || : umount devel/root/data umount devel/root/proc - umount devel/root/usr/share/mirrors/fedora || : rm -rf devel/data/build mkdir -p devel/data/build @@ -87,6 +88,7 @@ echo "* myplc: Installing configuration scripts" install -D -m 755 plc_config.py root/tmp/plc_config.py chroot root sh -c 'cd /tmp; python plc_config.py build; python plc_config.py install' install -D -m 755 plc-config root/usr/bin/plc-config +install -D -m 755 plc-config-tty root/usr/bin/plc-config-tty install -D -m 755 api-config root/usr/bin/api-config install -D -m 755 db-config root/usr/bin/db-config install -D -m 755 dns-config root/usr/bin/dns-config @@ -125,9 +127,20 @@ echo "* myplc: Installing configuration file" install -D -m 444 $config data/etc/planetlab/default_config.xml install -D -m 444 plc_config.dtd data/etc/planetlab/plc_config.dtd +# handle root's homedir and tweak root prompt +echo "* myplc: root's homedir and prompt" +roothome=data/root +mkdir -p $roothome +cat << EOF > $roothome/.profile +export PS1=" \$PS1" +EOF +chmod 644 $roothome/.profile + # Move "data" directories out of the installation +echo "* myplc: Moving data directories out of the installation" datadirs=( /etc/planetlab +/root /var/lib/pgsql /var/www/html/alpina-logs /var/www/html/boot diff --git a/build_devel.sh b/build_devel.sh index 4e9a8b3..d091ff2 100755 --- a/build_devel.sh +++ b/build_devel.sh @@ -10,11 +10,12 @@ # devel/data/cvs/ (local CVS repository) # devel/data/build/ (build area) # devel/data/etc/planetlab/ (configuration) +# devel/data/root (root's home dir) # # Mark Huang # Copyright (C) 2006 The Trustees of Princeton University # -# $Id: build_devel.sh,v 1.2 2006/07/24 19:32:23 mlhuang Exp $ +# $Id: build_devel.sh,v 1.5 2006/08/18 14:35:52 thierry Exp $ # . build.functions @@ -40,7 +41,7 @@ for dir in * ; do else ignore="-I !" 
fi - cvs -d $cvsroot import -m "Initial import" -ko $ignore $dir planetlab $TAG + cvs -d $cvsroot import -m "Initial import" -ko $ignore $dir planetlab $IMPORT_TAG popd fi done @@ -56,6 +57,7 @@ echo "* myplc-devel: Installing configuration scripts" install -D -m 755 plc_config.py devel/root/tmp/plc_config.py chroot devel/root sh -c 'cd /tmp; python plc_config.py build; python plc_config.py install' install -D -m 755 plc-config devel/root/usr/bin/plc-config +install -D -m 755 plc-config-tty devel/root/usr/bin/plc-config-tty # Install initscripts echo "* myplc-devel: Installing initscripts" @@ -63,10 +65,19 @@ find plc.d/functions | cpio -p -d -u devel/root/etc/ install -D -m 755 guest.init devel/root/etc/init.d/plc chroot devel/root sh -c 'chkconfig --add plc; chkconfig plc on' +# handle root's homedir and tweak root prompt +echo "* myplc-devel: root's homedir and prompt" +roothome=devel/data/root +mkdir -p $roothome +cat << EOF > $roothome/.profile +export PS1=" \$PS1" +EOF +chmod 644 $roothome/.profile + # Move "data" directories out of the installation echo "* myplc-devel: Moving data directories out of the installation" move_datadirs devel/root devel/data \ - /etc/planetlab /build /cvs + /etc/planetlab /build /cvs /root # Make image out of directory echo "* myplc-devel: Building loopback image" diff --git a/doc/Makefile b/doc/Makefile index f1cb444..db5736c 100644 --- a/doc/Makefile +++ b/doc/Makefile @@ -4,7 +4,7 @@ # Mark Huang # Copyright (C) 2006 The Trustees of Princeton University # -# $Id: Makefile,v 1.5 2006/07/18 22:41:44 mlhuang Exp $ +# $Id: Makefile,v 1.6 2006/08/11 12:39:20 thierry Exp $ # vpath GenDoc.xsl ../../plc_www/doc @@ -12,6 +12,8 @@ vpath %_config.xml .. 
all: myplc.pdf myplc.php +.PHONY: all + # Dependencies .myplc.xml.valid: architecture.eps architecture.png plc_variables.xml plc_devel_variables.xml @@ -43,7 +45,7 @@ endef $(foreach format,$(FORMATS),$(eval $(call docbook2,$(format)))) docclean: - rm -f $(patsubst %,*.%,$(FORMATS)) *.pdf .*.xml.valid variables.xml + rm -f $(patsubst %,*.%,$(FORMATS)) *.pdf *.php .*.xml.valid variables.xml clean: docclean diff --git a/doc/myplc.pdf b/doc/myplc.pdf index 5937bd9..09412d2 100644 Binary files a/doc/myplc.pdf and b/doc/myplc.pdf differ diff --git a/doc/myplc.php b/doc/myplc.php index d70807b..be76869 100644 --- a/doc/myplc.php +++ b/doc/myplc.php @@ -13,7 +13,7 @@

-MyPLC User's Guide

+MyPLC User's Guide

Mark Huang

@@ -29,6 +29,15 @@ + + + + + +
Revision History
MLH

Add development environment.

Revision 1.2August 18, 2006TPT
+

Review section on configuration and introduce plc-config-tty.

+

Present implementation details last.

+

MyPLC is a complete PlanetLab Central (PLC) portable installation contained within a chroot jail. The default installation consists of a web server, an @@ -89,241 +109,202 @@ system.

+
+

+1.1.  Purpose of the myplc-devel + package

+

The myplc package comes with all + required node software, rebuilt from the public PlanetLab CVS + repository. If for any reason you need to implement your own + customized version of this software, you can use the + myplc-devel package instead, for setting up + your own development environment, including a local CVS + repository; you can then freely manage your changes and rebuild + your customized version of myplc. We also + provide good practices, that will then allow you to resync your local + CVS repository with any further evolution on the mainstream public + PlanetLab software.

+

-2. Installation

+2.  Requirements
+

myplc and + myplc-devel were designed as + chroot jails so as to reduce the requirements on + your host operating system. So in theory, these distributions should + work on virtually any Linux 2.6 based distribution, whether it + supports rpm or not.

+

However, things are never that simple and there indeed are + some known limitations to this, so here are a couple notes as a + recommended reading before you proceed with the installation.

+

As of 17 August 2006 (i.e. myplc-0.5-2):

+
    +
  • The software is largely based on Fedora + Core 4. Please note that the build server at Princeton + runs Fedora Core 2, together with an upgraded + version of yum. +

  • +
  • +

    myplc and myplc-devel are known to work on both + Fedora Core 2 and Fedora Core + 4. Please note however that, on fc4 at least, it is + highly recommended to use the Security Level + Configuration utility and to switch off + SElinux on your box because :

    +
      +
    • + myplc requires you to run SElinux as 'Permissive' at most +

    • +
    • + myplc-devel requires you to turn SElinux Off. +

    • +
    +
  • +
  • In addition, as far as myplc is concerned, you + need to check your firewall configuration since you need, of course, + to open up the http and + https ports, so as to accept connections from + the managed nodes and from the users' desktops.

  • +
+ +
+

+3. Installing and using MyPLC

Though internally composed of commodity software subpackages, MyPLC should be treated as a monolithic software application. MyPLC is distributed as single RPM package that has no external dependencies, allowing it to be installed on - practically any Linux 2.6 based distribution:

-
-

Example 1. Installing MyPLC.

-
# If your distribution supports RPM
-rpm -U http://build.planet-lab.org/build/myplc-0_4-rc1/RPMS/i386/myplc-0.4-1.planetlab.i386.rpm
-
-# If your distribution does not support RPM
-cd /tmp
-wget http://build.planet-lab.org/build/myplc-0_4-rc1/RPMS/i386/myplc-0.4-1.planetlab.i386.rpm
-cd /
-rpm2cpio /tmp/myplc-0.4-1.planetlab.i386.rpm | cpio -diu
-
-

MyPLC installs the following files and directories:

+ practically any Linux 2.6 based distribution.

+
+

+3.1. Installing MyPLC.

    -
  • /plc/root.img: The main - root filesystem of the MyPLC application. This file is an - uncompressed ext3 filesystem that is loopback mounted on - /plc/root when MyPLC starts. This - filesystem, even when mounted, should be treated as an opaque - binary that can and will be replaced in its entirety by any - upgrade of MyPLC.

  • -
  • /plc/root: The mount point - for /plc/root.img. Once the root filesystem - is mounted, all MyPLC services run in a - chroot jail based in this - directory.

  • -

    /plc/data: The directory where user - data and generated files are stored. This directory is bind - mounted onto /plc/root/data so that it is - accessible as /data from within the - chroot jail. Files in this directory are - marked with %config(noreplace) in the - RPM. That is, during an upgrade of MyPLC, if a file has not - changed since the last installation or upgrade of MyPLC, it is - subject to upgrade and replacement. If the file has changed, - the new version of the file will be created with a - .rpmnew extension. Symlinks within the - MyPLC root filesystem ensure that the following directories - (relative to /plc/root) are stored - outside the MyPLC filesystem image:

    -
      -
    • /etc/planetlab: This - directory contains the configuration files, keys, and - certificates that define your MyPLC - installation.

    • -
    • /var/lib/pgsql: This - directory contains PostgreSQL database - files.

    • -
    • /var/www/html/alpina-logs: This - directory contains node installation logs.

    • -
    • /var/www/html/boot: This - directory contains the Boot Manager, customized for your MyPLC - installation, and its data files.

    • -
    • /var/www/html/download: This - directory contains Boot CD images, customized for your MyPLC - installation.

    • -
    • /var/www/html/install-rpms: This - directory is where you should install node package updates, - if any. By default, nodes are installed from the tarball - located at - /var/www/html/boot/PlanetLab-Bootstrap.tar.bz2, - which is pre-built from the latest PlanetLab Central - sources, and installed as part of your MyPLC - installation. However, nodes will attempt to install any - newer RPMs located in - /var/www/html/install-rpms/planetlab, - after initial installation and periodically thereafter. You - must run yum-arch and - createrepo to update the - yum caches in this directory after - installing a new RPM. PlanetLab Central cannot support any - changes to this directory.

    • -
    • /var/www/html/xml: This - directory contains various XML files that the Slice Creation - Service uses to determine the state of slices. These XML - files are refreshed periodically by cron - jobs running in the MyPLC root.

    • -
    +

    If your distribution supports RPM:

    +
    # rpm -U http://build.planet-lab.org/build/myplc-0_4-rc1/RPMS/i386/myplc-0.4-1.planetlab.i386.rpm
  • -

    /etc/init.d/plc: This file - is a System V init script installed on your host filesystem, - that allows you to start up and shut down MyPLC with a single - command. On a Red Hat or Fedora host system, it is customary to - use the service command to invoke System V - init scripts:

    +

    If your distribution does not support RPM:

    +
    # cd /tmp
    +# wget http://build.planet-lab.org/build/myplc-0_4-rc1/RPMS/i386/myplc-0.4-1.planetlab.i386.rpm
    +# cd /
    +# rpm2cpio /tmp/myplc-0.4-1.planetlab.i386.rpm | cpio -diu
    +
  • +
+

The Section 3.9, “ Files and directories + involved in myplc” below explains in + details the installation strategy and the miscellaneous files and + directories involved.

+
+
+

+3.2.  QuickStart

+

On a Red Hat or Fedora host system, it is customary to use + the service command to invoke System V init + scripts. As the examples suggest, the service must be started as root:

-

Example 2. Starting and stopping MyPLC.

-
# Starting MyPLC
-service plc start
-
-# Stopping MyPLC
-service plc stop
+

Example 1. Starting MyPLC:

+
# service plc start
-

Like all other registered System V init services, MyPLC is - started and shut down automatically when your host system boots - and powers off. You may disable automatic startup by invoking - the chkconfig command on a Red Hat or Fedora - host system:

-

Example 3. Disabling automatic startup of MyPLC.

-
# Disable automatic startup
-chkconfig plc off
-
-# Enable automatic startup
-chkconfig plc on
+

Example 2. Stopping MyPLC:

+
# service plc stop
- -
  • /etc/sysconfig/plc: This - file is a shell script fragment that defines the variables - PLC_ROOT and PLC_DATA. By default, - the values of these variables are /plc/root - and /plc/data, respectively. If you wish, - you may move your MyPLC installation to another location on your - host filesystem and edit the values of these variables - appropriately, but you will break the RPM upgrade - process. PlanetLab Central cannot support any changes to this - file.

  • -
  • /etc/planetlab: This - symlink to /plc/data/etc/planetlab is - installed on the host system for convenience.

  • -
    +

    In Section 3.8, “Understanding the startup sequence”, we provide greater + details that might be helpful in the case where the service does + not seem to take off correctly.

    +

    Like all other registered System V init services, MyPLC is + started and shut down automatically when your host system boots + and powers off. You may disable automatic startup by invoking the + chkconfig command on a Red Hat or Fedora host + system:

    +
    +

    Example 3. Disabling automatic startup of MyPLC.

    +
    # chkconfig plc off
    -
    -

    -3. Quickstart

    -

    Once installed, start MyPLC (see Example 2, “Starting and stopping MyPLC.”). MyPLC must be started as - root. Observe the output of this command for any failures. If no - failures occur, you should see output similar to the - following:

    -

    Example 4. A successful MyPLC startup.

    -
    Mounting PLC:                                              [  OK  ]
    -PLC: Generating network files:                             [  OK  ]
    -PLC: Starting system logger:                               [  OK  ]
    -PLC: Starting database server:                             [  OK  ]
    -PLC: Generating SSL certificates:                          [  OK  ]
    -PLC: Configuring the API:                                  [  OK  ]
    -PLC: Updating GPG keys:                                    [  OK  ]
    -PLC: Generating SSH keys:                                  [  OK  ]
    -PLC: Starting web server:                                  [  OK  ]
    -PLC: Bootstrapping the database:                           [  OK  ]
    -PLC: Starting DNS server:                                  [  OK  ]
    -PLC: Starting crond:                                       [  OK  ]
    -PLC: Rebuilding Boot CD:                                   [  OK  ]
    -PLC: Rebuilding Boot Manager:                              [  OK  ]
    -PLC: Signing node packages:                                [  OK  ]
    -
    +

    Example 4. Re-enabling automatic startup of MyPLC.

    +
    # chkconfig plc on
    +
    -

    If /plc/root is mounted successfully, a - complete log file of the startup process may be found at - /plc/root/var/log/boot.log. Possible reasons - for failure of each step include:

    -
      -
    • Mounting PLC: If this step - fails, first ensure that you started MyPLC as root. Check - /etc/sysconfig/plc to ensure that - PLC_ROOT and PLC_DATA refer to the - right locations. You may also have too many existing loopback - mounts, or your kernel may not support loopback mounting, bind - mounting, or the ext3 filesystem. Try freeing at least one - loopback device, or re-compiling your kernel to support loopback - mounting, bind mounting, and the ext3 filesystem. If you see an - error similar to Permission denied while trying to open - /plc/root.img, then SELinux may be enabled. If you - installed MyPLC on Fedora Core 4 or 5, use the - Security Level Configuration utility - to configure SELinux to be - Permissive.

    • -
    • Starting database server: If - this step fails, check - /plc/root/var/log/pgsql and - /plc/root/var/log/boot.log. The most common - reason for failure is that the default PostgreSQL port, TCP port - 5432, is already in use. Check that you are not running a - PostgreSQL server on the host system.

    • -
    • Starting web server: If this - step fails, check - /plc/root/var/log/httpd/error_log and - /plc/root/var/log/boot.log for obvious - errors. The most common reason for failure is that the default - web ports, TCP ports 80 and 443, are already in use. Check that - you are not running a web server on the host - system.

    • -
    • Bootstrapping the database: - If this step fails, it is likely that the previous step - (Starting web server) also failed. Another - reason that it could fail is if PLC_API_HOST (see - Section 3.1, “Changing the configuration”) does not resolve to - the host on which the API server has been enabled. By default, - all services, including the API server, are enabled and run on - the same host, so check that PLC_API_HOST is - either localhost or resolves to a local IP - address.

    • -
    • Starting crond: If this step - fails, it is likely that the previous steps (Starting - web server and Bootstrapping the - database) also failed. If not, check - /plc/root/var/log/boot.log for obvious - errors. This step starts the cron service and - generates the initial set of XML files that the Slice Creation - Service uses to determine slice state.

    • -
    -

    If no failures occur, then MyPLC should be active with a - default configuration. Open a web browser on the host system and - visit http://localhost/, which should bring you - to the front page of your PLC installation. The password of the - default administrator account - root@localhost.localdomain (set by - PLC_ROOT_USER) is root (set by - PLC_ROOT_PASSWORD).

    -3.1. Changing the configuration

    +3.3. Changing the configuration

    After verifying that MyPLC is working correctly, shut it down and begin changing some of the default variable values. Shut down MyPLC with service plc stop - (see Example 2, “Starting and stopping MyPLC.”). With a text - editor, open the file - /etc/planetlab/plc_config.xml. This file is - a self-documenting configuration file written in XML. Variables - are divided into categories. Variable identifiers must be - alphanumeric, plus underscore. A variable is referred to - canonically as the uppercase concatenation of its category - identifier, an underscore, and its variable identifier. Thus, a - variable with an id of + (see Section 3.2, “ QuickStart ”).

    +

    The preferred option for changing the configuration is to + use the plc-config-tty tool. This tool comes + with the root image, so you need to have it mounted first. The + full set of applicable variables is described in Appendix B, Development configuration variables (for myplc-devel), but using the u command + guides you to the most useful ones. Here is a sample session: +

    +
    +

    Example 5. Using plc-config-tty for configuration:

    +
    # service plc mount
    +Mounting PLC:                                              [  OK  ]
    +# chroot /plc/root su - 
    +<plc> # plc-config-tty
    +Config file /etc/planetlab/configs/site.xml located under a non-existing directory
    +Want to create /etc/planetlab/configs [y]/n ? y
    +Created directory /etc/planetlab/configs
    +Enter command (u for usual changes, w to save, ? for help) u
    +== PLC_NAME : [PlanetLab Test] OneLab
    +== PLC_ROOT_USER : [root@localhost.localdomain] root@odie.inria.fr
    +== PLC_ROOT_PASSWORD : [root] plain-passwd
    +== PLC_MAIL_SUPPORT_ADDRESS : [root+support@localhost.localdomain] support@one-lab.org
    +== PLC_DB_HOST : [localhost.localdomain] odie.inria.fr
    +== PLC_API_HOST : [localhost.localdomain] odie.inria.fr
    +== PLC_WWW_HOST : [localhost.localdomain] odie.inria.fr
    +== PLC_BOOT_HOST : [localhost.localdomain] odie.inria.fr
    +== PLC_NET_DNS1 : [127.0.0.1] 138.96.250.248
    +== PLC_NET_DNS2 : [None] 138.96.250.249
    +Enter command (u for usual changes, w to save, ? for help) w
    +Wrote /etc/planetlab/configs/site.xml
    +Merged
    +        /etc/planetlab/default_config.xml
    +and     /etc/planetlab/configs/site.xml
    +into    /etc/planetlab/plc_config.xml
    +You might want to type 'r' (restart plc) or 'q' (quit)
    +Enter command (u for usual changes, w to save, ? for help) r
    +==================== Stopping plc
    +...
    +==================== Starting plc
    +...
    +Enter command (u for usual changes, w to save, ? for help) q
    +<plc> # exit
    +# 
    +
    +
    +

    If you used this method for configuring, you can skip to + Section 3.4, “ Login as a real user ”. As an alternative to using + plc-config-tty, you may also use a text + editor, but this requires some understanding of how the + configuration files are used within myplc. The + default configuration is stored in a file + named /etc/planetlab/default_config.xml, + which is designed to remain intact. You may store your local + changes in any file located in the configs/ + sub-directory; these files are loaded on top of the defaults. Finally + the file /etc/planetlab/plc_config.xml is + loaded, and the resulting configuration is stored in the latter + file, which is used as a reference.

    +

    Using a separate file for storing local changes only, as + plc-config-tty does, is not a workable option + with a text editor because it would involve tedious xml + re-assembling. So your local changes should go in + /etc/planetlab/plc_config.xml. Be warned + however that any change you might do this way could be lost if + you use plc-config-tty later on.

    +

    This file is a self-documenting configuration file written + in XML. Variables are divided into categories. Variable + identifiers must be alphanumeric, plus underscore. A variable is + referred to canonically as the uppercase concatenation of its + category identifier, an underscore, and its variable + identifier. Thus, a variable with an id of slice_prefix in the plc category is referred to canonically as PLC_SLICE_PREFIX.

    @@ -351,16 +332,33 @@ PLC: Signing node packages: [ OK ] preferred FQDN and external IP address of your host system.

    -

    After changing these variables, save the file, then - restart MyPLC with service plc start. You - should notice that the password of the default administrator - account is no longer root, and that the - default site name includes the name of your PLC installation - instead of PlanetLab.

    +

    After changing these variables, + save the file, then restart MyPLC with service plc + start. You should notice that the password of the + default administrator account is no longer + root, and that the default site name includes + the name of your PLC installation instead of PlanetLab. As a + side effect of these changes, the ISO images for the boot CDs + now have new names, so that you can freely remove the ones named + after 'PlanetLab Test', which is the default value of + PLC_NAME.

    + +
    +

    +3.4.  Login as a real user

    +

    Now that myplc is up and running, you can connect to the + web site, which by default runs on port 80. You can either + directly use the default administrator user that you configured + in PLC_ROOT_USER and + PLC_ROOT_PASSWORD, or create a real user through + the 'Joining' tab. Do not forget to select both PI and tech + roles, and to select the only site created at this stage. + Log in as the administrator to enable this user, then log in as + the real user.

    -3.2. Installing nodes

    +3.5. Installing nodes

    Install your first node by clicking Add Node under the Nodes tab. Fill in all the appropriate details, then click @@ -384,12 +382,12 @@ PLC: Signing node packages: [ OK ]

    -3.3. Administering nodes

    +3.6. Administering nodes

    You may administer nodes as root by using the SSH key stored in /etc/planetlab/root_ssh_key.rsa.

    -

    Example 5. Accessing nodes via SSH. Replace +

    Example 6. Accessing nodes via SSH. Replace node with the hostname of the node.

    ssh -i /etc/planetlab/root_ssh_key.rsa root@node
    @@ -412,7 +410,7 @@ PLC: Signing node packages: [ OK ]

    -3.4. Creating a slice

    +3.7. Creating a slice

    Create a slice by clicking Create Slice under the Slices tab. Fill in all the appropriate details, then click Create. Add @@ -427,7 +425,7 @@ PLC: Signing node packages: [ OK ] to determine if it needs to create or delete any slices. You may accelerate this process manually if desired.

    -

    Example 6. Forcing slice creation on a node.

    +

    Example 7. Forcing slice creation on a node.

    # Update slices.xml immediately
     service plc start crond
     
    @@ -436,10 +434,193 @@ ssh -i /etc/planetlab/root_ssh_key.rsa root@node \
     vserver pl_conf exec service pl_conf restart
    +
    +

    +3.8. Understanding the startup sequence

    +

    During service startup described in Section 3.2, “ QuickStart ”, observe the output of the startup command for + any failures. If no failures occur, you should see output similar + to the following:

    +
    +

    Example 8. A successful MyPLC startup.

    +
    Mounting PLC:                                              [  OK  ]
    +PLC: Generating network files:                             [  OK  ]
    +PLC: Starting system logger:                               [  OK  ]
    +PLC: Starting database server:                             [  OK  ]
    +PLC: Generating SSL certificates:                          [  OK  ]
    +PLC: Configuring the API:                                  [  OK  ]
    +PLC: Updating GPG keys:                                    [  OK  ]
    +PLC: Generating SSH keys:                                  [  OK  ]
    +PLC: Starting web server:                                  [  OK  ]
    +PLC: Bootstrapping the database:                           [  OK  ]
    +PLC: Starting DNS server:                                  [  OK  ]
    +PLC: Starting crond:                                       [  OK  ]
    +PLC: Rebuilding Boot CD:                                   [  OK  ]
    +PLC: Rebuilding Boot Manager:                              [  OK  ]
    +PLC: Signing node packages:                                [  OK  ]
    +
    +
    +

    If /plc/root is mounted successfully, a + complete log file of the startup process may be found at + /plc/root/var/log/boot.log. Possible reasons + for failure of each step include:

    +
      +
    • Mounting PLC: If this step + fails, first ensure that you started MyPLC as root. Check + /etc/sysconfig/plc to ensure that + PLC_ROOT and PLC_DATA refer to the + right locations. You may also have too many existing loopback + mounts, or your kernel may not support loopback mounting, bind + mounting, or the ext3 filesystem. Try freeing at least one + loopback device, or re-compiling your kernel to support loopback + mounting, bind mounting, and the ext3 filesystem. If you see an + error similar to Permission denied while trying to open + /plc/root.img, then SELinux may be enabled. See Section 2, “ Requirements ” above for details.

    • +
    • Starting database server: If + this step fails, check + /plc/root/var/log/pgsql and + /plc/root/var/log/boot.log. The most common + reason for failure is that the default PostgreSQL port, TCP port + 5432, is already in use. Check that you are not running a + PostgreSQL server on the host system.

    • +
    • Starting web server: If this + step fails, check + /plc/root/var/log/httpd/error_log and + /plc/root/var/log/boot.log for obvious + errors. The most common reason for failure is that the default + web ports, TCP ports 80 and 443, are already in use. Check that + you are not running a web server on the host + system.

    • +
    • Bootstrapping the database: + If this step fails, it is likely that the previous step + (Starting web server) also failed. Another + reason that it could fail is if PLC_API_HOST (see + Section 3.3, “Changing the configuration”) does not resolve to + the host on which the API server has been enabled. By default, + all services, including the API server, are enabled and run on + the same host, so check that PLC_API_HOST is + either localhost or resolves to a local IP + address. Also check that PLC_ROOT_USER looks like + an e-mail address.

    • +
    • Starting crond: If this step + fails, it is likely that the previous steps (Starting + web server and Bootstrapping the + database) also failed. If not, check + /plc/root/var/log/boot.log for obvious + errors. This step starts the cron service and + generates the initial set of XML files that the Slice Creation + Service uses to determine slice state.

    • +
    +

    If no failures occur, then MyPLC should be active with a + default configuration. Open a web browser on the host system and + visit http://localhost/, which should bring you + to the front page of your PLC installation. The password of the + default administrator account + root@localhost.localdomain (set by + PLC_ROOT_USER) is root (set by + PLC_ROOT_PASSWORD).

    +
    +
    +

    +3.9.  Files and directories + involved in myplc

    +

    MyPLC installs the following files and directories:

    +
      +
    1. /plc/root.img: The main + root filesystem of the MyPLC application. This file is an + uncompressed ext3 filesystem that is loopback mounted on + /plc/root when MyPLC starts. This + filesystem, even when mounted, should be treated as an opaque + binary that can and will be replaced in its entirety by any + upgrade of MyPLC.

    2. +
    3. /plc/root: The mount point + for /plc/root.img. Once the root filesystem + is mounted, all MyPLC services run in a + chroot jail based in this + directory.

    4. +
    5. +

      /plc/data: The directory where user + data and generated files are stored. This directory is bind + mounted onto /plc/root/data so that it is + accessible as /data from within the + chroot jail. Files in this directory are + marked with %config(noreplace) in the + RPM. That is, during an upgrade of MyPLC, if a file has not + changed since the last installation or upgrade of MyPLC, it is + subject to upgrade and replacement. If the file has changed, + the new version of the file will be created with a + .rpmnew extension. Symlinks within the + MyPLC root filesystem ensure that the following directories + (relative to /plc/root) are stored + outside the MyPLC filesystem image:

      +
        +
      • /etc/planetlab: This + directory contains the configuration files, keys, and + certificates that define your MyPLC + installation.

      • +
      • /var/lib/pgsql: This + directory contains PostgreSQL database + files.

      • +
      • /var/www/html/alpina-logs: This + directory contains node installation logs.

      • +
      • /var/www/html/boot: This + directory contains the Boot Manager, customized for your MyPLC + installation, and its data files.

      • +
      • /var/www/html/download: This + directory contains Boot CD images, customized for your MyPLC + installation.

      • +
      • /var/www/html/install-rpms: This + directory is where you should install node package updates, + if any. By default, nodes are installed from the tarball + located at + /var/www/html/boot/PlanetLab-Bootstrap.tar.bz2, + which is pre-built from the latest PlanetLab Central + sources, and installed as part of your MyPLC + installation. However, nodes will attempt to install any + newer RPMs located in + /var/www/html/install-rpms/planetlab, + after initial installation and periodically thereafter. You + must run yum-arch and + createrepo to update the + yum caches in this directory after + installing a new RPM. PlanetLab Central cannot support any + changes to this directory.

      • +
      • /var/www/html/xml: This + directory contains various XML files that the Slice Creation + Service uses to determine the state of slices. These XML + files are refreshed periodically by cron + jobs running in the MyPLC root.

      • +
      • /root: this is the + location of the root-user's homedir, and for your + convenience is stored under /data so + that your local customizations survive across + updates - this feature is inherited from the + myplc-devel package, where it is probably + more useful.

      • +
      +
    6. +
    7. /etc/init.d/plc: This file + is a System V init script installed on your host filesystem, + that allows you to start up and shut down MyPLC with a single + command, as described in Section 3.2, “ QuickStart ”.

    8. +
    9. /etc/sysconfig/plc: This + file is a shell script fragment that defines the variables + PLC_ROOT and PLC_DATA. By default, + the values of these variables are /plc/root + and /plc/data, respectively. If you wish, + you may move your MyPLC installation to another location on your + host filesystem and edit the values of these variables + appropriately, but you will break the RPM upgrade + process. PlanetLab Central cannot support any changes to this + file.

    10. +
    11. /etc/planetlab: This + symlink to /plc/data/etc/planetlab is + installed on the host system for convenience.

    12. +
    +

    -4. Rebuilding and customizing MyPLC

    +4. Rebuilding and customizing MyPLC

    The MyPLC package, though distributed as an RPM, is not a traditional package that can be easily rebuilt from SRPM. The requisite build environment is quite extensive and numerous @@ -456,7 +637,7 @@ vserver pl_conf exec service pl_conf restart repository.

    -4.1. Installation

    +4.1. Installation

    Install the MyPLC development environment similarly to how you would install MyPLC. You may install both packages on the same host system if you wish. As with MyPLC, the MyPLC development @@ -464,17 +645,35 @@ vserver pl_conf exec service pl_conf restart application, and any files present in the chroot jail should not be modified directly, as they are subject to upgrade.

    -
    -

    Example 7. Installing the MyPLC development environment.

    -
    # If your distribution supports RPM
    -rpm -U http://build.planet-lab.org/build/myplc-0_4-rc2/RPMS/i386/myplc-devel-0.4-2.planetlab.i386.rpm
    -
    -# If your distribution does not support RPM
    -cd /tmp
    -wget http://build.planet-lab.org/build/myplc-0_4-rc2/RPMS/i386/myplc-devel-0.4-2.planetlab.i386.rpm
    -cd /
    -rpm2cpio /tmp/myplc-devel-0.4-2.planetlab.i386.rpm | cpio -diu
    +
      +
    • +

      If your distribution supports RPM:

      +
      # rpm -U http://build.planet-lab.org/build/myplc-0_4-rc2/RPMS/i386/myplc-devel-0.4-2.planetlab.i386.rpm
      +
    • +
    • +

      If your distribution does not support RPM:

      +
      # cd /tmp
      +# wget http://build.planet-lab.org/build/myplc-0_4-rc2/RPMS/i386/myplc-devel-0.4-2.planetlab.i386.rpm
      +# cd /
      +# rpm2cpio /tmp/myplc-devel-0.4-2.planetlab.i386.rpm | cpio -diu
      +
    • +
    +
    +
    +

    +4.2. Configuration

    +

    The default configuration should work as-is on most + sites. Configuring the development package can be achieved in a + similar way as for myplc, as described in + Section 3.3, “Changing the configuration”. plc-config-tty supports a + -d option for supporting the + myplc-devel case, that can be useful in a + context where it would not guess it by itself. Refer to Appendix B, Development configuration variables (for myplc-devel) for a list of variables.

    +
    +

    +4.3.  Files and directories + involved in myplc-devel

    The MyPLC development environment installs the following files and directories:

      @@ -508,15 +707,20 @@ rpm2cpio /tmp/myplc-devel-0.4-2.planetlab.i386.rpm | cpio -diu snapshot of the PlanetLab source code is stored as a CVS repository in this directory. Files in this directory will not be updated by an upgrade of - myplc-devel. See Section 4.4, “Updating CVS” for more information about updating + myplc-devel. See Section 4.6, “Updating CVS” for more information about updating PlanetLab source code.

    • /build: Builds are stored in this directory. This directory is bind mounted onto /plc/devel/root/build so that it is accessible as /build from within the chroot jail. The build scripts in this - directory are themselves source controlled; see Section 4.3, “Building MyPLC” for more information about executing + directory are themselves source controlled; see Section 4.5, “Building MyPLC” for more information about executing builds.

    • +
    • /root: this is the + location of the root-user's homedir, and for your + convenience is stored under /data so + that your local customizations survive across + updates.

  • /etc/init.d/plc-devel: This file is @@ -527,7 +731,7 @@ rpm2cpio /tmp/myplc-devel-0.4-2.planetlab.i386.rpm | cpio -diu

  • -4.2. Fedora Core 4 mirror requirement

    +4.4. Fedora Core 4 mirror requirement

    The MyPLC development environment requires access to a complete Fedora Core 4 i386 RPM repository, because several different filesystems based upon Fedora Core 4 are constructed @@ -559,13 +763,13 @@ rpm2cpio /tmp/myplc-devel-0.4-2.planetlab.i386.rpm | cpio -diu such as wget or rsync to download the RPMS from a public mirror:

    -

    Example 8. Setting up a local Fedora Core 4 repository.

    -
    mkdir -p /plc/devel/data/fedora
    -cd /plc/devel/data/fedora
    +

    Example 9. Setting up a local Fedora Core 4 repository.

    +
    # mkdir -p /plc/devel/data/fedora
    +# cd /plc/devel/data/fedora
     
    -for repo in core/4/i386/os core/updates/4/i386 extras/4/i386 ; do
    -    wget -m -nH --cut-dirs=3 http://coblitz.planet-lab.org/pub/fedora/linux/$repo
    -done
    +# for repo in core/4/i386/os core/updates/4/i386 extras/4/i386 ; do +> wget -m -nH --cut-dirs=3 http://coblitz.planet-lab.org/pub/fedora/linux/$repo +> done

    Change the repository URI and --cut-dirs level as needed to produce a hierarchy that resembles:

    @@ -577,17 +781,25 @@ done

    -4.3. Building MyPLC

    +4.5. Building MyPLC

    All PlanetLab source code modules are built and installed as RPMS. A set of build scripts, checked into the build/ directory of the PlanetLab CVS repository, eases the task of rebuilding PlanetLab source code.

    +

    Before you try building MyPLC, you might check the + configuration, in a file named + plc_config.xml that relies on a very + similar model as MyPLC, located in + /etc/planetlab within the chroot jail, or + in /plc/devel/data/etc/planetlab from the + root context. The set of applicable variables is described in + Appendix B, Development configuration variables (for myplc-devel).

    To build MyPLC, or any PlanetLab source code module, from within the MyPLC development environment, execute the following commands as root:

    -

    Example 9. Building MyPLC.

    +

    Example 10. Building MyPLC.

    # Initialize MyPLC development environment
     service plc-devel start
     
    @@ -610,11 +822,11 @@ make -C $DATE
    /plc/devel/data/build/$DATE/RPMS/ that you may copy to the /var/www/html/install-rpms/planetlab - directory of your MyPLC installation (see Section 2, “Installation”).

    + directory of your MyPLC installation (see Section 3, “Installing and using MyPLC”).

    -4.4. Updating CVS

    +4.6. Updating CVS

    A complete snapshot of the PlanetLab source code is included with the MyPLC development environment as a CVS repository in /plc/devel/data/cvs. This CVS repository may @@ -631,18 +843,19 @@ make -C $DATE

    Because the CVS repository is not automatically upgraded, if you wish to keep your local repository synchronized with the public PlanetLab repository, it is highly recommended that you - use CVS's support for vendor - branches to track changes. Vendor branches ease the task - of merging upstream changes with your local modifications. To - import a new snapshot into your local repository (for example, - if you have just upgraded from + use CVS's support for vendor branches to track changes, as + described here + and here. + Vendor branches ease the task of merging upstream changes with + your local modifications. To import a new snapshot into your + local repository (for example, if you have just upgraded from myplc-devel-0.4-2 to myplc-devel-0.4-3 and you notice the new repository in /plc/devel/data/cvs-0.4-3), execute the following commands as root from within the MyPLC development environment:

    -

    Example 10. Updating /data/cvs from /data/cvs-0.4-3.

    +

    Example 11. Updating /data/cvs from /data/cvs-0.4-3.

    Warning: This may cause severe, irreversible changes to be made to your local repository. Always tag your local repository before @@ -660,25 +873,40 @@ cvs -d /cvs rtag before-myplc-0_4-3-merge TMP=$(mktemp -d /data/export.XXXXXX) pushd $TMP cvs -d /data/cvs-0.4-3 export -r HEAD . -cvs -d /cvs import -m "PlanetLab sources from myplc-0.4-3" -ko -I ! . planetlab myplc-0_4-3 +cvs -d /cvs import -m "Merging myplc-0.4-3" -ko -I ! . planetlab myplc-0_4-3 popd rm -rf $TMP

    -

    If there any merge conflicts, use the command suggested by - CVS to help the merge. Explaining how to fix merge conflicts is - beyond the scope of this document; consult the CVS documentation - for more information on how to use CVS.

    +

    If there are any merge conflicts, use the command + suggested by CVS to help the merge. Explaining how to fix merge + conflicts is beyond the scope of this document; consult the CVS + documentation for more information on how to use CVS.

    -A. Configuration variables

    +A. Configuration variables (for myplc)

    Listed below is the set of standard configuration variables and their default values, defined in the template /etc/planetlab/default_config.xml. Additional variables and their defaults may be defined in site-specific XML templates that should be placed in /etc/planetlab/configs/.

    +

    This information is available online within + plc-config-tty, e.g.:

    +
    +

    Example A.1. Advanced usage of plc-config-tty

    +
    <plc> # plc-config-tty
    +Enter command (u for usual changes, w to save, ? for help) V plc_dns
    +========== Category = PLC_DNS
    +### Enable DNS
    +# Enable the internal DNS server. The server does not provide reverse
    +# resolution and is not a production quality or scalable DNS solution.
    +# Use the internal DNS server only for small deployments or for testing.
    +PLC_DNS_ENABLED
    +
    +
    +

    List of the myplc configuration variables:

    PLC_NAME
    @@ -1271,7 +1499,7 @@ rm -rf $TMP

    -B. Development environment configuration variables

    +B. Development configuration variables (for myplc-devel)
    PLC_DEVEL_FEDORA_RELEASE
    @@ -1298,7 +1526,7 @@ rm -rf $TMP

    Type: string

    - Default: file:///usr/share/mirrors/fedora

    + Default: file:///data/fedora

    Fedora Core mirror from which to install filesystems.

    @@ -1323,7 +1551,7 @@ rm -rf $TMP

    -Bibliography

    +Bibliography

    [1] Mark Huang. PlanetLab Technical Contact's Guide.

    diff --git a/doc/myplc.xml b/doc/myplc.xml index fa35162..07e96a8 100644 --- a/doc/myplc.xml +++ b/doc/myplc.xml @@ -38,6 +38,15 @@ MLH Add development environment. + + 1.2 + August 18, 2006 + TPT + + Review section on configuration and introduce plc-config-tty. + Present implementation details last. + + @@ -76,276 +85,212 @@ + +
    Purpose of the <emphasis> myplc-devel + </emphasis> package + The myplc package comes with all + required node software, rebuilt from the public PlanetLab CVS + repository. If for any reason you need to implement your own + customized version of this software, you can use the + myplc-devel package instead, for setting up + your own development environment, including a local CVS + repository; you can then freely manage your changes and rebuild + your customized version of myplc. We also + provide good practices, that will then allow you to resync your local + CVS repository with any further evolution on the mainstream public + PlanetLab software.
    + + + + +
    Requirements + + myplc and + myplc-devel were designed as + chroot jails so as to reduce the requirements on + your host operating system. So in theory, these distributions should + work on virtually any Linux 2.6 based distribution, whether it + supports rpm or not. + + However, things are never that simple and there indeed are + some known limitations to this, so here are a couple of notes as + recommended reading before you proceed with the installation. + + As of 17 August 2006 (i.e. myplc-0.5-2): + + + The software is vastly based on Fedora + Core 4. Please note that the build server at Princeton + runs Fedora Core 2, together with an upgraded + version of yum. + + + myplc and myplc-devel are known to work on both + Fedora Core 2 and Fedora Core + 4. Please note however that, on fc4 at least, it is + highly recommended to use the Security Level + Configuration utility and to switch off + SElinux on your box because : + + + + myplc requires you to run SElinux as 'Permissive' at most + + + myplc-devel requires you to turn SElinux Off. + + + + + In addition, as far as myplc is concerned, you + need to check your firewall configuration since you need, of course, + to open up the http and + https ports, so as to accept connections from + the managed nodes and from the users' desktops. + +
    - Installation + Installating and using MyPLC Though internally composed of commodity software subpackages, MyPLC should be treated as a monolithic software application. MyPLC is distributed as single RPM package that has no external dependencies, allowing it to be installed on - practically any Linux 2.6 based distribution: + practically any Linux 2.6 based distribution. - +
    Installing MyPLC. - - - - MyPLC installs the following files and directories: - - - - /plc/root.img: The main - root filesystem of the MyPLC application. This file is an - uncompressed ext3 filesystem that is loopback mounted on - /plc/root when MyPLC starts. This - filesystem, even when mounted, should be treated as an opaque - binary that can and will be replaced in its entirety by any - upgrade of MyPLC. - - /plc/root: The mount point - for /plc/root.img. Once the root filesystem - is mounted, all MyPLC services run in a - chroot jail based in this - directory. - - - /plc/data: The directory where user - data and generated files are stored. This directory is bind - mounted onto /plc/root/data so that it is - accessible as /data from within the - chroot jail. Files in this directory are - marked with %config(noreplace) in the - RPM. That is, during an upgrade of MyPLC, if a file has not - changed since the last installation or upgrade of MyPLC, it is - subject to upgrade and replacement. If the file has changed, - the new version of the file will be created with a - .rpmnew extension. Symlinks within the - MyPLC root filesystem ensure that the following directories - (relative to /plc/root) are stored - outside the MyPLC filesystem image: - - - /etc/planetlab: This - directory contains the configuration files, keys, and - certificates that define your MyPLC - installation. - - /var/lib/pgsql: This - directory contains PostgreSQL database - files. - - /var/www/html/alpina-logs: This - directory contains node installation logs. - - /var/www/html/boot: This - directory contains the Boot Manager, customized for your MyPLC - installation, and its data files. - - /var/www/html/download: This - directory contains Boot CD images, customized for your MyPLC - installation. - - /var/www/html/install-rpms: This - directory is where you should install node package updates, - if any. 
By default, nodes are installed from the tarball - located at - /var/www/html/boot/PlanetLab-Bootstrap.tar.bz2, - which is pre-built from the latest PlanetLab Central - sources, and installed as part of your MyPLC - installation. However, nodes will attempt to install any - newer RPMs located in - /var/www/html/install-rpms/planetlab, - after initial installation and periodically thereafter. You - must run yum-arch and - createrepo to update the - yum caches in this directory after - installing a new RPM. PlanetLab Central cannot support any - changes to this directory. - - /var/www/html/xml: This - directory contains various XML files that the Slice Creation - Service uses to determine the state of slices. These XML - files are refreshed periodically by cron - jobs running in the MyPLC root. - - - - - /etc/init.d/plc: This file - is a System V init script installed on your host filesystem, - that allows you to start up and shut down MyPLC with a single - command. On a Red Hat or Fedora host system, it is customary to - use the service command to invoke System V - init scripts: - - - Starting and stopping MyPLC. - - - - - Like all other registered System V init services, MyPLC is - started and shut down automatically when your host system boots - and powers off. You may disable automatic startup by invoking - the chkconfig command on a Red Hat or Fedora - host system: - - - Disabling automatic startup of MyPLC. - - - - - - /etc/sysconfig/plc: This - file is a shell script fragment that defines the variables - PLC_ROOT and PLC_DATA. By default, - the values of these variables are /plc/root - and /plc/data, respectively. If you wish, - you may move your MyPLC installation to another location on your - host filesystem and edit the values of these variables - appropriately, but you will break the RPM upgrade - process. PlanetLab Central cannot support any changes to this - file. 
- - /etc/planetlab: This - symlink to /plc/data/etc/planetlab is - installed on the host system for convenience. + + If your distribution supports RPM: + + + If your distribution does not support RPM: + -
    -
    - Quickstart + The below explains in + details the installation strategy and the miscellaneous files and + directories involved. + +
    - Once installed, start MyPLC (see ). MyPLC must be started as - root. Observe the output of this command for any failures. If no - failures occur, you should see output similar to the - following: +
    QuickStart - - A successful MyPLC startup. + On a Red Hat or Fedora host system, it is customary to use + the service command to invoke System V init + scripts. As the examples suggest, the service must be started as root: - + Starting MyPLC: + + + Stopping MyPLC: + - If /plc/root is mounted successfully, a - complete log file of the startup process may be found at - /plc/root/var/log/boot.log. Possible reasons - for failure of each step include: - - - Mounting PLC: If this step - fails, first ensure that you started MyPLC as root. Check - /etc/sysconfig/plc to ensure that - PLC_ROOT and PLC_DATA refer to the - right locations. You may also have too many existing loopback - mounts, or your kernel may not support loopback mounting, bind - mounting, or the ext3 filesystem. Try freeing at least one - loopback device, or re-compiling your kernel to support loopback - mounting, bind mounting, and the ext3 filesystem. If you see an - error similar to Permission denied while trying to open - /plc/root.img, then SELinux may be enabled. If you - installed MyPLC on Fedora Core 4 or 5, use the - Security Level Configuration utility - to configure SELinux to be - Permissive. - - Starting database server: If - this step fails, check - /plc/root/var/log/pgsql and - /plc/root/var/log/boot.log. The most common - reason for failure is that the default PostgreSQL port, TCP port - 5432, is already in use. Check that you are not running a - PostgreSQL server on the host system. + In , we provide greater + details that might be helpful in the case where the service does + not seem to take off correctly. - Starting web server: If this - step fails, check - /plc/root/var/log/httpd/error_log and - /plc/root/var/log/boot.log for obvious - errors. The most common reason for failure is that the default - web ports, TCP ports 80 and 443, are already in use. Check that - you are not running a web server on the host - system. 
+ Like all other registered System V init services, MyPLC is + started and shut down automatically when your host system boots + and powers off. You may disable automatic startup by invoking the + chkconfig command on a Red Hat or Fedora host + system: - Bootstrapping the database: - If this step fails, it is likely that the previous step - (Starting web server) also failed. Another - reason that it could fail is if PLC_API_HOST (see - ) does not resolve to - the host on which the API server has been enabled. By default, - all services, including the API server, are enabled and run on - the same host, so check that PLC_API_HOST is - either localhost or resolves to a local IP - address. + Disabling automatic startup of MyPLC. + + Re-enabling automatic startup of MyPLC. + - Starting crond: If this step - fails, it is likely that the previous steps (Starting - web server and Bootstrapping the - database) also failed. If not, check - /plc/root/var/log/boot.log for obvious - errors. This step starts the cron service and - generates the initial set of XML files that the Slice Creation - Service uses to determine slice state. - - - If no failures occur, then MyPLC should be active with a - default configuration. Open a web browser on the host system and - visit http://localhost/, which should bring you - to the front page of your PLC installation. The password of the - default administrator account - root@localhost.localdomain (set by - PLC_ROOT_USER) is root (set by - PLC_ROOT_PASSWORD). +
    -
    +
    Changing the configuration After verifying that MyPLC is working correctly, shut it down and begin changing some of the default variable values. Shut down MyPLC with service plc stop - (see ). With a text - editor, open the file - /etc/planetlab/plc_config.xml. This file is - a self-documenting configuration file written in XML. Variables - are divided into categories. Variable identifiers must be - alphanumeric, plus underscore. A variable is referred to - canonically as the uppercase concatenation of its category - identifier, an underscore, and its variable identifier. Thus, a - variable with an id of + (see ). + + The preferred option for changing the configuration is to + use the plc-config-tty tool. This tool comes + with the root image, so you need to have it mounted first. The + full set of applicable variables is described in , but using the u + command guides you to the most useful ones. Here is a sample session: + + + Using plc-config-tty for configuration: + # plc-config-tty +Config file /etc/planetlab/configs/site.xml located under a non-existing directory +Want to create /etc/planetlab/configs [y]/n ? y +Created directory /etc/planetlab/configs +Enter command (u for usual changes, w to save, ? for help) u +== PLC_NAME : [PlanetLab Test] OneLab +== PLC_ROOT_USER : [root@localhost.localdomain] root@odie.inria.fr +== PLC_ROOT_PASSWORD : [root] plain-passwd +== PLC_MAIL_SUPPORT_ADDRESS : [root+support@localhost.localdomain] support@one-lab.org +== PLC_DB_HOST : [localhost.localdomain] odie.inria.fr +== PLC_API_HOST : [localhost.localdomain] odie.inria.fr +== PLC_WWW_HOST : [localhost.localdomain] odie.inria.fr +== PLC_BOOT_HOST : [localhost.localdomain] odie.inria.fr +== PLC_NET_DNS1 : [127.0.0.1] 138.96.250.248 +== PLC_NET_DNS2 : [None] 138.96.250.249 +Enter command (u for usual changes, w to save, ? 
for help) w +Wrote /etc/planetlab/configs/site.xml +Merged + /etc/planetlab/default_config.xml +and /etc/planetlab/configs/site.xml +into /etc/planetlab/plc_config.xml +You might want to type 'r' (restart plc) or 'q' (quit) +Enter command (u for usual changes, w to save, ? for help) r +==================== Stopping plc +... +==================== Starting plc +... +Enter command (u for usual changes, w to save, ? for help) q + # exit +# +]]> + + + If you used this method for configuring, you can skip to + the . As an alternative to using + plc-config-tty, you may also use a text + editor, but this requires some understanding of how the + configuration files are used within myplc. The + default configuration is stored in a file + named /etc/planetlab/default_config.xml, + that is designed to remain intact. You may store your local + changes in any file located in the configs/ + sub-directory, that are loaded on top of the defaults. Finally + the file /etc/planetlab/plc_config.xml is + loaded, and the resulting configuration is stored in the latter + file, which is used as a reference. + + Using a separate file for storing local changes only, as + plc-config-tty does, is not a workable option + with a text editor because it would involve tedious XML + re-assembling. So your local changes should go in + /etc/planetlab/plc_config.xml. Be warned + however that any change you might do this way could be lost if + you use plc-config-tty later on. + + This file is a self-documenting configuration file written + in XML. Variables are divided into categories. Variable + identifiers must be alphanumeric, plus underscore. A variable is + referred to canonically as the uppercase concatenation of its + category identifier, an underscore, and its variable + identifier. Thus, a variable with an id of slice_prefix in the plc category is referred to canonically as PLC_SLICE_PREFIX. @@ -379,13 +324,30 @@ PLC: Signing node packages: [ OK ] system. 
- After changing these variables, save the file, then - restart MyPLC with service plc start. You - should notice that the password of the default administrator - account is no longer root, and that the - default site name includes the name of your PLC installation - instead of PlanetLab. -
    + After changing these variables, + save the file, then restart MyPLC with service plc + start. You should notice that the password of the + default administrator account is no longer + root, and that the default site name includes + the name of your PLC installation instead of PlanetLab. As a + side effect of these changes, the ISO images for the boot CDs + now have new names, so that you can freely remove the ones named + after 'PlanetLab Test', which is the default value of + PLC_NAME
    + +
    Login as a real user + + Now that myplc is up and running, you can connect to the + web site that by default runs on port 80. You can either + directly use the default administrator user that you configured + in PLC_ROOT_USER and + PLC_ROOT_PASSWORD, or create a real user through + the 'Joining' tab. Do not forget to select both PI and tech + roles, and to select the only site created at this stage. + Login as the administrator to enable this user, then login as + the real user. +
    Installing nodes @@ -481,9 +443,221 @@ ssh -i /etc/planetlab/root_ssh_key.rsa root@node \ vserver pl_conf exec service pl_conf restart]]>
    + +
    + Understanding the startup sequence + + During service startup described in , observe the output of this command for + any failures. If no failures occur, you should see output similar + to the following: + + + A successful MyPLC startup. + + + + + If /plc/root is mounted successfully, a + complete log file of the startup process may be found at + /plc/root/var/log/boot.log. Possible reasons + for failure of each step include: + + + Mounting PLC: If this step + fails, first ensure that you started MyPLC as root. Check + /etc/sysconfig/plc to ensure that + PLC_ROOT and PLC_DATA refer to the + right locations. You may also have too many existing loopback + mounts, or your kernel may not support loopback mounting, bind + mounting, or the ext3 filesystem. Try freeing at least one + loopback device, or re-compiling your kernel to support loopback + mounting, bind mounting, and the ext3 filesystem. If you see an + error similar to Permission denied while trying to open + /plc/root.img, then SELinux may be enabled. See above for details. + + Starting database server: If + this step fails, check + /plc/root/var/log/pgsql and + /plc/root/var/log/boot.log. The most common + reason for failure is that the default PostgreSQL port, TCP port + 5432, is already in use. Check that you are not running a + PostgreSQL server on the host system. + + Starting web server: If this + step fails, check + /plc/root/var/log/httpd/error_log and + /plc/root/var/log/boot.log for obvious + errors. The most common reason for failure is that the default + web ports, TCP ports 80 and 443, are already in use. Check that + you are not running a web server on the host + system. + + Bootstrapping the database: + If this step fails, it is likely that the previous step + (Starting web server) also failed. Another + reason that it could fail is if PLC_API_HOST (see + ) does not resolve to + the host on which the API server has been enabled. 
By default, + all services, including the API server, are enabled and run on + the same host, so check that PLC_API_HOST is + either localhost or resolves to a local IP + address. Also check that PLC_ROOT_USER looks like + an e-mail address. + + Starting crond: If this step + fails, it is likely that the previous steps (Starting + web server and Bootstrapping the + database) also failed. If not, check + /plc/root/var/log/boot.log for obvious + errors. This step starts the cron service and + generates the initial set of XML files that the Slice Creation + Service uses to determine slice state. + + + If no failures occur, then MyPLC should be active with a + default configuration. Open a web browser on the host system and + visit http://localhost/, which should bring you + to the front page of your PLC installation. The password of the + default administrator account + root@localhost.localdomain (set by + PLC_ROOT_USER) is root (set by + PLC_ROOT_PASSWORD). +
    + +
    Files and directories + involved in <emphasis>myplc</emphasis> + MyPLC installs the following files and directories: + + + + /plc/root.img: The main + root filesystem of the MyPLC application. This file is an + uncompressed ext3 filesystem that is loopback mounted on + /plc/root when MyPLC starts. This + filesystem, even when mounted, should be treated as an opaque + binary that can and will be replaced in its entirety by any + upgrade of MyPLC. + + /plc/root: The mount point + for /plc/root.img. Once the root filesystem + is mounted, all MyPLC services run in a + chroot jail based in this + directory. + + + /plc/data: The directory where user + data and generated files are stored. This directory is bind + mounted onto /plc/root/data so that it is + accessible as /data from within the + chroot jail. Files in this directory are + marked with %config(noreplace) in the + RPM. That is, during an upgrade of MyPLC, if a file has not + changed since the last installation or upgrade of MyPLC, it is + subject to upgrade and replacement. If the file has changed, + the new version of the file will be created with a + .rpmnew extension. Symlinks within the + MyPLC root filesystem ensure that the following directories + (relative to /plc/root) are stored + outside the MyPLC filesystem image: + + + /etc/planetlab: This + directory contains the configuration files, keys, and + certificates that define your MyPLC + installation. + + /var/lib/pgsql: This + directory contains PostgreSQL database + files. + + /var/www/html/alpina-logs: This + directory contains node installation logs. + + /var/www/html/boot: This + directory contains the Boot Manager, customized for your MyPLC + installation, and its data files. + + /var/www/html/download: This + directory contains Boot CD images, customized for your MyPLC + installation. + + /var/www/html/install-rpms: This + directory is where you should install node package updates, + if any. 
By default, nodes are installed from the tarball + located at + /var/www/html/boot/PlanetLab-Bootstrap.tar.bz2, + which is pre-built from the latest PlanetLab Central + sources, and installed as part of your MyPLC + installation. However, nodes will attempt to install any + newer RPMs located in + /var/www/html/install-rpms/planetlab, + after initial installation and periodically thereafter. You + must run yum-arch and + createrepo to update the + yum caches in this directory after + installing a new RPM. PlanetLab Central cannot support any + changes to this directory. + + /var/www/html/xml: This + directory contains various XML files that the Slice Creation + Service uses to determine the state of slices. These XML + files are refreshed periodically by cron + jobs running in the MyPLC root. + + /root: this is the + location of the root-user's homedir, and for your + convenience is stored under /data so + that your local customizations survive across + updates - this feature is inherited from the + myplc-devel package, where it is probably + more useful. + + + + + + /etc/init.d/plc: This file + is a System V init script installed on your host filesystem, + that allows you to start up and shut down MyPLC with a single + command, as described in . + + + /etc/sysconfig/plc: This + file is a shell script fragment that defines the variables + PLC_ROOT and PLC_DATA. By default, + the values of these variables are /plc/root + and /plc/data, respectively. If you wish, + you may move your MyPLC installation to another location on your + host filesystem and edit the values of these variables + appropriately, but you will break the RPM upgrade + process. PlanetLab Central cannot support any changes to this + file. + + /etc/planetlab: This + symlink to /plc/data/etc/planetlab is + installed on the host system for convenience. + +
    -
    +
    Rebuilding and customizing MyPLC The MyPLC package, though distributed as an RPM, is not a @@ -513,18 +687,34 @@ vserver pl_conf exec service pl_conf restart]]> chroot jail should not be modified directly, as they are subject to upgrade. - - Installing the MyPLC development environment. - - + If your distribution supports RPM: + + + If your distribution does not support RPM: + + +
    -# If your distribution does not support RPM -cd /tmp -wget http://build.planet-lab.org/build/myplc-0_4-rc2/RPMS/i386/myplc-devel-0.4-2.planetlab.i386.rpm -cd / -rpm2cpio /tmp/myplc-devel-0.4-2.planetlab.i386.rpm | cpio -diu]]> - +
    + Configuration + + The default configuration should work as-is on most + sites. Configuring the development package can be achieved in a + similar way as for myplc, as described in + . plc-config-tty supports a + -d option for the + myplc-devel case, which can be useful in a + context where it would not guess the flavour by itself. Refer to for a list of variables. +
    + +
    Files and directories + involved in <emphasis>myplc-devl</emphasis> The MyPLC development environment installs the following files and directories: @@ -576,8 +766,12 @@ rpm2cpio /tmp/myplc-devel-0.4-2.planetlab.i386.rpm | cpio -diu]]> for more information about executing builds. - - + + /root: this is the + location of the root-user's homedir, and for your + convenience is stored under /data so + that your local customizations survive across + updates. /etc/init.d/plc-devel: This file is @@ -627,12 +821,12 @@ rpm2cpio /tmp/myplc-devel-0.4-2.planetlab.i386.rpm | cpio -diu]]> Setting up a local Fedora Core 4 repository. - +# for repo in core/4/i386/os core/updates/4/i386 extras/4/i386 ; do +> wget -m -nH --cut-dirs=3 http://coblitz.planet-lab.org/pub/fedora/linux/$repo +> done]]> Change the repository URI and --cut-dirs @@ -655,6 +849,15 @@ done]]> repository, eases the task of rebuilding PlanetLab source code. + Before you try building MyPLC, you might check the + configuration, in a file named + plc_config.xml that relies on a very + similar model as MyPLC, located in + /etc/planetlab within the chroot jail, or + in /plc/devel/data/etc/planetlab from the + root context. The set of applicable variables is described in + . + To build MyPLC, or any PlanetLab source code module, from within the MyPLC development environment, execute the following commands as root: @@ -711,12 +914,14 @@ make -C $DATE]]> Because the CVS repository is not automatically upgraded, if you wish to keep your local repository synchronized with the public PlanetLab repository, it is highly recommended that you - use CVS's support for vendor - branches to track changes. Vendor branches ease the task - of merging upstream changes with your local modifications. To - import a new snapshot into your local repository (for example, - if you have just upgraded from + use CVS's support for vendor branches to track changes, as + described here + and here. 
+ Vendor branches ease the task of merging upstream changes with + your local modifications. To import a new snapshot into your + local repository (for example, if you have just upgraded from myplc-devel-0.4-2 to myplc-devel-0.4-3 and you notice the new repository in /plc/devel/data/cvs-0.4-3), @@ -744,20 +949,19 @@ cvs -d /cvs rtag before-myplc-0_4-3-merge TMP=$(mktemp -d /data/export.XXXXXX) pushd $TMP cvs -d /data/cvs-0.4-3 export -r HEAD . -cvs -d /cvs import -m "PlanetLab sources from myplc-0.4-3" -ko -I ! . planetlab myplc-0_4-3 +cvs -d /cvs import -m "Merging myplc-0.4-3" -ko -I ! . planetlab myplc-0_4-3 popd rm -rf $TMP]]> - If there any merge conflicts, use the command suggested by - CVS to help the merge. Explaining how to fix merge conflicts is - beyond the scope of this document; consult the CVS documentation - for more information on how to use CVS. -
    -
    + If there are any merge conflicts, use the command + suggested by CVS to help the merge. Explaining how to fix merge + conflicts is beyond the scope of this document; consult the CVS + documentation for more information on how to use CVS. + - - Configuration variables + + Configuration variables (for <emphasis>myplc</emphasis>) Listed below is the set of standard configuration variables and their default values, defined in the template @@ -766,11 +970,27 @@ rm -rf $TMP]]> templates that should be placed in /etc/planetlab/configs/. - &Variables; + This information is available online within + plc-config-tty, e.g.: + +Advanced usage of plc-config-tty + # plc-config-tty +Enter command (u for usual changes, w to save, ? for help) V plc_dns +========== Category = PLC_DNS +### Enable DNS +# Enable the internal DNS server. The server does not provide reverse +# resolution and is not a production quality or scalable DNS solution. +# Use the internal DNS server only for small deployments or for testing. +PLC_DNS_ENABLED +]]> + + + List of the myplc configuration variables: + &Variables; - - Development environment configuration variables + + Development configuration variables (for <emphasis>myplc-devel</emphasis>) &DevelVariables; diff --git a/doc/plc_devel_variables.xml b/doc/plc_devel_variables.xml index 263aa3c..4dacd1c 100644 --- a/doc/plc_devel_variables.xml +++ b/doc/plc_devel_variables.xml @@ -29,7 +29,7 @@ Type: string - Default: file:///usr/share/mirrors/fedora + Default: file:///data/fedora Fedora Core mirror from which to install filesystems. 
diff --git a/guest.init b/guest.init index 5d14608..28e6836 100755 --- a/guest.init +++ b/guest.init @@ -6,7 +6,7 @@ # # description: Manages all PLC services on this machine # -# $Id: guest.init,v 1.19 2006/07/10 21:10:21 mlhuang Exp $ +# $Id: guest.init,v 1.20 2006/08/08 23:19:52 mlhuang Exp $ # # Source function library and configuration @@ -33,6 +33,8 @@ nsteps=${#steps[@]} # Regenerate configuration files reload () { + force=$1 + # Regenerate the main configuration file from default values # overlaid with site-specific and current values. files=( @@ -41,7 +43,7 @@ reload () /etc/planetlab/plc_config.xml ) for file in "${files[@]}" ; do - if [ $file -nt /etc/planetlab/plc_config.xml ] ; then + if [ -n "$force" -o $file -nt /etc/planetlab/plc_config.xml ] ; then tmp=$(mktemp /tmp/plc_config.xml.XXXXXX) plc-config --xml "${files[@]}" >$tmp if [ $? -eq 0 ] ; then @@ -56,10 +58,10 @@ reload () done # Convert configuration to various formats - if [ /etc/planetlab/plc_config.xml -nt /etc/planetlab/plc_config ] ; then + if [ -n "$force" -o /etc/planetlab/plc_config.xml -nt /etc/planetlab/plc_config ] ; then plc-config --shell >/etc/planetlab/plc_config fi - if [ /etc/planetlab/plc_config.xml -nt /etc/planetlab/php/plc_config.php ] ; then + if [ -n "$force" -o /etc/planetlab/plc_config.xml -nt /etc/planetlab/php/plc_config.php ] ; then mkdir -p /etc/planetlab/php plc-config --php >/etc/planetlab/php/plc_config.php fi @@ -117,7 +119,7 @@ command=$1 shift 1 if [ -z "$1" ] ; then # Start or stop everything. Regenerate configuration first. 
- reload + reload force else # Start or stop a particular step steps=("$@") @@ -166,6 +168,7 @@ case "$command" in ;; reload) + reload force ;; *) diff --git a/myplc.spec b/myplc.spec index bd82763..028f677 100644 --- a/myplc.spec +++ b/myplc.spec @@ -5,13 +5,18 @@ URL: http://cvs.planet-lab.org/cvs/myplc Summary: PlanetLab Central (PLC) Portable Installation Name: myplc -Version: 0.4 +Version: 0.5 Release: 2%{?pldistro:.%{pldistro}}%{?date:.%{date}} License: PlanetLab Group: Applications/Systems Source0: %{name}-%{version}.tar.gz BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-root +# for preventing myplc-devel from being built +# *should* support invokation like rpmbuild --define "build_level 0" +# *but* this does not seem to work : so just set to 0 here +%{!?build_level: %define build_devel 1} + %define debug_package %{nil} %description @@ -23,24 +28,33 @@ through a graphical interface. All PLC services are started up and shut down through a single System V init script installed in the host system. +%if %{build_devel} %package devel Summary: PlanetLab Central (PLC) Development Environment Group: Development/Tools AutoReqProv: no +%endif +%if %{build_devel} %description devel This package install a complete PlanetLab development environment contained within a chroot jail. The default installation consists of a local CVS repository bootstrapped with a snapshot of all PlanetLab source code, and all the tools necessary to compile it. 
+%endif %prep %setup -q %build pushd myplc +%if %{build_devel} +echo -n "XXXXXXXXXXXXXXX myplc::build_devel " ; date ./build_devel.sh %{?cvstag:-t %{cvstag}} +%endif +echo -n "XXXXXXXXXXXXXXX myplc::build " ; date ./build.sh %{?cvstag:-t %{cvstag}} +echo -n "XXXXXXXXXXXXXXX myplc::endbuild " ; date popd %install @@ -71,6 +85,8 @@ find data | cpio -p -d -u $RPM_BUILD_ROOT/plc/ # myplc-devel # +%if %{build_devel} + # Install host startup script and configuration file install -D -m 755 host.init $RPM_BUILD_ROOT/%{_sysconfdir}/init.d/plc-devel install -D -m 644 plc-devel.sysconfig $RPM_BUILD_ROOT/%{_sysconfdir}/sysconfig/plc-devel @@ -86,6 +102,8 @@ find devel/data | cpio -p -d -u $RPM_BUILD_ROOT/plc/ # unique name. A hard-linked copy is made in %post. mv $RPM_BUILD_ROOT/plc/devel/data/{cvs,cvs-%{version}-%{release}} +%endif + popd %clean @@ -124,9 +142,15 @@ fi # 0 = install, 1 = upgrade if [ $1 -gt 0 ] ; then for dir in /var/lib/pgsql/data /etc/planetlab ; do - if [ -d /plc/data/$dir -a ! -d /plc/data/$dir.rpmsave ] ; then + if [ -d /plc/data/$dir ] ; then echo "Preserving /plc/data/$dir" - cp -ra /plc/data/$dir{,.rpmsave} + mkdir -p /plc/data/$dir.rpmsave + tar -C /plc/data/$dir -cpf - . | \ + tar -C /plc/data/$dir.rpmsave -xpf - + + # Except for the default configuration file and DTD, which + # really should be considered for upgrade. + rm -f /plc/data/$dir.rpmsave/{default_config.xml,plc_config.dtd} fi done fi @@ -137,19 +161,6 @@ if [ -x /sbin/chkconfig ] ; then /sbin/chkconfig plc on fi -for dir in /var/lib/pgsql/data /etc/planetlab ; do - if [ -d /plc/data/$dir.rpmsave -a -d /plc/data/$dir ] ; then - echo "Merging /plc/data/$dir" - if tar -C /plc/data/$dir.rpmsave -cpf - . 
| \ - tar -C /plc/data/$dir -xpf - ; then - rm -rf /plc/data/$dir.rpmsave - fi - fi -done - -# Force a regeneration to take into account new variables -touch /plc/data/etc/planetlab/default_config.xml - %triggerpostun -- %{name} # 0 = erase, 1 = upgrade if [ $1 -gt 0 ] ; then @@ -174,11 +185,14 @@ if [ $1 -eq 0 ] ; then fi fi +%if %{build_devel} %pre devel if [ -x %{_sysconfdir}/init.d/plc-devel ] ; then %{_sysconfdir}/init.d/plc-devel stop fi +%endif +%if %{build_devel} %post devel if [ -x /sbin/chkconfig ] ; then /sbin/chkconfig --add plc-devel @@ -190,7 +204,9 @@ fi if [ ! -d /plc/devel/data/cvs ] ; then cp -rl /plc/devel/data/{cvs-%{version}-%{release},cvs} fi +%endif +%if %{build_devel} %preun devel # 0 = erase, 1 = upgrade if [ $1 -eq 0 ] ; then @@ -200,6 +216,7 @@ if [ $1 -eq 0 ] ; then /sbin/chkconfig --del plc-devel fi fi +%endif %files %defattr(-,root,root,-) @@ -218,6 +235,7 @@ fi %dir /plc/data %config(noreplace) /plc/data/* +%if %{build_devel} %files devel %defattr(-,root,root,-) # Host startup script and configuration file @@ -231,8 +249,13 @@ fi # Data directory %dir /plc/devel/data %config(noreplace) /plc/devel/data/* +%endif %changelog +* Wed Aug 09 2006 Thierry Parmentelat +- introduces variable %{build_devel} to allow custom sites to skip building + the myplc-devel package. + * Thu Jul 13 2006 Mark Huang - 0.4-2, 0.5-2 - MyPLC 0.4 RC2. - Fix many spec files (License replaces Copyright). 
diff --git a/plc-config-tty b/plc-config-tty index 95c1e2f..6a2c873 100755 --- a/plc-config-tty +++ b/plc-config-tty @@ -2,40 +2,95 @@ # Interactively prompts for variable values # expected arguments are -# command [default-xml [custom-xml]] +# command -d [default-xml [custom-xml [ consolidated-xml ]]] # -# Two-steps logic: -# (1) scans all variables (todo: pass categories as arguments) -# and prompts for value -# current value proposed as default -# also allows to remove site-dependent setting -# (2) epilogue : allows to -# list the site-dependent vars with values -# and to locally (re-)edit a variable from its shell name -# quit with or without saving +# -d is for the myplc-devel package + +# we use 3 instances of PLCConfiguration throughout: +# cdef : models the defaults, from plc_default.xml +# cread : merged from plc_default & configs/site.xml +# cwrite : site.xml + pending changes import sys import os import re import readline +import getopt from plc_config import PLCConfiguration #################### -release = "$Id" +release_id = "$Id: plc-config-tty,v 1.8 2006/08/18 14:34:45 thierry Exp $" +release_rev = "$Revision: 1.8 $" + +def init_flavour (flavour): + global service + global common_variables + if (flavour == "devel"): + service="plc-devel" + common_variables=("PLC_DEVEL_FEDORA_URL", + "PLC_DEVEL_CVSROOT") + config_dir = "/plc/devel/data/etc/planetlab" + else: + service="plc" + common_variables=("PLC_NAME", + "PLC_ROOT_USER", + "PLC_ROOT_PASSWORD", + "PLC_MAIL_SUPPORT_ADDRESS", + "PLC_DB_HOST", + "PLC_API_HOST", + "PLC_WWW_HOST", + "PLC_BOOT_HOST", + "PLC_NET_DNS1", + "PLC_NET_DNS2") + config_dir = "/etc/planetlab" + global def_default_config + def_default_config= "%s/default_config.xml" % config_dir + global def_site_config + def_site_config = "%s/configs/site.xml" % config_dir + global def_consolidated_config + def_consolidated_config = "%s/plc_config.xml" % config_dir -def_main_config = "/etc/planetlab/default_config.xml" -def_site_config = 
"/etc/planetlab/configs/site.xml" -def_consolidated_config = "/etc/planetlab/plc_config.xml" + global mainloop_usage + mainloop_usage= """Available commands: + Uppercase versions give variables comments, when available +-u/U\t\t\tEdit usual variables +-w\t\t\tWrite & consolidate +-r\t\t\tRestart %s service +-q\t\t\tQuit (without saving) +-h/?\t\t\tThis help +--- +l/L [|]\tShow Locally modified variables/values +-s/S [|]\tShow variables/values (all, in category, single) +-e/E [|]\tEdit variables (all, in category, single) +--- +-c\t\t\tList categories +-v/V [|]List Variables (all, in category, single) +--- +Typical usage involves: u, [l,] w, r, q +""" % service -command_usage="""Usage: %s [default-xml [site-xml [consolidated-xml]]] +def usage (): + command_usage="Usage: %s [-d] [-v] [default-xml [site-xml [consolidated-xml]]]"% sys.argv[0] + init_flavour ("boot") + command_usage +=""" + -v shows version and exits +\t default-xml defaults to %s +\t site-xml defaults to %s +\t consolidated-xml defaults to %s""" % (def_default_config,def_site_config, def_consolidated_config) + command_usage += """ + Unless you specify the -d option, meaning you want to configure + myplc-devel instead of regular myplc, in which case""" + init_flavour ("devel") + command_usage +=""" \t default-xml defaults to %s \t site-xml defaults to %s -\t consolidated-xml defaults to %s -""" % (sys.argv[0],def_main_config,def_site_config, def_consolidated_config) +\t consolidated-xml defaults to %s""" % (def_default_config,def_site_config, def_consolidated_config) + print(command_usage) + sys.exit(1) #################### -variable_usage= """Special answers : +variable_usage= """Edit Commands : #\tShow variable comments .\tStops prompting, return to mainloop /\tCleans any site-defined value, reverts to default @@ -44,21 +99,97 @@ variable_usage= """Special answers : ?\tThis help """ -def usage (): - print(command_usage) - sys.exit(1) - #################### def get_value (config, category_id, 
variable_id): (category, variable) = config.get (category_id, variable_id) return variable['value'] +def get_current_value (cread, cwrite, category_id, variable_id): + # the value stored in cwrite, if present, is the one we want + try: + result=get_value (cwrite,category_id,variable_id) + except: + result=get_value (cread,category_id,variable_id) + return result + # refrain from using plc_config's _sanitize def get_varname (config, category_id, variable_id): (category, variable) = config.get (category_id, variable_id) return (category_id+"_"+variable['id']).upper() -def prompt_variable (cdef, cread, cwrite, category, variable): +# could not avoid using _sanitize here.. +def get_name_comments (config, cid, vid): + try: + (category, variable) = config.get (cid, vid) + (id, name, value, comments) = config._sanitize_variable (cid,variable) + return (name,comments) + except: + return (None,[]) + +def print_name_comments (config, cid, vid): + (name,comments)=get_name_comments(config,cid,vid) + if name: + print "### %s" % name + if comments: + for line in comments: + print "# %s" % line + else: + print "!!! 
No comment associated to %s_%s" % (cid,vid) + +#################### +def list_categories (config): + result=[] + for (category_id, (category, variables)) in config.variables().iteritems(): + result += [category_id] + return result + +def print_categories (config): + print "Known categories" + for cid in list_categories(config): + print "%s" % (cid.upper()) + +#################### +def list_category (config, cid): + result=[] + for (category_id, (category, variables)) in config.variables().iteritems(): + if (cid == category_id): + for variable in variables.values(): + result += ["%s_%s" %(cid,variable['id'])] + return result + +def print_category (config, cid, show_comments=True): + cid=cid.lower() + CID=cid.upper() + vids=list_category(config,cid) + if (len(vids) == 0): + print "%s : no such category"%CID + else: + print "Category %s contains" %(CID) + for vid in vids: + print vid.upper() + +#################### +def consolidate (default_config, site_config, consolidated_config): + try: + conso = PLCConfiguration (default_config) + conso.load (site_config) + conso.save (consolidated_config) + except Exception, inst: + print "Could not consolidate, %s" % (str(inst)) + return + print ("Merged\n\t%s\nand\t%s\ninto\t%s"%(default_config,site_config, + consolidated_config)) + +#################### +def restart_plc (): + print ("==================== Stopping %s" % service) + os.system("service %s stop" % service) + print ("==================== Starting %s" % service) + os.system("service %s start" % service) + +#################### +def prompt_variable (cdef, cread, cwrite, category, variable, + show_comments, support_next=False): assert category.has_key('id') assert variable.has_key('id') @@ -68,9 +199,11 @@ def prompt_variable (cdef, cread, cwrite, category, variable): while True: default_value = get_value(cdef,category_id,variable_id) - current_value = get_value(cread,category_id, variable_id) + current_value = get_current_value(cread,cwrite,category_id, variable_id) 
varname = get_varname (cread,category_id, variable_id) + if show_comments : + print_name_comments (cdef, category_id, variable_id) prompt = "== %s : [%s] " % (varname,current_value) try: answer = raw_input(prompt).strip() @@ -82,16 +215,8 @@ def prompt_variable (cdef, cread, cwrite, category, variable): return None elif (answer == "."): raise Exception ('BailOut') - elif (answer == ">"): - raise Exception ('NextCategory') elif (answer == "#"): - if friendly_name is not None: - print ("### " + friendly_name) - if comments == None: - print ("!!! No comment associated to %s" % varname) - else: - for line in comments: - print ("# " + line) + print_name_comments(cread,category_id,variable_id) elif (answer == "?"): print variable_usage.strip() elif (answer == "="): @@ -101,19 +226,24 @@ def prompt_variable (cdef, cread, cwrite, category, variable): cwrite.delete(category_id,variable_id) print ("%s reverted to %s" %(varname,default_value)) return + elif (answer == ">"): + if support_next: + raise Exception ('NextCategory') + else: + print "No support for next category" else: variable['value'] = answer cwrite.set(category,variable) return -#################### -def prompt_all_variables (cdef, cread, cwrite): +def prompt_variables_all (cdef, cread, cwrite, show_comments): try: for (category_id, (category, variables)) in cread.variables().iteritems(): - print ("========== Category = %s" % category_id) + print ("========== Category = %s" % category_id.upper()) for variable in variables.values(): try: - newvar = prompt_variable (cdef, cread, cwrite, category, variable) + newvar = prompt_variable (cdef, cread, cwrite, category, variable, + show_comments, True) except Exception, inst: if (str(inst) == 'NextCategory'): break else: raise @@ -122,137 +252,214 @@ def prompt_all_variables (cdef, cread, cwrite): if (str(inst) == 'BailOut'): return else: raise - -#################### -def consolidate (main_config, site_config, consolidated_config): +def prompt_variables_category (cdef, 
cread, cwrite, cid, show_comments): + cid=cid.lower() + CID=cid.upper() try: - conso = PLCConfiguration (main_config) - conso.load (site_config) - conso.save (consolidated_config) + print ("========== Category = %s" % CID) + for vid in list_category(cdef,cid): + (category,variable) = cdef.locate_varname(vid.upper()) + newvar = prompt_variable (cdef, cread, cwrite, category, variable, + show_comments, False) except Exception, inst: - print "Could not consolidate, %s" % (str(inst)) - return - print ("Merged\n\t%s\nand\t%s\ninto\t%s"%(main_config,site_config,consolidated_config)) - + if (str(inst) == 'BailOut'): return + else: raise + #################### -def restart_plc (): - print ("==================== Stopping plc") - os.system("service plc stop") - print ("==================== Starting plc") - os.system("service plc start") +def show_variable (cdef, cread, cwrite, + category, variable,show_value,show_comments): + assert category.has_key('id') + assert variable.has_key('id') + + category_id = category ['id'] + variable_id = variable['id'] + + default_value = get_value(cdef,category_id,variable_id) + current_value = get_current_value(cread,cwrite,category_id,variable_id) + varname = get_varname (cread,category_id, variable_id) + if show_comments : + print_name_comments (cdef, category_id, variable_id) + if show_value: + print "%s = %s" % (varname,current_value) + else: + print "%s" % (varname) + +def show_variables_all (cdef, cread, cwrite, show_value, show_comments): + for (category_id, (category, variables)) in cread.variables().iteritems(): + print ("========== Category = %s" % category_id.upper()) + for variable in variables.values(): + show_variable (cdef, cread, cwrite, + category, variable,show_value,show_comments) + +def show_variables_category (cdef, cread, cwrite, cid, show_value,show_comments): + cid=cid.lower() + CID=cid.upper() + print ("========== Category = %s" % CID) + for vid in list_category(cdef,cid): + (category,variable) = 
cdef.locate_varname(vid.upper()) + show_variable (cdef, cread, cwrite, category, variable, + show_value,show_comments) #################### -mainloop_usage= """Available commands -c\tEdits commonly tuned variables -e\tEdits all variables -p\tPrints all locally-customized vars and values -e \tPrompts (edit) fro variable -p \tShows current setting for -l\tlists all known variables -w\tsaves & consolidates -r\trestarts plc service -q\tQuits without saving ---- -Typical usage involves: c, [p,] w, r -""" +re_mainloop_0arg="^(?P[uUwrqlLsSeEcvVhH\?])[ \t]*$" +re_mainloop_1arg="^(?P[sSeEvV])[ \t]+(?P\w+)$" +matcher_mainloop_0arg=re.compile(re_mainloop_0arg) +matcher_mainloop_1arg=re.compile(re_mainloop_1arg) -re_mainloop_var="^(?P[pe])[ \t]+(?P\w+)$" -matcher_mainloop_var=re.compile(re_mainloop_var) - -common_variables=("PLC_NAME", - "PLC_ROOT_USER", - "PLC_ROOT_PASSWORD", - "PLC_MAIL_SUPPORT_ADDRESS", - "PLC_DB_HOST", - "PLC_API_HOST", - "PLC_WWW_HOST", - "PLC_BOOT_HOST", - "PLC_NET_DNS1", - "PLC_NET_DNS2") - -def mainloop (cdef, cread, cwrite,main_config, site_config, consolidated_config): +def mainloop (cdef, cread, cwrite, default_config, site_config, consolidated_config): while True: try: - answer = raw_input("Enter command (c for usual changes, w to save, ? for help) ").strip() + answer = raw_input("Enter command (u for usual changes, w to save, ? 
for help) ").strip() except EOFError: answer ="" - answer=answer.lower() - if (answer == "") or (answer == "?") or (answer == "h"): + if (answer == "") or (answer in "?hH"): print mainloop_usage - elif (answer == "q"): + continue + groups_parse = matcher_mainloop_0arg.match(answer) + command=None + if (groups_parse): + command = groups_parse.group('command') + arg=None + else: + groups_parse = matcher_mainloop_1arg.match(answer) + if (groups_parse): + command = groups_parse.group('command') + arg=groups_parse.group('arg') + if not command: + print ("Unknown command >%s< -- use h for help" % answer) + continue + + show_comments=command.isupper() + command=command.lower() + + mode='ALL' + if arg: + mode=None + arg=arg.lower() + variables=list_category (cdef,arg) + if len(variables): + # category_id as the category name + # variables as the list of variable names + mode='CATEGORY' + category_id=arg + arg=arg.upper() + (category,variable)=cdef.locate_varname(arg) + if variable: + # category/variable as output by locate_varname + mode='VARIABLE' + if not mode: + print "%s: no such category or variable" % arg + continue + + if (command in "qQ"): # todo check confirmation return - elif (answer == "e"): - prompt_all_variables(cdef, cread, cwrite) - elif (answer == "w"): + elif (command in "wW"): try: cwrite.save(site_config) except: print ("Could not save -- fix write access on %s" % site_config) break print ("Wrote %s" % site_config) - consolidate(main_config, site_config, consolidated_config) + consolidate(default_config, site_config, consolidated_config) print ("You might want to type 'r' (restart plc) or 'q' (quit)") - elif (answer == "l"): - print ("Config involves the following variables") - sys.stdout.write(cread.output_variables()) - elif (answer == "p"): - print ("Current site config") - sys.stdout.write(cwrite.output_shell(False)) - elif (answer == "c"): + elif (command == "u"): try: for varname in common_variables: (category,variable) = 
cdef.locate_varname(varname) - prompt_variable(cdef, cread, cwrite, category, variable) + prompt_variable(cdef, cread, cwrite, category, variable, False) except Exception, inst: if (str(inst) != 'BailOut'): raise - elif (answer == "r"): + elif (command == "r"): restart_plc() + elif (command == "c"): + print_categories(cread) + elif (command in "eE"): + if mode == 'ALL': + prompt_variables_all(cdef, cread, cwrite,show_comments) + elif mode == 'CATEGORY': + prompt_variables_category(cdef,cread,cwrite,category_id,show_comments) + elif mode == 'VARIABLE': + try: + prompt_variable (cdef,cread,cwrite,category,variable, + show_comments,False) + except Exception, inst: + if (str(inst) != 'BailOut'): + raise + elif (command in "vVsSlL"): + show_value=(command in "sSlL") + (c1,c2,c3) = (cdef, cread, cwrite) + if (command in "lL"): + (c1,c2,c3) = (cwrite,cwrite,cwrite) + if mode == 'ALL': + show_variables_all(c1,c2,c3,show_value,show_comments) + elif mode == 'CATEGORY': + show_variables_category(c1,c2,c3,category_id,show_value,show_comments) + elif mode == 'VARIABLE': + show_variable (c1,c2,c3,category,variable,show_value,show_comments) + else: + print ("Unknown command >%s< -- use h for help" % answer) + +#################### +def check_dir (config_file): + dirname = os.path.dirname (config_file) + if (not os.path.exists (dirname)): + print "Config file %s located under a non-existing directory" % config_file + answer=raw_input("Want to create %s [y]/n ? 
" % dirname) + answer = answer.lower() + if (answer == 'n'): + print "Cannot proceed - good bye" + sys.exit(1) else: - groups_var = matcher_mainloop_var.match(answer) - if (groups_var): - command = groups_var.group('command') - varname = groups_var.group('varname') - (category,variable) = cdef.locate_varname(varname) - if not category: - print "Unknown variable %s" % varname - elif (command == 'p'): - print ("%s = %s" % (varname,get_value(cwrite, - category['id'], - variable['id']))) - else: - try: - prompt_variable(cdef, cread, cwrite, category,variable) - except Exception, inst: - if (str(inst) != 'BailOut'): - raise + os.makedirs(dirname,0755) + if (not os.path.exists (dirname)): + print "Cannot create dir %s - exiting" % dirname + sys.exit(1) else: - print ("Unknown command >%s<" % answer) + print "Created directory %s" % dirname + #################### def main (): - save = True command=sys.argv[0] argv = sys.argv[1:] + + save = True + # default is myplc (non -devel) unless -d is specified + init_flavour("boot") + optlist,list = getopt.getopt(argv,":dhv") + for opt in optlist: + if opt[0] == "-h": + usage() + if opt[0] == "-v": + print ("This is %s - %s" %(command,release_rev)) + sys.exit(1) + if opt[0] == "-d": + init_flavour("devel") + argv=argv[1:] + if len(argv) == 0: - (main_config,site_config,consolidated_config) = (def_main_config, def_site_config, def_consolidated_config) + (default_config,site_config,consolidated_config) = (def_default_config, def_site_config, def_consolidated_config) elif len(argv) == 1: - (main_config,site_config,consolidated_config) = (argv[1], def_site_config, def_consolidated_config) + (default_config,site_config,consolidated_config) = (argv[0], def_site_config, def_consolidated_config) elif len(argv) == 2: - (main_config, site_config,consolidated_config) = (argv[1], argv[2], def_consolidated_config) + (default_config, site_config,consolidated_config) = (argv[0], argv[1], def_consolidated_config) elif len(argv) == 3: - 
(main_config, site_config,consolidated_config) = argv + (default_config, site_config,consolidated_config) = argv else: usage() + for c in (default_config,site_config,consolidated_config): + check_dir (c) + try: # the default settings only - read only - cdef = PLCConfiguration(main_config) + cdef = PLCConfiguration(default_config) # in effect : default settings + local settings - read only - cread = PLCConfiguration(main_config) + cread = PLCConfiguration(default_config) except: print ("default config files not found, is myplc installed ?") @@ -267,8 +474,7 @@ def main (): except: cwrite = PLCConfiguration() - print ("This is %s - %s -- Type ? at the prompt for help" %(command,release)) - mainloop (cdef, cread, cwrite,main_config, site_config, consolidated_config) + mainloop (cdef, cread, cwrite,default_config, site_config, consolidated_config) return 0 if __name__ == '__main__': diff --git a/plc_devel_config.xml b/plc_devel_config.xml index 5c14e3a..bf24a3f 100644 --- a/plc_devel_config.xml +++ b/plc_devel_config.xml @@ -6,7 +6,7 @@ Default PLC build environment configuration file Mark Huang Copyright (C) 2006 The Trustees of Princeton University -$Id: plc_devel_config.xml,v 1.2 2006/07/18 17:37:53 mlhuang Exp $ +$Id: plc_devel_config.xml,v 1.3 2006/08/16 01:27:16 mlhuang Exp $ --> @@ -37,7 +37,7 @@ $Id: plc_devel_config.xml,v 1.2 2006/07/18 17:37:53 mlhuang Exp $ Fedora Core Mirror URL - file:///usr/share/mirrors/fedora + file:///data/fedora Fedora Core mirror from which to install filesystems.