CVS User Account cvsuser
Tue Apr 11 13:22:33 PDT 2006
Log Message:
-----------
Added two new management scripts:

- mkslonconf.sh - which rummages through a Slony-I cluster and generates
  a slon-compatible .conf file for each node

- launch_clusters.sh - which will invoke slons based on the above
  created .conf files

Documentation included...
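
A typical (purely hypothetical) sequence of use, assuming the
environment variables described in the script headers are set
appropriately (libpq PG* variables, PATH including slon, etc.),
might look like:

    # once per cluster: build a node[n].conf file for each node found
    SLONYCLUSTER=mycluster MKDESTINATION=/var/spool/slony1 \
        LOGHOME=/var/log/slony1 sh tools/mkslonconf.sh

    # periodically (e.g. via cron): start any slons that are not running
    SLHOME=/var/spool/slony1 LOGHOME=/var/log/slony1 \
        CLUSTERS="mycluster" sh tools/launch_clusters.sh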

Modified Files:
--------------
    slony1-engine/doc/adminguide:
        adminscripts.sgml (r1.30 -> r1.31)

Added Files:
-----------
    slony1-engine/tools:
        launch_clusters.sh (r1.1)
        mkslonconf.sh (r1.1)

-------------- next part --------------
--- /dev/null
+++ tools/launch_clusters.sh
@@ -0,0 +1,94 @@
+#!/bin/sh
+# $Id: launch_clusters.sh,v 1.1 2006/04/11 20:22:28 cbbrowne Exp $
+# Cluster starter
+
+# This script should be run periodically to search for slon
+# "node[n].conf" configuration files, and make sure corresponding slon
+# daemons are running.
+
+# Environment variables needed:
+
+# PATH=/opt/dbs/pgsql74/bin:$PATH
+#
+#   The PATH must include, preferably at the beginning, the path to
+#   the slon binaries that are to be run
+#
+#SLHOME=/var/spool/slony1
+#   This indicates the "home" directory for slon configuration files
+#
+#LOGHOME=/var/log/slony1
+#   This indicates the "home" directory for slon log files
+#
+#CLUSTERS="oxrsorg oxrslive oxrsaero oxrsin oxrsflex2 oxrsmobi"
+#   This indicates a list of clusters that are to be managed
+#
+# There are further expectations of directory structures under $SLHOME
+# and $LOGHOME, as follows:
+#
+# Under $SLHOME, there is a structure of "conf" files in the directory
+#   $SLHOME/$cluster/conf
+# with names like node1.conf, node2.conf, and so forth
+#
+# Under $SLHOME, there is also a directory for storing PID files, thus
+# $SLHOME/$cluster/pid/node1.pid, $SLHOME/$cluster/pid/node2.pid, and
+# so forth
+#
+# Under $LOGHOME, logs will be appended, for each cluster and each
+# node, into $LOGHOME/$cluster/node$nodenum/$cluster-node$nodenum.log
+
+WATCHLOG="$LOGHOME/watcher.log"
+
+# This function provides a shorthand for logging messages to $WATCHLOG
+log_action () {
+    LOGVALUE=$1
+    NOW=`date`
+    echo "$NOW $LOGVALUE" >> $WATCHLOG
+}
+ 
+invoke_slon () {
+    LOGHOME=$1
+    NODENUM=$2
+    CLUSTER=$3
+    SLONCONF=$4
+    log_action "run slon - slon -f $SLONCONF >> $LOGHOME/node${NODENUM}/${CLUSTER}-node${NODENUM}.log"
+    mkdir -p $LOGHOME/node${NODENUM}
+    slon -f $SLONCONF >> $LOGHOME/node${NODENUM}/${CLUSTER}-node${NODENUM}.log &
+}
+
+start_slon_if_needed () {
+    CONFIGPATH=$1
+    NODENUM=$2
+    LOGHOME=$3
+
+    if [[ -e $CONFIGPATH/conf/node${NODENUM}.conf ]] ; then
+	SLONCONF="$CONFIGPATH/conf/node${NODENUM}.conf"
+	# strip comments, trailing junk, and any quoting around the pid_file value
+	SLONPIDFILE=`grep "^ *pid_file=" $SLONCONF | cut -d "=" -f 2 | cut -d "#" -f 1 | cut -d " " -f 1 | cut -d "'" -f 2`
+	CLUSTER=`grep "^ *cluster_name=" $SLONCONF | cut -d "=" -f 2 | cut -d "'" -f 2`
+    else
+	log_action "Could not find node configuration in $CONFIGPATH/conf/node${NODENUM}.conf"
+	return
+    fi
+    if [[ -e $SLONPIDFILE ]] ; then
+	SLONPID=`cat $SLONPIDFILE`
+	FINDIT=`ps auxww $SLONPID | grep slon`
+	if [[ -z $FINDIT ]]; then
+        # Need to restart slon
+	    log_action "slon died for config $CONFIGPATH/conf/node${NODENUM}.conf"
+	    invoke_slon $LOGHOME $NODENUM $CLUSTER $SLONCONF
+	else
+	    echo "Slon already running - $SLONPID"
+	fi
+    else
+	invoke_slon $LOGHOME $NODENUM $CLUSTER $SLONCONF
+    fi
+}
+
+for cluster in `echo $CLUSTERS`; do
+    for conffile in `find $SLHOME/$cluster/conf -name "node[0-9]*.conf"`; do
+	# 1. Use sed to chop off all the path info
+        # 2. First cut to chop off up to "node"
+        # 3. Second cut to take off the ".conf" part
+        nodenum=`echo $conffile | sed 's/.*\///g' | cut -d "e" -f 2 | cut -d "." -f 1`
+	start_slon_if_needed $SLHOME/$cluster $nodenum $LOGHOME/$cluster
+    done
+done
--- /dev/null
+++ tools/mkslonconf.sh
@@ -0,0 +1,197 @@
+#!/bin/sh
+# $Id: mkslonconf.sh,v 1.1 2006/04/11 20:22:28 cbbrowne Exp $
+# mkslonconf.sh
+# Build a set of slon.conf files compatible with launch_clusters.sh
+
+# Start with: 
+
+# a) Environment set up with libpq-compatible parameters to connect to
+#    a database
+# b) SLONYCLUSTER set to the appropriate cluster name
+# c) MKDESTINATION being a directory for conf/pid files to live in
+# d) LOGHOME being a directory for log directories to live in
+# SLONYCLUSTER=oxrsaero
+# MKDESTINATION=/tmp
+# LOGHOME=/tmp/logs
+
+echo "building slon config files in ${MKDESTINATION}/${SLONYCLUSTER}"
+mkdir -p ${MKDESTINATION}/${SLONYCLUSTER}
+mkdir -p ${MKDESTINATION}/${SLONYCLUSTER}/conf
+mkdir -p ${MKDESTINATION}/${SLONYCLUSTER}/pid
+echo "Make sure ${MKDESTINATION}/${SLONYCLUSTER}/conf, ${MKDESTINATION}/${SLONYCLUSTER}/pid exist"
+
+query="select pa_server, min(pa_conninfo) from \"_${SLONYCLUSTER}\".sl_path group by pa_server;"
+#echo "Query: $query"
+queryoutput="/tmp/slonconf.query.$$"
+psql --no-align --command "$query" --field-separator "|" --quiet --tuples-only > $queryoutput
+exec 3< $queryoutput
+
+create_conf_file ()
+{
+   node=$1
+   dsn=$2
+   conffile="${MKDESTINATION}/${SLONYCLUSTER}/conf/node${node}.conf"
+cat <<_EOF_ > $conffile
+# Sets how many cleanup cycles to run before a vacuum is done.
+# Range: [0,100], default: 3
+#vac_frequency=3
+
+# Debug log level (higher value ==> more output).  Range: [0,4], default 4
+#log_level=2
+
+# Check for updates at least this often in milliseconds.
+# Range: [10-60000], default 100
+#sync_interval=1000
+
+# Maximum amount of time in milliseconds before issuing a SYNC event.
+# This prevents a possible race condition in which the action sequence
+# is bumped by the trigger while inserting the log row, which makes
+# this bump immediately visible to the sync thread, but
+# the resulting log rows are not visible yet.  If the sync is picked
+# up by the subscriber, processed and finished before the transaction
+# commits, this transaction's changes will not be replicated until the
+# next SYNC.  But if all application activity suddenly stops,
+# there will be no more sequence bumps, so the high-frequency -s check
+# won't detect that.  Thus the need for sync_interval_timeout.
+# Range: [0-120000], default 1000
+#sync_interval_timeout=10000
+
+# Maximum number of SYNC events to group together when/if a subscriber
+# falls behind.  SYNCs are batched only if there are that many available 
+# and if they are contiguous. Every other event type in between leads to 
+# a smaller batch.  And if there is only one SYNC available, even -g60 
+# will apply just that one. As soon as a subscriber catches up, it will 
+# apply every single SYNC by itself.
+# Range:  [0,100], default: 6
+#sync_group_maxsize=6
+
+# Size above which an sl_log_? row's log_cmddata is considered large.
+# Up to 500 rows of this size are allowed in memory at once. Rows larger
+# than that count into the sync_max_largemem space allocated and free'd
+# on demand.
+# Range:  [1024,32768], default: 8192
+#sync_max_rowsize=8192
+
+# Maximum amount of memory allowed for large rows. Note that the algorithm
+# will stop fetching rows AFTER this amount is exceeded, not BEFORE. This
+# is done to ensure that a single row exceeding this limit alone does not
+# stall replication.
+# Range:  [1048576,1073741824], default: 5242880
+#sync_max_largemem=5242880
+
+# If this parameter is 1, messages go both to syslog and the standard 
+# output. A value of 2 sends output only to syslog (some messages will 
+# still go to the standard output/error).  The default is 0, which means 
+# syslog is off.  
+# Range:  [0-2], default: 0
+#syslog=0
+
+# If true, include the process ID on each log line.  Default is false.
+#log_pid=false
+
+# If true, include the timestamp on each log line.  Default is true.
+#log_timestamp=true
+
+# A strftime()-conformant format string for use with log timestamps.
+# Default is '%Y-%m-%d %H:%M:%S %Z'
+#log_timestamp_format='%Y-%m-%d %H:%M:%S %Z'
+
+# Where to write the pid file.  Default:  no pid file
+pid_file='${MKDESTINATION}/${SLONYCLUSTER}/pid/node${node}.pid'
+
+# Sets the syslog "facility" to be used when syslog is enabled.  Valid
+# values are LOCAL0, LOCAL1, LOCAL2, LOCAL3, LOCAL4, LOCAL5, LOCAL6, LOCAL7.
+#syslog_facility=LOCAL0
+
+# Sets the program name used to identify slon messages in syslog.
+#syslog_ident=slon
+
+# Set the cluster name that this instance of slon is running against;
+# the default is to read it off the command line
+cluster_name='${SLONYCLUSTER}'
+
+# Set slon's connection info, default is to read it off the command line
+conn_info='${dsn}'
+
+# Maximum time planned for grouped SYNCs, in milliseconds.
+# If replication is behind, slon will try to increase the number of
+# SYNCs grouped together, targeting that each group should take about
+# this amount of time to process.
+# Range [10000,600000], default 60000.
+desired_sync_time=60000
+
+# Execute the following SQL on each node at slon connect time;
+# useful to set logging levels, or to tune the planner/memory
+# settings.  You can specify multiple statements by separating
+# them with a ;
+#sql_on_connection="SET log_min_duration_statement TO '1000';"
+
+# Command to run upon committing a log archive.
+# This command is passed one parameter, namely the full pathname of
+# the archive file
+#command_on_logarchive=""
+
+# A PostgreSQL value compatible with ::interval which indicates how
+# far behind this node should lag its providers.
+# lag_interval=""
+
+# Directory in which to stow sync archive files
+# archive_dir=""
+
+_EOF_
+mkdir -p $LOGHOME/$SLONYCLUSTER/node${node}
+
+echo " For node ${node}, created conf file $conffile"
+echo " as well as log directory $LOGHOME/$SLONYCLUSTER/node${node}"
+echo " -------------------------------------------------------------"
+
+}
+
+while read line <&3; do
+    #echo "Line: $line"
+    node=`echo ${line} | cut -d "|" -f 1`   
+    dsn=`echo ${line} | cut -d "|" -f 2`
+
+   #echo "node[$node]  dsn[$dsn]"
+    conffile="${MKDESTINATION}/${SLONYCLUSTER}/conf/node${node}.conf"
+    echo "Generating slon conf file $conffile"
+
+    if [[ -e $conffile ]] ; then
+	echo "config file $conffile already exists."
+	echo "Do you want to (Overwrite) it or (Skip) it (Anything else aborts) [Overwrite|Skip]?"
+	read nl
+	case $nl in
+	    (Overwrite)
+	    echo "overwriting..."
+	    create_conf_file $node $dsn
+	    ;;
+	    (Skip)
+	    echo "skipping..."
+	    ;;
+	    (*)
+	    echo "invalid input - aborting..."
+	    exit 1
+	    ;;
+	esac
+    else
+	echo "creating conf file for new node $node with DSN [$dsn]"
+	create_conf_file $node $dsn
+    fi
+done
+rm $queryoutput
+
+cat <<EOF 
+---------------------------------------------------------------------
+Be sure to review .conf files created in
+${MKDESTINATION}/${SLONYCLUSTER}/conf to ensure they are reasonable
+before starting up slons against them.  Customize as needed.
+
+Notably, if you have nodes which are reached via different DSNs from
+different locations, then the conn_info value may not be correct.
+
+In addition, this script will happily create .conf files for any
+nodes for which it did not find one.  If you intended certain nodes
+to be controlled from some other host, this could pull them onto the
+local host, which would not be what you wanted.  In most cases, this
+would only cause minor inconvenience; if you are running log shipping
+against a particular remote subscriber, this could cause you some
+real heartburn...
+---------------------------------------------------------------------
+EOF
Index: adminscripts.sgml
===================================================================
RCS file: /usr/local/cvsroot/slony1/slony1-engine/doc/adminguide/adminscripts.sgml,v
retrieving revision 1.30
retrieving revision 1.31
diff -Ldoc/adminguide/adminscripts.sgml -Ldoc/adminguide/adminscripts.sgml -u -w -r1.30 -r1.31
--- doc/adminguide/adminscripts.sgml
+++ doc/adminguide/adminscripts.sgml
@@ -236,6 +236,144 @@
 from one version of &slony1; to another.</para>
 </sect2>
 
+<sect2><title>mkslonconf.sh</title>
+
+<para> This is a shell script designed to rummage through a &slony1;
+cluster and generate a set of <filename>slon.conf</filename> files
+that &lslon; accesses via the <command> slon -f slon.conf </command>
+option. </para>
+
+<para> With all of the configuration residing in a configuration file
+for each &lslon;, they can be invoked with minimal muss and fuss, with
+no risk of forgetting the <command>-a</command> option and thereby
+breaking a <link linkend="logshipping"> log shipping </link>
+node. </para>
+
+<para> Running it requires the following environment configuration: </para>
+
+<itemizedlist>
+
+<listitem><para> Firstly, the environment needs to be set up with
+suitable parameters for libpq to connect to one of the databases in
+the cluster.  Thus, you need some suitable combination of the
+following environment variables set:</para>
+
+<itemizedlist>
+<listitem><para><envar>PGPORT</envar></para></listitem>
+<listitem><para><envar>PGDATABASE</envar></para></listitem>
+<listitem><para><envar>PGHOST</envar></para></listitem>
+<listitem><para><envar>PGUSER</envar></para></listitem>
+<listitem><para><envar>PGSERVICE</envar></para></listitem>
+</itemizedlist>
+
+</listitem>
+
+<listitem><para> <envar>SLONYCLUSTER</envar> - the name of the
+&slony1; cluster to be <quote>rummaged</quote>.  </para></listitem>
+
+<listitem><para> <envar>MKDESTINATION</envar> - a directory for
+configuration to reside in; the script will create
+<filename>MKDESTINATION/$SLONYCLUSTER/conf</filename> for the &lslon;
+configuration files, and
+<filename>MKDESTINATION/$SLONYCLUSTER/pid</filename> for &lslon; to
+store PID files in. </para></listitem>
+
+<listitem><para> <envar>LOGHOME</envar> - a directory for log files to
+reside in; a directory of the form
+<command>$LOGHOME/$SLONYCLUSTER/node[number]</command> will be created
+for each node. </para></listitem>
+
+</itemizedlist>
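+
+<para> For instance, a minimal (and purely hypothetical) setup might
+look like the following; substitute names, paths, and libpq
+parameters appropriate to your own installation: </para>
+
+<programlisting>
+export PGHOST=master.example.com PGDATABASE=mydb PGUSER=slony
+export SLONYCLUSTER=mycluster
+export MKDESTINATION=/var/spool/slony1
+export LOGHOME=/var/log/slony1
+sh tools/mkslonconf.sh
+</programlisting>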
+
+<para> For any <quote>new</quote> nodes that it discovers, this script
+will create a new &lslon; conf file. </para>
+
+<warning><para> It is fair to say that there are several conditions to
+beware of; none of these should be greatly surprising...</para>
+
+<itemizedlist>
+
+<listitem><para> The DSN is pulled from the minimum value found for
+each node in <envar>sl_path</envar>.  You may very well need to modify
+this.</para></listitem>
+
+<listitem><para> Various parameters are set to default values; you may
+wish to customize them by hand. </para></listitem>
+
+<listitem><para> If you are running &lslon; processes on multiple
+nodes (<emphasis>e.g.</emphasis> - as when running &slony1; across a
+WAN), this script will happily create fresh new config files for
+&lslon;s you wanted to have run on another host.  </para>
+
+<para> Be sure to check out what nodes it set up before restarting
+&lslon;s.  </para>
+
+<para> This would usually only cause some minor inconvenience due to,
+for instance, a &slon; running at a non-preferred site, and either
+failing due to lack of network connectivity (in which case no damage is
+done!) or running a bit less efficiently than it might have due to
+living at the wrong end of the network <quote>pipe.</quote> </para>
+
+<para> On the other hand, if you are running a log shipping node at
+the remote site, accidentally introducing a &lslon; that
+<emphasis>isn't</emphasis> collecting logs could ruin your whole
+week. </para>
+</listitem>
+</itemizedlist>
+
+</warning>
+
+<para> The file layout set up by <filename>mkslonconf.sh</filename>
+was specifically set up to allow managing &lslon;s across a
+multiplicity of clusters using the script in the following
+section... </para>
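+
+<para> Under a hypothetical <envar>MKDESTINATION</envar> of
+<filename>/var/spool/slony1</filename>, with two clusters under
+management, the layout would look something like the following (the
+<filename>pid</filename> files appear once the &lslon;s are running):
+</para>
+
+<programlisting>
+/var/spool/slony1/cluster1/conf/node1.conf
+/var/spool/slony1/cluster1/conf/node2.conf
+/var/spool/slony1/cluster1/pid/node1.pid
+/var/spool/slony1/cluster2/conf/node1.conf
+/var/spool/slony1/cluster2/pid/node1.pid
+</programlisting>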
+
+</sect2>
+
+<sect2><title> launch_clusters.sh </title>
+
+<para> This is another shell script which uses the configuration as
+set up by <filename>mkslonconf.sh</filename> and is intended to be run
+regularly to ensure that &lslon; processes are running.</para>
+
+<para> It uses the following environment variables: </para>
+
+<itemizedlist>
+
+<listitem><para><envar>PATH</envar> which needs to contain, preferably
+at the beginning, a path to the &lslon; binaries that should be
+run.</para></listitem>
+
+<listitem><para><envar>SLHOME</envar> indicates the
+<quote>home</quote> directory for &lslon; configuration files; they
+are expected to be arranged in subdirectories, one for each cluster,
+with filenames of the form <filename>node1.conf</filename>,
+<filename>node2.conf</filename>, and so forth. </para>
+
+<para> The script uses the command <command>find $SLHOME/$cluster/conf
+-name "node[0-9]*.conf"</command> to find &lslon; configuration files.</para>
+
+<para> If you remove some of these files, or rename them so their
+names do not conform to the <command>find</command> command, they
+won't be found; that is an easy way to drop nodes out of this system.
+</para></listitem>
+
+<listitem><para><envar>LOGHOME</envar> indicates the
+<quote>home</quote> directory for log storage.</para>
+
+<para> This script does not assume the use of the Apache log rotator
+to manage logs; since &postgres; 8 does its own log rotation, it
+seems undesirable to keep a dependency on any specific log rotation
+<quote>technology.</quote> </para></listitem>
+
+<listitem><para><envar>CLUSTERS</envar> is a list of &slony1; clusters
+under management. </para></listitem>
+
+</itemizedlist>
+
+<para> In effect, you could run this every five minutes, and it would
+launch any missing &lslon; processes. </para>
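+
+<para> For instance, a crontab entry along the following (purely
+hypothetical) lines would accomplish that, assuming an environment
+file <filename>slon.env</filename> that exports the variables listed
+above, and that the script itself lives in
+<filename>/usr/local/bin</filename>: </para>
+
+<programlisting>
+*/5 * * * *  . $HOME/slon.env; /usr/local/bin/launch_clusters.sh >> $LOGHOME/launcher.log 2>&amp;1
+</programlisting>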
+
 </sect1>
 <!-- Keep this comment at the end of the file
 Local variables:


