From cvsuser Tue Mar 1 20:22:45 2005
From: cvsuser (CVS User Account)
Date: Tue Feb 13 08:58:15 2007
Subject: [Slony1-commit] By cbbrowne: Added in UPDATE FUNCTION;
for some reason that hadn't made
Message-ID: <20050301202239.2C8FAB1D388@gborg.postgresql.org>
Log Message:
-----------
Added in UPDATE FUNCTION; for some reason that hadn't made it into the
SGML documentation.
Modified Files:
--------------
slony1-engine/doc/adminguide:
slonik_ref.sgml (r1.16 -> r1.17)
-------------- next part --------------
Index: slonik_ref.sgml
===================================================================
RCS file: /usr/local/cvsroot/slony1/slony1-engine/doc/adminguide/slonik_ref.sgml,v
retrieving revision 1.16
retrieving revision 1.17
diff -Ldoc/adminguide/slonik_ref.sgml -Ldoc/adminguide/slonik_ref.sgml -u -w -r1.16 -r1.17
--- doc/adminguide/slonik_ref.sgml
+++ doc/adminguide/slonik_ref.sgml
@@ -1904,6 +1904,46 @@
+ UPDATE FUNCTIONS
+
+ UPDATE FUNCTIONS
+
+ Reload stored functions
+
+
+ UPDATE FUNCTIONS (options);
+
+
+
+ Description
+
+ Reloads stored functions for a node.
+
+ Reloads all stored procedure and function definitions in the
+ &slony1; schema for the specified node. This command is usually
+ part of the &slony1; software upgrade procedure.
+
+
+
+ ID = ival
+
+ The node to refresh.
+
+
+
+
+
+ Example
+
+UPDATE FUNCTIONS (
+ ID = 3 # Update functions on node 3
+);
+
+
+
+
+
+
WAIT FOR EVENTWAIT FOR EVENT
From cvsuser Thu Mar 3 23:44:52 2005
From: cvsuser (CVS User Account)
Date: Tue Feb 13 08:58:15 2007
Subject: [Slony1-commit] By cbbrowne: Portability changes: 1.
Message-ID: <20050303234446.ADB04B1CE87@gborg.postgresql.org>
Log Message:
-----------
Portability changes:
1. Use the appropriate ps command and options
Script is aware of Linux, FreeBSD, Solaris, and AIX
2. Fixed problem with line endings (that I probably introduced)
Modified Files:
--------------
slony1-engine/tools:
check_slon.sh (r1.1 -> r1.2)
-------------- next part --------------
Index: check_slon.sh
===================================================================
RCS file: /usr/local/cvsroot/slony1/slony1-engine/tools/check_slon.sh,v
retrieving revision 1.1
retrieving revision 1.2
diff -Ltools/check_slon.sh -Ltools/check_slon.sh -u -w -r1.1 -r1.2
--- tools/check_slon.sh
+++ tools/check_slon.sh
@@ -22,7 +22,7 @@
# check parameters are valid
if [[ $# -lt 2 && $# -gt 3 ]]
then
- echo "Invalid parameters need CLUSTERNAME DBNAME [LOGFILE]"
+ echo "Invalid parameters need CLUSTERNAME DBNAME DBHOST [LOGFILE]"
exit 2
fi
@@ -32,8 +32,15 @@
LOGFILE=$3
# check to see whether the slon daemon is running
-SLONPROCESS=`ps -auxww | egrep "[s]lon $CLUSTERNAME" | egrep
-"dbname=$DBNAME" | awk '{print $2}'`
+case `uname` in
+Linux) PSCOMMAND="ps auxww" ;;
+SunOS) PSCOMMAND="/usr/ucb/ps -auxww" ;;
+FreeBSD) PSCOMMAND="/bin/ps -auxww" ;;
+AIX) PSCOMMAND="/usr/bin/ps auxww" ;;
+*) PSCOMMAND="ps auxww"
+esac
+
+SLONPROCESS=`$PSCOMMAND | egrep "[s]lon $CLUSTERNAME" | egrep "dbname=$DBNAME" | awk '{print $2}'`
if [ ! -n "$SLONPROCESS" ]
then
From cvsuser Thu Mar 3 23:46:45 2005
From: cvsuser (CVS User Account)
Date: Tue Feb 13 08:58:15 2007
Subject: [Slony1-commit] By cbbrowne: - Added in ability to query a remote
host - Mention the
Message-ID: <20050303234636.DCF58B1CE87@gborg.postgresql.org>
Log Message:
-----------
- Added in ability to query a remote host
- Mention the need to set PGPORT
- Needed to properly quote the cluster namespace
- psql often doesn't live in /usr/local/pgsql/bin; assume it to be in PATH
Modified Files:
--------------
slony1-engine/tools:
check_slony_cluster.sh (r1.1 -> r1.2)
-------------- next part --------------
Index: check_slony_cluster.sh
===================================================================
RCS file: /usr/local/cvsroot/slony1/slony1-engine/tools/check_slony_cluster.sh,v
retrieving revision 1.1
retrieving revision 1.2
diff -Ltools/check_slony_cluster.sh -Ltools/check_slony_cluster.sh -u -w -r1.1 -r1.2
--- tools/check_slony_cluster.sh
+++ tools/check_slony_cluster.sh
@@ -11,21 +11,25 @@
# script requires two parameters:
# CLUSTERNAME - name of slon cluster to be checked
# DBNAME - name of master database
+# DBHOST - host name of master database
+#
+# It also depends on PGPORT being set to the appropriate port
#
# Author: John Sidney-Woollett
# Created: 26-Feb-2005
# Copyright 2005
# check parameters are valid
-if [[ $# -ne 2 ]]
+if [[ $# -ne 3 ]]
then
- echo "Invalid parameters need CLUSTERNAME DBNAME"
+ echo "Invalid parameters need CLUSTERNAME DBNAME DBHOST"
exit 2
fi
# assign parameters
CLUSTERNAME=$1
DBNAME=$2
+DBHOST=$3
# setup the query to check the replication status
SQL="select case
@@ -35,16 +39,16 @@
from (
-- determine total active receivers
select (select count(distinct sub_receiver)
- from _$CLUSTERNAME.sl_subscribe
+ from \"_$CLUSTERNAME\".sl_subscribe
where sub_active = true) as ttlcount,
(
-- determine active nodes syncing within 10 seconds
select count(*) from (
select st_received, st_last_received_ts - st_last_event_ts as cfmdelay
- from _$CLUSTERNAME.sl_status
+ from \"_$CLUSTERNAME\".sl_status
where st_received in (
select distinct sub_receiver
- from _$CLUSTERNAME.sl_subscribe
+ from \"_$CLUSTERNAME\".sl_subscribe
where sub_active = true
)
) as t1
@@ -52,8 +56,7 @@
) as t2"
# query the master database
-CHECK=`/usr/local/pgsql/bin/psql -c "$SQL" --tuples-only -U postgres
-$DBNAME`
+CHECK=`psql -c "$SQL" --tuples-only -U postgres -h $DBHOST $DBNAME`
if [ ! -n "$CHECK" ]
then
From cvsuser Mon Mar 7 14:54:27 2005
From: cvsuser (CVS User Account)
Date: Tue Feb 13 08:58:15 2007
Subject: [Slony1-commit] By cbbrowne: There's a minor typo in
src/slon/confoptions.h that
Message-ID: <20050307145425.C1458B1D2F7@gborg.postgresql.org>
Log Message:
-----------
There's a minor typo in src/slon/confoptions.h that confuses
Syslog_ident and Syslog_facility - this patch should correct it.
-- Korry
korryd@hotmail.com
Modified Files:
--------------
slony1-engine/src/slon:
confoptions.h (r1.15 -> r1.16)
-------------- next part --------------
Index: confoptions.h
===================================================================
RCS file: /usr/local/cvsroot/slony1/slony1-engine/src/slon/confoptions.h,v
retrieving revision 1.15
retrieving revision 1.16
diff -Lsrc/slon/confoptions.h -Lsrc/slon/confoptions.h -u -w -r1.15 -r1.16
--- src/slon/confoptions.h
+++ src/slon/confoptions.h
@@ -292,7 +292,7 @@
"LOCAL4, LOCAL5, LOCAL6, LOCAL7."),
SLON_C_STRING
},
- &Syslog_ident,
+ &Syslog_facility,
"LOCAL0"
},
{
From cvsuser Mon Mar 7 19:31:08 2005
From: cvsuser (CVS User Account)
Date: Tue Feb 13 08:58:15 2007
Subject: [Slony1-commit] By cbbrowne: Added scanner support for include and
define Flex scanner
Message-ID: <20050307193105.882DCB1D30F@gborg.postgresql.org>
Log Message:
-----------
Added scanner support for include and define
Flex scanner code as per Korry
cbbrowne added a ducttape script to test this facility as well as
documentation. Due to the addition of this facility, the documentation
describing the use of M4 for similar purpose has been dropped.
Modified Files:
--------------
slony1-engine/doc/adminguide:
failover.sgml (r1.12 -> r1.13)
help.sgml (r1.12 -> r1.13)
slonik_ref.sgml (r1.17 -> r1.18)
usingslonik.sgml (r1.7 -> r1.8)
slony1-engine/src/slonik:
scan.l (r1.22 -> r1.23)
Added Files:
-----------
slony1-engine/src/ducttape:
test_7_defines (r1.1)
-------------- next part --------------
Index: slonik_ref.sgml
===================================================================
RCS file: /usr/local/cvsroot/slony1/slony1-engine/doc/adminguide/slonik_ref.sgml,v
retrieving revision 1.17
retrieving revision 1.18
diff -Ldoc/adminguide/slonik_ref.sgml -Ldoc/adminguide/slonik_ref.sgml -u -w -r1.17 -r1.18
--- doc/adminguide/slonik_ref.sgml
+++ doc/adminguide/slonik_ref.sgml
@@ -83,17 +83,120 @@
Those commands are grouped together into one transaction
per participating node.
-
+
+
+
+ Slonik Meta Commands
+
+
+ The following commands may be used to somewhat abstract the
+ definitions of components of Slonik scripts; grouping configuration into central files
+ that may be reused, and allowing
+ mnemonic identifiers to replace cryptic numeric object IDs.
+
+
+
+ INCLUDE
+
+ INCLUDE
+
+ pulling in slonik code from another file
+
+
+
+ include
+ <pathname>
+
+
+
+ Description
+
+ This draws the specified slonik script inline into the present
+ script. If the pathname specifies a relative
+ path, slonik will search relative to the
+ current working directory.
+
+
+
+ Nested include files are supported. The scanner and parser
+ report the proper file names and line numbers when they run into
+ an error.
+
+ Example
+
+ include </tmp/preamble.slonik>;
+
+
+
+
+ DEFINE
+
+ DEFINE
+
+ Defining a named symbol
+
+
+
+ define
+ name
+ value
+
+
+
+ Description
+
+ This defines a named symbol. Symbol names must follow the
+ slonik rules for constructing identifiers, by starting with a
+ letter, followed by letters, numbers, and underscores.
+
+
+
+ Symbol values may contain spaces and may recursively contain
+ symbol references.
+
+
+
+ Symbols are referenced by using a @ followed by
+ the symbol name. Note that symbol referencing is suppressed
+ inside string literals.
+
+
+ Example
+
+define cluster movies;
+define sakai 1;
+define chen 2;
+define fqn fully qualified name;
+
+cluster name = @cluster;
+node @sakai admin conninfo = 'service=sakai-replication';
+node @chen admin conninfo = 'service=chen-replication';
+define setMovies id = 1;
+define sakaiMovies @setMovies, origin = @sakai;
+
+create set ( @sakaiMovies, comment = 'movies' );
+
+set add table( set @sakaiMovies, id = 1, @fqn = 'public.customers', comment = 'sakai customers' );
+set add table( set @sakaiMovies, id = 2, @fqn = 'public.tapes', comment = 'sakai tapes' );
+echo 'But @sakaiMovies will display as a string, and is not expanded';
+
+
+
+
+
+
+
Slonik Preamble Commands
- The following commands must appear as a
- preamble at the very top of every
- slonik command script. They do not
- cause any direct action on any of the nodes in the replication
- system, but affect the execution of the entire script.
+ The following commands must appear as a preamble at
+ the beginning of each slonik command
+ script. They do not cause any direct action on any of the nodes in
+ the replication system, but affect the execution of the entire
+ script.
Index: help.sgml
===================================================================
RCS file: /usr/local/cvsroot/slony1/slony1-engine/doc/adminguide/help.sgml,v
retrieving revision 1.12
retrieving revision 1.13
diff -Ldoc/adminguide/help.sgml -Ldoc/adminguide/help.sgml -u -w -r1.12 -r1.13
--- doc/adminguide/help.sgml
+++ doc/adminguide/help.sgml
@@ -38,7 +38,22 @@
If your Russian is much better than your English,
then
KirovOpenSourceCommunity: Slony may be the place to
-go.
+go.
+
+ pgpool
+
+pgpool is a connection pool server
+for &postgres;; it allows an application to connect to it as if it
+were a standard &postgres; server. It caches connections, which
+reduces the overhead involved in establishing them. It supports a
+scheduled switchover feature, which would allow
+dynamically switching over from one server to another. That would be
+very useful when doing a MOVE SET, as it would
+allow applications to be switched to point to the new origin without
+needing to update their configuration.
+
+
Other Information Sources
Index: usingslonik.sgml
===================================================================
RCS file: /usr/local/cvsroot/slony1/slony1-engine/doc/adminguide/usingslonik.sgml,v
retrieving revision 1.7
retrieving revision 1.8
diff -Ldoc/adminguide/usingslonik.sgml -Ldoc/adminguide/usingslonik.sgml -u -w -r1.7 -r1.8
--- doc/adminguide/usingslonik.sgml
+++ doc/adminguide/usingslonik.sgml
@@ -32,8 +32,9 @@
address.
Users seem interested in wrapping everything possible
-in TRY blocks, which is regrettably
-less useful than might be imagined...
+in TRY blocks, which is regrettably somewhat
+less useful than might be
+hoped...
@@ -43,11 +44,9 @@
Named nodes, named sets
- Unfortunately, the use of naming nearly turns into a need for
-an ESP protocol, as slonik
-would need to start by determining the (possibly
-in flux) set of mappings between node names and node
-IDs.
+ This is supported by the (new in 1.1) INCLUDE and DEFINE statements.
+ Looping and control constructs
@@ -64,10 +63,6 @@
- Some sort of text rewriting system such as M4 may be
-used to map mnemonic object names onto the perhaps-less-intuitive
-numeric arrangement.
-
Embedding generation of slonik inside shell
scripts
@@ -83,111 +78,6 @@
- Using m4 to rewrite slonik scripts
-
- This needs to be prefaced with something of a warning, from the
-GNU M4 documentation:
-
- Some people found `m4' to be fairly addictive. They
-first use `m4' for simple problems, then take bigger and bigger
-challenges, learning how to write complex `m4' sets of macros along
-the way. Once really addicted, users pursue writing of sophisticated
-`m4' applications even to solve simple problems, devoting more time
-debugging their `m4' scripts than doing real work. Beware that `m4'
-may be dangerous for the health of compulsive
-programmers.
-
- This being said, m4 has three
-significant merits over other text rewriting systems (such as
-cpp, the C preprocessor):
-
-
-
- Like slonik, m4 uses # to indicate
-comments, with the result that it may be quietly used to do rewrites
-on slonik scripts.
-
- Using cpp would require changing
-over to use C or C++ style comments.
-
- m4 is reasonably
-ubiquitous, being available in environments like
-Solaris and AIX
-even when they do not have compiler tools for C available. Its
-presence is commonly mandated by the presence of
-Sendmail.
-
-
- A potential merit over
-cpp is that m4
-can do more than just rewrite symbols. It has control structures, can
-store data in variables, and can loop.
-
- Of course, down that road lies the addictions warned of above,
-as well as the complexity challenges of
-sendmail.cf. As soon as you discover you need
-things like loops and variables, it is quite likely that you want to
-write a slonik generator in your favorite scripting language, whether
-that be Bourne Shell, Perl, or Tcl. Fans of more esoteric languages like
-Icon, Snobol, or Scheme will have to fight their own battles to get
-those deemed to be reasonable choices for best
-practices.
-
-
- An m4 example
-
- Without further ado, here is an example where you set up a
-central file, cluster.m4 containing some M4
-rewrite rules:
-
-define(`node_srvrds005', `1')
-define(`node_srvrds004', `4')
-define(`node_srvrds003', `3')
-define(`node_srvrds007', `78')
-define(`ds501', `501')
-
-
- In view of those node name definitions, you may write a Slonik
-script to initialize the cluster as follows, setup_cluster.slonik:
-
-
-node node_srvrds005 admin conninfo 'dsn=foo';
-node node_srvrds004 admin conninfo 'dsn=bar';
-node node_srvrds003 admin conninfo 'dsn=foo';
-node node_srvrds007 admin conninfo 'dsn=foo';
-node ds501 admin conninfo 'dsn=foo';
-
-create cluster info (id=node_srvrds005);
-store node (id=node_srvrds004, comment='Node on ds004', spool='f');
-store node (id=node_srvrds003, comment='Node on ds003', spool='f');
-store node (id=node_srvrds007, comment='Node on ds007', spool='t');
-store node (id=ds501, comment='Node on ds-501', spool='f');
-
-
- You then run the rewrite rules on the script, thus:
-
- % m4 cluster.m4 setup_cluster.slonik
- And receive the following output:
-
-node 1 admin conninfo 'dsn=foo';
-node 4 admin conninfo 'dsn=bar';
-node 3 admin conninfo 'dsn=foo';
-node 78 admin conninfo 'dsn=foo';
-node 501 admin conninfo 'dsn=foo';
-
-create cluster info (id=1);
-store node (id=4, comment='Node on ds004', spool='f');
-store node (id=3, comment='Node on ds003', spool='f');
-store node (id=78, comment='Node on ds007', spool='t');
-store node (id=501, comment='Node on ds-501', spool='f');
-
-
- This makes no attempt to do anything smarter,
-such as to try to create the nodes via a loop that maps across a list
-of nodes. As mentioned earlier, if you wish to do such things, it is
-highly preferable to do this by using a scripting language like the
-Bourne Shell or Perl.
-
Embedding Slonik in Shell Scripts As mentioned earlier, there are numerous &slony1; test scripts
@@ -251,9 +141,13 @@
The PREAMBLE value could then be reused over and
over again if the shell script invokes slonik
-multiple times.
+multiple times. You might also consider using and place the preamble in a file that is
+included.
+
- It also becomes simple to assign names to sets and nodes:
+ Shell variables provide a simple way to assign names to sets
+and nodes:
origin=1
@@ -331,11 +225,18 @@
scripting language supports things like:
-Record data structures that allow assigning things in parallel
- Functions, procedures, or subroutines, allowing you to implement
-useful functionality once, and then refer to it multiple times within a script
- Some sort of module import system so that common functionality
-can be shared across many scripts
+
+Record data structures that allow
+assigning things in parallel
+
+ Functions, procedures, or subroutines, allowing you
+to implement useful functionality once, and then refer to it multiple
+times within a script
+
+ Some sort of module import system so
+that common functionality can be shared across many
+scripts
+
If you can depend on having Korn shell available, well,
those are all shells with extensions supporting reasonably
sophisticated data structures and module systems. On Linux, Bash is
-fairly ubiquitous; on commercial UNIX, Korn shell is
-fairly ubiquitous; on BSD, sophisticated shells are an
-option rather than a default.
+fairly ubiquitous; on commercial UNIX, Korn
+shell is fairly ubiquitous; on BSD, sophisticated
+shells are an option rather than a default. At that point, it makes sense to start looking at other
scripting languages, of which Perl is the most ubiquitous, being
Index: failover.sgml
===================================================================
RCS file: /usr/local/cvsroot/slony1/slony1-engine/doc/adminguide/failover.sgml,v
retrieving revision 1.12
retrieving revision 1.13
diff -Ldoc/adminguide/failover.sgml -Ldoc/adminguide/failover.sgml -u -w -r1.12 -r1.13
--- doc/adminguide/failover.sgml
+++ doc/adminguide/failover.sgml
@@ -60,8 +60,9 @@
switched roles completely. After reconfiguring the web application (or
-pgpool) to connect to the database on node2, the web
-server is restarted and resumes normal operation.
+ pgpool ) to
+connect to the database on node2, the web server is restarted and
+resumes normal operation.
Done in one shell script, that does the application shutdown,
slonik, move config files and startup all
--- /dev/null
+++ src/ducttape/test_7_defines
@@ -0,0 +1,308 @@
+#!/bin/sh
+
+# **********
+# test_7_defines
+#
+# This test script creates a standalone pgbench database
+# as slony_test1 and then:
+#
+# - initializes a primary node and starts the node daemon
+# - creates a set containing all 4 pgbench tables
+# - creates a second database as slony_test2
+# - adds database slony_test2 to the system
+# - starts the second replication daemon
+# - creates the pgbench tables (schema only)
+# - subscribes the replication set from the primary node
+#
+# The nature of the test has to do with the use of the new slonik
+# "define" and "include" commands.
+# **********
+
+export PATH
+TMPOUT=/tmp/output.$$
+DB1=slony_test1
+DB2=slony_test2
+
+PGBENCH_SCALE=1
+PGBENCH_CLIENTS=5
+PGBENCH_TRANS=`expr 30000 / $PGBENCH_CLIENTS`
+
+trap '
+ echo ""
+ echo "**** user abort"
+ if [ ! -z $pgbench_pid ] ; then
+ echo "**** killing pgbench"
+ kill -15 $pgbench_pid
+ fi
+ if [ ! -z $slon1_pid ] ; then
+ echo "**** killing node daemon 1"
+ kill -15 $slon1_pid
+ fi
+ if [ ! -z $slon2_pid ] ; then
+ echo "**** killing node daemon 2"
+ kill -15 $slon2_pid
+ fi
+ exit 1
+' 2 15
+
+######################################################################
+# Preparations ... create a standalone pgbench database and
+# have the "application" (pgbench) running.
+######################################################################
+
+#####
+# Make sure the install is up to date
+#####
+WGM=`which gmake`
+if [ -z $WGM ] ; then
+ MAKE=make
+ CGNU=`make -v | grep GNU`
+ if [ -z $CGNU ] ; then
+ echo "GNU Make not found - please install GNU Make"
+ exit 1
+ fi
+else
+ MAKE=gmake
+fi
+echo -n "**** running 'make install' in src directory ... "
+if ! ${MAKE} -C .. install >$TMPOUT 2>&1 ; then
+ echo "failed"; cat $TMPOUT; rm $TMPOUT; exit 1
+fi
+echo "done"
+rm $TMPOUT
+
+PREAMBLE_FILE=/tmp/preamble.$$
+cat > $PREAMBLE_FILE <<EOF
+define origin 11;
+define sub1 22;
+cluster name = T1;
+node @origin admin conninfo='dbname=$DB1';
+node @sub1 admin conninfo='dbname=$DB2';
+EOF
+
+
+
+#####
+# Remove old databases, if they exist
+#####
+echo "**** remove old test databases"
+dropdb $DB1 || echo "**** ignored"
+sleep 1
+dropdb $DB2 || echo "**** ignored"
+sleep 1
+
+#####
+# Create the "Primary Node"
+#####
+echo "**** creating database for Node 11"
+
+createdb $DB1 || exit 1
+pgbench -i -s $PGBENCH_SCALE $DB1
+pg_dump -s $DB1 >pgbench_schema.sql
+
+#####
+# Start pgbench in the background and give it rampup time
+#####
+pgbench -n -s $PGBENCH_SCALE -c $PGBENCH_CLIENTS -t $PGBENCH_TRANS $DB1 &
+pgbench_pid=$!
+echo "**** pgbench is running in background with pid $pgbench_pid"
+echo -n "**** sleeping 10 seconds to give pgbench time for rampup ... "
+sleep 10
+echo "done"
+
+echo ""
+echo "**********************************************************************"
+echo "**** $DB1 is now a standalone database with a running pgbench"
+echo "**********************************************************************"
+echo ""
+
+######################################################################
+# Setup DB1 as the primary cluster T1 node, start the node daemon,
+# and create a replication set containing the pgbench tables.
+######################################################################
+
+echo "**** initializing $DB1 as Primary Node for Slony-I cluster T1"
+slonik <<_EOF_
+ include <$PREAMBLE_FILE>;
+ init cluster (id = @origin, comment = 'Node @origin');
+ echo 'Database $DB1 initialized as Node 11';
+_EOF_
+if [ $? -ne 0 ] ; then
+ kill $pgbench_pid;
+ exit 1
+fi
+
+echo "**** starting the Slony-I node deamon for $DB1"
+xterm -title "Slon node 11" -e sh -c "slon -d2 -s500 -g10 T1 dbname=$DB1; echo -n 'Enter>'; read line" &
+slon1_pid=$!
+echo "slon[$slon1_pid] on dbname=$DB1"
+
+echo "**** creating a replication set containing the 4 pgbench tables ... "
+slonik <<_EOF_
+ include <$PREAMBLE_FILE>;
+ try {
+ table add key (node id = @origin, fully qualified name = 'public.history');
+ }
+ on error {
+ exit 1;
+ }
+
+ try {
+ create set (id = 1, origin = @origin, comment = 'Set 1 - pgbench tables');
+ set add table (set id = 1, origin = @origin,
+ id = 1, fully qualified name = 'public.accounts',
+ comment = 'Table accounts');
+ set add table (set id = 1, origin = @origin,
+ id = 2, fully qualified name = 'public.branches',
+ comment = 'Table branches');
+ set add table (set id = 1, origin = @origin,
+ id = 3, fully qualified name = 'public.tellers',
+ comment = 'Table tellers');
+ set add table (set id = 1, origin = @origin,
+ id = 4, fully qualified name = 'public.history',
+ key = serial, comment = 'Table accounts');
+ }
+ on error {
+ exit 1;
+ }
+_EOF_
+
+if [ $? -ne 0 ] ; then
+ echo "failed"
+ kill $pgbench_pid 2>/dev/null
+ kill $slon1_pid 2>/dev/null
+ cat $TMPOUT
+ rm $TMPOUT
+ exit 1
+fi
+echo "**** set created"
+
+#####
+# Check that pgbench is still running
+#####
+if ! kill -0 $pgbench_pid 2>/dev/null ; then
+ echo "**** pgbench terminated ???"
+ kill $slon1_pid 2>/dev/null
+ exit 1
+fi
+
+echo ""
+echo "**********************************************************************"
+echo "**** $DB1 is now the Slony-I origin for set 1"
+echo "**********************************************************************"
+echo ""
+
+######################################################################
+# Setup DB2 as a subscriber node and let it subscribe the replication
+# set of the running pgbench
+######################################################################
+echo "**** creating database for node 22"
+if ! createdb $DB2 ; then
+ kill $pgbench_pid 2>/dev/null
+ kill $slon1_pid 2>/dev/null
+ exit 1
+fi
+
+echo "**** initializing $DB2 as node 22 of Slony-I cluster T1"
+slonik <<_EOF_
+ include <$PREAMBLE_FILE>;
+ echo 'Creating node 22';
+ try {
+ store node (id = @sub1, comment = 'node @sub1', event node = @origin);
+ } on error {
+ echo 'could not establish node @sub1';
+ exit -1;
+ }
+ try {
+ store path (server = @origin, client = @sub1, conninfo = 'dbname=$DB1');
+ store path (server = @sub1, client = @origin, conninfo = 'dbname=$DB2');
+ }
+ on error {
+ echo 'could not establish paths between @origin and @sub1';
+ exit -1;
+ }
+ echo 'Database $DB2 added as node @sub1';
+_EOF_
+if [ $? -ne 0 ] ; then
+ kill $pgbench_pid 2>/dev/null
+ kill $slon1_pid 2>/dev/null
+ exit 1
+fi
+
+echo "**** starting the Slony-I node deamon for $DB1"
+xterm -title "Slon node 22" -e sh -c "slon -d2 -s10000 -o10000 -g10 T1 dbname=$DB2; echo -n 'Enter>'; read line" &
+slon2_pid=$!
+echo "slon[$slon2_pid] on dbname=$DB2"
+
+#####
+# Check that pgbench is still running
+#####
+if ! kill -0 $pgbench_pid 2>/dev/null ; then
+ echo "**** pgbench terminated ???"
+ kill $slon1_pid 2>/dev/null
+ exit 1
+fi
+
+######################################################################
+# And now comes the moment where the big elephant starts to pee
+# and the attendants in the first row climb on their chairs ...
+######################################################################
+echo "**** creating pgbench tables and subscribing node 22 to set 1"
+(
+ cat pgbench_schema.sql
+) | psql -q $DB2
+slonik <<_EOF_
+ include <$PREAMBLE_FILE>;
+ subscribe set ( id = 1, provider = @origin, receiver = @sub1, forward = yes );
+_EOF_
+
+echo ""
+echo "**********************************************************************"
+echo "**** $DB2 should now be copying data and attempting to catch up."
+echo "**********************************************************************"
+echo ""
+
+echo -n "**** waiting for pgbench to finish "
+while kill -0 $pgbench_pid 2>/dev/null ; do
+ echo -n "."
+ sleep 10
+done
+echo "**** pgbench finished"
+echo "**** please terminate the replication engines when caught up."
+wait $slon1_pid
+wait $slon2_pid
+
+kill $pgbench_pid 2>/dev/null
+kill $slon1_pid 2>/dev/null
+kill $slon2_pid 2>/dev/null
+
+echo -n "**** comparing databases ... "
+psql $DB1 >dump.tmp.1.$$ <<_EOF_
+ select 'accounts:'::text, aid, bid, abalance, filler
+ from accounts order by aid;
+ select 'branches:'::text, bid, bbalance, filler
+ from branches order by bid;
+ select 'tellers:'::text, tid, bid, tbalance, filler
+ from tellers order by tid;
+ select 'history:'::text, tid, bid, aid, delta, mtime, filler,
+ "_Slony-I_T1_rowID" from history order by "_Slony-I_T1_rowID";
+_EOF_
+psql $DB2 >dump.tmp.2.$$ <<_EOF_
+ select 'accounts:'::text, aid, bid, abalance, filler
+ from accounts order by aid;
+ select 'branches:'::text, bid, bbalance, filler
+ from branches order by bid;
+ select 'tellers:'::text, tid, bid, tbalance, filler
+ from tellers order by tid;
+ select 'history:'::text, tid, bid, aid, delta, mtime, filler,
+ "_Slony-I_T1_rowID" from history order by "_Slony-I_T1_rowID";
+_EOF_
+
+if diff dump.tmp.1.$$ dump.tmp.2.$$ >test_1.diff ; then
+ echo "success - databases are equal."
+ rm dump.tmp.?.$$
+ rm test_1.diff
+else
+ echo "FAILED - see test_1.diff for database differences"
+fi
+rm $PREAMBLE_FILE
Index: scan.l
===================================================================
RCS file: /usr/local/cvsroot/slony1/slony1-engine/src/slonik/scan.l,v
retrieving revision 1.22
retrieving revision 1.23
diff -Lsrc/slonik/scan.l -Lsrc/slonik/scan.l -u -w -r1.22 -r1.23
--- src/slonik/scan.l
+++ src/slonik/scan.l
@@ -10,6 +10,30 @@
* $Id$
*-------------------------------------------------------------------------
*/
+struct __yy_buffer
+{
+ YY_BUFFER_STATE buffer; /* lexer buffer to restore on pop */
+ long lineno; /* line number to restore on pop */
+ char * fileName; /* file name to restore on pop */
+ struct __yy_buffer * prev; /* pointer to previous stack frame */
+} * yy_buffer = NULL;
+
+typedef struct _symbol
+{
+ char * name; /* Name of symbol with % prepended */
+ char * value; /* Value of symbol */
+ struct _symbol * next; /* Pointer to next symbol */
+} symbol;
+
+static symbol * symbols; /* Head of list of symbols */
+
+static char * getSymbol( const char * name ); /* Return a symbol's value (or NULL)*/
+static void addSymbol( char * str ); /* Add a new symbol */
+static void freeSymbols( void ); /* Free all symbols */
+static void pushBuffer( char * context ); /* Push a new lexer buffer on stack */
+static void popBuffer( void ); /* Pop previous lexer buffer */
+
+extern char * current_file;
%}
%option 8bit
@@ -18,12 +42,14 @@
%option yylineno
%option case-insensitive
+%x incl define
%x IN_STRING
digit [0-9]
ident_start [A-Za-z\200-\377_]
ident_cont [A-Za-z\200-\377_0-9\$]
space [ \t\n\r\f]
+quote '
/*
quoted_ident (\"[^\"]*\")+
@@ -37,6 +63,9 @@
BEGIN(INITIAL);
+include{space}* { BEGIN(incl); }
+define{space}* { BEGIN(define); }
+
add { return K_ADD; }
admin { return K_ADMIN; }
all { return K_ALL; }
@@ -126,6 +155,44 @@
}
[^'\\]+ {}
+{identifier}{space}+.*";" { addSymbol( yytext ); BEGIN(INITIAL); }
+
+@{identifier} {
+ char * value = getSymbol( yytext );
+
+ if( value )
+ {
+ pushBuffer( strdup( current_file ));
+ yy_scan_string( value );
+ }
+ }
+
+\<[^\>]+\>{space}*";"? {
+
+ char * fileName = strdup( yytext + 1 ); /* Skip '<' */
+
+ *strchr( fileName, '>' ) = '\0'; /* Trim '>' */
+
+ pushBuffer( fileName );
+
+ if(( yyin = fopen( fileName, "r" )) == NULL )
+ {
+ fprintf( stderr, "Include file (%s) not found\n", fileName );
+ exit( 1 );
+ }
+
+ yy_switch_to_buffer( yy_create_buffer( yyin, YY_BUF_SIZE ));
+
+ BEGIN(INITIAL);
+ }
+
+<<EOF>> {
+ if( yy_buffer == NULL )
+ yyterminate();
+ else
+ popBuffer();
+ }
+
#[^\r\n]* ;
. { return yytext[0]; }
@@ -151,8 +218,108 @@
yy_switch_to_buffer(yy_scan_string(dstring_data(&ds)));
dstring_free(&ds);
+
+ freeSymbols();
}
+static void pushBuffer( char * context )
+{
+ struct __yy_buffer * yb = malloc( sizeof( *yb ));
+
+ yb->buffer = YY_CURRENT_BUFFER;
+ yb->lineno = yylineno;
+ yb->fileName = strdup( current_file );
+ yb->prev = yy_buffer;
+
+ yy_buffer = yb;
+
+ current_file = context;
+ yylineno = 1;
+}
+
+static void popBuffer( void )
+{
+ struct __yy_buffer * yb = yy_buffer;
+
+ if( yyin != NULL )
+ fclose( yyin );
+
+ yy_delete_buffer( YY_CURRENT_BUFFER );
+ yy_switch_to_buffer( yy_buffer->buffer );
+
+ free( current_file );
+
+ current_file = yy_buffer->fileName;
+ yylineno = yy_buffer->lineno;
+
+ yy_buffer = yy_buffer->prev;
+
+ free( yb );
+}
+
+static void addSymbol( char * str )
+{
+ char * name = str;
+ char * value = str;
+ symbol * sym = NULL;
+
+ while( *value != ' ' && *value != '\t' )
+ value++;
+
+ *(value++) = '\0';
+
+ while( *value == ' ' || *value == '\t' )
+ value++;
+
+ value[strlen(value) - 1 ] = '\0';
+
+ sym = malloc( sizeof( *sym ));
+ sym->value = strdup( value );
+ sym->next = NULL;
+
+ /* Store the symbol name in searchable form with a leading @ */
+
+ sym->name = malloc( strlen( name ) + 1 + 1 );
+
+ sym->name[0] = '@';
+ strcpy( sym->name+1, name );
+
+ if( symbols != NULL )
+ sym->next = symbols;
+
+ symbols = sym;
+}
+
+static char * getSymbol( const char * name )
+{
+ symbol * sym;
+
+ for( sym = symbols; sym; sym = sym->next )
+ {
+ if( strcmp( name, sym->name ) == 0 )
+ return( sym->value );
+ }
+
+ return( NULL );
+}
+
+static void freeSymbols( void )
+{
+ symbol * sym = symbols;
+
+ while( sym )
+ {
+ symbol * victim = sym;
+
+ sym = sym->next;
+
+ free( victim->name );
+ free( victim->value );
+ free( victim );
+ }
+
+ symbols = NULL;
+}
/*
* Local Variables:
* tab-width: 4
From cvsuser Mon Mar 7 23:27:08 2005
From: cvsuser (CVS User Account)
Date: Tue Feb 13 08:58:15 2007
Subject: [Slony1-commit] By cbbrowne: Major changes to log shipping,
allowing it to support
Message-ID: <20050307232705.EB038B1D2F8@gborg.postgresql.org>
Log Message:
-----------
Major changes to log shipping, allowing it to support substantially
all events (to the degree supportible). Notably, COPY_SET now
copies the contents of tables in newly subscribed sets.
Also includes a new event, ACCEPT_SET, which addresses a race condition
where updates might be lost. If ACCEPT_SET is received before the
MOVE_SET has been processed, then the slon will wait until it has
received both.
Modified Files:
--------------
slony1-engine/src/backend:
slony1_funcs.sql (r1.55 -> r1.56)
slony1-engine/src/slon:
cleanup_thread.c (r1.19 -> r1.20)
remote_worker.c (r1.76 -> r1.77)
slon.h (r1.45 -> r1.46)
Added Files:
-----------
slony1-engine/src/ducttape:
test_8_logship (r1.1)
-------------- next part --------------
Index: slony1_funcs.sql
===================================================================
RCS file: /usr/local/cvsroot/slony1/slony1-engine/src/backend/slony1_funcs.sql,v
retrieving revision 1.55
retrieving revision 1.56
diff -Lsrc/backend/slony1_funcs.sql -Lsrc/backend/slony1_funcs.sql -u -w -r1.55 -r1.56
--- src/backend/slony1_funcs.sql
+++ src/backend/slony1_funcs.sql
@@ -1877,6 +1877,21 @@
end loop;
end if;
+ -- On the new origin, raise an event - ACCEPT_SET
+ if v_local_node_id = p_new_origin then
+ -- Find the event number from the origin
+ select max(ev_seqno) as seqno into v_sub_row
+ from @NAMESPACE@.sl_event
+ where ev_type = ''MOVE_SET'' and
+ ev_data1 = p_set_id and
+ ev_data2 = p_old_origin and
+ ev_data3 = p_new_origin and
+ ev_origin = p_old_origin;
+
+ perform @NAMESPACE@.createEvent(''_@CLUSTERNAME@'', ''ACCEPT_SET'',
+ p_set_id, p_old_origin, p_new_origin, v_sub_row.seqno);
+ end if;
+
-- ----
-- Next we have to reverse the subscription path
-- ----
@@ -4894,6 +4909,36 @@
--
-- Called by slonik during the function upgrade process.
-- ----------------------------------------------------------------------
+create or replace function @NAMESPACE@.add_missing_table_field (text, text, text, text)
+returns bool as '
+DECLARE
+ p_namespace alias for $1;
+ p_table alias for $2;
+ p_field alias for $3;
+ p_type alias for $4;
+ v_row record;
+ v_query text;
+BEGIN
+ select 1 into v_row from pg_namespace n, pg_class c, pg_attribute a
+ where quote_ident(n.nspname) = p_namespace and
+ c.relnamespace = n.oid and
+ quote_ident(c.relname) = p_table and
+ a.attrelid = c.oid and
+ quote_ident(a.attname) = p_field;
+ if not found then
+ raise notice ''Upgrade table %.% - add field %'', p_namespace, p_table, p_field;
+ v_query := ''alter table '' || p_namespace || ''.'' || p_table || '' add column '';
+ v_query := v_query || p_field || '' '' || p_type || '';'';
+ execute v_query;
+ return ''t'';
+ else
+ return ''f'';
+ end if;
+END;' language plpgsql;
+
+comment on function @NAMESPACE@.add_missing_table_field (text, text, text, text)
+is 'Add a column of a given type to a table if it is missing';
+
create or replace function @NAMESPACE@.upgradeSchema(text)
returns text as '
@@ -4945,7 +4990,6 @@
execute ''alter table @NAMESPACE@.sl_node add column no_spool boolean'';
update @NAMESPACE@.sl_node set no_spool = false;
end if;
-
return p_old;
end;
' language plpgsql;
@@ -4990,6 +5034,6 @@
where con_origin = @NAMESPACE@.getLocalNodeId('_@CLUSTERNAME@')
group by 1, 2
);
-comment on view @NAMESPACE@.sl_status is 'View showing how far behind remote nodes are.
-';
+
+comment on view @NAMESPACE@.sl_status is 'View showing how far behind remote nodes are.';
--- /dev/null
+++ src/ducttape/test_8_logship
@@ -0,0 +1,310 @@
+#!/bin/sh
+# $Id: test_8_logship,v 1.1 2005/03/07 23:27:02 cbbrowne Exp $
+# **********
+# test_8_logship
+#
+# This test script creates a standalone pgbench database
+# as slony_test1 and then:
+#
+# - initializes a primary node and starts the node daemon
+# - creates a set containing all 4 pgbench tables
+# - creates a second database as slony_test2
+# - adds database slony_test2 to the system
+# - starts the second replication daemon
+# - creates the pgbench tables (schema only)
+# - subscribes the replication set from the primary node
+#
+# The nature of the test has to do with the use of the new slonik
+# log shipping functionality...
+# **********
+
+export PATH
+TMPOUT=/tmp/output.$$
+LOGSHIPDIR=/tmp/logs.$$
+mkdir -p $LOGSHIPDIR
+DB1=slony_test1
+DB2=slony_test2
+CLUSTERNAME=T1
+PGBENCH_SCALE=1
+PGBENCH_CLIENTS=5
+PGBENCH_TRANS=`expr 30000 / $PGBENCH_CLIENTS`
+DEBUGLEVEL=4
+
+trap '
+ echo ""
+ echo "**** user abort"
+ if [ ! -z $pgbench_pid ] ; then
+ echo "**** killing pgbench"
+ kill -15 $pgbench_pid
+ fi
+ if [ ! -z $slon1_pid ] ; then
+ echo "**** killing node daemon 1"
+ kill -15 $slon1_pid
+ fi
+ if [ ! -z $slon2_pid ] ; then
+ echo "**** killing node daemon 2"
+ kill -15 $slon2_pid
+ fi
+ exit 1
+' 2 15
+
+######################################################################
+# Preparations ... create a standalone pgbench database and
+# have the "application" (pgbench) running.
+######################################################################
+
+#####
+# Make sure the install is up to date
+#####
+WGM=`which gmake`
+if [ -z $WGM ] ; then
+ MAKE=make
+ CGNU=`make -v | grep GNU`
+ if [ -z $CGNU ] ; then
+ echo "GNU Make not found - please install GNU Make"
+ exit 1
+ fi
+else
+ MAKE=gmake
+fi
+echo -n "**** running 'make install' in src directory ... "
+if ! ${MAKE} -C .. install >$TMPOUT 2>&1 ; then
+ echo "failed"; cat $TMPOUT; rm $TMPOUT; exit 1
+fi
+echo "done"
+rm $TMPOUT
+
+PREAMBLE_FILE=/tmp/preamble.$$
+cat > $PREAMBLE_FILE <<EOF
+define origin 11;
+define sub1 22;
+cluster name = $CLUSTERNAME;
+node @origin admin conninfo='dbname=$DB1';
+node @sub1 admin conninfo='dbname=$DB2';
+EOF
+
+
+#####
+# Remove old databases, if they exist
+#####
+echo "**** remove old test databases"
+dropdb $DB1 || echo "**** ignored"
+sleep 1
+dropdb $DB2 || echo "**** ignored"
+sleep 1
+
+#####
+# Create the "Primary Node"
+#####
+echo "**** creating database for Node 11"
+
+createdb $DB1 || exit 1
+pgbench -i -s $PGBENCH_SCALE $DB1
+pg_dump -s $DB1 >pgbench_schema.sql
+
+#####
+# Start pgbench in the background and give it rampup time
+#####
+pgbench -n -s $PGBENCH_SCALE -c $PGBENCH_CLIENTS -t $PGBENCH_TRANS $DB1 &
+pgbench_pid=$!
+echo "**** pgbench is running in background with pid $pgbench_pid"
+echo -n "**** sleeping 10 seconds to give pgbench time for rampup ... "
+sleep 10
+echo "done"
+
+echo ""
+echo "**********************************************************************"
+echo "**** $DB1 is now a standalone database with a running pgbench"
+echo "**********************************************************************"
+echo ""
+
+######################################################################
+# Setup DB1 as the primary cluster T1 node, start the node daemon,
+# and create a replication set containing the pgbench tables.
+######################################################################
+
+echo "**** initializing $DB1 as Primary Node for Slony-I cluster $CLUSTERNAME"
+slonik <<_EOF_
+ include <$PREAMBLE_FILE>;
+ init cluster (id = @origin, comment = 'Node @origin');
+ echo 'Database $DB1 initialized as Node 11';
+_EOF_
+if [ $? -ne 0 ] ; then
+ kill $pgbench_pid;
+ exit 1
+fi
+
+echo "**** starting the Slony-I node daemon for $DB1"
+xterm -title "Slon node 11" -e sh -c "slon -d$DEBUGLEVEL -s500 -g10 $CLUSTERNAME dbname=$DB1; echo -n 'Enter>'; read line" &
+slon1_pid=$!
+echo "slon[$slon1_pid] on dbname=$DB1"
+
+echo "**** creating a replication set containing the 4 pgbench tables ... "
+slonik <<_EOF_
+ include <$PREAMBLE_FILE>;
+ try {
+ table add key (node id = @origin, fully qualified name = 'public.history');
+ }
+ on error {
+ exit 1;
+ }
+
+ try {
+ create set (id = 1, origin = @origin, comment = 'Set 1 - pgbench tables');
+ set add table (set id = 1, origin = @origin,
+ id = 1, fully qualified name = 'public.accounts',
+ comment = 'Table accounts');
+ set add table (set id = 1, origin = @origin,
+ id = 2, fully qualified name = 'public.branches',
+ comment = 'Table branches');
+ set add table (set id = 1, origin = @origin,
+ id = 3, fully qualified name = 'public.tellers',
+ comment = 'Table tellers');
+ set add table (set id = 1, origin = @origin,
+ id = 4, fully qualified name = 'public.history',
+            key = serial, comment = 'Table history');
+ }
+ on error {
+ exit 1;
+ }
+_EOF_
+
+if [ $? -ne 0 ] ; then
+ echo "failed"
+ kill $pgbench_pid 2>/dev/null
+ kill $slon1_pid 2>/dev/null
+ cat $TMPOUT
+ rm $TMPOUT
+ exit 1
+fi
+echo "**** set created"
+
+#####
+# Check that pgbench is still running
+#####
+if ! kill -0 $pgbench_pid 2>/dev/null ; then
+ echo "**** pgbench terminated ???"
+ kill $slon1_pid 2>/dev/null
+ exit 1
+fi
+
+echo ""
+echo "**********************************************************************"
+echo "**** $DB1 is now the Slony-I origin for set 1"
+echo "**********************************************************************"
+echo ""
+
+######################################################################
+# Setup DB2 as a subscriber node and let it subscribe the replication
+# set of the running pgbench
+######################################################################
+echo "**** creating database for node 22"
+if ! createdb $DB2 ; then
+ kill $pgbench_pid 2>/dev/null
+ kill $slon1_pid 2>/dev/null
+ exit 1
+fi
+
+echo "**** initializing $DB2 as node 22 of Slony-I cluster $CLUSTERNAME"
+slonik <<_EOF_
+ include <$PREAMBLE_FILE>;
+ echo 'Creating node 22';
+ try {
+ store node (id = @sub1, comment = 'node @sub1', event node = @origin);
+ } on error {
+ echo 'could not establish node @sub1';
+ exit -1;
+ }
+ try {
+ store path (server = @origin, client = @sub1, conninfo = 'dbname=$DB1');
+ store path (server = @sub1, client = @origin, conninfo = 'dbname=$DB2');
+ }
+ on error {
+ echo 'could not establish paths between @origin and @sub1';
+ exit -1;
+ }
+ echo 'Database $DB2 added as node @sub1';
+_EOF_
+if [ $? -ne 0 ] ; then
+ kill $pgbench_pid 2>/dev/null
+ kill $slon1_pid 2>/dev/null
+ exit 1
+fi
+
+echo "**** starting the Slony-I node daemon for $DB2"
+xterm -title "Slon node 22" -e sh -c "slon -d$DEBUGLEVEL -s10000 -o10000 -g10 -a $LOGSHIPDIR $CLUSTERNAME dbname=$DB2; echo -n 'Enter>'; read line" &
+slon2_pid=$!
+echo "slon[$slon2_pid] on dbname=$DB2"
+
+#####
+# Check that pgbench is still running
+#####
+if ! kill -0 $pgbench_pid 2>/dev/null ; then
+ echo "**** pgbench terminated ???"
+ kill $slon1_pid 2>/dev/null
+ exit 1
+fi
+
+######################################################################
+# And now comes the moment where the big elephant starts to pee
+# and the attendants in the first row climb on their chairs ...
+######################################################################
+echo "**** creating pgbench tables and subscribing node 22 to set 1"
+(
+ cat pgbench_schema.sql
+) | psql -q $DB2
+slonik <<_EOF_
+ include <$PREAMBLE_FILE>;
+ subscribe set ( id = 1, provider = @origin, receiver = @sub1, forward = yes );
+_EOF_
+
+echo ""
+echo "**********************************************************************"
+echo "**** $DB2 should now be copying data and attempting to catch up."
+echo "**********************************************************************"
+echo ""
+
+echo -n "**** waiting for pgbench to finish "
+while kill -0 $pgbench_pid 2>/dev/null ; do
+ echo -n "."
+ sleep 10
+done
+echo "**** pgbench finished"
+echo "**** please terminate the replication engines when caught up."
+wait $slon1_pid
+wait $slon2_pid
+
+kill $pgbench_pid 2>/dev/null
+kill $slon1_pid 2>/dev/null
+kill $slon2_pid 2>/dev/null
+
+echo -n "**** comparing databases ... "
+psql $DB1 >dump.tmp.1.$$ <<_EOF_
+ select 'accounts:'::text, aid, bid, abalance, filler
+ from accounts order by aid;
+ select 'branches:'::text, bid, bbalance, filler
+ from branches order by bid;
+ select 'tellers:'::text, tid, bid, tbalance, filler
+ from tellers order by tid;
+ select 'history:'::text, tid, bid, aid, delta, mtime, filler,
+ "_Slony-I_${CLUSTERNAME}_rowID" from history order by "_Slony-I_${CLUSTERNAME}_rowID";
+_EOF_
+psql $DB2 >dump.tmp.2.$$ <<_EOF_
+ select 'accounts:'::text, aid, bid, abalance, filler
+ from accounts order by aid;
+ select 'branches:'::text, bid, bbalance, filler
+ from branches order by bid;
+ select 'tellers:'::text, tid, bid, tbalance, filler
+ from tellers order by tid;
+ select 'history:'::text, tid, bid, aid, delta, mtime, filler,
+ "_Slony-I_${CLUSTERNAME}_rowID" from history order by "_Slony-I_${CLUSTERNAME}_rowID";
+_EOF_
+
+if diff dump.tmp.1.$$ dump.tmp.2.$$ >test_1.diff ; then
+ echo "success - databases are equal."
+ rm dump.tmp.?.$$
+ rm test_1.diff
+else
+ echo "FAILED - see test_1.diff for database differences"
+fi
+rm $PREAMBLE_FILE
Index: remote_worker.c
===================================================================
RCS file: /usr/local/cvsroot/slony1/slony1-engine/src/slon/remote_worker.c,v
retrieving revision 1.76
retrieving revision 1.77
diff -Lsrc/slon/remote_worker.c -Lsrc/slon/remote_worker.c -u -w -r1.76 -r1.77
--- src/slon/remote_worker.c
+++ src/slon/remote_worker.c
@@ -249,7 +249,9 @@
static int generate_archive_header (int node_id, char *seqbuf);
static int submit_query_to_archive(SlonDString *ds);
static int submit_string_to_archive (const char *s);
+static int submit_raw_data_to_archive (const char *s);
static int logarchive_tracking (const char *namespace, int sub_set, const char *firstseq, const char *seqbuf);
+static int write_void_log (int node_id, char *seqbuf, const char *message);
#define TERMINATE_QUERY_AND_ARCHIVE dstring_free(&query); terminate_log_archive();
@@ -587,6 +589,9 @@
no_id, no_comment, no_spool);
need_reloadListen = true;
+ if (archive_dir)
+ write_void_log (rtcfg_nodeid, seqbuf, "-- STORE_NODE");
+
}
else if (strcmp(event->ev_type, "ENABLE_NODE") == 0)
{
@@ -601,6 +606,9 @@
no_id);
need_reloadListen = true;
+
+ if (archive_dir)
+ write_void_log (rtcfg_nodeid, seqbuf, "-- ENABLE_NODE");
}
else if (strcmp(event->ev_type, "DROP_NODE") == 0)
{
@@ -650,6 +658,8 @@
rtcfg_cluster_name);
need_reloadListen = true;
+ if (archive_dir)
+ write_void_log (rtcfg_nodeid, seqbuf, "-- DROP_NODE");
}
else if (strcmp(event->ev_type, "STORE_PATH") == 0)
{
@@ -667,6 +677,8 @@
pa_server, pa_client, pa_conninfo, pa_connretry);
need_reloadListen = true;
+ if (archive_dir)
+ write_void_log (rtcfg_nodeid, seqbuf, "-- STORE_PATH");
}
else if (strcmp(event->ev_type, "DROP_PATH") == 0)
{
@@ -682,6 +694,8 @@
pa_server, pa_client);
need_reloadListen = true;
+ if (archive_dir)
+ write_void_log (rtcfg_nodeid, seqbuf, "-- DROP_PATH");
}
else if (strcmp(event->ev_type, "STORE_LISTEN") == 0)
{
@@ -696,6 +710,8 @@
"select %s.storeListen_int(%d, %d, %d); ",
rtcfg_namespace,
li_origin, li_provider, li_receiver);
+ if (archive_dir)
+ write_void_log (rtcfg_nodeid, seqbuf, "-- STORE_LISTEN");
}
else if (strcmp(event->ev_type, "DROP_LISTEN") == 0)
{
@@ -710,6 +726,8 @@
"select %s.dropListen_int(%d, %d, %d); ",
rtcfg_namespace,
li_origin, li_provider, li_receiver);
+ if (archive_dir)
+ write_void_log (rtcfg_nodeid, seqbuf, "-- DROP_LISTEN");
}
else if (strcmp(event->ev_type, "STORE_SET") == 0)
{
@@ -724,6 +742,9 @@
"select %s.storeSet_int(%d, %d, '%q'); ",
rtcfg_namespace,
set_id, set_origin, set_comment);
+
+ if (archive_dir)
+ write_void_log (rtcfg_nodeid, seqbuf, "-- STORE_SET");
}
else if (strcmp(event->ev_type, "DROP_SET") == 0)
{
@@ -734,18 +755,46 @@
slon_appendquery(&query1,
"select %s.dropSet_int(%d); ",
rtcfg_namespace, set_id);
+
+ /* The table deleted needs to be
+ * dropped from log shipping too */
+ if (archive_dir) {
+ rc = open_log_archive(rtcfg_nodeid, seqbuf);
+ rc = generate_archive_header(rtcfg_nodeid, seqbuf);
+ slon_mkquery(&query1,
+ "delete from %s.sl_setsync_offline "
+ " where ssy_setid= %d;",
+ rtcfg_namespace, set_id);
+ rc = submit_query_to_archive(&query1);
+ rc = close_log_archive();
+ }
}
else if (strcmp(event->ev_type, "MERGE_SET") == 0)
{
int set_id = (int)strtol(event->ev_data1, NULL, 10);
int add_id = (int)strtol(event->ev_data2, NULL, 10);
-
+ char dropquery[280];
rtcfg_dropSet(add_id);
slon_appendquery(&query1,
"select %s.mergeSet_int(%d, %d); ",
rtcfg_namespace,
set_id, add_id);
+
+ /* Log shipping gets the change here
+ * that we need to delete the table
+ * being merged from the set being
+ * maintained. */
+ if (archive_dir) {
+ rc = open_log_archive(rtcfg_nodeid, seqbuf);
+ rc = generate_archive_header(rtcfg_nodeid, seqbuf);
+ rc = slon_mkquery(&query1,
+ "delete from %s.sl_setsync_offline "
+ " where ssy_setid= %d;",
+ rtcfg_namespace, add_id);
+ rc = submit_query_to_archive(&query1);
+ rc = close_log_archive();
+ }
}
else if (strcmp(event->ev_type, "SET_ADD_TABLE") == 0)
{
@@ -754,6 +803,8 @@
* subscribed sets yet and table information is not maintained
* in the runtime configuration.
*/
+ if (archive_dir)
+ write_void_log (rtcfg_nodeid, seqbuf, "-- SET_ADD_TABLE");
}
else if (strcmp(event->ev_type, "SET_ADD_SEQUENCE") == 0)
{
@@ -762,6 +813,8 @@
* subscribed sets yet and sequences information is not
* maintained in the runtime configuration.
*/
+ if (archive_dir)
+ write_void_log (rtcfg_nodeid, seqbuf, "-- SET_ADD_SEQUENCE");
}
else if (strcmp(event->ev_type, "SET_DROP_TABLE") == 0)
{
@@ -770,6 +823,8 @@
slon_appendquery(&query1, "select %s.setDropTable_int(%d);",
rtcfg_namespace,
tab_id);
+ if (archive_dir)
+ write_void_log (rtcfg_nodeid, seqbuf, "-- SET_DROP_TABLE");
}
else if (strcmp(event->ev_type, "SET_DROP_SEQUENCE") == 0)
{
@@ -778,6 +833,8 @@
slon_appendquery(&query1, "select %s.setDropSequence_int(%d);",
rtcfg_namespace,
seq_id);
+ if (archive_dir)
+ write_void_log (rtcfg_nodeid, seqbuf, "-- SET_DROP_SEQUENCE");
}
else if (strcmp(event->ev_type, "SET_MOVE_TABLE") == 0)
{
@@ -787,6 +844,8 @@
slon_appendquery(&query1, "select %s.setMoveTable_int(%d, %d);",
rtcfg_namespace,
tab_id, new_set_id);
+ if (archive_dir)
+ write_void_log (rtcfg_nodeid, seqbuf, "-- SET_MOVE_TABLE");
}
else if (strcmp(event->ev_type, "SET_MOVE_SEQUENCE") == 0)
{
@@ -796,6 +855,8 @@
slon_appendquery(&query1, "select %s.setMoveSequence_int(%d, %d);",
rtcfg_namespace,
seq_id, new_set_id);
+ if (archive_dir)
+ write_void_log (rtcfg_nodeid, seqbuf, "-- SET_MOVE_SEQUENCE");
}
else if (strcmp(event->ev_type, "STORE_TRIGGER") == 0)
{
@@ -806,6 +867,8 @@
"select %s.storeTrigger_int(%d, '%q'); ",
rtcfg_namespace,
trig_tabid, trig_tgname);
+ if (archive_dir)
+ write_void_log (rtcfg_nodeid, seqbuf, "-- STORE_TRIGGER");
}
else if (strcmp(event->ev_type, "DROP_TRIGGER") == 0)
{
@@ -816,6 +879,70 @@
"select %s.dropTrigger_int(%d, '%q'); ",
rtcfg_namespace,
trig_tabid, trig_tgname);
+ if (archive_dir)
+ write_void_log (rtcfg_nodeid, seqbuf, "-- DROP_TRIGGER");
+ }
+ else if (strcmp(event->ev_type, "ACCEPT_SET") == 0)
+ {
+ int set_id = (int) strtol(event->ev_data1, NULL, 10);
+ int old_origin = (int) strtol(event->ev_data2, NULL, 10);
+ int new_origin = (int) strtol(event->ev_data3, NULL, 10);
+ int seq_no = (int) strtol(event->ev_data4, NULL, 10);
+ PGresult *res;
+
+ /* If we're a remote node, and haven't yet
+ * received the MOVE_SET event from the
+ * new origin, then we'll need to sleep a
+ * bit... This avoids a race condition
+ * where new SYNCs take place on the new
+ * origin, and are ignored on some
+ * subscribers (and their children)
+ * because the MOVE_SET wasn't yet
+ * received and processed */
+
+ if ((rtcfg_nodeid != old_origin) && (rtcfg_nodeid != new_origin)) {
+ slon_mkquery(&query1,
+ "select 1 from %s.sl_event accept "
+ "where "
+ " accept.ev_type = 'ACCEPT_SET' and "
+ " accept.ev_origin = %d and "
+ " accept.ev_data1 = %d and "
+ " accept.ev_data2 = %d and "
+ " accept.ev_data3 = %d and "
+ " accept.ev_data4 = %d and "
+ " not exists "
+ " (select 1 from %s.sl_event move "
+ " where "
+ " accept.ev_origin = move.ev_data3 and "
+ " move.ev_type = 'MOVE_SET' and "
+ " move.ev_data1 = accept.ev_data1 and "
+ " move.ev_data2 = accept.ev_data2 and "
+ " move.ev_data3 = accept.ev_data3 and "
+ " move.ev_seqno = %d); ",
+
+ rtcfg_namespace,
+ old_origin, set_id, old_origin, new_origin, seq_no,
+ rtcfg_namespace, seq_no);
+ res = PQexec(local_dbconn, dstring_data(&query1));
+ while (PQntuples(res) > 0) {
+ int sleeptime = 15;
+ int sched_rc;
+ slon_log(SLON_WARN, "remoteWorkerThread_%d: "
+ "accept set: node has not yet received MOVE_SET event "
+ "for set %d old origin %d new origin %d - sleep %d seconds\n",
+ rtcfg_nodeid, set_id, old_origin, new_origin, sleeptime);
+ sched_rc = sched_msleep(node, sleeptime * 1000);
+ if (sched_rc != SCHED_STATUS_OK) {
+ event_ok = false;
+ break;
+ } else {
+ if (sleeptime < 60)
+ sleeptime *= 2;
+ }
+ res = PQexec(local_dbconn, dstring_data(&query1));
+ }
+ }
+
}
else if (strcmp(event->ev_type, "MOVE_SET") == 0)
{
@@ -831,6 +958,7 @@
* that, we need to execute it now and select the resulting
* provider for us.
*/
+
slon_appendquery(&query1,
"select %s.moveSet_int(%d, %d, %d); ",
rtcfg_namespace,
@@ -880,6 +1008,8 @@
rtcfg_namespace,
failed_node, backup_node, set_id);
+ if (archive_dir)
+ write_void_log (rtcfg_nodeid, seqbuf, "-- FAILOVER_SET");
need_reloadListen = true;
}
else if (strcmp(event->ev_type, "SUBSCRIBE_SET") == 0)
@@ -896,7 +1026,8 @@
"select %s.subscribeSet_int(%d, %d, %d, '%q'); ",
rtcfg_namespace,
sub_set, sub_provider, sub_receiver, sub_forward);
-
+ if (archive_dir)
+ write_void_log (rtcfg_nodeid, seqbuf, "-- SUBSCRIBE_SET");
need_reloadListen = true;
}
else if (strcmp(event->ev_type, "ENABLE_SUBSCRIPTION") == 0)
@@ -1004,7 +1135,9 @@
rtcfg_namespace,
sub_set, sub_provider, sub_receiver);
}
-
+ /* Note: No need to do anything based
+ on archive_dir here; copy_set does
+ that nicely already. */
need_reloadListen = true;
}
else if (strcmp(event->ev_type, "UNSUBSCRIBE_SET") == 0)
@@ -1022,6 +1155,16 @@
sub_set, sub_receiver);
need_reloadListen = true;
+ if (archive_dir) {
+ rc = open_log_archive(rtcfg_nodeid, seqbuf);
+ rc = generate_archive_header(rtcfg_nodeid, seqbuf);
+ slon_mkquery(&query1,
+ "delete from %s.sl_setsync_offline "
+ " where ssy_setid= %d;",
+ rtcfg_namespace, sub_set);
+ rc = submit_query_to_archive(&query1);
+ rc = close_log_archive();
+ }
}
else if (strcmp(event->ev_type, "DDL_SCRIPT") == 0)
{
@@ -1087,11 +1230,15 @@
"select %s.updateReloid(%d, '%q', %d); ",
rtcfg_namespace,
reset_config_setid, reset_configonly_on_node);
+ if (archive_dir)
+ write_void_log (rtcfg_nodeid, seqbuf, "-- RESET_CONFIG");
}
else
{
printf("TODO: ********** remoteWorkerThread: node %d - EVENT %d," INT64_FORMAT " %s - unknown event type\n",
node->no_id, event->ev_origin, event->ev_seqno, event->ev_type);
+ if (archive_dir)
+ write_void_log (rtcfg_nodeid, seqbuf, "-- UNHANDLED EVENT!!!");
}
/*
@@ -2107,6 +2254,33 @@
dstring_init(&query1);
sprintf(seqbuf, INT64_FORMAT, event->ev_seqno);
+
+ /* Log Shipping Support begins... */
+ /* - Open the log, put the header in
+ Isn't it convenient that seqbuf was just populated??? :-)
+ */
+ if (archive_dir) {
+ rc = open_log_archive(rtcfg_nodeid, seqbuf);
+ if (rc < 0) {
+ slon_log(SLON_ERROR, "remoteWorkerThread_%d: "
+ "Could not open COPY SET archive file %s - %s",
+ node->no_id, archive_tmp, strerror(errno));
+ slon_disconnectdb(pro_conn);
+ dstring_free(&query1);
+ terminate_log_archive();
+ return -1;
+ }
+ rc = generate_archive_header(rtcfg_nodeid, seqbuf);
+ if (rc < 0) {
+ slon_log(SLON_ERROR, "remoteWorkerThread_%d: "
+ "Could not generate COPY SET archive header %s - %s",
+ node->no_id, archive_tmp, strerror(errno));
+ slon_disconnectdb(pro_conn);
+ dstring_free(&query1);
+ terminate_log_archive();
+ return -1;
+ }
+ }
/*
* Listen on the special relation telling what node daemon this connection
* belongs to.
@@ -2118,6 +2292,7 @@
{
slon_disconnectdb(pro_conn);
dstring_free(&query1);
+ terminate_log_archive();
return -1;
}
@@ -2147,6 +2322,7 @@
PQclear(res1);
slon_disconnectdb(pro_conn);
dstring_free(&query1);
+ terminate_log_archive();
return -1;
}
if (*(PQgetvalue(res1, 0, 0)) == 't')
@@ -2157,6 +2333,7 @@
PQclear(res1);
slon_disconnectdb(pro_conn);
dstring_free(&query1);
+ terminate_log_archive();
return -1;
}
PQclear(res1);
@@ -2170,6 +2347,7 @@
{
slon_disconnectdb(pro_conn);
dstring_free(&query1);
+ terminate_log_archive();
return -1;
}
}
@@ -2199,6 +2377,7 @@
PQclear(res1);
slon_disconnectdb(pro_conn);
dstring_free(&query1);
+ terminate_log_archive();
return -1;
}
ntuples1 = PQntuples(res1);
@@ -2236,6 +2415,7 @@
PQclear(res1);
slon_disconnectdb(pro_conn);
dstring_free(&query1);
+ terminate_log_archive();
return -1;
}
rc = *PQgetvalue(res2, 0, 0) == 't';
@@ -2259,6 +2439,7 @@
PQclear(res1);
slon_disconnectdb(pro_conn);
dstring_free(&query1);
+ terminate_log_archive();
return -1;
}
rc = *PQgetvalue(res2, 0, 0) == 't';
@@ -2279,6 +2460,7 @@
PQclear(res1);
slon_disconnectdb(pro_conn);
dstring_free(&query1);
+ terminate_log_archive();
return -1;
}
slon_log(SLON_DEBUG3, "remoteWorkerThread_%d: "
@@ -2314,6 +2496,7 @@
PQclear(res1);
slon_disconnectdb(pro_conn);
dstring_free(&query1);
+ terminate_log_archive();
return -1;
}
@@ -2334,6 +2517,7 @@
PQclear(res1);
slon_disconnectdb(pro_conn);
dstring_free(&query1);
+ terminate_log_archive();
return -1;
}
ntuples2 = PQntuples(res2);
@@ -2348,6 +2532,7 @@
PQclear(res1);
slon_disconnectdb(pro_conn);
dstring_free(&query1);
+ terminate_log_archive();
return -1;
}
}
@@ -2356,6 +2541,10 @@
/*
* Begin a COPY from stdin for the table on the local DB
*/
+ slon_log(SLON_DEBUG4, "remoteWorkerThread_%d: "
+ "Begin COPY of table %s\n",
+ node->no_id, tab_fqname);
+
slon_mkquery(&query1,
"select %s.truncateTable('%s'); "
"copy %s from stdin; ",
@@ -2372,9 +2561,25 @@
PQclear(res1);
slon_disconnectdb(pro_conn);
dstring_free(&query1);
+ terminate_log_archive();
return -1;
}
+ if (archive_dir) {
+ slon_log(SLON_DEBUG4, "start log ship copy of %s\n", tab_fqname);
+ slon_mkquery(&query1,
+ "delete from %s;copy %s from stdin;", tab_fqname, tab_fqname);
+ rc = submit_query_to_archive(&query1);
+ if (rc < 0) {
+ slon_log(SLON_ERROR, "remoteWorkerThread_%d: "
+ "Could not generate copy_set request for %s - %s",
+ node->no_id, tab_fqname, strerror(errno));
+ slon_disconnectdb(pro_conn);
+ dstring_free(&query1);
+ terminate_log_archive();
+ return -1;
+ }
+ }
/*
* Begin a COPY to stdout for the table on the provider DB
*/
@@ -2398,6 +2603,7 @@
PQclear(res1);
slon_disconnectdb(pro_conn);
dstring_free(&query1);
+ terminate_log_archive();
return -1;
}
@@ -2410,7 +2616,6 @@
int len = strlen(copydata);
copysize += (int64) len;
-
if (PQputCopyData(loc_dbconn, copydata, len) != 1)
{
slon_log(SLON_ERROR, "remoteWorkerThread_%d: "
@@ -2426,8 +2631,30 @@
PQclear(res1);
slon_disconnectdb(pro_conn);
dstring_free(&query1);
+ terminate_log_archive();
return -1;
}
+ if (archive_dir) {
+ rc = fwrite(copydata, 1, len, archive_fp);
+ if (rc != len) {
+ slon_log(SLON_ERROR, "remoteWorkerThread_%d: "
+ "PQputCopyData() - log shipping - %s",
+ node->no_id, strerror(errno));
+#ifdef SLON_MEMDEBUG
+ memset(copydata, 88, len);
+#endif
+ PQfreemem(copydata);
+ PQputCopyEnd(loc_dbconn, "Slony-I: copy set operation");
+ PQclear(res3);
+ PQclear(res2);
+ PQclear(res1);
+ slon_disconnectdb(pro_conn);
+ dstring_free(&query1);
+ terminate_log_archive();
+ return -1;
+
+ }
+ }
#ifdef SLON_MEMDEBUG
memset(copydata, 88, len);
#endif
@@ -2444,6 +2671,7 @@
PQclear(res1);
slon_disconnectdb(pro_conn);
dstring_free(&query1);
+ terminate_log_archive();
return -1;
}
@@ -2464,6 +2692,7 @@
PQclear(res1);
slon_disconnectdb(pro_conn);
dstring_free(&query1);
+ terminate_log_archive();
return -1;
}
PQclear(res3);
@@ -2480,6 +2709,7 @@
PQclear(res1);
slon_disconnectdb(pro_conn);
dstring_free(&query1);
+ terminate_log_archive();
return -1;
}
res2 = PQgetResult(loc_dbconn);
@@ -2493,8 +2723,12 @@
PQclear(res1);
slon_disconnectdb(pro_conn);
dstring_free(&query1);
+ terminate_log_archive();
return -1;
}
+ if (archive_dir) {
+ rc = submit_string_to_archive("\\.");
+ }
#else /* ! HAVE_PQPUTCOPYDATA */
copydone = false;
while (!copydone)
@@ -2517,16 +2751,22 @@
case 0:
PQputline(loc_dbconn, copybuf);
PQputline(loc_dbconn, "\n");
+ if (archive_dir)
+ submit_string_to_archive(copybuf);
break;
case 1:
PQputline(loc_dbconn, copybuf);
+ if (archive_dir)
+ submit_raw_data_to_archive(copybuf);
break;
}
}
}
PQputline(loc_dbconn, "\\.\n");
-
+ if (archive_dir) {
+ rc = submit_string_to_archive("\\.");
+ }
/*
* End the COPY to stdout on the provider
*/
@@ -2541,6 +2781,7 @@
PQclear(res1);
slon_disconnectdb(pro_conn);
dstring_free(&query1);
+ terminate_log_archive();
return -1;
}
PQclear(res3);
@@ -2562,6 +2803,7 @@
PQclear(res1);
slon_disconnectdb(pro_conn);
dstring_free(&query1);
+ terminate_log_archive();
return -1;
}
#endif /* HAVE_PQPUTCOPYDATA */
@@ -2580,8 +2822,13 @@
PQclear(res1);
slon_disconnectdb(pro_conn);
dstring_free(&query1);
+ terminate_log_archive();
return -1;
}
+ if (archive_dir) {
+ submit_query_to_archive(&query1);
+ }
+
gettimeofday(&tv_now, NULL);
slon_log(SLON_DEBUG2, "remoteWorkerThread_%d: "
"%.3f seconds to copy table %s\n",
@@ -2616,6 +2863,7 @@
PQclear(res1);
slon_disconnectdb(pro_conn);
dstring_free(&query1);
+ terminate_log_archive();
return -1;
}
ntuples1 = PQntuples(res1);
@@ -2638,6 +2886,7 @@
PQclear(res1);
slon_disconnectdb(pro_conn);
dstring_free(&query1);
+ terminate_log_archive();
return -1;
}
}
@@ -2670,6 +2919,7 @@
PQclear(res1);
slon_disconnectdb(pro_conn);
dstring_free(&query1);
+ terminate_log_archive();
return -1;
}
ntuples1 = PQntuples(res1);
@@ -2683,6 +2933,8 @@
"set last_value of sequence %s (%s) to %s\n",
node->no_id, seql_seqid, seq_fqname, seql_last_value);
+
+
/*
* sequence with ID 0 is a nodes rowid ... only remember in seqlog.
*/
@@ -2691,6 +2943,10 @@
slon_mkquery(&query1,
"select \"pg_catalog\".setval('%q', '%s'); ",
seq_fqname, seql_last_value);
+
+ if (archive_dir) {
+ submit_query_to_archive(&query1);
+ }
}
else
dstring_reset(&query1);
@@ -2706,6 +2962,7 @@
PQclear(res1);
slon_disconnectdb(pro_conn);
dstring_free(&query1);
+ terminate_log_archive();
return -1;
}
}
@@ -2749,6 +3006,7 @@
PQclear(res1);
slon_disconnectdb(pro_conn);
dstring_free(&query1);
+ terminate_log_archive();
return -1;
}
if (PQntuples(res1) != 1)
@@ -2759,6 +3017,7 @@
PQclear(res1);
slon_disconnectdb(pro_conn);
dstring_free(&query1);
+ terminate_log_archive();
return -1;
}
if (PQgetisnull(res1, 0, 0))
@@ -2805,6 +3064,7 @@
PQclear(res1);
slon_disconnectdb(pro_conn);
dstring_free(&query1);
+ terminate_log_archive();
return -1;
}
if (PQntuples(res1) != 1)
@@ -2815,6 +3075,7 @@
PQclear(res1);
slon_disconnectdb(pro_conn);
dstring_free(&query1);
+ terminate_log_archive();
return -1;
}
ssy_seqno = PQgetvalue(res1, 0, 0);
@@ -2859,6 +3120,7 @@
PQclear(res1);
slon_disconnectdb(pro_conn);
dstring_free(&query1);
+ terminate_log_archive();
return -1;
}
ntuples1 = PQntuples(res2);
@@ -2899,6 +3161,7 @@
PQclear(res1);
slon_disconnectdb(pro_conn);
dstring_free(&query1);
+ terminate_log_archive();
return -1;
}
if (PQntuples(res1) != 1)
@@ -2909,6 +3172,7 @@
PQclear(res1);
slon_disconnectdb(pro_conn);
dstring_free(&query1);
+ terminate_log_archive();
return -1;
}
dstring_init(&ssy_action_list);
@@ -2937,14 +3201,45 @@
{
slon_disconnectdb(pro_conn);
dstring_free(&query1);
+ terminate_log_archive();
+ return -1;
+ }
+ if (archive_dir) {
+ slon_mkquery(&query1,
+ "insert into %s.sl_setsync_offline "
+ "values ('%d', '%d');",
+ rtcfg_namespace, set_id, ssy_seqno);
+ rc = submit_query_to_archive(&query1);
+ if (rc < 0) {
+ slon_log(SLON_ERROR, "remoteWorkerThread_%d: "
+ " could not insert to sl_setsync_offline",
+ node->no_id);
+ slon_disconnectdb(pro_conn);
+ dstring_free(&query1);
+ terminate_log_archive();
return -1;
}
+ }
gettimeofday(&tv_now, NULL);
slon_log(SLON_DEBUG2, "remoteWorkerThread_%d: "
"%.3f seconds to build initial setsync status\n",
node->no_id,
TIMEVAL_DIFF(&tv_start2, &tv_now));
+ if (archive_dir) {
+ rc = close_log_archive();
+ if (rc < 0) {
+ slon_log(SLON_ERROR, "remoteWorkerThread_%d: "
+ " could not close archive log %s - %s",
+ node->no_id, archive_tmp, strerror(errno));
+ slon_disconnectdb(pro_conn);
+ dstring_free(&query1);
+ terminate_log_archive();
+ return -1;
+ }
+ }
+
+
/*
* Roll back the transaction we used on the provider and close the
* database connection.
@@ -2954,6 +3249,7 @@
{
slon_disconnectdb(pro_conn);
dstring_free(&query1);
+ terminate_log_archive();
return -1;
}
slon_disconnectdb(pro_conn);
@@ -2966,11 +3262,9 @@
gettimeofday(&tv_now, NULL);
slon_log(SLON_DEBUG1, "copy_set %d done in %.3f seconds\n", set_id,
TIMEVAL_DIFF(&tv_start, &tv_now));
-
return 0;
}
-
static int
sync_event(SlonNode * node, SlonConn * local_conn,
WorkerGroupData * wd, SlonWorkMsg_event * event)
@@ -4219,9 +4513,11 @@
int close_log_archive () {
int rc;
+ if (archive_dir) {
rc = fprintf(archive_fp, "\n------------------------------------------------------------------\n-- End Of Archive Log\n------------------------------------------------------------------\ncommit;\n");
rc = fclose(archive_fp);
rc = rename(archive_tmp, archive_name);
+ }
return rc;
}
@@ -4231,13 +4527,18 @@
}
int submit_query_to_archive(SlonDString *ds) {
- return fprintf(archive_fp, "%s\n", *ds->data);
+ return fprintf(archive_fp, "%s\n", ds->data);
}
int submit_string_to_archive (const char *s) {
return fprintf(archive_fp, "%s\n", s);
}
+/* Raw form used for COPY where we don't want any extra cr/lf output */
+int submit_raw_data_to_archive (const char *s) {
+ return fprintf(archive_fp, "%s", s);
+}
+
void terminate_log_archive () {
if (archive_fp) {
fclose(archive_fp);
@@ -4248,17 +4549,20 @@
time_t now;
now = time(NULL);
return fprintf(archive_fp,
- "-- Slony-I sync log\n"
+ "-- Slony-I log shipping archive\n"
"-- Node %d, Event %s\n"
"-- at... %s\n"
"start transaction;\n",
node_id, seqbuf, ctime(&now));
}
-/*
- * Local Variables:
- * tab-width: 4
- * c-indent-level: 4
- * c-basic-offset: 4
- * End:
- */
+/* write_void_log() writes out a "void" log consisting of the message
+ * which must either be a valid SQL query or a SQL comment. */
+
+int write_void_log (int node_id, char *seqbuf, const char *message) {
+ open_log_archive(node_id, seqbuf);
+ generate_archive_header(node_id, seqbuf);
+ submit_string_to_archive(message);
+ close_log_archive();
+}
+
Index: slon.h
===================================================================
RCS file: /usr/local/cvsroot/slony1/slony1-engine/src/slon/slon.h,v
retrieving revision 1.45
retrieving revision 1.46
diff -Lsrc/slon/slon.h -Lsrc/slon/slon.h -u -w -r1.45 -r1.46
--- src/slon/slon.h
+++ src/slon/slon.h
@@ -50,7 +50,7 @@
#define SLON_CLEANUP_SLEEP 600 /* sleep 10 minutes between */
/* cleanup calls */
-#define SLON_VACUUM_FREQUENCY 1 /* vacuum every 3rd cleanup */
+#define SLON_VACUUM_FREQUENCY 3 /* vacuum every 3rd cleanup */
typedef enum
Index: cleanup_thread.c
===================================================================
RCS file: /usr/local/cvsroot/slony1/slony1-engine/src/slon/cleanup_thread.c,v
retrieving revision 1.19
retrieving revision 1.20
diff -Lsrc/slon/cleanup_thread.c -Lsrc/slon/cleanup_thread.c -u -w -r1.19 -r1.20
--- src/slon/cleanup_thread.c
+++ src/slon/cleanup_thread.c
@@ -32,7 +32,8 @@
* ---------- Global data ----------
*/
int vac_frequency = SLON_VACUUM_FREQUENCY;
-
+static unsigned long earliest_xid = 0;
+static unsigned long get_earliest_xid (PGconn *dbconn);
/*
* ---------- cleanupThread_main
*
@@ -55,6 +56,7 @@
int n ,
t;
int vac_count = 0;
+ char *vacuum_action;
slon_log(SLON_DEBUG1, "cleanupThread: thread starts\n");
@@ -166,28 +168,44 @@
*/
if (vac_frequency != 0 && ++vac_count >= vac_frequency)
{
+ unsigned long latest_xid;
vac_count = 0;
-
+ latest_xid = get_earliest_xid(dbconn);
+ if (earliest_xid != latest_xid) {
+ vacuum_action = "vacuum analyze";
+ } else {
+ vacuum_action = "analyze";
+ slon_log(SLON_DEBUG4, "cleanupThread: xid %d still active - analyze instead\n",
+ earliest_xid);
+ }
+ earliest_xid = latest_xid;
/*
* Build the query string for vacuuming replication runtime data
* and event tables
*/
dstring_init(&query3);
slon_mkquery(&query3,
- "vacuum analyze %s.sl_event; "
- "vacuum analyze %s.sl_confirm; "
- "vacuum analyze %s.sl_setsync; "
- "vacuum analyze %s.sl_log_1; "
- "vacuum analyze %s.sl_log_2;"
- "vacuum analyze %s.sl_seqlog;"
- "vacuum analyze pg_catalog.pg_listener;",
+ "%s %s.sl_event; "
+ "%s %s.sl_confirm; "
+ "%s %s.sl_setsync; "
+ "%s %s.sl_log_1; "
+ "%s %s.sl_log_2;"
+ "%s %s.sl_seqlog;"
+ "%s pg_catalog.pg_listener;",
+ vacuum_action,
rtcfg_namespace,
+ vacuum_action,
rtcfg_namespace,
+ vacuum_action,
rtcfg_namespace,
+ vacuum_action,
rtcfg_namespace,
+ vacuum_action,
rtcfg_namespace,
- rtcfg_namespace);
-
+ vacuum_action,
+ rtcfg_namespace,
+ vacuum_action
+ );
gettimeofday(&tv_start, NULL);
res = PQexec(dbconn, dstring_data(&query3));
@@ -231,3 +249,41 @@
slon_log(SLON_DEBUG1, "cleanupThread: thread done\n");
pthread_exit(NULL);
}
+
+
+static unsigned long get_earliest_xid (PGconn *dbconn) {
+ unsigned long lo = 2147483647;
+ unsigned long minhi = -1;
+ unsigned long minlo = lo;
+ unsigned long xid;
+ long n,t;
+ PGresult *res;
+ SlonDString query1;
+ dstring_init(&query1);
+ slon_mkquery(&query1, "select transaction from pg_catalog.pg_locks where transaction is not null;");
+ res = PQexec(dbconn, dstring_data(&query1));
+ if (PQresultStatus(res) != PGRES_TUPLES_OK) {
+ slon_log(SLON_FATAL, "cleanupThread: could not read locks from pg_locks!");
+ PQclear(res);
+ slon_abort();
+ return -1;
+ } else {
+ n = PQntuples(res);
+ for (t = 0; t < n; t++) {
+ xid = atoi(PQgetvalue(res, t, 0));
+ printf ("xid: %d\n", xid);
+ if (xid > lo) {
+ if (xid < minlo)
+ minlo = xid;
+ } else {
+ if (xid < minhi)
+ minhi = xid;
+ }
+ }
+ }
+ printf("minhi: %d minlo: %d\n", minlo, minhi);
+ if ((minhi - lo) < minlo)
+ return minlo;
+ else
+ return minhi;
+}
From cvsuser Tue Mar 8 22:52:41 2005
From: cvsuser (CVS User Account)
Date: Tue Feb 13 08:58:15 2007
Subject: [Slony1-commit] By darcyb: Add new conf option: sql_on_connection.
Message-ID: <20050308225240.69BF4B1D30B@gborg.postgresql.org>
Log Message:
-----------
Add new conf option: sql_on_connection.
Description:
Execute the enclosed SQL on each node serviced by this slon daemon at
connect time. It is useful for setting logging levels, or to tune the planner/memory settings used by slon.
Modified Files:
--------------
slony1-engine/doc/adminguide:
slonconf.sgml (r1.2 -> r1.3)
slony1-engine/share:
slon.conf-sample (r1.2 -> r1.3)
slony1-engine/src/slon:
conf-file.l (r1.2 -> r1.3)
confoptions.h (r1.16 -> r1.17)
dbutils.c (r1.15 -> r1.16)
slon.h (r1.46 -> r1.47)
-------------- next part --------------
Index: slonconf.sgml
===================================================================
RCS file: /usr/local/cvsroot/slony1/slony1-engine/doc/adminguide/slonconf.sgml,v
retrieving revision 1.2
retrieving revision 1.3
diff -Ldoc/adminguide/slonconf.sgml -Ldoc/adminguide/slonconf.sgml -u -w -r1.2 -r1.3
--- doc/adminguide/slonconf.sgml
+++ doc/adminguide/slonconf.sgml
@@ -165,6 +165,21 @@
+
+
+ sql_on_connection (string)
+
+ sql_on_connection configuration parameter
+
+
+
+ Execute this SQL on each node at slon connect time. Useful to set logging
+ levels, or to tune the planner/memory settings. You can specify multiple
+ statements by seperating them with a ;
+
+
+
+
Index: slon.conf-sample
===================================================================
RCS file: /usr/local/cvsroot/slony1/slony1-engine/share/slon.conf-sample,v
retrieving revision 1.2
retrieving revision 1.3
diff -Lshare/slon.conf-sample -Lshare/slon.conf-sample -u -w -r1.2 -r1.3
--- share/slon.conf-sample
+++ share/slon.conf-sample
@@ -72,3 +72,8 @@
# Range [10000,600000], default 60000.
#desired_sync_time=60000
+# Execute the following SQL on each node at slon connect time
+# useful to set logging levels, or to tune the planner/memory
+# settings. You can specify multiple statements by seperating
+# them with a ;
+#sql_on_connection="SET log_min_duration_statement TO '1000';"
Index: confoptions.h
===================================================================
RCS file: /usr/local/cvsroot/slony1/slony1-engine/src/slon/confoptions.h,v
retrieving revision 1.16
retrieving revision 1.17
diff -Lsrc/slon/confoptions.h -Lsrc/slon/confoptions.h -u -w -r1.16 -r1.17
--- src/slon/confoptions.h
+++ src/slon/confoptions.h
@@ -28,10 +28,11 @@
char *Syslog_ident;
char *Syslog_facility;
int Use_syslog;
+
bool logpid;
bool logtimestamp;
char *log_timestamp_format;
-
+char *sql_on_connection;
enum config_type
{
@@ -283,6 +284,19 @@
&archive_dir,
NULL
},
+ {
+ {
+ (const char *)"sql_on_connection",
+ gettext_noop("SQL to send to each connected node on "
+ "connection establishment, usefull to enable "
+ "duration logging, or to adjust any other "
+ "connection settable GUC"),
+ NULL,
+ SLON_C_STRING
+ },
+ &sql_on_connection,
+ NULL
+ },
#ifdef HAVE_SYSLOG
{
{
Index: conf-file.l
===================================================================
RCS file: /usr/local/cvsroot/slony1/slony1-engine/src/slon/conf-file.l,v
retrieving revision 1.2
retrieving revision 1.3
diff -Lsrc/slon/conf-file.l -Lsrc/slon/conf-file.l -u -w -r1.2 -r1.3
--- src/slon/conf-file.l
+++ src/slon/conf-file.l
@@ -28,6 +28,7 @@
SLON_EQUALS = 5,
SLON_UNQUOTED_STRING = 6,
SLON_QUALIFIED_ID = 7,
+ SLON_ESCAPED_STRING = 8,
SLON_EOL = 99,
SLON_FERROR = 100
};
@@ -62,7 +63,7 @@
UNQUOTED_STRING {LETTER}({LETTER_OR_DIGIT}|[-._:/])*
STRING \'([^'\n]|\\.)*\'
-
+ESCAPED_STRING \"([^"\n]|\\.)*\"
%%
\n ConfigFileLineno++; return SLON_EOL;
@@ -73,6 +74,7 @@
{QUALIFIED_ID} return SLON_QUALIFIED_ID;
{STRING} return SLON_STRING;
{UNQUOTED_STRING} return SLON_UNQUOTED_STRING;
+{ESCAPED_STRING} return SLON_ESCAPED_STRING;
{INTEGER} return SLON_INTEGER;
{REAL} return SLON_REAL;
= return SLON_EQUALS;
@@ -172,12 +174,12 @@
{
token = yylex();
}
- if (token != SLON_ID && token != SLON_STRING && token != SLON_INTEGER && token != SLON_REAL && token != SLON_UNQUOTED_STRING)
+ if (token != SLON_ID && token != SLON_STRING && token != SLON_INTEGER && token != SLON_REAL && token != SLON_UNQUOTED_STRING && token != SLON_ESCAPED_STRING)
{
goto parse_error;
}
opt_value = strdup(yytext);
- if (token == SLON_STRING)
+ if (token == SLON_STRING || token == SLON_ESCAPED_STRING)
{
memmove(opt_value,opt_value+1,strlen(opt_value)-1);
opt_value[strlen(opt_value)-2]='\0';
Index: dbutils.c
===================================================================
RCS file: /usr/local/cvsroot/slony1/slony1-engine/src/slon/dbutils.c,v
retrieving revision 1.15
retrieving revision 1.16
diff -Lsrc/slon/dbutils.c -Lsrc/slon/dbutils.c -u -w -r1.15 -r1.16
--- src/slon/dbutils.c
+++ src/slon/dbutils.c
@@ -73,6 +73,24 @@
PQfinish(dbconn);
return NULL;
}
+ if (sql_on_connection != NULL)
+ {
+
+ PGresult *res;
+ SlonDString query;
+
+ dstring_init(&query);
+ slon_mkquery(&query, "%s", sql_on_connection);
+ res = PQexec(dbconn, dstring_data(&query));
+ if ( ! ((PQresultStatus(res) == PGRES_TUPLES_OK) ||
+ (PQresultStatus(res) == PGRES_COMMAND_OK)) )
+ {
+ slon_log(SLON_ERROR,
+ "query %s failed\n",
+ dstring_data(&query));
+ }
+ PQclear(res);
+ }
/*
* Embed it into a SlonConn structure used to exchange it with the
Index: slon.h
===================================================================
RCS file: /usr/local/cvsroot/slony1/slony1-engine/src/slon/slon.h,v
retrieving revision 1.46
retrieving revision 1.47
diff -Lsrc/slon/slon.h -Lsrc/slon/slon.h -u -w -r1.46 -r1.47
--- src/slon/slon.h
+++ src/slon/slon.h
@@ -508,7 +508,7 @@
extern int slon_mkquery(SlonDString * ds, char *fmt,...);
extern int slon_appendquery(SlonDString * ds, char *fmt,...);
-
+extern char *sql_on_connection;
/* ----------
* Globals in misc.c
From cvsuser Thu Mar 10 17:50:06 2005
From: cvsuser (CVS User Account)
Date: Tue Feb 13 08:58:15 2007
Subject: [Slony1-commit] By smsimms: Removed a "my" keyword that limits the
scope of Slony sets
Message-ID: <20050310175004.916BEB1CE2A@gborg.postgresql.org>
Log Message:
-----------
Removed a "my" keyword that limits the scope of Slony sets too much.
Modified Files:
--------------
slony1-engine/tools/altperl:
slon_tools.conf-sample (r1.4 -> r1.5)
-------------- next part --------------
Index: slon_tools.conf-sample
===================================================================
RCS file: /usr/local/cvsroot/slony1/slony1-engine/tools/altperl/slon_tools.conf-sample,v
retrieving revision 1.4
retrieving revision 1.5
diff -Ltools/altperl/slon_tools.conf-sample -Ltools/altperl/slon_tools.conf-sample -u -w -r1.4 -r1.5
--- tools/altperl/slon_tools.conf-sample
+++ tools/altperl/slon_tools.conf-sample
@@ -78,7 +78,7 @@
# The $SLONY_SETS variable contains information about all of the sets
# in your cluster.
-my $SLONY_SETS = {
+$SLONY_SETS = {
# A unique name for the set
"set1" => {
From cvsuser Thu Mar 10 18:03:12 2005
From: cvsuser (CVS User Account)
Date: Tue Feb 13 08:58:15 2007
Subject: [Slony1-commit] By smsimms: New script to add a node to an existing
cluster.
Message-ID: <20050310180310.D3F8AB1CE29@gborg.postgresql.org>
Log Message:
-----------
New script to add a node to an existing cluster.
Based on init_cluster.pl, minus the "INIT CLUSTER" command, and only
adding paths and listens for the relevant node.
Added Files:
-----------
slony1-engine/tools/altperl:
store_node.pl (r1.1)
-------------- next part --------------
--- /dev/null
+++ tools/altperl/store_node.pl
@@ -0,0 +1,233 @@
+#!@@PERL@@
+# $Id: store_node.pl,v 1.1 2005/03/10 18:03:10 smsimms Exp $
+# Author: Steve Simms
+# Copyright 2005 PostgreSQL Global Development Group
+
+use Getopt::Long;
+
+# Defaults
+my $CONFIG_FILE = '@@SYSCONFDIR@@/slon_tools.conf';
+my $SHOW_USAGE = 0;
+
+# Read command-line options
+GetOptions("config=s" => \$CONFIG_FILE,
+ "help" => \$SHOW_USAGE);
+
+my $USAGE =
+"Usage: store_node [--config file] node#
+
+ Generates the slonik commands necessary to add a node to a
+ cluster. Also displays a report showing the relationships between
+ the various nodes.
+
+";
+
+if ($SHOW_USAGE) {
+ print $USAGE;
+ exit 0;
+}
+
+require '@@PGLIBDIR@@/slon-tools.pm';
+require $CONFIG_FILE;
+
+$node = $ARGV[0];
+
+# Node can be passed either as "node1" or just "1"
+if ($node =~ /^(?:node)?(\d+)$/) {
+ $node = $1;
+} else {
+ die $USAGE;
+}
+
+my $FILE="/tmp/store_node.$$";
+
+open(SLONIK, ">", $FILE);
+print SLONIK genheader();
+
+# STORE NODE
+print SLONIK "\n# STORE NODE\n";
+my ($dbname, $dbhost) = ($DBNAME[$node], $HOST[$node]);
+print SLONIK " store node (id = $node, event node = $MASTERNODE, comment = 'Node $node - $dbname\@$dbhost');\n";
+print SLONIK " echo 'Set up replication nodes';\n";
+
+# STORE PATH
+print SLONIK "\n# STORE PATH\n";
+
+my @COST;
+my @VIA;
+my @PATH;
+generate_listen_paths();
+
+print SLONIK " echo 'Next: configure paths for each node/origin';\n";
+foreach my $nodea (@NODES) {
+ my $dsna = $DSN[$nodea];
+ foreach my $nodeb (@NODES) {
+ if ($nodea != $nodeb) {
+ next unless ($node == $nodea or $node == $nodeb);
+ my $dsnb = $DSN[$nodeb];
+ my $providerba = $VIA[$nodea][$nodeb];
+ my $providerab = $VIA[$nodeb][$nodea];
+ if (!$printed[$nodea][$nodeb] and $providerab == $nodea) {
+ print SLONIK " store path (server = $nodea, client = $nodeb, conninfo = '$dsna');\n";
+ $printed[$nodea][$nodeb] = "done";
+ }
+ if (!$printed[$nodeb][$nodea] and $providerba == $nodea) {
+ print SLONIK " store path (server = $nodeb, client = $nodea, conninfo = '$dsnb');\n";
+ $printed[$nodeb][$nodea] = "done";
+ }
+ }
+ }
+}
+
+# STORE LISTEN
+print SLONIK "\n# STORE LISTEN\n";
+foreach my $origin (@NODES) {
+ my $dsna = $DSN[$origin];
+ foreach my $receiver (@NODES) {
+ if ($origin != $receiver) {
+ my $provider = $VIA[$origin][$receiver];
+ next unless ($node == $origin or
+ $node == $receiver or
+ $node == $provider);
+ print SLONIK " store listen (origin = $origin, receiver = $receiver, provider = $provider);\n";
+ }
+ }
+}
+print SLONIK " echo 'Replication nodes prepared';\n";
+print SLONIK " echo 'Please start a slon replication daemon for each node';\n";
+close SLONIK;
+run_slonik_script($FILE);
+report_on_paths();
+
+sub generate_listen_paths {
+ my $infinity = 10000000; # Initial costs are all infinite
+ foreach my $node1 (@NODES) {
+ foreach my $node2 (@NODES) {
+ $COST[$node1][$node2] = $infinity;
+ }
+ }
+
+ # Initialize paths between parents and children, and based on them,
+ # generate initial seeding of listener paths, @VIA
+
+ foreach my $node1 (@NODES) {
+ $COST[$node1][$node1] = 0;
+ $VIA[$node1][$node1] = 0;
+ foreach my $node2 (@NODES) {
+ if ($node2 != $node1) {
+ if ((not ($PARENT[$node1] or $PARENT[$node2])) or
+ ($PARENT[$node1] and $PARENT[$node1] == $node2) or
+ ($PARENT[$node2] and $PARENT[$node2] == $node1)) {
+ $PATH[$node1][$node2] = 1;
+ $PATH[$node2][$node1] = 1;
+ # Set up a cost 1 path between them
+ # Parent to child
+ $COST[$node1][$node2] = 1;
+ $VIA[$node1][$node2] = $node1;
+
+ # Child to parent
+ $COST[$node2][$node1] = 1;
+ $VIA[$node2][$node1] = $node2;
+ }
+ }
+ }
+ }
+
+ # Now, update the listener paths...
+ # 4 level nested iteration:
+ # 1 while not done, do
+ # 2 for each node, node1
+ # 3 for each node, node2, where node2 <> node1, where we don't
+ # yet have a listener path
+ # 4 for each node node3 (<> node1 or node2),
+ # consider introducing the listener path:
+ # node1 to node2 then node2 to node3
+ # In concept, it's an O(n^4) algorithm; since the number of nodes, n,
+ # is not likely to get particularly large, it's not worth tuning
+ # further.
+ $didwork = "yes";
+ while ($didwork eq "yes") {
+ $didwork = "no";
+ foreach my $node1 (@NODES) {
+ foreach my $node3 (@NODES) {
+ if (($VIA[$node3][$node1] == 0) && ($node3 != $node1)) {
+ foreach my $node2 (@NODES) {
+ if ($PATH[$node1][$node2] && ($VIA[$node2][$node3] != 0) && ($node2 != $node3) && ($node2 != $node1)) {
+ # Consider introducing a path from n1 to n2 then n2 to n3
+ # as a cheaper alternative to going direct from n1 to n3
+ my $oldcost = $COST[$node3][$node1];
+ my $newcost = $COST[$node1][$node2] + $COST[$node2][$node3];
+ if ($newcost < $oldcost) {
+ $didwork = "yes";
+ # So we go via node 2
+ $VIA[$node3][$node1] = $node2;
+ $COST[$node3][$node1] = $newcost;
+
+ $VIA[$node1][$node3] = $node2;
+ $COST[$node1][$node3] = $newcost;
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+}
+
+sub report_on_paths {
+ print "# Configuration Summary:\n";
+ print "#\n";
+ print "# COST\n";
+ print "# ";
+ foreach my $node2 (@NODES) {
+ printf "| %3d ", $node2;
+ }
+ print "|\n# ";
+ print ("-----+" x (scalar(@NODES) + 1));
+ print "\n";
+ foreach my $node1 (@NODES) {
+ printf "# %3d ", $node1;
+ foreach my $node2 (@NODES) {
+ if ($COST[$node2][$node1] == $infinity) {
+ printf "| inf ";
+ } else {
+ printf "|%4d ", $COST[$node2][$node1];
+ }
+ }
+ print "|\n";
+ }
+ print "# \n";
+ print "# VIA\n";
+ print "# ";
+ foreach my $node2 (@NODES) {
+ printf "| %3d ", $node2;
+ }
+ print "|\n# ";
+ print ("-----+" x (scalar(@NODES) + 1));
+ print "\n";
+ foreach my $node1 (@NODES) {
+ printf "# %3d ", $node1;
+ foreach my $node2 (@NODES) {
+ printf "|%4d ", $VIA[$node2][$node1];
+ }
+ print "|\n";
+ }
+
+ print "# \n";
+ print "# PATHS\n";
+ print "# ";
+ foreach my $node2 (@NODES) {
+ printf "| %3d ", $node2;
+ }
+ print "|\n# ";
+ print ("-----+" x (scalar(@NODES) + 1));
+ print "\n";
+ foreach my $node1 (@NODES) {
+ printf "# %3d ", $node1;
+ foreach my $node2 (@NODES) {
+ printf "|%4d ", $PATH[$node2][$node1];
+ }
+ print "|\n";
+ }
+ print "\n";
+}
From cvsuser Thu Mar 10 18:08:44 2005
From: cvsuser (CVS User Account)
Date: Tue Feb 13 08:58:15 2007
Subject: [Slony1-commit] By smsimms: Document the existence of store_node.pl.
Message-ID: <20050310180842.18C00B1CE29@gborg.postgresql.org>
Log Message:
-----------
Document the existence of store_node.pl.
Modified Files:
--------------
slony1-engine/doc/adminguide:
adminscripts.sgml (r1.21 -> r1.22)
-------------- next part --------------
Index: adminscripts.sgml
===================================================================
RCS file: /usr/local/cvsroot/slony1/slony1-engine/doc/adminguide/adminscripts.sgml,v
retrieving revision 1.21
retrieving revision 1.22
diff -Ldoc/adminguide/adminscripts.sgml -Ldoc/adminguide/adminscripts.sgml -u -w -r1.21 -r1.22
--- doc/adminguide/adminscripts.sgml
+++ doc/adminguide/adminscripts.sgml
@@ -197,6 +197,10 @@
the slon sometimes stops working without becoming aware of it.
+store_node
+
+Adds a node to an existing cluster.
+subscribe_setGenerates Slonik script to subscribe a particular node to a particular replication set.
From cvsuser Thu Mar 10 21:03:19 2005
From: cvsuser (CVS User Account)
Date: Tue Feb 13 08:58:16 2007
Subject: [Slony1-commit] By smsimms: Removed the generate_listen_paths item
and discussion, since
Message-ID: <20050310210316.CE1C2B1CDE4@gborg.postgresql.org>
Log Message:
-----------
Removed the generate_listen_paths item and discussion, since it has
been implemented.
Added the following:
- The scripts are currently not compatible with "use strict". They
would probably become easier to follow if they were (particularly
with regard to figuring out where variables are defined).
- We probably don't need one script per slonik command. Which of
these can be combined, preferably without having to implement pages
of arcane command-line options.
- slon_start, slon_kill, and possibly slon_watchdog should be made
into a slon_ctl shell script, with an /etc/init.d-compatible
wrapper.
Modified Files:
--------------
slony1-engine/tools/altperl:
ToDo (r1.2 -> r1.3)
-------------- next part --------------
Index: ToDo
===================================================================
RCS file: /usr/local/cvsroot/slony1/slony1-engine/tools/altperl/ToDo,v
retrieving revision 1.2
retrieving revision 1.3
diff -Ltools/altperl/ToDo -Ltools/altperl/ToDo -u -w -r1.2 -r1.3
--- tools/altperl/ToDo
+++ tools/altperl/ToDo
@@ -6,77 +6,25 @@
with the configuration actually found on some node, add "additional"
bits, and drop obsolete bits.
-- It would seem likely that the function "generate_listen_paths()" in
- init_cluster.pl would be beneficial to port to pl/pgsql, as
- there presently isn't any capability to rebuild the listener
- paths by automatically dropping the old ones.
+ [Note: If drop_node and store_node are used, this may not be as
+ immediately necessary, but would still be nice to have.]
- At present, the configuration generated by this set of tools is
fairly fragile. If just about any sort of error is made, it is
commonly needful to drop all of the Slony schemas, thereby cleaning
_everything_ out, and restarting the configuration process from
- scratch.
-
- That certainly isn't ideal.
-
-
--------------------------------------------------------------------------------------------------
-
-More about the "generating SET LISTEN" calculation
-------------------------------------------------------------------------
-
-I have been mulling over the notion of setting up the Slonik STORE
-LISTEN(ORIGIN=a, RECEIVER=b, PROVIDER=c) configuration via a stored
-procedure, using in-the-DBMS data.
-
-The "features" of this idea:
-
-This involves having a table (view?) containing the intended parentage
-for each node, that is, which nodes point to which parents.
-
-This would allow the following good things:
-
-- Can't drop a node that has children, probably adding in other
-possible data checks
-
-- Can calculate the full "listener matrix" within pl/pgsql instead of
-doing it in Perl (take a look at init_cluster.pl, subroutine
-generate_listen_paths()).
-
-My preliminary thinking about it was pointing to there being a pretty
-elegant way to do this using SQL queries that might be more readable
-than the dynamic programming formulation embedded in that subroutine.
-(I didn't write out the Bellman equations, but took a look back at my
-old optimization texts ;-).)
-
-The primary problem that this solves is to create those STORE LISTEN()
-definitions, which get pretty involved to generate by hand if you get
-more than three nodes.
-
-In doing some further thinking, I noticed a couple of conspicuous
-challenges:
-
-1. There can be no fixed association with sets, as the sl_listen table
-does not contain set fields, and different sets can use differently
-shaped subscription trees.
-
-(I think users would be doing something pretty stupid to have _wildly_
-different arrangements for different sets, but I still have to support
-it...)
-
-2. The tree cannot be based on subscriptions because it needs to exist
-before any subscriptions are established
-
-In effect, I have no fixed place where I can get the information at
-the point at which I most need it.
-
-Once all nodes are subscribed, I could use subscription information to
-weight the cost functions, but I need the data BEFORE we do anything.
-
-This isn't pointing yet to a good approach to "seeding" it; if someone
-has some inspiration, let me know...
---
-Christopher Browne
-
-
--------------------------------------------------------------------------------------------------
+ scratch. That certainly isn't ideal, and is partially preventable
+ by proofreading the slonik scripts before running them, but more
+ robust error-handling (and prevention) would be welcome.
+
+- The scripts are currently not compatible with "use strict". They
+ would probably become easier to follow if they were (particularly
+ with regard to figuring out where variables are defined).
+
+- We probably don't need one script per slonik command. Which of
+ these can be combined, preferably without having to implement pages
+ of arcane command-line options.
+
+- slon_start, slon_kill, and possibly slon_watchdog should be made
+ into a slon_ctl shell script, with an /etc/init.d-compatible
+ wrapper.
From cvsuser Thu Mar 10 21:05:56 2005
From: cvsuser (CVS User Account)
Date: Tue Feb 13 08:58:16 2007
Subject: [Slony1-commit] By smsimms: Rewrote the README file to reflect
changes since 1.0.5.
Message-ID: <20050310210555.36AFCB1CDE4@gborg.postgresql.org>
Log Message:
-----------
Rewrote the README file to reflect changes since 1.0.5.
Modified Files:
--------------
slony1-engine/tools/altperl:
README (r1.11 -> r1.12)
-------------- next part --------------
Index: README
===================================================================
RCS file: /usr/local/cvsroot/slony1/slony1-engine/tools/altperl/README,v
retrieving revision 1.11
retrieving revision 1.12
diff -Ltools/altperl/README -Ltools/altperl/README -u -w -r1.11 -r1.12
--- tools/altperl/README
+++ tools/altperl/README
@@ -1,129 +1,80 @@
README
$Id$
-Christopher Browne
-Database Administrator
-Afilias Canada
+Christopher Browne, Afilias Canada
+Steve Simms, Technically Sound
-This is a "second system" set of scripts for managing a set of Slony-I
-instances.
+The altperl scripts provide an alternate method of managing Slony-I,
+generating slonik scripts and monitoring slon daemons. They support
+an arbitrary number of Slony-I nodes in clusters of various shapes and
+sizes.
-Unlike the shell scripts that have previously been used, these scripts
-support having an arbitrary number of Slony-I nodes. They are
-configured in the [cluster].nodes file (e.g. - environment variable
-SLONYNODES) by calling add_node() once indicating the configuration
-for each node that is needed.
+To install the scripts, run "make" and "make install" in this
+directory. The files will be installed under the --prefix you passed
+to configure.
-The following configuration is set up:
+Enter a complete description of your cluster configuration (both nodes
+and sets) in slon_tools.conf. The provided slon_tools.conf-sample
+contains documentation about each of the available options.
- Host Level Configuration
---------------------------------------
+If you want to support multiple clusters, you can create multiple
+slon_tools.conf files and specify which one to use in any of the
+scripts by passing the --config option.
-This configuration will normally apply to all clusters being managed
-on a particular host, so it would probably make sense to modify it
-directly in slon_tools.conf.
- $APACHE_ROTATOR is an optional reference to the location of the
- Apache log rotator; if you set it to a path to an Apache "rotatelog"
- program, that will be used to keep log file size down to a "dull
- roar".
-
+For the impatient: Steps to get started
+---------------------------------------
- $LOGDIR is the directory in which to put log files. The script will
- generate a subdirectory for each node.
+1. From the top-level source directory:
- Node Level Configuration
---------------------------------------
+ ./configure --prefix=/usr/local/slony --with-perltools
+ make
+ make install
-This configuration should be set up in the file represented in the
-environment variable SLONYNODES.
+2. Dump the schema from one database to another:
- $CLUSTER_NAME represents the name of the cluster. In each database
- involved in the replication set, you will find the namespace
- "_$CLUSTER_NAME" that contains Slony-I's configuration tables
+ pg_dump --schema-only --host=server1 source_db | psql --host=server2 dest_db
- $MASTERNODE is the number of the "master" node. It defaults to 1, if
- not otherwise set.
+3. Modify /usr/local/slony/etc/slon_tools.conf to reflect your setup.
- Set Level Configuration
------------------------------------
+4. Initialize the Slony-I cluster:
-The configuration of the tables, sequences and such are stored in the
-file pointed to by the environment variable SLONYSET, in the following
-Perl variables:
+ /usr/local/slony/bin/init_cluster
- $TABLE_ID - where to start numbering table IDs
- $SEQUENCE_ID - where to start numbering sequence IDs
+ Verify that the output looks reasonable, then run:
- The table IDs are required to be unique across all sets in a
- Slony-I cluster, so if you add extra sets, you need to set
- $TABLE_ID to a value that won't conflict, typically something
- higher than largest value used in earlier sets.
+ /usr/local/slony/bin/init_cluster | /usr/local/pgsql/bin/slonik
- @PKEYEDTABLES contains all of the tables that have primary keys
+5. Start up slon daemons for both servers:
- %KEYEDTABLES contains tables with candidate primary keys associated
- with the index you _want_.
+ /usr/local/slony/bin/slon_start node1
+ /usr/local/slony/bin/slon_start node2
- @SERIALTABLES contains tables that do not have a unique key
- to which Slony-I will need to add and populate
- a unique key
+6. Set up set 1 on the "master" node:
- @SEQUENCES lists all of the application sequences that are to be
- replicated.
+ /usr/local/slony/bin/create_set set1
-The values in slon_tools.conf are "hardcoded" as far as the tools are
-concerned.
+7. Subscribe node 2 to set 1:
-To make this more flexible, slon_tools.conf also looks at the
-environment variables SLONYNODES and SLONYSET as alternative sources
-for configuration.
+ /usr/local/slony/bin/subscribe_set set1 node2
-That way, you may do something like:
+After some period of time (from a few seconds to a few days depending
+on the size of the set), you should have a working replica of the
+tables in set 1 on node 2.
- for i in `seq 10`; do
- SLONYNODES="./node$i.config" ./init_cluster.pl
- done
-- Such an "alternative cluster.nodes" might import Pg, and do queries
-against a database to be replicated in order to populate the sets of
-tables and such.
+Alternate Configuration Method
+------------------------------
-- The "alternative cluster.nodes" might search some sort of 'registry' for
-the set of nodes to be replicated.
+The slon_tools.conf file is interpreted by Perl, so you could modify
+it to query a database to determine the configuration. (Beware of
+chicken-and-egg scenarios in doing this, however!)
-Parallel to SLONYNODES is the environment variable SLONYSET, which
-controls the contents of replication sets. It looks as though it
-should be usual for there to be just one "intentionally active"
-subscription set at any given time, with other sets being set up in
-order to be merged with the "main" set.
-Steps to start up replication
--------------------------------
+For More Information
+--------------------
-0. Dump from source system to destination
- pg_dump -s -c flex1 | psql flex2
+There are numerous other scripts for maintaining a Slony cluster. To
+learn more about any of them, run "tool_name --help".
-1. Initializes the Slony cluster
- ./init_cluster.pl
-
- This sets up a FULL cross-join set of paths and listeners, doing
- something of a shortest-path evaluation of which "store listens" to
- set up.
-
-2. Start up slon servers for both DB instances
- ./slon_start.pl node1
- ./slon_start.pl node2
-
-3. Sets up all the tables for "set 1" for FlexReg 2.0
- ./create_set.pl set1
-
-4. Subscribe Node #2 to Set #1
- ./subscribe_set.pl set1 node2
- This is the Big One...
-
-That SHOULD be it, although "should" is probably too strong a word :-)
-
-There are numerous other tools for adding/dropping Slony-I
-configuration, and scripts that might manage simple forms of
-switchover/failover.
+See also the Slony-I administration guide in the doc directory.
From cvsuser Thu Mar 10 23:06:16 2005
From: cvsuser (CVS User Account)
Date: Tue Feb 13 08:58:16 2007
Subject: [Slony1-commit] By cbbrowne: Modified monitoring script so that it
looks for some
Message-ID: <20050310230613.8BD1AB1C759@gborg.postgresql.org>
Log Message:
-----------
Modified monitoring script so that it looks for some particular
problem values, and sends out email based on finding problems.
Modified Files:
--------------
slony1-engine/tools:
test_slony_state.pl (r1.1 -> r1.2)
slony1-engine/doc/adminguide:
monitoring.sgml (r1.16 -> r1.17)
-------------- next part --------------
Index: test_slony_state.pl
===================================================================
RCS file: /usr/local/cvsroot/slony1/slony1-engine/tools/test_slony_state.pl,v
retrieving revision 1.1
retrieving revision 1.2
diff -Ltools/test_slony_state.pl -Ltools/test_slony_state.pl -u -w -r1.1 -r1.2
--- tools/test_slony_state.pl
+++ tools/test_slony_state.pl
@@ -1,8 +1,8 @@
#!perl # -*- perl -*-
# $Id$
# Christopher Browne
-# Copyright 2004
-# Afilias Canada
+# Copyright 2005
+# PostgreSQL Global Development Group
# This script, given DSN parameters to access a Slony-I cluster,
# submits a number of queries to test the state of the nodes in the
@@ -11,11 +11,12 @@
use Pg;
use Getopt::Long;
#use strict;
+my %PROBLEMS;
my $sleep_seconds = 4;
my $goodopts = GetOptions("help", "database=s", "host=s", "user=s", "cluster=s",
- "password=s", "port=s");
+ "password=s", "port=s", "recipient=s", "mailprog=s");
if (defined($opt_help)) {
show_usage();
}
@@ -28,6 +29,8 @@
$password = $opt_password if (defined($opt_password));
$host = $opt_host if (defined($opt_host));
$cluster = $opt_cluster if (defined($opt_cluster));
+$recipient = $opt_recipient if (defined($opt_recipient));
+$mailprog = $opt_mailprog if (defined($opt_mailprog));
#DBI: my $initialDSN = "dbi:Pg:dbname=$database;host=$host;port=$port";
my $initialDSN = "dbname=$database host=$host port=$port";
@@ -35,9 +38,6 @@
print "DSN: $initialDSN\n===========================\n";
-# DBI: my $dbh = DBI->connect($initialDSN, $user, $password,
-# {RaiseError => 0, PrintError => 0, AutoCommit => 1});
-# die "connect: $DBI::errstr" if ( !defined($dbh) || $DBI::err );
my $dbh = Pg::connectdb($initialDSN);
print "Rummage for DSNs\n=============================\n";
@@ -65,6 +65,8 @@
test_node($node, $dsn);
}
+report_on_problems ();
+
sub test_node {
my ($node, $dsn) = @_;
@@ -81,12 +83,45 @@
Tuples: $reltuples
};
+ my $HILISTENPAGES = 5000;
+ if ($relpages > $HILISTENPAGES) {
+ add_problem ($node, "pg_listener relpages high - $relpages",
+ qq{Number of pages in table pg_listener is $relpages
+This is higher than the warning level of $HILISTENPAGES.
+
+Perhaps a long running transaction is preventing pg_listener from
+being vacuumed out?
+});
+ }
+
+ my $HILISTENTUPLES = 200000;
+ if ($reltuples > $HILISTENTUPLES) {
+ add_problem ($node, "pg_listener reltuples high - $reltuples",
+ qq{Number of tuples in system table pg_listener is $reltuples.
+This is higher than the warning level of $HILISTENTUPLES.
+
+Perhaps a long running transaction is preventing pg_listener from
+being vacuumed out?
+});
+ }
+
+ my $HISLTUPLES=200000;
print "\nSize Tests\n================================================\n";
my $sizequeries = qq{select relname, relpages, reltuples from pg_catalog.pg_class where relname in ('sl_log_1', 'sl_log_2', 'sl_seqlog') order by relname;};
$res = $dbh->exec($sizequeries);
while (my @row = $res->fetchrow) {
my ($relname, $relpages, $reltuples) = @row;
printf "%15s %8d %9f\n", $relname, $relpages, $reltuples;
+ if ($reltuples > $HISLTUPLES) {
+ add_problem($node, "$relname tuples = $reltuples > $HISLTUPLES",
+ qq{Number of tuples in Slony-I table $relname is $reltuples which
+exceeds $HISLTUPLES.
+
+You may wish to investigate whether or not a node is down, or perhaps
+if sl_confirm entries have not been propagating properly.
+});
+
+ }
}
print "\nListen Path Analysis\n===================================================\n";
@@ -108,11 +143,21 @@
where li_origin = origin and li_receiver = receiver);
};
$res = $dbh->exec($missing_paths);
+ my $allmissingpaths;
while (my @row = $res->fetchrow) {
my ($origin, $receiver) = @row;
- printf "(origin,receiver) where there is exists a direct path missing in sl_listen: (%d,%d)\n",
+ my $string = sprintf "(origin,receiver) where there is exists a direct path missing in sl_listen: (%d,%d)\n",
$origin, $receiver;
+ print $string;
$listenproblems++;
+ $allmissingpaths .= $string;
+ }
+ if ($allmissingpaths) {
+ add_problem($node, "Missing sl_listen paths", qq{$allmissingpaths
+
+Please check contents of table sl_listen; some STORE LISTEN requests may be
+necessary.
+});
}
# Each subscriber node must have a direct listen path
@@ -124,7 +169,13 @@
$res = $dbh->exec($no_direct_path);
while (my @row = $res->fetchrow) {
my ($set, $provider, $receiver) = @row;
- printf "No direct path found for set %5d from provider %5d to receiver %5d\n", $set, $provider, $receiver;
+ my $string = sprintf "No direct path found for set %5d from provider %5d to receiver %5d\n", $set, $provider, $receiver;
+ print $string;
+ add_problem($node, "Missing path from $provider to $receiver", qq{Missing sl_listen entry - $string
+
+Please check contents of table sl_listen; some STORE LISTEN requests may be
+necessary.
+});
$listenproblems++;
}
@@ -139,16 +190,28 @@
printf "%7s %9s %9s %12s %12s\n", "Origin", "Min SYNC", "Max SYNC", "Min SYNC Age", "Max SYNC Age";
print "================================================================================\n";
+ my $WANTAGE = "00:30:00";
my $event_summary = qq{
select ev_origin, min(ev_seqno), max(ev_seqno),
date_trunc('minutes', min(now() - ev_timestamp)),
- date_trunc('minutes', max(now() - ev_timestamp))
+ date_trunc('minutes', max(now() - ev_timestamp)),
+ min(now() - ev_timestamp) > '$WANTAGE' as agehi
from _$cluster.sl_event group by ev_origin;
};
$res = $dbh->exec($event_summary);
while (my @row = $res->fetchrow) {
- my ($origin, $minsync, $maxsync, $minage, $maxage) = @row;
- printf "%7s %9d %9d %12s %12s\n", $origin, $minsync, $maxsync, $minage, $maxage;
+ my ($origin, $minsync, $maxsync, $minage, $maxage, $agehi) = @row;
+ printf "%7s %9d %9d %12s %12s %4s\n", $origin, $minsync, $maxsync, $minage, $maxage, $agehi;
+ if ($agehi eq 't') {
+ add_problem($origin, "Events not propagating to node $origin",
+ qq{Events not propagating quickly in sl_event -
+For origin node $origin, earliest propagated event of age $minage > $WANTAGE
+
+Are slons running for both nodes?
+
+Could listen paths be missing so that events are not propagating?
+});
+ }
}
print "\n";
@@ -156,11 +219,13 @@
print "Summary of sl_confirm aging\n";
printf "%9s %9s %9s %9s %12s %12s\n", "Origin", "Receiver", "Min SYNC", "Max SYNC", "Age of latest SYNC", "Age of eldest SYNC";
print "=================================================================================\n";
+ my $WANTCONFIRM = "00:30:00";
my $confirm_summary = qq{
select con_origin, con_received, min(con_seqno) as minseq,
max(con_seqno) as maxseq, date_trunc('minutes', min(now()-con_timestamp)) as age1,
- date_trunc('minutes', max(now()-con_timestamp)) as age2
+ date_trunc('minutes', max(now()-con_timestamp)) as age2,
+ min(now() - con_timestamp) > '$WANTCONFIRM' as tooold
from _$cluster.sl_confirm
group by con_origin, con_received
order by con_origin, con_received;
@@ -168,8 +233,20 @@
$res = $dbh->exec($confirm_summary);
while (my @row = $res->fetchrow) {
- my ($origin, $receiver, $minsync, $maxsync, $minage, $maxage) = @row;
- printf "%9s %9s %9s %9s %12s %12s\n", $origin, $receiver, $minsync, $maxsync, $minage, $maxage;
+ my ($origin, $receiver, $minsync, $maxsync, $minage, $maxage, $agehi) = @row;
+ printf "%9s %9s %9s %9s %12s %12s %4s\n", $origin, $receiver, $minsync, $maxsync, $minage, $maxage, $agehi;
+ if ($agehi eq 't') {
+ add_problem($origin, "Confirmations not propagating from $origin to $receiver",
+ qq{Confirmations not propagating quickly in sl_confirm -
+
+For origin node $origin, receiver node $receiver, earliest propagated
+confirmation has age $minage > $WANTCONFIRM
+
+Are slons running for both nodes?
+
+Could listen paths be missing so that confirmations are not propagating?
+});
+ }
}
print "\n";
@@ -178,10 +255,11 @@
printf "%15s %15s %15s %12s %20s\n", "Database", "PID", "User", "Query Age", "Query";
print "================================================================================\n";
+ my $ELDERLY_TXN = "01:30:00";
my $old_conn_query = qq{
select datname, procpid, usename, date_trunc('minutes', now() - query_start), substr(current_query,0,20)
from pg_stat_activity
- where (now() - query_start) > '1:30'::interval and
+ where (now() - query_start) > '$ELDERLY_TXN'::interval and
current_query <> ''
order by query_start;
};
@@ -190,8 +268,14 @@
while (my @row = $res->fetchrow) {
my ($db, $pid, $user, $age, $query) = @row;
printf "%15s %15d %15s %12s %20s\n", $db, $pid, $user, $age, $query;
+ add_problem($origin, "Old Transactions Kept Open",
+ qq{Old Transaction still running with age $age > $ELDERLY_TXN
+
+Query: $query
+});
}
print "\n";
+
}
sub show_usage {
@@ -200,5 +284,26 @@
chomp $inerr;
print $inerr, "\n";
}
- die "$0 --host --database --user --cluster --port=integer --password";
+ die "$0 --host --database --user --cluster --port=integer --password --recipient --mailprog";
+}
+
+sub add_problem {
+ my ($node, $short, $long) = @_;
+ $PROBLEMS{"$node $short"} = $long;
+}
+
+sub report_on_problems {
+ my ($totalproblems, $message);
+ foreach my $key (sort keys %PROBLEMS) {
+ $totalproblems++;
+ $message .= "\nNode: $key\n================================================\n" . $PROBLEMS{$key} . "\n";
+ }
+ if ($totalproblems) {
+ open(MAIL, "|$mailprog -s \"Slony State Test Warning - Cluster $cluster\" $recipient");
+ print MAIL "\n";
+ print MAIL $message;
+ close (MAIL);
+ print "\n\nSending message thus - |$mailprog -s \"Slony State Test Warning - Cluster $cluster\" $recipient\n";
+ print "Message:\n\n$message\n";
+ }
}
Index: monitoring.sgml
===================================================================
RCS file: /usr/local/cvsroot/slony1/slony1-engine/doc/adminguide/monitoring.sgml,v
retrieving revision 1.16
retrieving revision 1.17
diff -Ldoc/adminguide/monitoring.sgml -Ldoc/adminguide/monitoring.sgml -u -w -r1.16 -r1.17
--- doc/adminguide/monitoring.sgml
+++ doc/adminguide/monitoring.sgml
@@ -71,7 +71,10 @@
You specify arguments including ,
, ,
, , and
- to connect to any of the nodes on a cluster.
+ to connect to any of the nodes on a cluster.
+You also specify a command (which should be
+a program equivalent to Unix
+mailx) and a recipient of email.
The script then rummages through
to find all of the nodes in the cluster, and the DSNs to allow it to,
@@ -116,9 +119,8 @@
- The script does not yet do much in the way of diagnosis work;
-it should be enhanced to be able to, based on some parameterization,
-notify someone of those problems it encounters.
+ The script does some diagnosis work based on parameters in the
+script; if you don't like the values, pick your favorites!
From cvsuser Thu Mar 10 23:11:28 2005
From: cvsuser (CVS User Account)
Date: Tue Feb 13 08:58:16 2007
Subject: [Slony1-commit] By cbbrowne: signal handling watchdog using forked
processes - Frank
Message-ID: <20050310231126.C4FDAB1CE28@gborg.postgresql.org>
Log Message:
-----------
signal handling watchdog using forked processes - Frank Thompson
Slony-I 1.1 Change Request:
The problem at hand is related to signal handling differences
in multithreaded programs between different operating
systems. In the slon process, it is intended that only the
"main" thread is dealing with signals and that it uses the
regular thread communication like mutexes and condition
variables to control all the other working threads.
That mutex and condition var communication sometimes can lock
up, which causes the entire slon daemon to freeze. This can in
extreme cases even happen after one of the working threads
sent the main thread a signal to restart the slon process.
What I had in mind to fix this is to have the program
actually fork() very early on. The parent process will then be
the only one dealing with signals, while the child ignores
all signals entirely. The parent and the master thread of the
child (which is the current slon) will communicate over a pipe
or a socketpair. This way, the parent could detect that the
slon has stopped responding altogether and can issue a signal 9 to
clean up the situation.
In other words, building a watchdog process right into the
slon executable.
Summary of Changes:
slon main() will now fork() at the start and allow the parent
process to handle cleanup, signal and termination of the child
process. The child process is the scheduler which will talk
to the parent (watchdog) process and vice versa via two sets
of socket pairs. When shutting down the child process in the
case of restart or termination, non-blocking reads and writes
are used to avoid lockups. If the scheduler process fails to
shut down nicely, the parent will SIGKILL it to ensure timely
operation within the signal handler.
globals:
slon.c
------
int watchdog_pipe[2]; // socket pair to talk with child process
int sched_wakeuppipe[2]; // socket pair to talk with scheduler and parent process
runtime_config.c
----------------
pid_t slon_pid; // current pid
pid_t slon_ppid; // parent pid
pid_t slon_cpid; // child pid
macros:
slon_abort() and slon_restart() will now observe which process
in the tree they will act upon, parent or child
#define slon_abort() \
do { \
kill((slon_ppid == 0 ? slon_pid : slon_ppid), SIGTERM); \
pthread_exit(NULL); \
} while (0)
#define slon_restart() \
do { \
kill((slon_ppid == 0 ? slon_pid : slon_ppid), SIGHUP); \
} while (0)
functions:
slon.c
------
static void sighandler(int signo); // new slon parent process signal handler
static void main_sigalrmhandler(int signo); // new scheduler alarm signal handler
static void slon_kill_child(void); // new pipe notify based child termination function
Modified Files:
--------------
slony1-engine/src/slon:
cleanup_thread.c (r1.20 -> r1.21)
local_listen.c (r1.29 -> r1.30)
runtime_config.c (r1.23 -> r1.24)
scheduler.c (r1.18 -> r1.19)
slon.c (r1.45 -> r1.46)
slon.h (r1.47 -> r1.48)
-------------- next part --------------
Index: scheduler.c
===================================================================
RCS file: /usr/local/cvsroot/slony1/slony1-engine/src/slon/scheduler.c,v
retrieving revision 1.18
retrieving revision 1.19
diff -Lsrc/slon/scheduler.c -Lsrc/slon/scheduler.c -u -w -r1.18 -r1.19
--- src/slon/scheduler.c
+++ src/slon/scheduler.c
@@ -15,10 +15,10 @@
#include
#include
+#include
#include
#include
#include
-#include
#include
#include
#include
@@ -36,7 +36,6 @@
#define PF_LOCAL PF_UNIX
#endif
-
/*
* ---------- Static data ----------
*/
@@ -45,7 +44,6 @@
static int sched_numfd = 0;
static fd_set sched_fdset_read;
static fd_set sched_fdset_write;
-static int sched_wakeuppipe[2];
static SlonConn *sched_waitqueue_head = NULL;
static SlonConn *sched_waitqueue_tail = NULL;
@@ -55,17 +53,14 @@
static pthread_mutex_t sched_master_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t sched_master_cond = PTHREAD_COND_INITIALIZER;
-static sigset_t sched_sigset;
-
/*
* ---------- Local functions ----------
*/
static void *sched_mainloop(void *);
-static void sched_sighandler(int signo);
-static void sched_sighuphandler(int signo);
static void sched_add_fdset(int fd, fd_set * fds);
static void sched_remove_fdset(int fd, fd_set * fds);
+static void sched_shutdown();
/*
@@ -85,16 +80,6 @@
sched_main_thread = pthread_self();
/*
- * Block signals. Since sched_start_mainloop() is called before any other
- * thread is created, this will be inherited by all threads in the system.
- */
- sigemptyset(&sched_sigset);
- sigaddset(&sched_sigset, SIGHUP);
- sigaddset(&sched_sigset, SIGINT);
- sigaddset(&sched_sigset, SIGTERM);
- pthread_sigmask(SIG_BLOCK, &sched_sigset, NULL);
-
- /*
* Grab the scheduler master lock
*/
if (pthread_mutex_lock(&sched_master_lock) < 0)
@@ -153,32 +138,6 @@
int
sched_wait_mainloop(void)
{
- int signo;
-
- /*
- * Wait for signal.
- */
- sigemptyset(&sched_sigset);
- sigaddset(&sched_sigset, SIGHUP);
- sigaddset(&sched_sigset, SIGINT);
- sigaddset(&sched_sigset, SIGTERM);
- sigwait(&sched_sigset, &signo);
-
- sigemptyset(&sched_sigset);
- pthread_sigmask(SIG_SETMASK, &sched_sigset, NULL);
-
- switch (signo)
- {
- case SIGHUP:
- sched_sighuphandler(signo);
- break;
-
- case SIGINT:
- case SIGTERM:
- sched_sighandler(signo);
- break;
- }
-
/*
* Wait for the scheduler to finish.
*/
@@ -413,17 +372,6 @@
FD_ZERO(&sched_fdset_read);
FD_ZERO(&sched_fdset_write);
- /*
- * Create a pipe used by the main thread to cleanly wakeup the scheduler
- * on signals.
- */
- if (pipe(sched_wakeuppipe) < 0)
- {
- perror("sched_mainloop: pipe()");
- sched_status = SCHED_STATUS_ERROR;
- pthread_cond_signal(&sched_master_cond);
- pthread_exit(NULL);
- }
sched_add_fdset(sched_wakeuppipe[0], &sched_fdset_read);
/*
@@ -575,6 +523,11 @@
sched_status = SCHED_STATUS_ERROR;
break;
}
+
+ if (buf[0] == 'p')
+ {
+ sched_status = SCHED_STATUS_SHUTDOWN;
+ }
}
/*
@@ -644,9 +597,13 @@
* close the scheduler heads-up socket pair so nobody will think we're
* listening any longer.
*/
+
+ /*
close(sched_wakeuppipe[0]);
+ sched_wakeuppipe[0] = -1;
close(sched_wakeuppipe[1]);
- sched_wakeuppipe[0] = sched_wakeuppipe[1] = -1;
+ sched_wakeuppipe[1] = -1;
+ */
/*
* Then we cond_signal all connections that are in the queue.
@@ -692,7 +649,7 @@
* conditions with signals. ----------
*/
static void
-sched_sighandler(int signo)
+sched_shutdown()
{
/*
* Lock the master mutex and make sure that we are the main thread
@@ -733,13 +690,6 @@
}
-static void
-sched_sighuphandler(int signo)
-{
- slon_restart_request = true;
- sched_sighandler(signo);
-}
-
/*
* ---------- sched_add_fdset
Index: slon.c
===================================================================
RCS file: /usr/local/cvsroot/slony1/slony1-engine/src/slon/slon.c,v
retrieving revision 1.45
retrieving revision 1.46
diff -Lsrc/slon/slon.c -Lsrc/slon/slon.c -u -w -r1.45 -r1.46
--- src/slon/slon.c
+++ src/slon/slon.c
@@ -32,12 +32,12 @@
/*
* ---------- Global data ----------
*/
-int slon_restart_request = false;
+int watchdog_pipe[2];
+int sched_wakeuppipe[2];
pthread_mutex_t slon_wait_listen_lock = PTHREAD_MUTEX_INITIALIZER;
pthread_cond_t slon_wait_listen_cond = PTHREAD_COND_INITIALIZER;
-
/*
* ---------- Local data ----------
*/
@@ -51,11 +51,16 @@
static pthread_t main_thread;
static char *const *main_argv;
-static void sigalrmhandler(int signo);
+
+static void sighandler(int signo);
+static void main_sigalrmhandler(int signo);
+static void slon_kill_child(void);
int slon_log_level;
char *pid_file;
char *archive_dir = NULL;
+int child_status;
+
/*
* ---------- main ----------
@@ -72,13 +77,15 @@
PGconn *startup_conn;
int c;
int errors = 0;
+ int signo;
+ char pipe_c;
+ pid_t pid;
extern int optind;
extern char *optarg;
-
+ struct sigaction act;
InitializeConfOptions();
-
while ((c = getopt(argc, argv, "f:a:d:s:t:g:c:p:o:hv")) != EOF)
{
switch (c)
@@ -146,6 +153,9 @@
* identifier
*/
slon_pid = getpid();
+ slon_cpid = 0;
+ slon_ppid = 0;
+ main_argv = argv;
if ((char *)argv[optind])
{
@@ -214,7 +224,7 @@
}
if (PQstatus(startup_conn) != CONNECTION_OK)
{
- slon_log(SLON_FATAL, "main: Cannot connect to local database - %s",
+ slon_log(SLON_FATAL, "main: Cannot connect to local database - %s\n",
PQerrorMessage(startup_conn));
PQfinish(startup_conn);
slon_exit(-1);
@@ -252,6 +262,81 @@
}
}
+ /*
+ * Pipes to be used as communication devices between the parent (watchdog)
+ * and child (worker) processes.
+ */
+ if (pipe(watchdog_pipe) < 0)
+ {
+ slon_log(SLON_FATAL, "slon: parent pipe create failed -(%d) %s\n", errno,strerror(errno));
+ slon_exit(-1);
+ }
+ if (pipe(sched_wakeuppipe) < 0)
+ {
+ slon_log(SLON_FATAL, "slon: sched_wakeuppipe create failed -(%d) %s\n", errno,strerror(errno));
+ slon_exit(-1);
+ }
+
+ /*
+ * Fork here to allow parent process to trap signals and child process to
+ * handle real processing work creating a watchdog and worker process
+ * hierarchy
+ */
+ if ((slon_cpid = fork()) < 0)
+ {
+ slon_log(SLON_FATAL, "Fork failed -(%d) %s\n", errno,strerror(errno));
+ slon_exit(-1);
+ }
+ else if (slon_cpid == 0) /* child */
+ {
+ slon_pid = getpid();
+ slon_ppid = getppid();
+
+ slon_log(SLON_DEBUG2, "main: main process started\n");
+ /*
+ * Wait for the parent process to initialize
+ */
+ if (read(watchdog_pipe[0], &pipe_c, 1) != 1)
+ {
+ slon_log(SLON_FATAL, "main: read from parent pipe failed -(%d) %s\n", errno,strerror(errno));
+ slon_exit(-1);
+ }
+
+ if (pipe_c != 'p')
+ {
+ slon_log(SLON_FATAL, "main: incorrect data from parent pipe -(%c)\n",pipe_c);
+ slon_exit(-1);
+ }
+
+ slon_log(SLON_DEBUG2, "main: begin signal handler setup\n");
+
+ if (signal(SIGHUP,SIG_IGN) == SIG_ERR)
+ {
+ slon_log(SLON_FATAL, "slon: SIGHUP signal handler setup failed -(%d) %s\n", errno,strerror(errno));
+ slon_exit(-1);
+ }
+ if (signal(SIGINT,SIG_IGN) == SIG_ERR)
+ {
+ slon_log(SLON_FATAL, "slon: SIGINT signal handler setup failed -(%d) %s\n", errno,strerror(errno));
+ slon_exit(-1);
+ }
+ if (signal(SIGTERM,SIG_IGN) == SIG_ERR)
+ {
+ slon_log(SLON_FATAL, "slon: SIGTERM signal handler setup failed -(%d) %s\n", errno,strerror(errno));
+ slon_exit(-1);
+ }
+ if (signal(SIGCHLD,SIG_IGN) == SIG_ERR)
+ {
+ slon_log(SLON_FATAL, "slon: SIGCHLD signal handler setup failed -(%d) %s\n", errno,strerror(errno));
+ slon_exit(-1);
+ }
+ if (signal(SIGQUIT,SIG_IGN) == SIG_ERR)
+ {
+ slon_log(SLON_FATAL, "slon: SIGQUIT signal handler setup failed -(%d) %s\n", errno,strerror(errno));
+ slon_exit(-1);
+ }
+
+ slon_log(SLON_DEBUG2, "main: end signal handler setup\n");
/*
* Start the event scheduling system
@@ -270,7 +355,7 @@
"set transaction isolation level serializable;");
if (PQresultStatus(res) != PGRES_COMMAND_OK)
{
- slon_log(SLON_FATAL, "Cannot start transaction - %s",
+ slon_log(SLON_FATAL, "Cannot start transaction - %s\n",
PQresultErrorMessage(res));
PQclear(res);
slon_exit(-1);
@@ -292,7 +377,7 @@
res = PQexec(startup_conn, dstring_data(&query));
if (PQresultStatus(res) != PGRES_TUPLES_OK)
{
- slon_log(SLON_FATAL, "main: Cannot get node list - %s",
+ slon_log(SLON_FATAL, "main: Cannot get node list - %s\n",
PQresultErrorMessage(res));
PQclear(res);
dstring_free(&query);
@@ -342,7 +427,7 @@
res = PQexec(startup_conn, dstring_data(&query));
if (PQresultStatus(res) != PGRES_TUPLES_OK)
{
- slon_log(SLON_FATAL, "main: Cannot get path config - %s",
+ slon_log(SLON_FATAL, "main: Cannot get path config - %s\n",
PQresultErrorMessage(res));
PQclear(res);
dstring_free(&query);
@@ -373,7 +458,7 @@
res = PQexec(startup_conn, dstring_data(&query));
if (PQresultStatus(res) != PGRES_TUPLES_OK)
{
- slon_log(SLON_FATAL, "main: Cannot get set config - %s",
+ slon_log(SLON_FATAL, "main: Cannot get set config - %s\n",
PQresultErrorMessage(res));
PQclear(res);
dstring_free(&query);
@@ -400,7 +485,7 @@
res = PQexec(startup_conn, dstring_data(&query));
if (PQresultStatus(res) != PGRES_TUPLES_OK)
{
- slon_log(SLON_FATAL, "main: Cannot get subscription config - %s",
+ slon_log(SLON_FATAL, "main: Cannot get subscription config - %s\n",
PQresultErrorMessage(res));
PQclear(res);
dstring_free(&query);
@@ -429,7 +514,7 @@
res = PQexec(startup_conn, dstring_data(&query));
if (PQresultStatus(res) != PGRES_TUPLES_OK)
{
- slon_log(SLON_FATAL, "main: Cannot get last local eventid - %s",
+ slon_log(SLON_FATAL, "main: Cannot get last local eventid - %s\n",
PQresultErrorMessage(res));
PQclear(res);
dstring_free(&query);
@@ -453,7 +538,7 @@
res = PQexec(startup_conn, "rollback transaction;");
if (PQresultStatus(res) != PGRES_COMMAND_OK)
{
- slon_log(SLON_FATAL, "main: Cannot rollback transaction - %s",
+ slon_log(SLON_FATAL, "main: Cannot rollback transaction - %s\n",
PQresultErrorMessage(res));
PQclear(res);
slon_exit(-1);
@@ -532,10 +617,10 @@
* Wait for all remote threads to finish
*/
main_thread = pthread_self();
- main_argv = argv;
- signal(SIGALRM, sigalrmhandler);
+ signal(SIGALRM, main_sigalrmhandler);
alarm(20);
+ slon_log(SLON_DEBUG2, "main: wait for remote threads\n");
rtcfg_joinAllRemoteThreads();
alarm(0);
@@ -560,53 +645,222 @@
slon_log(SLON_ERROR, "main: cannot join snmpThread - %s\n",
strerror(errno));
#endif
- if (slon_restart_request)
+
+ /*
+ * Tell parent that worker is done
+ */
+ slon_log(SLON_DEBUG2, "main: notify parent that worker is done\n");
+
+ if (write(watchdog_pipe[1], "c", 1) != 1)
{
- slon_log(SLON_DEBUG1, "main: restart requested\n");
- execvp(argv[0], argv);
- slon_log(SLON_FATAL,
- "main: cannot restart via execvp(): %s\n", strerror(errno));
- exit(-1);
+ slon_log(SLON_FATAL, "main: write to watchdog pipe failed -(%d) %s\n", errno,strerror(errno));
+ slon_exit(-1);
}
+ slon_log(SLON_DEBUG1, "main: done\n");
+
+ exit(0);
+ }
+ else /* parent */
+ {
+ slon_log(SLON_DEBUG2, "slon: watchdog process started\n");
+
/*
- * That's it.
+ * Install signal handlers
*/
- slon_log(SLON_DEBUG1, "main: done\n");
- return 0;
+
+ slon_log(SLON_DEBUG2, "slon: begin signal handler setup\n");
+
+ act.sa_handler = &sighandler;
+ sigemptyset(&act.sa_mask);
+ act.sa_flags = SA_NOMASK;
+
+ if (sigaction(SIGHUP,&act,NULL) < 0)
+ {
+ slon_log(SLON_FATAL, "slon: SIGHUP signal handler setup failed -(%d) %s\n", errno,strerror(errno));
+ slon_exit(-1);
+ }
+ if (signal(SIGINT,sighandler) == SIG_ERR)
+ {
+ slon_log(SLON_FATAL, "slon: SIGINT signal handler setup failed -(%d) %s\n", errno,strerror(errno));
+ slon_exit(-1);
+ }
+ if (signal(SIGTERM,sighandler) == SIG_ERR)
+ {
+ slon_log(SLON_FATAL, "slon: SIGTERM signal handler setup failed -(%d) %s\n", errno,strerror(errno));
+ slon_exit(-1);
+ }
+ if (signal(SIGCHLD,sighandler) == SIG_ERR)
+ {
+ slon_log(SLON_FATAL, "slon: SIGCHLD signal handler setup failed -(%d) %s\n", errno,strerror(errno));
+ slon_exit(-1);
+ }
+ if (signal(SIGQUIT,sighandler) == SIG_ERR)
+ {
+ slon_log(SLON_FATAL, "slon: SIGQUIT signal handler setup failed -(%d) %s\n", errno,strerror(errno));
+ slon_exit(-1);
}
+ slon_log(SLON_DEBUG2, "slon: end signal handler setup\n");
-void
-slon_exit(int code)
+ /*
+ * Tell worker/scheduler that parent has completed initialization
+ */
+ if (write(watchdog_pipe[1], "p", 1) != 1)
{
- if (pid_file)
+ slon_log(SLON_FATAL, "slon: write to pipe failed -(%d) %s\n", errno,strerror(errno));
+ slon_exit(-1);
+ }
+
+ slon_log(SLON_DEBUG2, "slon: wait for main child process\n");
+
+ while ((pid = wait(&child_status)) != slon_cpid)
{
- unlink(pid_file);
+ slon_log(SLON_DEBUG2, "slon: child terminated status: %d; pid: %d, current worker pid: %d\n", child_status, pid, slon_cpid);
+ }
+
+ slon_log(SLON_DEBUG1, "slon: done\n");
+
+ /*
+ * That's it.
+ */
+ slon_exit(0);
}
- exit(code);
}
static void
-sigalrmhandler(int signo)
+main_sigalrmhandler(int signo)
{
if (main_thread == pthread_self())
{
alarm(0);
+ slon_log(SLON_WARN, "main: shutdown timeout exiting\n");
+ kill(slon_ppid,SIGQUIT);
+ exit(-1);
+ }
+ else
+ {
+ slon_log(SLON_WARN, "main: force SIGALRM the main thread\n");
+ pthread_kill(main_thread,SIGALRM);
+ }
+}
- slon_log(SLON_WARN, "main: shutdown timeout\n");
- if (slon_restart_request)
+static void
+sighandler(int signo)
+{
+ switch (signo)
{
+ case SIGALRM:
+ case SIGCHLD:
+ break;
+
+ case SIGHUP:
+ slon_log(SLON_DEBUG1, "slon: restart requested\n");
+ slon_kill_child();
execvp(main_argv[0], main_argv);
- slon_log(SLON_FATAL,
- "main: cannot restart via execvp(): %s\n", strerror(errno));
+ slon_log(SLON_FATAL, "slon: cannot restart via execvp(): %s\n", strerror(errno));
+ slon_exit(-1);
+ break;
+
+ case SIGINT:
+ case SIGTERM:
+ slon_log(SLON_DEBUG1, "slon: shutdown requested\n");
+ slon_kill_child();
+ slon_exit(-1);
+ break;
+
+ case SIGQUIT:
+ slon_log(SLON_DEBUG1, "slon: shutdown now requested\n");
+ kill(slon_cpid,SIGKILL);
+ slon_exit(-1);
+ break;
}
- exit(-1);
}
- pthread_kill(main_thread, SIGALRM);
+
+void
+slon_kill_child()
+{
+ char pipe_c;
+ struct timeval tv;
+ fd_set fds;
+ int rc;
+ int fd;
+
+ if (slon_cpid == 0) return;
+
+ tv.tv_sec = 60;
+ tv.tv_usec = 0;
+
+ slon_log(SLON_DEBUG2, "slon: notify worker process to shutdown\n");
+
+ fd = sched_wakeuppipe[1];
+ FD_ZERO(&fds);
+ FD_SET(fd,&fds);
+
+ rc = select(fd + 1, NULL, &fds, NULL, &tv);
+
+ if (rc == 0 || rc < 0)
+ {
+ slon_log(SLON_DEBUG2, "slon: select write to worker timeout\n");
+ kill(slon_cpid,SIGKILL);
+ slon_exit(-1);
}
+ if (write(sched_wakeuppipe[1], "p", 1) != 1)
+ {
+ slon_log(SLON_FATAL, "main: write to worker pipe failed -(%d) %s\n", errno,strerror(errno));
+ kill(slon_cpid,SIGKILL);
+ slon_exit(-1);
+ }
+
+ slon_log(SLON_DEBUG2, "slon: wait for worker process to shutdown\n");
+
+ fd = watchdog_pipe[0];
+ FD_ZERO(&fds);
+ FD_SET(fd,&fds);
+
+ rc = select(fd + 1, &fds, NULL, NULL, &tv);
+
+ if (rc == 0 || rc < 0)
+ {
+ slon_log(SLON_DEBUG2, "slon: select read from worker pipe timeout\n");
+ kill(slon_cpid,SIGKILL);
+ slon_exit(-1);
+ }
+
+ if (read(watchdog_pipe[0], &pipe_c, 1) != 1)
+ {
+ slon_log(SLON_FATAL, "slon: read from worker pipe failed -(%d) %s\n", errno,strerror(errno));
+ kill(slon_cpid,SIGKILL);
+ slon_exit(-1);
+ }
+
+ if (pipe_c != 'c')
+ {
+ slon_log(SLON_FATAL, "slon: incorrect data from worker pipe -(%c)\n",pipe_c);
+ kill(slon_cpid,SIGKILL);
+ slon_exit(-1);
+ }
+
+ slon_log(SLON_DEBUG2, "slon: worker process shutdown ok\n");
+}
+
+void
+slon_exit(int code)
+{
+ if (slon_ppid == 0 && pid_file)
+ {
+ slon_log(SLON_DEBUG2, "slon: remove pid file\n");
+ unlink(pid_file);
+ }
+
+ slon_log(SLON_DEBUG2, "slon: exit(%d)\n",code);
+
+ exit(code);
+}
+
+
/*
* Local Variables:
* tab-width: 4
Index: slon.h
===================================================================
RCS file: /usr/local/cvsroot/slony1/slony1-engine/src/slon/slon.h,v
retrieving revision 1.47
retrieving revision 1.48
diff -Lsrc/slon/slon.h -Lsrc/slon/slon.h -u -w -r1.47 -r1.48
--- src/slon/slon.h
+++ src/slon/slon.h
@@ -324,6 +324,8 @@
* ----------
*/
extern pid_t slon_pid;
+extern pid_t slon_ppid;
+extern pid_t slon_cpid;
extern char *rtcfg_cluster_name;
extern char *rtcfg_namespace;
extern char *rtcfg_conninfo;
@@ -344,16 +346,18 @@
*/
#define slon_abort() \
do { \
- kill(slon_pid, SIGTERM); \
+ kill((slon_ppid == 0 ? slon_pid : slon_ppid), SIGTERM); \
pthread_exit(NULL); \
} while (0)
#define slon_restart() \
do { \
- kill(slon_pid, SIGHUP); \
+ kill((slon_ppid == 0 ? slon_pid : slon_ppid), SIGHUP); \
} while (0)
extern void slon_exit(int code);
extern int slon_restart_request;
+extern int watchdog_pipe[];
+extern int sched_wakeuppipe[];
extern pthread_mutex_t slon_wait_listen_lock;
extern pthread_cond_t slon_wait_listen_cond;
Index: runtime_config.c
===================================================================
RCS file: /usr/local/cvsroot/slony1/slony1-engine/src/slon/runtime_config.c,v
retrieving revision 1.23
retrieving revision 1.24
diff -Lsrc/slon/runtime_config.c -Lsrc/slon/runtime_config.c -u -w -r1.23 -r1.24
--- src/slon/runtime_config.c
+++ src/slon/runtime_config.c
@@ -33,6 +33,8 @@
* ---------- Global data ----------
*/
pid_t slon_pid;
+pid_t slon_cpid;
+pid_t slon_ppid;
char *rtcfg_cluster_name = NULL;
char *rtcfg_namespace = NULL;
char *rtcfg_conninfo = NULL;
Index: local_listen.c
===================================================================
RCS file: /usr/local/cvsroot/slony1/slony1-engine/src/slon/local_listen.c,v
retrieving revision 1.29
retrieving revision 1.30
diff -Lsrc/slon/local_listen.c -Lsrc/slon/local_listen.c -u -w -r1.29 -r1.30
--- src/slon/local_listen.c
+++ src/slon/local_listen.c
@@ -160,7 +160,7 @@
slon_log(SLON_INFO,
"localListenThread: got restart notification - "
"signal scheduler\n");
- slon_restart();
+ kill(getppid(), SIGHUP);
}
/*
Index: cleanup_thread.c
===================================================================
RCS file: /usr/local/cvsroot/slony1/slony1-engine/src/slon/cleanup_thread.c,v
retrieving revision 1.20
retrieving revision 1.21
diff -Lsrc/slon/cleanup_thread.c -Lsrc/slon/cleanup_thread.c -u -w -r1.20 -r1.21
--- src/slon/cleanup_thread.c
+++ src/slon/cleanup_thread.c
@@ -67,6 +67,7 @@
{
kill(getpid(), SIGTERM);
pthread_exit(NULL);
+ /* slon_abort(); */
}
dbconn = conn->dbconn;
From cvsuser Fri Mar 11 03:49:54 2005
From: cvsuser (CVS User Account)
Date: Tue Feb 13 08:58:16 2007
Subject: [Slony1-commit] By cbbrowne: Bug #1152 Description: The
query_slony_status
Message-ID: <20050311034950.5B225B1CE6C@gborg.postgresql.org>
Log Message:
-----------
Bug #1152
Description:
The query_slony_status function doesn't work with uppercase
setnames (the watchdog2 script uses that function).
workaround is to use \\"_$SETNAME\\" instead of _$SETNAME in the
script, and to use my $result=`$SLON_BIN_PATH/psql -p $port -h
$host -U slony --tuples-only $dbname --command="$query"`;
as the query command (the username was missing there).
Modified Files:
--------------
slony1-engine/tools/altperl:
slon-tools.pm (r1.19 -> r1.20)
-------------- next part --------------
Index: slon-tools.pm
===================================================================
RCS file: /usr/local/cvsroot/slony1/slony1-engine/tools/altperl/slon-tools.pm,v
retrieving revision 1.19
retrieving revision 1.20
diff -Ltools/altperl/slon-tools.pm -Ltools/altperl/slon-tools.pm -u -w -r1.19 -r1.20
--- tools/altperl/slon-tools.pm
+++ tools/altperl/slon-tools.pm
@@ -174,23 +174,23 @@
select * from
(select now() - con_timestamp < '$killafter'::interval, now() - con_timestamp as age,
con_timestamp
-from _$CLUSTER_NAME.sl_confirm c, _$CLUSTER_NAME.sl_subscribe slony_master
+from "_$CLUSTER_NAME".sl_confirm c, "_$CLUSTER_NAME".sl_subscribe slony_master
where c.con_origin = slony_master.sub_provider and
- not exists (select * from _$CLUSTER_NAME.sl_subscribe providers
+ not exists (select * from "_$CLUSTER_NAME".sl_subscribe providers
where providers.sub_receiver = slony_master.sub_provider and
providers.sub_set = slony_master.sub_set and
slony_master.sub_active = 't' and
providers.sub_active = 't') and
- c.con_received = _$CLUSTER_NAME.getLocalNodeId('_$CLUSTER_NAME') and
+ c.con_received = "_$CLUSTER_NAME".getLocalNodeId('_$CLUSTER_NAME') and
now() - con_timestamp < '$killafter'::interval
limit 1) as slave_confirmed_events
union all (select
now() - con_timestamp < '$killafter'::interval, now() - con_timestamp as age,
con_timestamp
-from _$CLUSTER_NAME.sl_confirm c, _$CLUSTER_NAME.sl_subscribe slony_master
- where c.con_origin = _$CLUSTER_NAME.getLocalNodeId('_$CLUSTER_NAME') and
- exists (select * from _$CLUSTER_NAME.sl_subscribe providers
- where providers.sub_provider = _$CLUSTER_NAME.getLocalNodeId('_$CLUSTER_NAME') and
+from "_$CLUSTER_NAME".sl_confirm c, "_$CLUSTER_NAME".sl_subscribe slony_master
+ where c.con_origin = "_$CLUSTER_NAME".getLocalNodeId('_$CLUSTER_NAME') and
+ exists (select * from "_$CLUSTER_NAME".sl_subscribe providers
+ where providers.sub_provider = "_$CLUSTER_NAME".getLocalNodeId('_$CLUSTER_NAME') and
slony_master.sub_active = 't') and
now() - con_timestamp < '$killafter'::interval
limit 1)
From cvsuser Fri Mar 11 03:52:55 2005
From: cvsuser (CVS User Account)
Date: Tue Feb 13 08:58:16 2007
Subject: [Slony1-commit] By cbbrowne: Per Bug #1152,
add -U $dbuser parameter to database query
Message-ID: <20050311035251.3C341B1CE6C@gborg.postgresql.org>
Log Message:
-----------
Per Bug #1152, add -U $dbuser parameter to database query
Modified Files:
--------------
slony1-engine/tools/altperl:
slon-tools.pm (r1.20 -> r1.21)
-------------- next part --------------
Index: slon-tools.pm
===================================================================
RCS file: /usr/local/cvsroot/slony1/slony1-engine/tools/altperl/slon-tools.pm,v
retrieving revision 1.20
retrieving revision 1.21
diff -Ltools/altperl/slon-tools.pm -Ltools/altperl/slon-tools.pm -u -w -r1.20 -r1.21
--- tools/altperl/slon-tools.pm
+++ tools/altperl/slon-tools.pm
@@ -196,8 +196,8 @@
limit 1)
;
};
- my ($port, $host, $dbname)= ($PORT[$nodenum], $HOST[$nodenum], $DBNAME[$nodenum]);
- my $result=`@@PGBINDIR@@/psql -p $port -h $host -c "$query" --tuples-only $dbname`;
+ my ($port, $host, $dbname, $dbuser)= ($PORT[$nodenum], $HOST[$nodenum], $DBNAME[$nodenum], $USER[$nodenum]);
+ my $result=`@@PGBINDIR@@/psql -p $port -h $host -U $dbuser -c "$query" --tuples-only $dbname`;
chomp $result;
#print "Query was: $query\n";
#print "Result was: $result\n";
From cvsuser Fri Mar 11 18:55:46 2005
From: cvsuser (CVS User Account)
Date: Tue Feb 13 08:58:16 2007
Subject: [Slony1-commit] By smsimms: Added --config and --help to
uninstall_nodes.pl (last
Message-ID: <20050311185544.5C429B1CDE2@gborg.postgresql.org>
Log Message:
-----------
Added --config and --help to uninstall_nodes.pl (last script).
According to the documentation for UNINSTALL NODE, it already handles
the various commands that were subsequently being sent to psql, so
I've removed them from this script.
Instead of just running UNINSTALL NODE on $MASTERNODE, run it on all
nodes, doing $MASTERNODE last.
Modified Files:
--------------
slony1-engine/tools/altperl:
uninstall_nodes.pl (r1.7 -> r1.8)
-------------- next part --------------
Index: uninstall_nodes.pl
===================================================================
RCS file: /usr/local/cvsroot/slony1/slony1-engine/tools/altperl/uninstall_nodes.pl,v
retrieving revision 1.7
retrieving revision 1.8
diff -Ltools/altperl/uninstall_nodes.pl -Ltools/altperl/uninstall_nodes.pl -u -w -r1.7 -r1.8
--- tools/altperl/uninstall_nodes.pl
+++ tools/altperl/uninstall_nodes.pl
@@ -3,26 +3,38 @@
# Author: Christopher Browne
# Copyright 2004 Afilias Canada
+use Getopt::Long;
+
+# Defaults
+$CONFIG_FILE = '@@SYSCONFDIR@@/slon_tools.conf';
+$SHOW_USAGE = 0;
+
+# Read command-line options
+GetOptions("config=s" => \$CONFIG_FILE,
+ "help" => \$SHOW_USAGE);
+
+my $USAGE =
+"Usage: uninstall_nodes [--config file]
+
+ Removes Slony configuration from all nodes in a cluster.
+
+";
+
+if ($SHOW_USAGE) {
+ print $USAGE;
+ exit 0;
+}
+
require '@@PGLIBDIR@@/slon-tools.pm';
-require '@@SYSCONFDIR@@/slon_tools.conf';
+require $CONFIG_FILE;
$FILE="/tmp/slonik.$$";
open(SLONIK, ">$FILE");
print SLONIK genheader();
-print SLONIK qq{
- uninstall node (id=$MASTERNODE);
-};
-close SLONIK;
-run_slonik_script($FILE);
-
foreach my $node (@NODES) {
- foreach my $command ("drop schema _$CLUSTER_NAME cascade;") {
- print $command, "\n";
- print `echo "$command" | psql -h $HOST[$node] -U $USER[$node] -d $DBNAME[$node] -p $PORT[$node]`;
- }
- foreach my $t (@SERIALTABLES) {
- my $command = "alter table $t drop column \\\"_Slony-I_" . $CLUSTER_NAME . "_rowID\\\";";
- print $command, "\n";
- print `echo "$command" | psql -h $HOST[$node] -U $USER[$node] -d $DBNAME[$node] -p $PORT[$node]`;
- }
+ next if $node == $MASTERNODE; # Do this one last
+ print SLONIK " uninstall node (id=$node);\n";
}
+print SLONIK " uninstall node (id=$MASTERNODE);\n";
+close SLONIK;
+run_slonik_script($FILE);
From cvsuser Fri Mar 11 19:53:05 2005
From: cvsuser (CVS User Account)
Date: Tue Feb 13 08:58:16 2007
Subject: [Slony1-commit] By cbbrowne: New Directory
Message-ID: <20050311195303.23A0FB1CE6C@gborg.postgresql.org>
Update of /usr/local/cvsroot/slony1/slony1-engine/Win32
In directory gborg.postgresql.org:/tmp/cvs-serv27350/Win32
Log Message:
Directory /usr/local/cvsroot/slony1/slony1-engine/Win32 added to the repository
From cvsuser Fri Mar 11 20:09:49 2005
From: cvsuser (CVS User Account)
Date: Tue Feb 13 08:58:16 2007
Subject: [Slony1-commit] By cbbrowne: Add in some preliminary Win32 support;
makefiles that may
Message-ID: <20050311200947.7FEC3B1CE8B@gborg.postgresql.org>
Log Message:
-----------
Add in some preliminary Win32 support; makefiles that may be helpful
for compiling xxid and backend functions.
Added Files:
-----------
slony1-engine/Win32:
Makefile.backend (r1.1)
Makefile.xxid (r1.1)
README (r1.1)
README.compiling (r1.1)
-------------- next part --------------
--- /dev/null
+++ Win32/Makefile.xxid
@@ -0,0 +1,14 @@
+# $Id: Makefile.xxid,v 1.1 2005/03/11 20:09:47 cbbrowne Exp $
+
+MODULES=xxid
+slony_subdir = src/xxid
+slony_top_builddir = ../..
+include $(slony_top_builddir)/Makefile.global
+
+ifdef USE_PGXS
+PGXS = $(shell pg_config --pgxs)
+include $(PGXS)
+else
+top_builddir = $(PGSOURCETREE)
+include $(PGSOURCETREE)/contrib/contrib-global.mk
+endif
--- /dev/null
+++ Win32/README.compiling
@@ -0,0 +1,18 @@
+$Id: README.compiling,v 1.1 2005/03/11 20:09:47 cbbrowne Exp $
+
+I created new makefiles for slony_funcs and xxid using the contrib
+infrastructure, which makes them win32 compatible. IMHO the attached
+versions should replace the current ones.
+
+After some short tests, Slony-I 1.0.5 seems to be able to replicate
+with win32 servers using those modules correctly.
+
+The slon process currently refuses to link, the thread lib seems to be
+the problem.
+
+Apparently slonik uses fork(), which would make porting it much more
+expensive. I won't spend more time on that, pgAdmin will do that
+anyway.
+
+Regards,
+Andreas Pflug
\ No newline at end of file
--- /dev/null
+++ Win32/Makefile.backend
@@ -0,0 +1,15 @@
+# $Id: Makefile.backend,v 1.1 2005/03/11 20:09:47 cbbrowne Exp $
+
+MODULES=slony1_funcs
+slony_subdir = src/slony1_funcs
+slony_top_builddir = ../..
+include $(slony_top_builddir)/Makefile.global
+CFLAGS += -I$(slony_top_builddir)
+
+ifdef USE_PGXS
+PGXS = $(shell pg_config --pgxs)
+include $(PGXS)
+else
+top_builddir = $(PGSOURCETREE)
+include $(PGSOURCETREE)/contrib/contrib-global.mk
+endif
--- /dev/null
+++ Win32/README
@@ -0,0 +1,45 @@
+Preliminary Win32 Support
+------------------------------------
+$Id: README,v 1.1 2005/03/11 20:09:47 cbbrowne Exp $
+
+As per the README.compiling documentation, Andreas Pflug has
+contributed Makefiles that may be useful for those that wish to
+compile Slony-I components on Win32.
+
+I haven't included his address. If you want to assist in providing
+Win32 support, it may be worth contacting him. Otherwise, it seems
+inappropriate to pester him about problems compiling this.
+
+There are two files provided:
+
+ 1. Makefile.backend
+
+ This makefile should be suitable for building the C components
+ found in the src/backend directory.
+
+ 2. Makefile.xxid
+
+ This should be suitable for building the C components found in
+ src/xxid.
+
+These files would replace those that ./configure would produce in
+those directories.
+
+There are no makefiles for building slon or slonik:
+
+ - In the case of slon, there appears to be some issue with linking
+ in a pthreads library. Someone who is familiar with the use of
+ pthreads on Win32 may be able to resolve this.
+
+ In the absence of a slon that runs on Win32, you ought to be able
+ to run slon processes on Unix-like operating systems.
+
+ - In the case of slonik, the use of fork() apparently makes the port
+ more challenging.
+
+ Andreas has been working on adding Slony-I management tools to
+ pgAdmin III, which does run on Microsoft Windows(tm). That may be
+ an alternative to slonik.
+
+cbbrowne@acm.org
+Christopher Browne
\ No newline at end of file
From cvsuser Mon Mar 14 23:25:53 2005
From: cvsuser (CVS User Account)
Date: Tue Feb 13 08:58:16 2007
Subject: [Slony1-commit] By cbbrowne: Changes to address problems found when
running multiple
Message-ID: <20050314232551.23BE5B1CDE2@gborg.postgresql.org>
Log Message:
-----------
Changes to address problems found when running multiple Slony-I
clusters against a single backend...
The problem is that if two slons are invoked at almost exactly the same
time, then the cleanup threads will be pretty nearly in phase with one
another. Which gives a (regrettably) excellent chance that both will try
doing an ANALYZE of pg_catalog.pg_listener concurrently. They'll both
try to commit statistics for it, and one slon will therefore fail.
1. At startup time, a random "bias" is calculated, and the slon
sleeps up to an extra 100000ms (i.e. 100s) between iterations
2. A further "fuzz" of up to 100s is added
3. The 7 tables are vacuumed in 7 separate queries
4. If any of the vacuums fails, this is treated as a SLON_ERROR, not
as a SLON_FATAL problem requiring that the slon fall over.
Modified Files:
--------------
slony1-engine/src/slon:
cleanup_thread.c (r1.21 -> r1.22)
-------------- next part --------------
Index: cleanup_thread.c
===================================================================
RCS file: /usr/local/cvsroot/slony1/slony1-engine/src/slon/cleanup_thread.c,v
retrieving revision 1.21
retrieving revision 1.22
diff -Lsrc/slon/cleanup_thread.c -Lsrc/slon/cleanup_thread.c -u -w -r1.21 -r1.22
--- src/slon/cleanup_thread.c
+++ src/slon/cleanup_thread.c
@@ -32,8 +32,20 @@
* ---------- Global data ----------
*/
int vac_frequency = SLON_VACUUM_FREQUENCY;
+static int vac_bias = 0;
static unsigned long earliest_xid = 0;
static unsigned long get_earliest_xid (PGconn *dbconn);
+
+/* The list of tables that need to be vacuumed by Slony-I */
+static char *table_list [] = {"%s.sl_event",
+ "%s.sl_confirm",
+ "%s.sl_setsync",
+ "%s.sl_log_1",
+ "%s.sl_log_2",
+ "%s.sl_seqlog",
+ "pg_catalog.pg_listener"};
+static char tstring[255];
+
/*
* ---------- cleanupThread_main
*
@@ -60,6 +72,13 @@
slon_log(SLON_DEBUG1, "cleanupThread: thread starts\n");
+ /* Want the vacuum time bias to be between 0 and 100 seconds,
+ * hence between 0 and 100000 */
+ if (vac_bias == 0) {
+ vac_bias = rand() % 100000;
+ }
+ slon_log(SLON_DEBUG4, "cleanupThread: bias = %d\n", vac_bias);
+
/*
* Connect to the local database
*/
@@ -80,8 +99,13 @@
/*
* Loop until shutdown time arrived
+ *
+ * Note the introduction of vac_bias and an up-to-100s random
+ * "fuzz"; this reduces the likelihood that having multiple
+ * slons hitting the same cluster will run into conflicts due
+ * to trying to vacuum pg_listener concurrently
*/
- while (sched_wait_time(conn, SCHED_WAIT_SOCK_READ, SLON_CLEANUP_SLEEP * 1000) == SCHED_STATUS_OK)
+ while (sched_wait_time(conn, SCHED_WAIT_SOCK_READ, SLON_CLEANUP_SLEEP * 1000 + vac_bias + (rand() % 100000)) == SCHED_STATUS_OK)
{
/*
* Call the stored procedure cleanupEvent()
@@ -185,39 +209,24 @@
* and event tables
*/
dstring_init(&query3);
+ gettimeofday(&tv_start, NULL);
+ for (t=0; t < 8; t++) {
+ sprintf(tstring, table_list[t], rtcfg_namespace);
slon_mkquery(&query3,
- "%s %s.sl_event; "
- "%s %s.sl_confirm; "
- "%s %s.sl_setsync; "
- "%s %s.sl_log_1; "
- "%s %s.sl_log_2;"
- "%s %s.sl_seqlog;"
- "%s pg_catalog.pg_listener;",
- vacuum_action,
- rtcfg_namespace,
- vacuum_action,
- rtcfg_namespace,
+ "%s %s;",
vacuum_action,
- rtcfg_namespace,
- vacuum_action,
- rtcfg_namespace,
- vacuum_action,
- rtcfg_namespace,
- vacuum_action,
- rtcfg_namespace,
- vacuum_action
- );
+ tstring);
- gettimeofday(&tv_start, NULL);
res = PQexec(dbconn, dstring_data(&query3));
if (PQresultStatus(res) != PGRES_COMMAND_OK)
{
- slon_log(SLON_FATAL,
+ slon_log(SLON_ERROR,
"cleanupThread: \"%s\" - %s",
dstring_data(&query3), PQresultErrorMessage(res));
PQclear(res);
- slon_abort();
- break;
+ /* slon_abort();
+ break; */
+ }
}
PQclear(res);
gettimeofday(&tv_end, NULL);
From cvsuser Tue Mar 15 16:41:07 2005
From: cvsuser (CVS User Account)
Date: Tue Feb 13 08:58:16 2007
Subject: [Slony1-commit] By cbbrowne: Add a DBI-based state tester
Message-ID: <20050315164105.60D62B1CDDE@gborg.postgresql.org>
Log Message:
-----------
Add a DBI-based state tester
Added Files:
-----------
slony1-engine/tools:
test_slony_state-dbi.pl (r1.1)
-------------- next part --------------
--- /dev/null
+++ tools/test_slony_state-dbi.pl
@@ -0,0 +1,317 @@
+#!perl # -*- perl -*-
+# $Id: test_slony_state-dbi.pl,v 1.1 2005/03/15 16:41:04 cbbrowne Exp $
+# Christopher Browne
+# Copyright 2005
+# PostgreSQL Global Development Group
+
+# This script, given DSN parameters to access a Slony-I cluster,
+# submits a number of queries to test the state of the nodes in the
+# cluster.
+
+use DBI;
+use Getopt::Long;
+#use strict;
+my %PROBLEMS;
+
+my $sleep_seconds = 4;
+
+my $goodopts = GetOptions("help", "database=s", "host=s", "user=s", "cluster=s",
+ "password=s", "port=s", "recipient=s", "mailprog=s");
+if (defined($opt_help)) {
+ show_usage();
+}
+my ($database,$user, $port, $cluster, $host, $password, $set, $finalquery);
+
+$database = $opt_database if (defined($opt_database));
+$port = 5432;
+$port = $opt_port if (defined($opt_port));
+$user = $opt_user if (defined($opt_user));
+$password = $opt_password if (defined($opt_password));
+$host = $opt_host if (defined($opt_host));
+$cluster = $opt_cluster if (defined($opt_cluster));
+$recipient = $opt_recipient if (defined($opt_recipient));
+$mailprog = $opt_mailprog if (defined($opt_mailprog));
+
+my $initialDSN = "dbi:Pg:dbname=$database;host=$host;port=$port";
+$initialDSN = $initialDSN . ";password=$password" if defined($opt_password);
+
+print "DSN: $initialDSN\n===========================\n";
+
+my $dbh = DBI->connect($initialDSN) or die "Unable to connect: $DBI::errstr\n";
+
+print "Rummage for DSNs\n=============================\n";
+# Query to find live DSNs
+my $dsnsquery = qq{
+ select p.pa_server, p.pa_conninfo
+ from "_$cluster".sl_path p
+ where exists (select * from "_$cluster".sl_subscribe s where
+ (s.sub_provider = p.pa_server or s.sub_receiver = p.pa_server) and
+ sub_active = 't')
+ group by pa_server, pa_conninfo;
+};
+
+print "Query:\n$dsnsquery\n";
+$tq = $dbh->prepare($dsnsquery);
+$tq->execute();
+
+my %DSN;
+while (my @row = $tq->fetchrow_array) {
+ my ($node, $dsn) = @row;
+ $DSN{$node} = $dsn;
+}
+
+foreach my $node (keys %DSN) {
+ my $dsn = $DSN{$node};
+ test_node($node, $dsn);
+}
+
+report_on_problems ();
+
+sub test_node {
+ my ($node, $dsn) = @_;
+
+ print "\nTests for node $node - DSN = $dsn\n========================================\n";
+
+ my $listener_query = "select relpages, reltuples from pg_catalog.pg_class where relname = 'pg_listener';";
+ my $res = $dbh->prepare($listener_query);
+ $res->execute();
+ my ($relpages, $reltuples);
+ while (my @row = $res->fetchrow_array) {
+ ($relpages, $reltuples) = @row;
+ }
+ print qq{pg_listener info:
+Pages: $relpages
+Tuples: $reltuples
+};
+
+ my $HILISTENPAGES = 5000;
+ if ($relpages > $HILISTENPAGES) {
+ add_problem ($node, "pg_listener relpages high - $relpages",
+ qq{Number of pages in table pg_listener is $relpages
+This is higher than the warning level of $HILISTENPAGES.
+
+Perhaps a long running transaction is preventing pg_listener from
+being vacuumed out?
+});
+ }
+
+ my $HILISTENTUPLES = 200000;
+ if ($reltuples > $HILISTENTUPLES) {
+ add_problem ($node, "pg_listener reltuples high - $reltuples",
+ qq{Number of tuples in system table pg_listener is $reltuples.
+This is higher than the warning level of $HILISTENTUPLES.
+
+Perhaps a long running transaction is preventing pg_listener from
+being vacuumed out?
+});
+ }
+
+ my $HISLTUPLES=200000;
+ print "\nSize Tests\n================================================\n";
+ my $sizequeries = qq{select relname, relpages, reltuples from pg_catalog.pg_class where relname in ('sl_log_1', 'sl_log_2', 'sl_seqlog') order by relname;};
+ $res = $dbh->prepare($sizequeries);
+ $res->execute();
+ while (my @row = $res->fetchrow_array) {
+ my ($relname, $relpages, $reltuples) = @row;
+ printf "%15s %8d %9f\n", $relname, $relpages, $reltuples;
+ if ($reltuples > $HISLTUPLES) {
+ add_problem($node, "$relname tuples = $reltuples > $HISLTUPLES",
+ qq{Number of tuples in Slony-I table $relname is $reltuples which
+exceeds $HISLTUPLES.
+
+You may wish to investigate whether or not a node is down, or perhaps
+if sl_confirm entries have not been propagating properly.
+});
+
+ }
+ }
+
+ print "\nListen Path Analysis\n===================================================\n";
+ my $inadequate_paths = qq{
+select li_origin, count(*) from "_$cluster".sl_listen
+group by li_origin
+having count(*) < (select count(*) - 1 from "_$cluster".sl_node );
+};
+ $res = $dbh->prepare($inadequate_paths);
+ $res->execute();
+ while (my @row = $res->fetchrow_array) {
+ my ($origin, $count) = @row;
+ printf "Problem node: %4d Listen path count for node: %d\n", $origin, $count;
+ $listenproblems++;
+ }
+ my $missing_paths = qq{
+ select * from (select n1.no_id as origin, n2.no_id as receiver
+ from "_$cluster".sl_node n1, "_$cluster".sl_node n2 where n1.no_id != n2.no_id) as foo
+ where not exists (select 1 from "_$cluster".sl_listen
+ where li_origin = origin and li_receiver = receiver);
+};
+ $res = $dbh->prepare($missing_paths);
+ $res->execute();
+ my $allmissingpaths;
+ while (my @row = $res->fetchrow_array) {
+ my ($origin, $receiver) = @row;
+ my $string = sprintf "(origin,receiver) where there is exists a direct path missing in sl_listen: (%d,%d)\n",
+ $origin, $receiver;
+ print $string;
+ $listenproblems++;
+ $allmissingpaths .= $string;
+ }
+ if ($allmissingpaths) {
+ add_problem($node, "Missing sl_listen paths", qq{$allmissingpaths
+
+Please check contents of table sl_listen; some STORE LISTEN requests may be
+necessary.
+});
+ }
+
+ # Each subscriber node must have a direct listen path
+ my $no_direct_path = qq{
+ select sub_set, sub_provider, sub_receiver from "_$cluster".sl_subscribe where not exists
+ (select 1 from "_$cluster".sl_listen
+ where li_origin = sub_provider and li_receiver = sub_receiver and li_provider = sub_provider);
+};
+ $res = $dbh->prepare($no_direct_path);
+ $res->execute();
+ while (my @row = $res->fetchrow_array) {
+ my ($set, $provider, $receiver) = @row;
+ my $string = sprintf "No direct path found for set %5d from provider %5d to receiver %5d\n", $set, $provider, $receiver;
+ print $string;
+ add_problem($node, "Missing path from $provider to $receiver", qq{Missing sl_listen entry - $string
+
+Please check contents of table sl_listen; some STORE LISTEN requests may be
+necessary.
+});
+ $listenproblems++;
+ }
+
+ if ($listenproblems > 0) {
+ print "sl_listen problems found: $listenproblems\n";
+ } else {
+ print "No problems found with sl_listen\n";
+ }
+
+ print "\n--------------------------------------------------------------------------------\n";
+ print "Summary of event info\n";
+ printf "%7s %9s %9s %12s %12s\n", "Origin", "Min SYNC", "Max SYNC", "Min SYNC Age", "Max SYNC Age";
+ print "================================================================================\n";
+
+ my $WANTAGE = "00:30:00";
+ my $event_summary = qq{
+ select ev_origin, min(ev_seqno), max(ev_seqno),
+ date_trunc('minutes', min(now() - ev_timestamp)),
+ date_trunc('minutes', max(now() - ev_timestamp)),
+ min(now() - ev_timestamp) > '$WANTAGE' as agehi
+ from "_$cluster".sl_event group by ev_origin;
+ };
+ $res = $dbh->prepare($event_summary);
+ $res->execute();
+ while (my @row = $res->fetchrow_array) {
+ my ($origin, $minsync, $maxsync, $minage, $maxage, $agehi) = @row;
+ printf "%7s %9d %9d %12s %12s %4s\n", $origin, $minsync, $maxsync, $minage, $maxage, $agehi;
+ if ($agehi eq 't') {
+ add_problem($origin, "Events not propagating to node $origin",
+ qq{Events not propagating quickly in sl_event -
+For origin node $origin, earliest propagated event of age $minage > $WANTAGE
+
+Are slons running for both nodes?
+
+Could listen paths be missing so that events are not propagating?
+});
+ }
+ }
+ print "\n";
+
+ print "\n---------------------------------------------------------------------------------\n";
+ print "Summary of sl_confirm aging\n";
+ printf "%9s %9s %9s %9s %12s %12s\n", "Origin", "Receiver", "Min SYNC", "Max SYNC", "Age of latest SYNC", "Age of eldest SYNC";
+ print "=================================================================================\n";
+ my $WANTCONFIRM = "00:30:00";
+ my $confirm_summary = qq{
+
+ select con_origin, con_received, min(con_seqno) as minseq,
+ max(con_seqno) as maxseq, date_trunc('minutes', min(now()-con_timestamp)) as age1,
+ date_trunc('minutes', max(now()-con_timestamp)) as age2,
+ min(now() - con_timestamp) > '$WANTCONFIRM' as tooold
+ from "_$cluster".sl_confirm
+ group by con_origin, con_received
+ order by con_origin, con_received;
+ };
+
+ $res = $dbh->prepare($confirm_summary);
+ $res->execute();
+ while (my @row = $res->fetchrow_array) {
+ my ($origin, $receiver, $minsync, $maxsync, $minage, $maxage, $agehi) = @row;
+ printf "%9s %9s %9s %9s %12s %12s %4s\n", $origin, $receiver, $minsync, $maxsync, $minage, $maxage, $agehi;
+ if ($agehi eq 't') {
+ add_problem($origin, "Confirmations not propagating from $origin to $receiver",
+ qq{Confirmations not propagating quickly in sl_confirm -
+
+For origin node $origin, receiver node $receiver, earliest propagated
+confirmation has age $minage > $WANTCONFIRM
+
+Are slons running for both nodes?
+
+Could listen paths be missing so that confirmations are not propagating?
+});
+ }
+ }
+ print "\n";
+
+ print "\n------------------------------------------------------------------------------\n";
+ print "\nListing of old open connections\n";
+ printf "%15s %15s %15s %12s %20s\n", "Database", "PID", "User", "Query Age", "Query";
+ print "================================================================================\n";
+
+ my $ELDERLY_TXN = "01:30:00";
+ my $old_conn_query = qq{
+ select datname, procpid, usename, date_trunc('minutes', now() - query_start), substr(current_query,0,20)
+ from pg_stat_activity
+ where (now() - query_start) > '$ELDERLY_TXN'::interval and
+ current_query <> ''
+ order by query_start;
+ };
+
+ $res = $dbh->prepare($old_conn_query);
+ $res->execute();
+ while (my @row = $res->fetchrow_array) {
+ my ($db, $pid, $user, $age, $query) = @row;
+ printf "%15s %15d %15s %12s %20s\n", $db, $pid, $user, $age, $query;
+ add_problem($origin, "Old Transactions Kept Open",
+ qq{Old Transaction still running with age $age > $ELDERLY_TXN
+
+Query: $query
+});
+ }
+ print "\n";
+
+}
+
+sub show_usage {
+ my ($inerr) = @_;
+ if ($inerr) {
+ chomp $inerr;
+ print $inerr, "\n";
+ }
+ die "$0 --host --database --user --cluster --port=integer --password --recipient --mailprog";
+}
+
+sub add_problem {
+ my ($node, $short, $long) = @_;
+ $PROBLEMS{"$node $short"} = $long;
+}
+
+sub report_on_problems {
+ my ($totalproblems, $message);
+ foreach my $key (sort keys %PROBLEMS) {
+ $totalproblems++;
+ $message .= "\nNode: $key\n================================================\n" . $PROBLEMS{$key} . "\n";
+ }
+ if ($totalproblems) {
+ open(MAIL, "|$mailprog -s \"Slony State Test Warning - Cluster $cluster\" $recipient");
+ print MAIL "\n";
+ print MAIL $message;
+ close (MAIL);
+ print "\n\nSending message thus - |$mailprog -s \"Slony State Test Warning - Cluster $cluster\" $recipient\n";
+ print "Message:\n\n$message\n";
+ }
+}
From cvsuser Tue Mar 15 16:43:00 2005
From: cvsuser (CVS User Account)
Date: Tue Feb 13 08:58:16 2007
Subject: [Slony1-commit] By cbbrowne: Need to properly quote cluster name,
as reported by Thomas
Message-ID: <20050315164258.B7B1BB1CDDE@gborg.postgresql.org>
Log Message:
-----------
Need to properly quote cluster name, as reported by Thomas Pundt...
Modified Files:
--------------
slony1-engine/tools:
test_slony_state.pl (r1.2 -> r1.3)
-------------- next part --------------
Index: test_slony_state.pl
===================================================================
RCS file: /usr/local/cvsroot/slony1/slony1-engine/tools/test_slony_state.pl,v
retrieving revision 1.2
retrieving revision 1.3
diff -Ltools/test_slony_state.pl -Ltools/test_slony_state.pl -u -w -r1.2 -r1.3
--- tools/test_slony_state.pl
+++ tools/test_slony_state.pl
@@ -45,8 +45,8 @@
my $dsnsquery =
"
select p.pa_server, p.pa_conninfo
- from _$cluster.sl_path p
- where exists (select * from _$cluster.sl_subscribe s where
+ from "_$cluster".sl_path p
+ where exists (select * from "_$cluster".sl_subscribe s where
(s.sub_provider = p.pa_server or s.sub_receiver = p.pa_server) and
sub_active = 't')
group by pa_server, pa_conninfo;
@@ -126,9 +126,9 @@
print "\nListen Path Analysis\n===================================================\n";
my $inadequate_paths = qq{
-select li_origin, count(*) from _$cluster.sl_listen
+select li_origin, count(*) from "_$cluster".sl_listen
group by li_origin
-having count(*) < (select count(*) - 1 from _$cluster.sl_node );
+having count(*) < (select count(*) - 1 from "_$cluster".sl_node );
};
$res = $dbh->exec($inadequate_paths);
while (my @row = $res->fetchrow) {
@@ -138,8 +138,8 @@
}
my $missing_paths = qq{
select * from (select n1.no_id as origin, n2.no_id as receiver
- from _$cluster.sl_node n1, _$cluster.sl_node n2 where n1.no_id != n2.no_id) as foo
- where not exists (select 1 from _$cluster.sl_listen
+ from "_$cluster".sl_node n1, "_$cluster".sl_node n2 where n1.no_id != n2.no_id) as foo
+ where not exists (select 1 from "_$cluster".sl_listen
where li_origin = origin and li_receiver = receiver);
};
$res = $dbh->exec($missing_paths);
@@ -162,8 +162,8 @@
# Each subscriber node must have a direct listen path
my $no_direct_path = qq{
- select sub_set, sub_provider, sub_receiver from _$cluster.sl_subscribe where not exists
- (select 1 from _$cluster.sl_listen
+ select sub_set, sub_provider, sub_receiver from "_$cluster".sl_subscribe where not exists
+ (select 1 from "_$cluster".sl_listen
where li_origin = sub_provider and li_receiver = sub_receiver and li_provider = sub_provider);
};
$res = $dbh->exec($no_direct_path);
@@ -196,7 +196,7 @@
date_trunc('minutes', min(now() - ev_timestamp)),
date_trunc('minutes', max(now() - ev_timestamp)),
min(now() - ev_timestamp) > '$WANTAGE' as agehi
- from _$cluster.sl_event group by ev_origin;
+ from "_$cluster".sl_event group by ev_origin;
};
$res = $dbh->exec($event_summary);
while (my @row = $res->fetchrow) {
@@ -226,7 +226,7 @@
max(con_seqno) as maxseq, date_trunc('minutes', min(now()-con_timestamp)) as age1,
date_trunc('minutes', max(now()-con_timestamp)) as age2,
min(now() - con_timestamp) > '$WANTCONFIRM' as tooold
- from _$cluster.sl_confirm
+ from "_$cluster".sl_confirm
group by con_origin, con_received
order by con_origin, con_received;
};
From cvsuser Tue Mar 15 16:45:09 2005
From: cvsuser (CVS User Account)
Date: Tue Feb 13 08:58:16 2007
Subject: [Slony1-commit] By cbbrowne: Cluster names need to be quoted in
case they are in caps.
Message-ID: <20050315164507.5AE47B1CDDE@gborg.postgresql.org>
Log Message:
-----------
Cluster names need to be quoted in case they are in caps.
Modified Files:
--------------
slony1-engine/tools:
test_slony_replication.pl (r1.2 -> r1.3)
-------------- next part --------------
Index: test_slony_replication.pl
===================================================================
RCS file: /usr/local/cvsroot/slony1/slony1-engine/tools/test_slony_replication.pl,v
retrieving revision 1.2
retrieving revision 1.3
diff -Ltools/test_slony_replication.pl -Ltools/test_slony_replication.pl -u -w -r1.2 -r1.3
--- tools/test_slony_replication.pl
+++ tools/test_slony_replication.pl
@@ -49,8 +49,8 @@
# Query to find the "master" node
my $masterquery = "
select sub_provider
- from _$cluster.sl_subscribe s1
- where not exists (select * from _$cluster.sl_subscribe s2
+ from "_$cluster".sl_subscribe s1
+ where not exists (select * from "_$cluster".sl_subscribe s2
where s2.sub_receiver = s1.sub_provider and
s1.sub_set = $set and s2.sub_set = $set and
s1.sub_active = 't' and s2.sub_active = 't')
@@ -72,8 +72,8 @@
my $dsnsquery =
"
select p.pa_server, p.pa_conninfo
- from _$cluster.sl_path p
- where exists (select * from _$cluster.sl_subscribe s where
+ from "_$cluster".sl_path p
+ where exists (select * from "_$cluster".sl_subscribe s where
s.sub_set = $set and
(s.sub_provider = p.pa_server or s.sub_receiver = p.pa_server) and
sub_active = 't')
@@ -253,7 +253,7 @@
print "Status: PGRES_CONNECTION_BAD\n";
return;
}
- my $livequery = qq{ select * from _$cluster.sl_subscribe s1 where sub_set = $set and sub_receiver = $node and sub_active;};
+ my $livequery = qq{ select * from "_$cluster".sl_subscribe s1 where sub_set = $set and sub_receiver = $node and sub_active;};
print "Query: $livequery\n";
my $result = $slave->exec($livequery);
while (my @row = $result->fetchrow) {
From cvsuser Tue Mar 15 17:26:46 2005
From: cvsuser (CVS User Account)
Date: Tue Feb 13 08:58:16 2007
Subject: [Slony1-commit] By cbbrowne: Fix a bunch of misspellings
Message-ID: <20050315172645.8EB3AB1CDC8@gborg.postgresql.org>
Log Message:
-----------
Fix a bunch of misspellings
Modified Files:
--------------
slony1-engine/src/ducttape:
test_1_pgbench (r1.19 -> r1.20)
test_1_pgbench_no_node1 (r1.1 -> r1.2)
test_2_pgbench (r1.12 -> r1.13)
test_3_pgbench (r1.9 -> r1.10)
test_4_pgbench (r1.6 -> r1.7)
test_5_pgbench (r1.5 -> r1.6)
test_7_defines (r1.1 -> r1.2)
-------------- next part --------------
Index: test_2_pgbench
===================================================================
RCS file: /usr/local/cvsroot/slony1/slony1-engine/src/ducttape/test_2_pgbench,v
retrieving revision 1.12
retrieving revision 1.13
diff -Lsrc/ducttape/test_2_pgbench -Lsrc/ducttape/test_2_pgbench -u -w -r1.12 -r1.13
--- src/ducttape/test_2_pgbench
+++ src/ducttape/test_2_pgbench
@@ -27,7 +27,7 @@
DB2=slony_test2
DB3=slony_test3
-PGBENCH_SCALE=1
+PGBENCH_SCALE=10
PGBENCH_CLIENTS=5
PGBENCH_TRANS=`expr 50000 / $PGBENCH_CLIENTS`
@@ -134,7 +134,7 @@
exit -1
fi
-echo "**** starting the Slony-I node deamon for $DB1"
+echo "**** starting the Slony-I node daemon for $DB1"
xterm -title "Slon node 1" -e sh -c "slon -d2 T1 dbname=$DB1; echo -n 'Enter>'; read line" &
slon1_pid=$!
echo "slon[$slon1_pid] on dbname=$DB1"
@@ -230,7 +230,7 @@
exit -1
fi
-echo "**** starting the Slony-I node deamon for $DB1"
+echo "**** starting the Slony-I node daemon for $DB2"
xterm -title "Slon node 2" -e sh -c "slon -d2 T1 dbname=$DB2; echo -n 'Enter>'; read line" &
slon2_pid=$!
echo "slon[$slon2_pid] on dbname=$DB2"
@@ -306,7 +306,7 @@
exit -1
fi
-echo "**** starting the Slony-I node deamon for $DB3"
+echo "**** starting the Slony-I node daemon for $DB3"
xterm -title "Slon node 3" -e sh -c "slon -d2 T1 dbname=$DB3; echo -n 'Enter>'; read line" &
slon3_pid=$!
echo "slon[$slon3_pid] on dbname=$DB3"
Index: test_1_pgbench
===================================================================
RCS file: /usr/local/cvsroot/slony1/slony1-engine/src/ducttape/test_1_pgbench,v
retrieving revision 1.19
retrieving revision 1.20
diff -Lsrc/ducttape/test_1_pgbench -Lsrc/ducttape/test_1_pgbench -u -w -r1.19 -r1.20
--- src/ducttape/test_1_pgbench
+++ src/ducttape/test_1_pgbench
@@ -80,7 +80,7 @@
#####
# Create the "Primary Node"
#####
-echo "**** creating database for Node 1"
+echo "**** creating database for Node 11"
createdb $DB1 || exit 1
pgbench -i -s $PGBENCH_SCALE $DB1
@@ -107,49 +107,50 @@
# and create a replication set containing the pgbench tables.
######################################################################
+PREAMBLE=" cluster name = T1;
+ node 11 admin conninfo = 'dbname=$DB1';
+ node 22 admin conninfo = 'dbname=$DB2';
+"
+
echo "**** initializing $DB1 as Primary Node for Slony-I cluster T1"
slonik <<_EOF_
- cluster name = T1;
- node 1 admin conninfo = 'dbname=$DB1';
- node 2 admin conninfo = 'dbname=$DB2';
- init cluster (id = 1, comment = 'Node 1');
- echo 'Database $DB1 initialized as Node 1';
+$PREAMBLE
+ init cluster (id = 11, comment = 'Node 11');
+ echo 'Database $DB1 initialized as Node 11';
_EOF_
if [ $? -ne 0 ] ; then
kill $pgbench_pid;
exit 1
fi
-echo "**** starting the Slony-I node deamon for $DB1"
-xterm -title "Slon node 1" -e sh -c "slon -d2 -s10000 -g10 T1 dbname=$DB1; echo -n 'Enter>'; read line" &
+echo "**** starting the Slony-I node daemon for $DB1"
+xterm -title "Slon node 11" -e sh -c "slon -d2 -s500 -g10 T1 dbname=$DB1; echo -n 'Enter>'; read line" &
slon1_pid=$!
echo "slon[$slon1_pid] on dbname=$DB1"
echo "**** creating a replication set containing the 4 pgbench tables ... "
slonik <<_EOF_
- cluster name = T1;
- node 1 admin conninfo = 'dbname=$DB1';
- node 2 admin conninfo = 'dbname=$DB2';
+$PREAMBLE
try {
- table add key (node id = 1, fully qualified name = 'public.history');
+ table add key (node id = 11, fully qualified name = 'public.history');
}
on error {
exit 1;
}
try {
- create set (id = 1, origin = 1, comment = 'Set 1 - pgbench tables');
- set add table (set id = 1, origin = 1,
+ create set (id = 1, origin = 11, comment = 'Set 1 - pgbench tables');
+ set add table (set id = 1, origin = 11,
id = 1, fully qualified name = 'public.accounts',
comment = 'Table accounts');
- set add table (set id = 1, origin = 1,
+ set add table (set id = 1, origin = 11,
id = 2, fully qualified name = 'public.branches',
comment = 'Table branches');
- set add table (set id = 1, origin = 1,
+ set add table (set id = 1, origin = 11,
id = 3, fully qualified name = 'public.tellers',
comment = 'Table tellers');
- set add table (set id = 1, origin = 1,
+ set add table (set id = 1, origin = 11,
id = 4, fully qualified name = 'public.history',
key = serial, comment = 'Table accounts');
}
@@ -187,25 +188,32 @@
# Setup DB2 as a subscriber node and let it subscribe the replication
# set of the running pgbench
######################################################################
-echo "**** creating database for Node 2"
+echo "**** creating database for node 22"
if ! createdb $DB2 ; then
kill $pgbench_pid 2>/dev/null
kill $slon1_pid 2>/dev/null
exit 1
fi
-echo "**** initializing $DB2 as Node 2 of Slony-I cluster T1"
+echo "**** initializing $DB2 as node 22 of Slony-I cluster T1"
slonik <<_EOF_
- cluster name = T1;
- node 1 admin conninfo = 'dbname=$DB1';
- node 2 admin conninfo = 'dbname=$DB2';
+$PREAMBLE
+ echo 'Creating node 22';
+ try {
+ store node (id = 22, comment = 'node 22', event node = 11);
+ } on error {
+ echo 'could not establish node 22';
+ exit -1;
+ }
try {
- store node (id = 2, comment = 'Node 2');
- store path (server = 1, client = 2, conninfo = 'dbname=$DB1');
- store path (server = 2, client = 1, conninfo = 'dbname=$DB2');
+ store path (server = 11, client = 22, conninfo = 'dbname=$DB1');
+ store path (server = 22, client = 11, conninfo = 'dbname=$DB2');
+ }
+ on error {
+ echo 'could not establish paths between 11 and 22';
+ exit -1;
}
- on error { exit -1; }
- echo 'Database $DB2 added as Node 2';
+ echo 'Database $DB2 added as node 22';
_EOF_
if [ $? -ne 0 ] ; then
kill $pgbench_pid 2>/dev/null
@@ -213,8 +221,8 @@
exit 1
fi
-echo "**** starting the Slony-I node deamon for $DB1"
-xterm -title "Slon node 2" -e sh -c "slon -d2 -s10000 -g10 T1 dbname=$DB2; echo -n 'Enter>'; read line" &
+echo "**** starting the Slony-I node daemon for $DB1"
+xterm -title "Slon node 22" -e sh -c "slon -d2 -s10000 -o10000 -g10 T1 dbname=$DB2; echo -n 'Enter>'; read line" &
slon2_pid=$!
echo "slon[$slon2_pid] on dbname=$DB2"
@@ -231,16 +239,14 @@
# And now comes the moment where the big elephant starts to pee
# and the attendants in the first row climb on their chairs ...
######################################################################
-echo "**** creating pgbench tables and subscribing Node 2 to set 1"
+echo "**** creating pgbench tables and subscribing node 22 to set 1"
(
cat pgbench_schema.sql
) | psql -q $DB2
slonik <<_EOF_
- cluster name = T1;
- node 1 admin conninfo = 'dbname=$DB1';
- node 2 admin conninfo = 'dbname=$DB2';
+$PREAMBLE
- subscribe set ( id = 1, provider = 1, receiver = 2, forward = yes );
+ subscribe set ( id = 1, provider = 11, receiver = 22, forward = yes );
_EOF_
echo ""
Index: test_7_defines
===================================================================
RCS file: /usr/local/cvsroot/slony1/slony1-engine/src/ducttape/test_7_defines,v
retrieving revision 1.1
retrieving revision 1.2
diff -Lsrc/ducttape/test_7_defines -Lsrc/ducttape/test_7_defines -u -w -r1.1 -r1.2
--- src/ducttape/test_7_defines
+++ src/ducttape/test_7_defines
@@ -132,7 +132,7 @@
exit 1
fi
-echo "**** starting the Slony-I node deamon for $DB1"
+echo "**** starting the Slony-I node daemon for $DB1"
xterm -title "Slon node 11" -e sh -c "slon -d2 -s500 -g10 T1 dbname=$DB1; echo -n 'Enter>'; read line" &
slon1_pid=$!
echo "slon[$slon1_pid] on dbname=$DB1"
@@ -229,7 +229,7 @@
exit 1
fi
-echo "**** starting the Slony-I node deamon for $DB1"
+echo "**** starting the Slony-I node daemon for $DB1"
xterm -title "Slon node 22" -e sh -c "slon -d2 -s10000 -o10000 -g10 T1 dbname=$DB2; echo -n 'Enter>'; read line" &
slon2_pid=$!
echo "slon[$slon2_pid] on dbname=$DB2"
Index: test_1_pgbench_no_node1
===================================================================
RCS file: /usr/local/cvsroot/slony1/slony1-engine/src/ducttape/test_1_pgbench_no_node1,v
retrieving revision 1.1
retrieving revision 1.2
diff -Lsrc/ducttape/test_1_pgbench_no_node1 -Lsrc/ducttape/test_1_pgbench_no_node1 -u -w -r1.1 -r1.2
--- src/ducttape/test_1_pgbench_no_node1
+++ src/ducttape/test_1_pgbench_no_node1
@@ -123,7 +123,7 @@
exit 1
fi
-echo "**** starting the Slony-I node deamon for $DB1"
+echo "**** starting the Slony-I node daemon for $DB1"
xterm -title "Slon node 11" -e sh -c "slon -d2 -s500 -g10 T1 dbname=$DB1; echo -n 'Enter>'; read line" &
slon1_pid=$!
echo "slon[$slon1_pid] on dbname=$DB1"
@@ -226,7 +226,7 @@
exit 1
fi
-echo "**** starting the Slony-I node deamon for $DB1"
+echo "**** starting the Slony-I node daemon for $DB1"
xterm -title "Slon node 22" -e sh -c "slon -d2 -s10000 -o10000 -g10 T1 dbname=$DB2; echo -n 'Enter>'; read line" &
slon2_pid=$!
echo "slon[$slon2_pid] on dbname=$DB2"
Index: test_4_pgbench
===================================================================
RCS file: /usr/local/cvsroot/slony1/slony1-engine/src/ducttape/test_4_pgbench,v
retrieving revision 1.6
retrieving revision 1.7
diff -Lsrc/ducttape/test_4_pgbench -Lsrc/ducttape/test_4_pgbench -u -w -r1.6 -r1.7
--- src/ducttape/test_4_pgbench
+++ src/ducttape/test_4_pgbench
@@ -113,7 +113,7 @@
exit 1
fi
-echo "**** starting the Slony-I node deamon for $DB1"
+echo "**** starting the Slony-I node daemon for $DB1"
xterm -title "Slon node 1" -e sh -c "slon -d2 T1 dbname=$DB1; echo -n 'Enter>'; read line" &
slon1_pid=$!
echo "slon[$slon1_pid] on dbname=$DB1"
@@ -205,7 +205,7 @@
exit 1
fi
-echo "**** starting the Slony-I node deamon for $DB1"
+echo "**** starting the Slony-I node daemon for $DB1"
xterm -title "Slon node 2" -e sh -c "slon -d2 T1 dbname=$DB2; echo -n 'Enter>'; read line" &
slon2_pid=$!
echo "slon[$slon2_pid] on dbname=$DB2"
Index: test_3_pgbench
===================================================================
RCS file: /usr/local/cvsroot/slony1/slony1-engine/src/ducttape/test_3_pgbench,v
retrieving revision 1.9
retrieving revision 1.10
diff -Lsrc/ducttape/test_3_pgbench -Lsrc/ducttape/test_3_pgbench -u -w -r1.9 -r1.10
--- src/ducttape/test_3_pgbench
+++ src/ducttape/test_3_pgbench
@@ -173,7 +173,7 @@
exit 1
fi
-echo "**** starting the Slony-I node deamon for $DB1"
+echo "**** starting the Slony-I node daemon for $DB1"
xterm -title "Slon node 1" -e sh -c "slon -d2 T1 dbname=$DB1; echo -n 'Enter>'; read line" &
slon1_pid=$!
echo "slon[$slon1_pid] on dbname=$DB1"
@@ -276,7 +276,7 @@
exit 1
fi
-echo "**** starting the Slony-I node deamon for $DB1"
+echo "**** starting the Slony-I node daemon for $DB1"
xterm -title "Slon node 2" -e sh -c "slon -d2 T1 dbname=$DB2; echo -n 'Enter>'; read line" &
slon2_pid=$!
echo "slon[$slon2_pid] on dbname=$DB2"
Index: test_5_pgbench
===================================================================
RCS file: /usr/local/cvsroot/slony1/slony1-engine/src/ducttape/test_5_pgbench,v
retrieving revision 1.5
retrieving revision 1.6
diff -Lsrc/ducttape/test_5_pgbench -Lsrc/ducttape/test_5_pgbench -u -w -r1.5 -r1.6
--- src/ducttape/test_5_pgbench
+++ src/ducttape/test_5_pgbench
@@ -124,7 +124,7 @@
exit 1
fi
-echo "**** starting the Slony-I node deamon for $DB1"
+echo "**** starting the Slony-I node daemon for $DB1"
xterm -title "Slon node 1" -e sh -c "slon -d2 T1 dbname=$DB1; echo -n 'Enter>'; read line" &
slon1_pid=$!
echo "slon[$slon1_pid] on dbname=$DB1"
@@ -219,7 +219,7 @@
exit 1
fi
-echo "**** starting the Slony-I node deamon for $DB1"
+echo "**** starting the Slony-I node daemon for $DB1"
xterm -title "Slon node 2" -e sh -c "slon -d2 T1 dbname=$DB2; echo -n 'Enter>'; read line" &
slon2_pid=$!
echo "slon[$slon2_pid] on dbname=$DB2"
From cvsuser Tue Mar 15 18:59:31 2005
From: cvsuser (CVS User Account)
Date: Tue Feb 13 08:58:16 2007
Subject: [Slony1-commit] By cbbrowne: Fix widespread problem with checking
Make version; needed
Message-ID: <20050315185929.45B5BB1D126@gborg.postgresql.org>
Log Message:
-----------
Fix widespread problem with checking Make version; needed to
quote $CGNU because it typically contains a multiword result.
Modified Files:
--------------
slony1-engine/src/ducttape:
test_1_pgbench (r1.20 -> r1.21)
test_1_pgbench_no_node1 (r1.2 -> r1.3)
test_2_pgbench (r1.13 -> r1.14)
test_3_pgbench (r1.10 -> r1.11)
test_4_pgbench (r1.7 -> r1.8)
test_5_pgbench (r1.6 -> r1.7)
test_6_autolisten (r1.3 -> r1.4)
test_7_defines (r1.2 -> r1.3)
test_8_logship (r1.1 -> r1.2)
-------------- next part --------------
Index: test_1_pgbench
===================================================================
RCS file: /usr/local/cvsroot/slony1/slony1-engine/src/ducttape/test_1_pgbench,v
retrieving revision 1.20
retrieving revision 1.21
diff -Lsrc/ducttape/test_1_pgbench -Lsrc/ducttape/test_1_pgbench -u -w -r1.20 -r1.21
--- src/ducttape/test_1_pgbench
+++ src/ducttape/test_1_pgbench
@@ -54,7 +54,7 @@
if [ -z $WGM ] ; then
MAKE=make
CGNU=`make -v | grep GNU`
- if [ -z $CGNU ] ; then
+ if [ -z "$CGNU" ] ; then
echo "GNU Make not found - please install GNU Make"
exit 1
fi
Index: test_6_autolisten
===================================================================
RCS file: /usr/local/cvsroot/slony1/slony1-engine/src/ducttape/test_6_autolisten,v
retrieving revision 1.3
retrieving revision 1.4
diff -Lsrc/ducttape/test_6_autolisten -Lsrc/ducttape/test_6_autolisten -u -w -r1.3 -r1.4
--- src/ducttape/test_6_autolisten
+++ src/ducttape/test_6_autolisten
@@ -126,10 +126,10 @@
# Make sure the install is up to date
#####
WGM=`which gmake`
-if [[ $WGM == "" ]]; then
+if [ -z $WGM ]; then
MAKE=make
CGNU=`make -v | grep GNU`
- if [[ $CGNU == "" ]]; then
+ if [ -z "$CGNU" ]; then
echo "GNU Make not found - please install GNU Make"
exit 1
fi
Index: test_8_logship
===================================================================
RCS file: /usr/local/cvsroot/slony1/slony1-engine/src/ducttape/test_8_logship,v
retrieving revision 1.1
retrieving revision 1.2
diff -Lsrc/ducttape/test_8_logship -Lsrc/ducttape/test_8_logship -u -w -r1.1 -r1.2
--- src/ducttape/test_8_logship
+++ src/ducttape/test_8_logship
@@ -60,7 +60,7 @@
if [ -z $WGM ] ; then
MAKE=make
CGNU=`make -v | grep GNU`
- if [ -z $CGNU ] ; then
+ if [ -z "$CGNU" ] ; then
echo "GNU Make not found - please install GNU Make"
exit 1
fi
Index: test_5_pgbench
===================================================================
RCS file: /usr/local/cvsroot/slony1/slony1-engine/src/ducttape/test_5_pgbench,v
retrieving revision 1.6
retrieving revision 1.7
diff -Lsrc/ducttape/test_5_pgbench -Lsrc/ducttape/test_5_pgbench -u -w -r1.6 -r1.7
--- src/ducttape/test_5_pgbench
+++ src/ducttape/test_5_pgbench
@@ -58,7 +58,7 @@
if [ -z $WGM ] ; then
MAKE=make
CGNU=`make -v | grep GNU`
- if [ -z $CGNU ] ; then
+ if [ -z "$CGNU" ] ; then
echo "GNU Make not found - please install GNU Make"
exit 1
fi
Index: test_2_pgbench
===================================================================
RCS file: /usr/local/cvsroot/slony1/slony1-engine/src/ducttape/test_2_pgbench,v
retrieving revision 1.13
retrieving revision 1.14
diff -Lsrc/ducttape/test_2_pgbench -Lsrc/ducttape/test_2_pgbench -u -w -r1.13 -r1.14
--- src/ducttape/test_2_pgbench
+++ src/ducttape/test_2_pgbench
@@ -65,7 +65,7 @@
if [ -z $WGM ] ; then
MAKE=make
CGNU=`make -v | grep GNU`
- if [ -z $CGNU ] ; then
+ if [ -z "$CGNU" ] ; then
echo "GNU Make not found - please install GNU Make"
exit 1
fi
Index: test_7_defines
===================================================================
RCS file: /usr/local/cvsroot/slony1/slony1-engine/src/ducttape/test_7_defines,v
retrieving revision 1.2
retrieving revision 1.3
diff -Lsrc/ducttape/test_7_defines -Lsrc/ducttape/test_7_defines -u -w -r1.2 -r1.3
--- src/ducttape/test_7_defines
+++ src/ducttape/test_7_defines
@@ -57,7 +57,7 @@
if [ -z $WGM ] ; then
MAKE=make
CGNU=`make -v | grep GNU`
- if [ -z $CGNU ] ; then
+ if [ -z "$CGNU" ] ; then
echo "GNU Make not found - please install GNU Make"
exit 1
fi
Index: test_1_pgbench_no_node1
===================================================================
RCS file: /usr/local/cvsroot/slony1/slony1-engine/src/ducttape/test_1_pgbench_no_node1,v
retrieving revision 1.2
retrieving revision 1.3
diff -Lsrc/ducttape/test_1_pgbench_no_node1 -Lsrc/ducttape/test_1_pgbench_no_node1 -u -w -r1.2 -r1.3
--- src/ducttape/test_1_pgbench_no_node1
+++ src/ducttape/test_1_pgbench_no_node1
@@ -57,7 +57,7 @@
if [ -z $WGM ] ; then
MAKE=make
CGNU=`make -v | grep GNU`
- if [ -z $CGNU ] ; then
+ if [ -z "$CGNU" ] ; then
echo "GNU Make not found - please install GNU Make"
exit 1
fi
Index: test_3_pgbench
===================================================================
RCS file: /usr/local/cvsroot/slony1/slony1-engine/src/ducttape/test_3_pgbench,v
retrieving revision 1.10
retrieving revision 1.11
diff -Lsrc/ducttape/test_3_pgbench -Lsrc/ducttape/test_3_pgbench -u -w -r1.10 -r1.11
--- src/ducttape/test_3_pgbench
+++ src/ducttape/test_3_pgbench
@@ -54,7 +54,7 @@
if [ -z $WGM ] ; then
MAKE=make
CGNU=`make -v | grep GNU`
- if [ -z $CGNU ] ; then
+ if [ -z "$CGNU" ] ; then
echo "GNU Make not found - please install GNU Make"
exit 1
fi
Index: test_4_pgbench
===================================================================
RCS file: /usr/local/cvsroot/slony1/slony1-engine/src/ducttape/test_4_pgbench,v
retrieving revision 1.7
retrieving revision 1.8
diff -Lsrc/ducttape/test_4_pgbench -Lsrc/ducttape/test_4_pgbench -u -w -r1.7 -r1.8
--- src/ducttape/test_4_pgbench
+++ src/ducttape/test_4_pgbench
@@ -47,7 +47,7 @@
if [ -z $WGM ] ; then
MAKE=make
CGNU=`make -v | grep GNU`
- if [ -z $CGNU ] ; then
+ if [ -z "$CGNU" ] ; then
echo "GNU Make not found - please install GNU Make"
exit 1
fi
From cvsuser Tue Mar 15 20:10:46 2005
From: cvsuser (CVS User Account)
Date: Tue Feb 13 08:58:16 2007
Subject: [Slony1-commit] By cbbrowne: Discussion of added diagram
Message-ID: <20050315201044.B0E07B1D126@gborg.postgresql.org>
Log Message:
-----------
Discussion of added diagram
Modified Files:
--------------
slony1-engine/doc/adminguide:
plainpaths.sgml (r1.4 -> r1.5)
-------------- next part --------------
Index: plainpaths.sgml
===================================================================
RCS file: /usr/local/cvsroot/slony1/slony1-engine/doc/adminguide/plainpaths.sgml,v
retrieving revision 1.4
retrieving revision 1.5
diff -Ldoc/adminguide/plainpaths.sgml -Ldoc/adminguide/plainpaths.sgml -u -w -r1.4 -r1.5
--- doc/adminguide/plainpaths.sgml
+++ doc/adminguide/plainpaths.sgml
@@ -42,6 +42,56 @@
the issue where nodes may not be able to all talk to one another via a
uniform set of network addresses.
+ Consider the attached diagram, which describes a set of six
+nodes
+
+
+ Symmetric Multisites
+
+
+
+
+ DB1 and DB2 are databases residing in a secure
+database layer, firewalled against outside access
+except from specifically controlled locations.
+
+ Let's suppose that DB1 is the origin node for the replication
+system.
+
+ DB3 resides in a DMZ at the same site;
+it is intended to be used as a &slony1; provider for
+remote locations.
+ DB4 is a counterpart to DB3 in a DMZ
+at a secondary/failover site. Its job, in the present configuration,
+is to feed servers in the secure database layers at the
+secondary site.
 DB5 and DB6 are counterparts to DB1 and DB2, but
+are, at present, configured as subscribers.
+
+ Supposing disaster were to strike at the primary
+site, the secondary site would be well-equipped to take over servicing
+the applications that use this data.
+
+ Managers paying bills are likely to be reluctant to let the
+machines at the secondary site merely be backups; they
+would doubtless prefer for them to be useful, and that can certainly
+be the case. If the primary site is being used for
+transactional activities, the replicas at the secondary
+site may be used for running time-oriented reports that are allowed to
+be a little bit behind.
+
+ The symmetry of the configuration means that if you
+had two transactional applications needing
+protection from failure, it would be straightforward to have
+additional replication sets so that each site is normally
+primary for one application, and where destruction of
+one site could be addressed by consolidating services at the remaining
+site.
+
+
+
+
+
There is also room for discussion of SSH tunnelling here...
From cvsuser Tue Mar 15 20:11:14 2005
From: cvsuser (CVS User Account)
Date: Tue Feb 13 08:58:16 2007
Subject: [Slony1-commit] By cbbrowne: Added a network diagram for discussion
purposes
Message-ID: <20050315201112.6BD50B1D37F@gborg.postgresql.org>
Log Message:
-----------
Added a network diagram for discussion purposes
Added Files:
-----------
slony1-engine/doc/adminguide:
complexenv.dia (r1.1)
complexenv.png (r1.1)
-------------- next part --------------
From cvsuser Tue Mar 15 20:12:04 2005
From: cvsuser (CVS User Account)
Date: Tue Feb 13 08:58:16 2007
Subject: [Slony1-commit] By cbbrowne: Added link to slonconf.sgml,
and make sure that new
Message-ID: <20050315201202.9352DB1D126@gborg.postgresql.org>
Log Message:
-----------
Added link to slonconf.sgml, and make sure that new documentation
is included
Modified Files:
--------------
slony1-engine/doc/adminguide:
slon.sgml (r1.12 -> r1.13)
slonik.sgml (r1.12 -> r1.13)
slonik_ref.sgml (r1.18 -> r1.19)
slony.sgml (r1.15 -> r1.16)
-------------- next part --------------
Index: slon.sgml
===================================================================
RCS file: /usr/local/cvsroot/slony1/slony1-engine/doc/adminguide/slon.sgml,v
retrieving revision 1.12
retrieving revision 1.13
diff -Ldoc/adminguide/slon.sgml -Ldoc/adminguide/slon.sgml -u -w -r1.12 -r1.13
--- doc/adminguide/slon.sgml
+++ doc/adminguide/slon.sgml
@@ -224,6 +224,10 @@
File from which to read slon configuration.
+
+ This configuration is discussed further in .
+
Index: slonik_ref.sgml
===================================================================
RCS file: /usr/local/cvsroot/slony1/slony1-engine/doc/adminguide/slonik_ref.sgml,v
retrieving revision 1.18
retrieving revision 1.19
diff -Ldoc/adminguide/slonik_ref.sgml -Ldoc/adminguide/slonik_ref.sgml -u -w -r1.18 -r1.19
--- doc/adminguide/slonik_ref.sgml
+++ doc/adminguide/slonik_ref.sgml
@@ -2033,7 +2033,7 @@
The node to refresh.
-
+
Example
Index: slonik.sgml
===================================================================
RCS file: /usr/local/cvsroot/slony1/slony1-engine/doc/adminguide/slonik.sgml,v
retrieving revision 1.12
retrieving revision 1.13
diff -Ldoc/adminguide/slonik.sgml -Ldoc/adminguide/slonik.sgml -u -w -r1.12 -r1.13
--- doc/adminguide/slonik.sgml
+++ doc/adminguide/slonik.sgml
@@ -46,20 +46,21 @@
script.
Nearly all of the real configuration work is actually done by
- calling stored procedures after loading the Slony-I
- support base into a database. Slonik was created
- because these stored procedures have special requirements as to on
- which particular node in the replication system they are called.
- The absence of named parameters for stored procedures makes it
- rather hard to do this from the psql prompt, and
- psql lacks the ability to maintain multiple
- connections with open transactions to multiple databases.
-
- The format of the Slonik language is very similar to
- that of SQL, and the parser is based on a similar set of formatting
- rules for such things as numbers and strings. Note that slonik is
- declarative, using literal values throughout, and does
- not have the notion of variables. It is
+ calling stored procedures after loading the
+ Slony-I support base into a database.
+ Slonik was created because these stored
+ procedures have special requirements as to on which particular node
+ in the replication system they are called. The absence of named
+ parameters for stored procedures makes it rather hard to do this
+ from the psql prompt, and
+ psql lacks the ability to maintain
+ multiple connections with open transactions to multiple
+ databases.
+
+ The format of the Slonik language is very
+ similar to that of SQL, and the parser is based on a similar set of
+ formatting rules for such things as numbers and strings. Note that
+ slonik is declarative, using literal values throughout. It is
anticipated that Slonik scripts will typically be
generated by scripts, such as Bash or Perl, and
these sorts of scripting languages already have perfectly good ways
Index: slony.sgml
===================================================================
RCS file: /usr/local/cvsroot/slony1/slony1-engine/doc/adminguide/slony.sgml,v
retrieving revision 1.15
retrieving revision 1.16
diff -Ldoc/adminguide/slony.sgml -Ldoc/adminguide/slony.sgml -u -w -r1.15 -r1.16
--- doc/adminguide/slony.sgml
+++ doc/adminguide/slony.sgml
@@ -75,15 +75,16 @@
worth documenting.
&faq;
- &schemadoc;
Core &slony1; Programs
&slon;
+ &slonconf;
&slonik;
&slonikref;
+&schemadoc;
&bookindex;
From cvsuser Wed Mar 16 17:03:29 2005
From: cvsuser (CVS User Account)
Date: Tue Feb 13 08:58:16 2007
Subject: [Slony1-commit] By cbbrowne: New Directory
Message-ID: <20050316170327.145B1B1CBBF@gborg.postgresql.org>
Update of /usr/local/cvsroot/slony1/slony1-engine/debian
In directory gborg.postgresql.org:/tmp/cvs-serv46452/debian
Log Message:
Directory /usr/local/cvsroot/slony1/slony1-engine/debian added to the repository
From cvsuser Wed Mar 16 17:05:56 2005
From: cvsuser (CVS User Account)
Date: Tue Feb 13 08:58:16 2007
Subject: [Slony1-commit] By cbbrowne: Add Debian packaging control files,
contributed by Tim
Message-ID: <20050316170555.56388B1CDDE@gborg.postgresql.org>
Log Message:
-----------
Add Debian packaging control files, contributed by Tim Goodaire
Added Files:
-----------
slony1-engine/debian:
changelog (r1.1)
compat (r1.1)
control (r1.1)
copyright (r1.1)
dirs (r1.1)
docs (r1.1)
files (r1.1)
rules (r1.1)
slony1.substvars (r1.1)
-------------- next part --------------
--- /dev/null
+++ debian/changelog
@@ -0,0 +1,6 @@
+slony1 (1.1-1) unstable; urgency=low
+
+ * Initial Release.
+
+ -- Tim Goodaire Thu, 11 Nov 2004 22:38:04 -0500
+
--- /dev/null
+++ debian/files
@@ -0,0 +1 @@
+slony1_1.1-1_i386.deb misc optional
--- /dev/null
+++ debian/copyright
@@ -0,0 +1,36 @@
+This package was debianized by Tim Goodaire on
+Wed Mar 16 10:20:58 EST 2005 -0500.
+
+It was downloaded from http://www.slony.org
+
+Copyright:
+
+Jan Wieck janwieck@yahoo.com
+Christopher Browne cbbrowne@ca.afilias.info
+Darcy Buskermolen darcy@wavefire.com
+David Fetter david@fetter.org
+Steve Simms ssimms@steve.deefs.net
+
+License:
+
+Slony-I - A replication system for the PostgreSQL Database Management System
+
+Copyright (c) 2003-2004, PostgreSQL Global Development Group
+
+Permission to use, copy, modify, and distribute this software and its
+documentation for any purpose, without fee, and without a written agreement
+is hereby granted, provided that the above copyright notice and this
+paragraph and the following two paragraphs appear in all copies.
+
+IN NO EVENT SHALL THE UNIVERSITY OF CALIFORNIA BE LIABLE TO ANY PARTY FOR
+DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, INCLUDING
+LOST PROFITS, ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS
+DOCUMENTATION, EVEN IF THE UNIVERSITY OF CALIFORNIA HAS BEEN ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+THE UNIVERSITY OF CALIFORNIA SPECIFICALLY DISCLAIMS ANY WARRANTIES,
+INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
+AND FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS
+ON AN "AS IS" BASIS, AND THE UNIVERSITY OF CALIFORNIA HAS NO OBLIGATIONS TO
+PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
+
--- /dev/null
+++ debian/slony1.substvars
@@ -0,0 +1 @@
+shlibs:Depends=libc6 (>= 2.3.2.ds1-4), libpq3 (>= 7.4)
--- /dev/null
+++ debian/rules
@@ -0,0 +1,107 @@
+#!/usr/bin/make -f
+# -*- makefile -*-
+# Sample debian/rules that uses debhelper.
+# This file was originally written by Joey Hess and Craig Small.
+# As a special exception, when this file is copied by dh-make into a
+# dh-make output file, you may use that output file without restriction.
+# This special exception was added by Craig Small in version 0.37 of dh-make.
+
+# Uncomment this to turn on verbose mode.
+#export DH_VERBOSE=1
+
+
+# These are used for cross-compiling and for saving the configure script
+# from having to guess our platform (since we know it already)
+DEB_HOST_GNU_TYPE ?= $(shell dpkg-architecture -qDEB_HOST_GNU_TYPE)
+DEB_BUILD_GNU_TYPE ?= $(shell dpkg-architecture -qDEB_BUILD_GNU_TYPE)
+
+
+CFLAGS = -Wall -g
+
+ifneq (,$(findstring noopt,$(DEB_BUILD_OPTIONS)))
+ CFLAGS += -O0
+else
+ CFLAGS += -O2
+endif
+
+config.status: configure
+ dh_testdir
+ # Add here commands to configure the package.
+ CFLAGS="$(CFLAGS)" ./configure --host=$(DEB_HOST_GNU_TYPE) --build=$(DEB_BUILD_GNU_TYPE) --mandir=\$${prefix}/share/man --infodir=\$${prefix}/share/info
+
+
+build: build-stamp
+
+build-stamp: config.status
+ dh_testdir
+
+ # Add here commands to compile the package.
+ $(MAKE)
+ #docbook-to-man debian/slony1.sgml > slony1.1
+
+ touch build-stamp
+
+clean:
+ dh_testdir
+ dh_testroot
+ rm -f build-stamp
+
+ # Add here commands to clean up after the build process.
+ -$(MAKE) distclean
+ifneq "$(wildcard /usr/share/misc/config.sub)" ""
+ cp -f /usr/share/misc/config.sub config.sub
+endif
+ifneq "$(wildcard /usr/share/misc/config.guess)" ""
+ cp -f /usr/share/misc/config.guess config.guess
+endif
+
+
+ dh_clean
+
+install: build
+ dh_testdir
+ dh_testroot
+ dh_clean -k
+ dh_installdirs
+
+ # Add here commands to install the package into debian/slony1.
+ $(MAKE) install DESTDIR=$(CURDIR)/debian/slony1
+
+
+# Build architecture-independent files here.
+binary-indep: build install
+# We have nothing to do by default.
+
+# Build architecture-dependent files here.
+binary-arch: build install
+ dh_testdir
+ dh_testroot
+ dh_installchangelogs
+ dh_installdocs
+# dh_installexamples
+ dh_install
+# dh_installmenu
+# dh_installdebconf
+# dh_installlogrotate
+# dh_installemacsen
+# dh_installpam
+# dh_installmime
+# dh_installinit
+# dh_installcron
+# dh_installinfo
+ dh_installman
+ dh_link
+ dh_strip
+ dh_compress
+ dh_fixperms
+# dh_perl
+# dh_python
+# dh_makeshlibs
+ dh_installdeb
+ dh_shlibdeps
+ dh_gencontrol
+ dh_md5sums
+ dh_builddeb
+
+binary: binary-indep binary-arch
+.PHONY: build clean binary-indep binary-arch binary install
--- /dev/null
+++ debian/compat
@@ -0,0 +1 @@
+4
--- /dev/null
+++ debian/dirs
@@ -0,0 +1,3 @@
+usr/lib/postgresql/bin
+usr/share/postgresql
+usr/share/doc/slony1-1
--- /dev/null
+++ debian/docs
@@ -0,0 +1,9 @@
+doc/adminguide.tar
+doc/howto/slony-I-basic-mstr-slv.txt
+doc/howto/slony-I-basic-mstr-slv.txt.de
+doc/howto/slony-I-failover.txt
+doc/howto/slony-I-failover.txt.de
+doc/howto/slony-I-install.txt
+doc/howto/slony-I-install.txt.de
+doc/howto/slony-I-overview.txt
+doc/howto/slony-I-overview.txt.de
--- /dev/null
+++ debian/control
@@ -0,0 +1,17 @@
+Source: slony1
+Section: misc
+Priority: optional
+Maintainer: Tim Goodaire
+Build-Depends: debhelper (>= 4.0.0), docbook-dsssl
+Standards-Version: 3.6.1
+
+Package: slony1
+Architecture: any
+Depends: postgresql (>= 7.3.3), postgresql-dev (>= 7.3.3)
+Description: Slony-I is a replication system for postgresql
+ Slony-I is a "master to multiple slaves" replication system with cascading and
+ slave promotion. The big picture for the development of Slony-I is a
+ master-slave system that includes all features and capabilities needed to
+ replicate large databases to a reasonably limited number of slave systems.
+ Slony-I is a system for data centers and backup sites, where the normal mode
+ of operation is that all nodes are available.
From cvsuser Wed Mar 16 17:15:43 2005
From: cvsuser (CVS User Account)
Date: Tue Feb 13 08:58:16 2007
Subject: [Slony1-commit] By cbbrowne: Change the SUBSCRIBE_SET event so that
it starts by
Message-ID: <20050316171540.F4189B1D368@gborg.postgresql.org>
Log Message:
-----------
Change the SUBSCRIBE_SET event so that it starts by checking for
the availability of tables on the subscriber node, and subscribes
sequences _BEFORE_ copying all the data.
That way we find problems before doing 8h of copying of data...
As suggested by Hannu Krosing
Modified Files:
--------------
slony1-engine/src/slon:
remote_worker.c (r1.77 -> r1.78)
-------------- next part --------------
Index: remote_worker.c
===================================================================
RCS file: /usr/local/cvsroot/slony1/slony1-engine/src/slon/remote_worker.c,v
retrieving revision 1.77
retrieving revision 1.78
diff -Lsrc/slon/remote_worker.c -Lsrc/slon/remote_worker.c -u -w -r1.77 -r1.78
--- src/slon/remote_worker.c
+++ src/slon/remote_worker.c
@@ -2352,6 +2352,180 @@
}
}
+ /* cbbrowne - in progress - check tables/sequences in set to
+ * make sure they are there and in good order. Don't copy any
+ * data yet; we want to just do a first pass that finds "bozo
+ * errors" */
+
+ /* Check tables and sequences in set to make sure they are all
+ * appropriately configured... */
+
+ /*
+ * Select the list of all tables the provider currently has in the set.
+ */
+ slon_mkquery(&query1,
+ "select T.tab_id, "
+ " \"pg_catalog\".quote_ident(PGN.nspname) || '.' || "
+ " \"pg_catalog\".quote_ident(PGC.relname) as tab_fqname, "
+ " T.tab_idxname, T.tab_comment "
+ "from %s.sl_table T, "
+ " \"pg_catalog\".pg_class PGC, "
+ " \"pg_catalog\".pg_namespace PGN "
+ "where T.tab_set = %d "
+ " and T.tab_reloid = PGC.oid "
+ " and PGC.relnamespace = PGN.oid "
+ "order by tab_id; ",
+ rtcfg_namespace, set_id);
+ res1 = PQexec(pro_dbconn, dstring_data(&query1));
+ if (PQresultStatus(res1) != PGRES_TUPLES_OK)
+ {
+ slon_log(SLON_ERROR, "remoteWorkerThread_%d: \"%s\" %s",
+ node->no_id, dstring_data(&query1),
+ PQresultErrorMessage(res1));
+ PQclear(res1);
+ slon_disconnectdb(pro_conn);
+ dstring_free(&query1);
+ terminate_log_archive();
+ return -1;
+ }
+ ntuples1 = PQntuples(res1);
+
+ /*
+ * For each table in the set
+ */
+ for (tupno1 = 0; tupno1 < ntuples1; tupno1++)
+ {
+ int tab_id = strtol(PQgetvalue(res1, tupno1, 0), NULL, 10);
+ char *tab_fqname = PQgetvalue(res1, tupno1, 1);
+ char *tab_idxname = PQgetvalue(res1, tupno1, 2);
+ char *tab_comment = PQgetvalue(res1, tupno1, 3);
+ int64 copysize = 0;
+
+ gettimeofday(&tv_start2, NULL);
+ slon_log(SLON_DEBUG2, "remoteWorkerThread_%d: "
+ "prepare to copy table %s\n",
+ node->no_id, tab_fqname);
+
+ /*
+ * Find out if the table we're copying has the special slony serial
+ * number key on the provider DB
+ */
+ slon_mkquery(&query1,
+ "select %s.tableHasSerialKey('%q');",
+ rtcfg_namespace, tab_fqname);
+ res2 = PQexec(pro_dbconn, dstring_data(&query1));
+ if (PQresultStatus(res2) != PGRES_TUPLES_OK)
+ {
+ slon_log(SLON_ERROR, "remoteWorkerThread_%d: \"%s\" %s",
+ node->no_id, dstring_data(&query1),
+ PQresultErrorMessage(res2));
+ PQclear(res2);
+ PQclear(res1);
+ slon_disconnectdb(pro_conn);
+ dstring_free(&query1);
+ terminate_log_archive();
+ return -1;
+ }
+ rc = *PQgetvalue(res2, 0, 0) == 't';
+ PQclear(res2);
+
+ if (rc)
+ {
+ /*
+ * It has, check if the table has this on the local DB too.
+ */
+ slon_log(SLON_DEBUG3, "remoteWorkerThread_%d: "
+ "table %s will require Slony-I serial key\n",
+ node->no_id, tab_fqname);
+ res2 = PQexec(loc_dbconn, dstring_data(&query1));
+ if (PQresultStatus(res2) != PGRES_TUPLES_OK)
+ {
+ slon_log(SLON_ERROR, "remoteWorkerThread_%d: \"%s\" %s",
+ node->no_id, dstring_data(&query1),
+ PQresultErrorMessage(res2));
+ PQclear(res2);
+ PQclear(res1);
+ slon_disconnectdb(pro_conn);
+ dstring_free(&query1);
+ terminate_log_archive();
+ return -1;
+ }
+ rc = *PQgetvalue(res2, 0, 0) == 't';
+ PQclear(res2);
+
+ if (!rc)
+ {
+ slon_log(SLON_DEBUG3, "remoteWorkerThread_%d: "
+ "table %s Slony-I serial key to be added local\n",
+ node->no_id, tab_fqname);
+ }
+ }
+ else
+ {
+ slon_log(SLON_DEBUG3, "remoteWorkerThread_%d: "
+ "table %s does not require Slony-I serial key\n",
+ node->no_id, tab_fqname);
+ }
+ }
+ PQclear(res1);
+
+ slon_log(SLON_DEBUG2, "remoteWorkerThread_%d: "
+ "all tables for set %d found on subscriber\n",
+ node->no_id, set_id);
+ /*
+ * Add in the sequences contained in the set
+ */
+ slon_mkquery(&query1,
+ "select SQ.seq_id, "
+ " \"pg_catalog\".quote_ident(PGN.nspname) || '.' || "
+ " \"pg_catalog\".quote_ident(PGC.relname), "
+ " SQ.seq_comment "
+ " from %s.sl_sequence SQ, "
+ " \"pg_catalog\".pg_class PGC, "
+ " \"pg_catalog\".pg_namespace PGN "
+ " where SQ.seq_set = %d "
+ " and PGC.oid = SQ.seq_reloid "
+ " and PGN.oid = PGC.relnamespace; ",
+ rtcfg_namespace, set_id);
+ res1 = PQexec(pro_dbconn, dstring_data(&query1));
+ if (PQresultStatus(res1) != PGRES_TUPLES_OK)
+ {
+ slon_log(SLON_ERROR, "remoteWorkerThread_%d: \"%s\" %s",
+ node->no_id, dstring_data(&query1),
+ PQresultErrorMessage(res1));
+ PQclear(res1);
+ slon_disconnectdb(pro_conn);
+ dstring_free(&query1);
+ terminate_log_archive();
+ return -1;
+ }
+ ntuples1 = PQntuples(res1);
+ for (tupno1 = 0; tupno1 < ntuples1; tupno1++)
+ {
+ char *seq_id = PQgetvalue(res1, tupno1, 0);
+ char *seq_fqname = PQgetvalue(res1, tupno1, 1);
+ char *seq_comment = PQgetvalue(res1, tupno1, 2);
+
+ slon_log(SLON_DEBUG2, "remoteWorkerThread_%d: "
+ "copy sequence %s\n",
+ node->no_id, seq_fqname);
+
+ slon_mkquery(&query1,
+ "select %s.setAddSequence_int(%d, %s, '%q', '%q')",
+ rtcfg_namespace, set_id, seq_id,
+ seq_fqname, seq_comment);
+ if (query_execute(node, loc_dbconn, &query1) < 0)
+ {
+ PQclear(res1);
+ slon_disconnectdb(pro_conn);
+ dstring_free(&query1);
+ terminate_log_archive();
+ return -1;
+ }
+ }
+ PQclear(res1);
+
+
/*
* Select the list of all tables the provider currently has in the set.
*/
@@ -2842,55 +3016,60 @@
/*
* Copy the sequences contained in the set
*/
- slon_mkquery(&query1,
- "select SQ.seq_id, "
- " \"pg_catalog\".quote_ident(PGN.nspname) || '.' || "
- " \"pg_catalog\".quote_ident(PGC.relname), "
- " SQ.seq_comment "
- " from %s.sl_sequence SQ, "
- " \"pg_catalog\".pg_class PGC, "
- " \"pg_catalog\".pg_namespace PGN "
- " where SQ.seq_set = %d "
- " and PGC.oid = SQ.seq_reloid "
- " and PGN.oid = PGC.relnamespace; ",
- rtcfg_namespace, set_id);
- res1 = PQexec(pro_dbconn, dstring_data(&query1));
- if (PQresultStatus(res1) != PGRES_TUPLES_OK)
- {
- slon_log(SLON_ERROR, "remoteWorkerThread_%d: \"%s\" %s",
- node->no_id, dstring_data(&query1),
- PQresultErrorMessage(res1));
- PQclear(res1);
- slon_disconnectdb(pro_conn);
- dstring_free(&query1);
- terminate_log_archive();
- return -1;
- }
- ntuples1 = PQntuples(res1);
- for (tupno1 = 0; tupno1 < ntuples1; tupno1++)
- {
- char *seq_id = PQgetvalue(res1, tupno1, 0);
- char *seq_fqname = PQgetvalue(res1, tupno1, 1);
- char *seq_comment = PQgetvalue(res1, tupno1, 2);
-
- slon_log(SLON_DEBUG2, "remoteWorkerThread_%d: "
- "copy sequence %s\n",
- node->no_id, seq_fqname);
- slon_mkquery(&query1,
- "select %s.setAddSequence_int(%d, %s, '%q', '%q')",
- rtcfg_namespace, set_id, seq_id,
- seq_fqname, seq_comment);
- if (query_execute(node, loc_dbconn, &query1) < 0)
- {
- PQclear(res1);
- slon_disconnectdb(pro_conn);
- dstring_free(&query1);
- terminate_log_archive();
- return -1;
- }
- }
- PQclear(res1);
+ /* The copy of sequences is being done earlier, before we
+ * start doing tables, so that if anything is missing, that is
+ * noticed BEFORE 8 hours of copying of data takes place... */
+
+/* slon_mkquery(&query1, */
+/* "select SQ.seq_id, " */
+/* " \"pg_catalog\".quote_ident(PGN.nspname) || '.' || " */
+/* " \"pg_catalog\".quote_ident(PGC.relname), " */
+/* " SQ.seq_comment " */
+/* " from %s.sl_sequence SQ, " */
+/* " \"pg_catalog\".pg_class PGC, " */
+/* " \"pg_catalog\".pg_namespace PGN " */
+/* " where SQ.seq_set = %d " */
+/* " and PGC.oid = SQ.seq_reloid " */
+/* " and PGN.oid = PGC.relnamespace; ", */
+/* rtcfg_namespace, set_id); */
+/* res1 = PQexec(pro_dbconn, dstring_data(&query1)); */
+/* if (PQresultStatus(res1) != PGRES_TUPLES_OK) */
+/* { */
+/* slon_log(SLON_ERROR, "remoteWorkerThread_%d: \"%s\" %s", */
+/* node->no_id, dstring_data(&query1), */
+/* PQresultErrorMessage(res1)); */
+/* PQclear(res1); */
+/* slon_disconnectdb(pro_conn); */
+/* dstring_free(&query1); */
+/* terminate_log_archive(); */
+/* return -1; */
+/* } */
+/* ntuples1 = PQntuples(res1); */
+/* for (tupno1 = 0; tupno1 < ntuples1; tupno1++) */
+/* { */
+/* char *seq_id = PQgetvalue(res1, tupno1, 0); */
+/* char *seq_fqname = PQgetvalue(res1, tupno1, 1); */
+/* char *seq_comment = PQgetvalue(res1, tupno1, 2); */
+
+/* slon_log(SLON_DEBUG2, "remoteWorkerThread_%d: " */
+/* "copy sequence %s\n", */
+/* node->no_id, seq_fqname); */
+
+/* slon_mkquery(&query1, */
+/* "select %s.setAddSequence_int(%d, %s, '%q', '%q')", */
+/* rtcfg_namespace, set_id, seq_id, */
+/* seq_fqname, seq_comment); */
+/* if (query_execute(node, loc_dbconn, &query1) < 0) */
+/* { */
+/* PQclear(res1); */
+/* slon_disconnectdb(pro_conn); */
+/* dstring_free(&query1); */
+/* terminate_log_archive(); */
+/* return -1; */
+/* } */
+/* } */
+/* PQclear(res1); */
/*
* And copy over the sequence last_value corresponding to the
From cvsuser Wed Mar 16 18:57:24 2005
From: cvsuser (CVS User Account)
Date: Tue Feb 13 08:58:16 2007
Subject: [Slony1-commit] By smsimms: Vary the sleep_seconds by up to 50% in
either direction.
Message-ID: <20050316185721.0D979B1CDDA@gborg.postgresql.org>
Log Message:
-----------
Vary the sleep_seconds by up to 50% in either direction. This will
help prevent watchdog processes from all firing at the same time.
Modified Files:
--------------
slony1-engine/tools/altperl:
slon_watchdog.pl (r1.10 -> r1.11)
-------------- next part --------------
Index: slon_watchdog.pl
===================================================================
RCS file: /usr/local/cvsroot/slony1/slony1-engine/tools/altperl/slon_watchdog.pl,v
retrieving revision 1.10
retrieving revision 1.11
diff -Ltools/altperl/slon_watchdog.pl -Ltools/altperl/slon_watchdog.pl -u -w -r1.10 -r1.11
--- tools/altperl/slon_watchdog.pl
+++ tools/altperl/slon_watchdog.pl
@@ -59,8 +59,8 @@
system "date >> $LOGDIR/slon_watchdog.log";
print LOG "Found slon daemon running for the $CLUSTER_NAME cluster, PID $pid\n";
print LOG "Looks Ok\n";
- print LOG "Sleeping for $sleep seconds\n";
+ print LOG "Sleeping for $sleep +/- " . int($sleep/2) . " seconds\n";
}
close(PSOUT);
- sleep $sleep;
+ sleep $sleep + (rand($sleep) - $sleep/2);
}
From cvsuser Wed Mar 16 20:37:56 2005
From: cvsuser (CVS User Account)
Date: Tue Feb 13 08:58:16 2007
Subject: [Slony1-commit] By smsimms: Add an option to only kill watchdogs,
not slons.
Message-ID: <20050316203754.63051B1D393@gborg.postgresql.org>
Log Message:
-----------
Add an option to only kill watchdogs, not slons.
Some mild reformatting as well.
Modified Files:
--------------
slony1-engine/tools/altperl:
slon_kill.pl (r1.11 -> r1.12)
-------------- next part --------------
Index: slon_kill.pl
===================================================================
RCS file: /usr/local/cvsroot/slony1/slony1-engine/tools/altperl/slon_kill.pl,v
retrieving revision 1.11
retrieving revision 1.12
diff -Ltools/altperl/slon_kill.pl -Ltools/altperl/slon_kill.pl -u -w -r1.11 -r1.12
--- tools/altperl/slon_kill.pl
+++ tools/altperl/slon_kill.pl
@@ -1,6 +1,6 @@
#!@@PERL@@
# $Id$
-# Kill all slon instances for the current setname
+# Kill all slon instances for the current cluster
# Author: Christopher Browne
# Copyright 2004 Afilias Canada
@@ -9,16 +9,23 @@
# Defaults
$CONFIG_FILE = '@@SYSCONFDIR@@/slon_tools.conf';
$SHOW_USAGE = 0;
+$WATCHDOG_ONLY = 0;
# Read command-line options
GetOptions("config=s" => \$CONFIG_FILE,
- "help" => \$SHOW_USAGE);
+ "help" => \$SHOW_USAGE,
+ "w|watchdog" => \$WATCHDOG_ONLY);
my $USAGE =
-"Usage: slon_kill [--config file]
+"Usage: slon_kill [--config file] [-w|--watchdog]
- Kills all running slon and slon_watchdog instances for the set
- specified in the config file.
+ --config file Location of the slon_tools.conf file
+
+ -w
+ --watchdog Only kill the watchdog process(es)
+
+ Kills all running slon and slon_watchdog on this machine for every
+ node in the cluster.
";
@@ -32,14 +39,16 @@
print "slon_kill.pl... Killing all slon and slon_watchdog instances for the cluster $CLUSTER_NAME\n";
print "1. Kill slon watchdogs\n";
-# kill the watchdog
+# kill the watchdog
open(PSOUT, ps_args() . " | egrep '[s]lon_watchdog' | sort -n | awk '{print \$2}'|");
shut_off_processes();
close(PSOUT);
if ($found eq 'n') {
print "No watchdogs found\n";
}
+
+unless ($WATCHDOG_ONLY) {
print "\n2. Kill slon processes\n";
# kill the slon daemon
@@ -50,6 +59,7 @@
if ($found eq 'n') {
print "No slon processes found\n";
}
+}
sub shut_off_processes {
$found="n";
From cvsuser Wed Mar 16 21:15:14 2005
From: cvsuser (CVS User Account)
Date: Tue Feb 13 08:58:16 2007
Subject: [Slony1-commit] By smsimms: Added a "-c" to execute_script.pl,
a la psql.
Message-ID: <20050316211511.4107BB1D38F@gborg.postgresql.org>
Log Message:
-----------
Added a "-c" to execute_script.pl, a la psql. If you only want to run
a small number of commands, this saves you the trouble of putting them
into a file first.
Removed the "node#" argument since this is derivable. Replaced it
with a --node option so that it can be overridden if necessary.
Added an error check to make sure the set exists in the configuration
file.
Modified Files:
--------------
slony1-engine/tools/altperl:
execute_script.pl (r1.1 -> r1.2)
-------------- next part --------------
Index: execute_script.pl
===================================================================
RCS file: /usr/local/cvsroot/slony1/slony1-engine/tools/altperl/execute_script.pl,v
retrieving revision 1.1
retrieving revision 1.2
diff -Ltools/altperl/execute_script.pl -Ltools/altperl/execute_script.pl -u -w -r1.1 -r1.2
--- tools/altperl/execute_script.pl
+++ tools/altperl/execute_script.pl
@@ -8,17 +8,35 @@
# Defaults
$CONFIG_FILE = '@@SYSCONFDIR@@/slon_tools.conf';
$SHOW_USAGE = 0;
+$SCRIPT_ARG = "";
+
+# Allow this to be specified. Otherwise it will be pulled from
+# the get_set function.
+$node = 0;
# Read command-line options
GetOptions("config=s" => \$CONFIG_FILE,
- "help" => \$SHOW_USAGE);
+ "help" => \$SHOW_USAGE,
+ "c=s" => \$SCRIPT_ARG,
+ "n|node=s" => \$node);
my $USAGE =
-"Usage: execute_script [--config file] set# node# full_path_to_sql_script_file
-
- Executes the contents of a SQL script file on the specified set and node.
-
- The script only needs to exist on the machine running the slon daemon.
+"Usage:
+ execute_script [options] set# full_path_to_sql_script_file
+ execute_script [options] -c SCRIPT set#
+
+ Executes the contents of a SQL script file on the specified set.
+ The script only needs to exist on the machine running the slon
+ daemon.
+
+ set# The set to which this script applies.
+
+ -c SCRIPT Pass the SQL to be executed via the command line instead
+ of as a file.
+
+ -n NUM
+ --node=NUM Override the set origin specified in the configuration
+ file.
";
@@ -30,7 +48,7 @@
require '@@PGLIBDIR@@/slon-tools.pm';
require $CONFIG_FILE;
-my ($set, $node, $file) = @ARGV;
+my ($set, $file) = @ARGV;
if ($set =~ /^(?:set)?(\d+)$/) {
$set = $1;
} else {
@@ -38,15 +56,27 @@
die $USAGE;
}
-if ($node =~ /^(?:node)?(\d+)$/) {
- $node = $1;
-} else {
- print "Invalid node identifier\n\n";
- die $USAGE;
-}
+get_set($set) or die "Non-existent set specified.\n";
+$node = $SET_ORIGIN unless $node;
+# We can either have -c SCRIPT or a filename as an argument. The
+# latter takes precedence.
+if ($file) {
unless ($file =~ /^\// and -f $file) {
- print "SQL script path needs to be a full path, e.g. /tmp/my_script.sql\n\n";
+ print STDERR "SQL script path needs to be a full path, e.g. /tmp/my_script.sql\n\n";
+ die $USAGE;
+ }
+}
+elsif ($SCRIPT_ARG) {
+ # Put the script into a file
+ $file = "/tmp/execute_script.sql.$$";
+ my $fh;
+ open $fh, ">", $file;
+ print $fh $SCRIPT_ARG;
+ close $fh;
+}
+else {
+ print STDERR "You must include either a filename or a SQL statement on the command line to be run.\n\n";
die $USAGE;
}
From cvsuser Thu Mar 17 19:43:33 2005
From: cvsuser (CVS User Account)
Date: Tue Feb 13 08:58:16 2007
Subject: [Slony1-commit] By cbbrowne: cleanup thread uses Slony-I stored
procedure getMinXid()
Message-ID: <20050317194331.63410B1CE8E@gborg.postgresql.org>
Log Message:
-----------
cleanup thread uses Slony-I stored procedure getMinXid() instead of
trying to draw it out of pg_locks, eliminating the need to compare
values.
Modified Files:
--------------
slony1-engine/src/slon:
cleanup_thread.c (r1.22 -> r1.23)
-------------- next part --------------
Index: cleanup_thread.c
===================================================================
RCS file: /usr/local/cvsroot/slony1/slony1-engine/src/slon/cleanup_thread.c,v
retrieving revision 1.22
retrieving revision 1.23
diff -Lsrc/slon/cleanup_thread.c -Lsrc/slon/cleanup_thread.c -u -w -r1.22 -r1.23
--- src/slon/cleanup_thread.c
+++ src/slon/cleanup_thread.c
@@ -260,40 +260,23 @@
pthread_exit(NULL);
}
+/* "_T1".getMinXid(); */
static unsigned long get_earliest_xid (PGconn *dbconn) {
- unsigned long lo = 2147483647;
- unsigned long minhi = -1;
- unsigned long minlo = lo;
- unsigned long xid;
+ long long xid;
long n,t;
PGresult *res;
SlonDString query1;
dstring_init(&query1);
- slon_mkquery(&query1, "select transaction from pg_catalog.pg_locks where transaction is not null;");
+ slon_mkquery(&query1, "select %s.getMinXid();", rtcfg_namespace);
res = PQexec(dbconn, dstring_data(&query1));
if (PQresultStatus(res) != PGRES_TUPLES_OK) {
slon_log(SLON_FATAL, "cleanupThread: could not read locks from pg_locks!");
PQclear(res);
slon_abort();
return -1;
- } else {
- n = PQntuples(res);
- for (t = 0; t < n; t++) {
- xid = atoi(PQgetvalue(res, t, 0));
- printf ("xid: %d\n", xid);
- if (xid > lo) {
- if (xid < minlo)
- minlo = xid;
- } else {
- if (xid < minhi)
- minhi = xid;
}
- }
- }
- printf("minhi: %d minlo: %d\n", minlo, minhi);
- if ((minhi - lo) < minlo)
- return minlo;
- else
- return minhi;
+ xid = strtoll(PQgetvalue(res, 0, 0), NULL, 10);
+ slon_log(SLON_DEBUG3, "cleanupThread: minxid: %d\n", xid);
+ return (unsigned long) xid;
}
From cvsuser Thu Mar 17 22:54:28 2005
From: cvsuser (CVS User Account)
Date: Tue Feb 13 08:58:16 2007
Subject: [Slony1-commit] By darcyb: Update sigaction flags to conform to
POSIX standard.
Message-ID: <20050317225426.65FBEB1CE64@gborg.postgresql.org>
Log Message:
-----------
Update sigaction flags to conform to POSIX standard. Add configure test to ensure we have SA_NODEFER as a sigaction flag.
Modified Files:
--------------
slony1-engine:
aclocal.m4 (r1.8 -> r1.9)
config.h.in (r1.10 -> r1.11)
configure (r1.43 -> r1.44)
configure.ac (r1.42 -> r1.43)
slony1-engine/src/slon:
slon.c (r1.46 -> r1.47)
Added Files:
-----------
slony1-engine/config:
libc.m4 (r1.1)
-------------- next part --------------
Index: config.h.in
===================================================================
RCS file: /usr/local/cvsroot/slony1/slony1-engine/config.h.in,v
retrieving revision 1.10
retrieving revision 1.11
diff -Lconfig.h.in -Lconfig.h.in -u -w -r1.10 -r1.11
--- config.h.in
+++ config.h.in
@@ -73,4 +73,7 @@
/* Set to 1 if server/utils/typcache.h exists */
#undef HAVE_TYPCACHE
+/* Set to 1 if we have POSIX signals */
+#undef HAVE_POSIX_SIGNALS
+
#endif /* SLONY_I_CONFIG_H */
Index: configure.ac
===================================================================
RCS file: /usr/local/cvsroot/slony1/slony1-engine/configure.ac,v
retrieving revision 1.42
retrieving revision 1.43
diff -Lconfigure.ac -Lconfigure.ac -u -w -r1.42 -r1.43
--- configure.ac
+++ configure.ac
@@ -101,6 +101,7 @@
AC_CHECK_TYPES([int32_t, uint32_t, u_int32_t])
AC_CHECK_TYPES([int64_t, uint64_t, u_int64_t])
AC_CHECK_TYPES([ssize_t])
+SLON_AC_FUNC_POSIX_SIGNALS()
# ----
# Locate PostgreSQL paths
Index: configure
===================================================================
RCS file: /usr/local/cvsroot/slony1/slony1-engine/configure,v
retrieving revision 1.43
retrieving revision 1.44
diff -Lconfigure -Lconfigure -u -w -r1.43 -r1.44
--- configure
+++ configure
@@ -1,6 +1,6 @@
#! /bin/sh
# Guess values for system-dependent variables and create Makefiles.
-# Generated by GNU Autoconf 2.59 for postgresql-slony1-engine HEAD_20050222.
+# Generated by GNU Autoconf 2.59 for postgresql-slony1-engine HEAD_20050316.
#
# Copyright (C) 2003 Free Software Foundation, Inc.
# This configure script is free software; the Free Software Foundation
@@ -267,8 +267,8 @@
# Identity of this package.
PACKAGE_NAME='postgresql-slony1-engine'
PACKAGE_TARNAME='postgresql-slony1-engine'
-PACKAGE_VERSION='HEAD_20050222'
-PACKAGE_STRING='postgresql-slony1-engine HEAD_20050222'
+PACKAGE_VERSION='HEAD_20050316'
+PACKAGE_STRING='postgresql-slony1-engine HEAD_20050316'
PACKAGE_BUGREPORT=''
ac_unique_file="src"
@@ -309,7 +309,7 @@
# include
#endif"
-ac_subst_vars='SHELL PATH_SEPARATOR PACKAGE_NAME PACKAGE_TARNAME PACKAGE_VERSION PACKAGE_STRING PACKAGE_BUGREPORT exec_prefix prefix program_transform_name bindir sbindir libexecdir datadir sysconfdir sharedstatedir localstatedir libdir includedir oldincludedir infodir mandir build_alias host_alias target_alias DEFS ECHO_C ECHO_N ECHO_T LIBS build build_cpu build_vendor build_os host host_cpu host_vendor host_os enable_debug CC CFLAGS LDFLAGS CPPFLAGS ac_ct_CC EXEEXT OBJEXT PERL TAR LEX YACC SED LD YFLAGS LEXFLAGS HEAD_20050222 with_gnu_ld acx_pthread_config PTHREAD_CC PTHREAD_LIBS PTHREAD_CFLAGS CPP EGREP PGINCLUDEDIR PGINCLUDESERVERDIR PGLIBDIR PGPKGLIBDIR PGSHAREDIR PGBINDIR HAVE_NETSNMP NETSNMP_CFLAGS NETSNMP_AGENTLIBS TOOLSBIN SLONYPATH HOST_OS PORTNAME GROFF PS2PDF DJPEG PNMTOPS PGAUTODOC NSGMLS JADE have_docbook DOCBOOKSTYLE COLLATEINDEX SGMLSPL LIBOBJS LTLIBOBJS'
+ac_subst_vars='SHELL PATH_SEPARATOR PACKAGE_NAME PACKAGE_TARNAME PACKAGE_VERSION PACKAGE_STRING PACKAGE_BUGREPORT exec_prefix prefix program_transform_name bindir sbindir libexecdir datadir sysconfdir sharedstatedir localstatedir libdir includedir oldincludedir infodir mandir build_alias host_alias target_alias DEFS ECHO_C ECHO_N ECHO_T LIBS build build_cpu build_vendor build_os host host_cpu host_vendor host_os enable_debug CC CFLAGS LDFLAGS CPPFLAGS ac_ct_CC EXEEXT OBJEXT PERL TAR LEX YACC SED LD YFLAGS LEXFLAGS HEAD_20050316 with_gnu_ld acx_pthread_config PTHREAD_CC PTHREAD_LIBS PTHREAD_CFLAGS CPP EGREP HAVE_POSIX_SIGNALS PGINCLUDEDIR PGINCLUDESERVERDIR PGLIBDIR PGPKGLIBDIR PGSHAREDIR PGBINDIR HAVE_NETSNMP NETSNMP_CFLAGS NETSNMP_AGENTLIBS TOOLSBIN SLONYPATH HOST_OS PORTNAME GROFF PS2PDF DJPEG PNMTOPS PGAUTODOC NSGMLS JADE have_docbook DOCBOOKSTYLE COLLATEINDEX SGMLSPL LIBOBJS LTLIBOBJS'
ac_subst_files=''
# Initialize some variables set by options.
@@ -782,7 +782,7 @@
# Omit some internal or obsolete options to make the list less imposing.
# This message is too long to be a string in the A/UX 3.1 sh.
cat <<_ACEOF
-\`configure' configures postgresql-slony1-engine HEAD_20050222 to adapt to many kinds of systems.
+\`configure' configures postgresql-slony1-engine HEAD_20050316 to adapt to many kinds of systems.
Usage: $0 [OPTION]... [VAR=VALUE]...
@@ -843,7 +843,7 @@
if test -n "$ac_init_help"; then
case $ac_init_help in
- short | recursive ) echo "Configuration of postgresql-slony1-engine HEAD_20050222:";;
+ short | recursive ) echo "Configuration of postgresql-slony1-engine HEAD_20050316:";;
esac
cat <<\_ACEOF
@@ -975,7 +975,7 @@
test -n "$ac_init_help" && exit 0
if $ac_init_version; then
cat <<\_ACEOF
-postgresql-slony1-engine configure HEAD_20050222
+postgresql-slony1-engine configure HEAD_20050316
generated by GNU Autoconf 2.59
Copyright (C) 2003 Free Software Foundation, Inc.
@@ -989,7 +989,7 @@
This file contains any messages produced by compilers while
running configure, to aid debugging if configure makes a mistake.
-It was created by postgresql-slony1-engine $as_me HEAD_20050222, which was
+It was created by postgresql-slony1-engine $as_me HEAD_20050316, which was
generated by GNU Autoconf 2.59. Invocation command line was
$ $0 $@
@@ -5905,6 +5905,79 @@
fi
+echo "$as_me:$LINENO: checking for POSIX signal interface" >&5
+echo $ECHO_N "checking for POSIX signal interface... $ECHO_C" >&6
+if test "${slonac_cv_func_posix_signals+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+#include <signal.h>
+int
+main ()
+{
+struct sigaction act, oact;
+ sigemptyset(&act.sa_mask);
+ act.sa_flags = SA_NODEFER;
+ sigaction(0, &act, &oact);
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext conftest$ac_exeext
+if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5
+ (eval $ac_link) 2>conftest.er1
+ ac_status=$?
+ grep -v '^ *+' conftest.er1 >conftest.err
+ rm -f conftest.er1
+ cat conftest.err >&5
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -z "$ac_c_werror_flag"
+ || test ! -s conftest.err'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; } &&
+ { ac_try='test -s conftest$ac_exeext'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ slonac_cv_func_posix_signals=yes
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+slonac_cv_func_posix_signals=no
+fi
+rm -f conftest.err conftest.$ac_objext \
+ conftest$ac_exeext conftest.$ac_ext
+fi
+echo "$as_me:$LINENO: result: $slonac_cv_func_posix_signals" >&5
+echo "${ECHO_T}$slonac_cv_func_posix_signals" >&6
+ if test x"$slonac_cv_func_posix_signals" = xyes ; then
+
+cat >>confdefs.h <<\_ACEOF
+#define HAVE_POSIX_SIGNALS
+_ACEOF
+
+ HAVE_POSIX_SIGNALS=$slonac_cv_func_posix_signals
+ else
+ echo "$as_me:$LINENO: result: \"error\"" >&5
+echo "${ECHO_T}\"error\"" >&6
+ { { echo "$as_me:$LINENO: error: Slony requires a POSIX compatible signal interface." >&5
+echo "$as_me: error: Slony requires a POSIX compatible signal interface." >&2;}
+ { (exit 1); exit 1; }; }
+ fi
+
+
# ----
# Locate PostgreSQL paths
@@ -10032,7 +10105,7 @@
} >&5
cat >&5 <<_CSEOF
-This file was extended by postgresql-slony1-engine $as_me HEAD_20050222, which was
+This file was extended by postgresql-slony1-engine $as_me HEAD_20050316, which was
generated by GNU Autoconf 2.59. Invocation command line was
CONFIG_FILES = $CONFIG_FILES
@@ -10092,7 +10165,7 @@
cat >>$CONFIG_STATUS <<_ACEOF
ac_cs_version="\\
-postgresql-slony1-engine config.status HEAD_20050222
+postgresql-slony1-engine config.status HEAD_20050316
configured by $0, generated by GNU Autoconf 2.59,
with options \\"`echo "$ac_configure_args" | sed 's/[\\""\`\$]/\\\\&/g'`\\"
@@ -10307,7 +10380,7 @@
s,@LD@,$LD,;t t
s,@YFLAGS@,$YFLAGS,;t t
s,@LEXFLAGS@,$LEXFLAGS,;t t
-s,@HEAD_20050222@,$HEAD_20050222,;t t
+s,@HEAD_20050316@,$HEAD_20050316,;t t
s,@with_gnu_ld@,$with_gnu_ld,;t t
s,@acx_pthread_config@,$acx_pthread_config,;t t
s,@PTHREAD_CC@,$PTHREAD_CC,;t t
@@ -10315,6 +10388,7 @@
s,@PTHREAD_CFLAGS@,$PTHREAD_CFLAGS,;t t
s,@CPP@,$CPP,;t t
s,@EGREP@,$EGREP,;t t
+s,@HAVE_POSIX_SIGNALS@,$HAVE_POSIX_SIGNALS,;t t
s,@PGINCLUDEDIR@,$PGINCLUDEDIR,;t t
s,@PGINCLUDESERVERDIR@,$PGINCLUDESERVERDIR,;t t
s,@PGLIBDIR@,$PGLIBDIR,;t t
Index: aclocal.m4
===================================================================
RCS file: /usr/local/cvsroot/slony1/slony1-engine/aclocal.m4,v
retrieving revision 1.8
retrieving revision 1.9
diff -Laclocal.m4 -Laclocal.m4 -u -w -r1.8 -r1.9
--- aclocal.m4
+++ aclocal.m4
@@ -2,6 +2,7 @@
m4_include([config/perl.m4])
m4_include([config/libtool.m4])
m4_include([config/compiler.m4])
+m4_include([config/libc.m4])
m4_include([config/acx_pthread.m4])
m4_include([config/acx_libpq.m4])
m4_include([config/acx_netsnmp.m4])
Index: slon.c
===================================================================
RCS file: /usr/local/cvsroot/slony1/slony1-engine/src/slon/slon.c,v
retrieving revision 1.46
retrieving revision 1.47
diff -Lsrc/slon/slon.c -Lsrc/slon/slon.c -u -w -r1.46 -r1.47
--- src/slon/slon.c
+++ src/slon/slon.c
@@ -673,7 +673,7 @@
act.sa_handler = &sighandler;
sigemptyset(&act.sa_mask);
- act.sa_flags = SA_NOMASK;
+ act.sa_flags = SA_NODEFER;
if (sigaction(SIGHUP,&act,NULL) < 0)
{
--- /dev/null
+++ config/libc.m4
@@ -0,0 +1,18 @@
+AC_DEFUN([SLON_AC_FUNC_POSIX_SIGNALS],
+ [AC_CACHE_CHECK(for POSIX signal interface, slonac_cv_func_posix_signals,
+ [AC_TRY_LINK([#include <signal.h>],
+ [struct sigaction act, oact;
+ sigemptyset(&act.sa_mask);
+ act.sa_flags = SA_NODEFER;
+ sigaction(0, &act, &oact);],
+ [slonac_cv_func_posix_signals=yes],
+ [slonac_cv_func_posix_signals=no])])
+ if test x"$slonac_cv_func_posix_signals" = xyes ; then
+ AC_DEFINE(HAVE_POSIX_SIGNALS,, [Define to 1 if you have the POSIX signal interface.])
+ HAVE_POSIX_SIGNALS=$slonac_cv_func_posix_signals
+ else
+ AC_MSG_RESULT("error")
+ AC_MSG_ERROR(Slony requires a POSIX compatible signal interface.)
+ fi
+ AC_SUBST(HAVE_POSIX_SIGNALS)]
+)# SLON_AC_FUNC_POSIX_SIGNALS
From cvsuser Fri Mar 18 15:15:05 2005
From: cvsuser (CVS User Account)
Date: Tue Feb 13 08:58:16 2007
Subject: [Slony1-commit] By xfade: Change slon/misc.h and slon/snmp_thread.c
to include c.h in
Message-ID: <20050318151503.8FFFCB1CEB4@gborg.postgresql.org>
Log Message:
-----------
Change slon/misc.h and slon/snmp_thread.c to include c.h in the same manner as slon.c does. This fixes compiling when the includedir is not the parent dir of includedir-server.
Modified Files:
--------------
slony1-engine/src/slon:
misc.h (r1.5 -> r1.6)
snmp_thread.c (r1.1 -> r1.2)
-------------- next part --------------
Index: misc.h
===================================================================
RCS file: /usr/local/cvsroot/slony1/slony1-engine/src/slon/misc.h,v
retrieving revision 1.5
retrieving revision 1.6
diff -Lsrc/slon/misc.h -Lsrc/slon/misc.h -u -w -r1.5 -r1.6
--- src/slon/misc.h
+++ src/slon/misc.h
@@ -5,7 +5,7 @@
* ----------
*/
#include "config.h"
-#include "server/c.h"
+#include "c.h"
typedef enum
{
Index: snmp_thread.c
===================================================================
RCS file: /usr/local/cvsroot/slony1/slony1-engine/src/slon/snmp_thread.c,v
retrieving revision 1.1
retrieving revision 1.2
diff -Lsrc/slon/snmp_thread.c -Lsrc/slon/snmp_thread.c -u -w -r1.1 -r1.2
--- src/slon/snmp_thread.c
+++ src/slon/snmp_thread.c
@@ -7,7 +7,7 @@
#include
#include "libpq-fe.h"
-#include "server/c.h"
+#include "c.h"
#include "slon.h"
From cvsuser Fri Mar 18 21:40:47 2005
From: cvsuser (CVS User Account)
Date: Tue Feb 13 08:58:16 2007
Subject: [Slony1-commit] By smsimms: Backwards-compatibility fix: allow set#
to be passed as
Message-ID: <20050318214045.784C6B1C8AD@gborg.postgresql.org>
Log Message:
-----------
Backwards-compatibility fix: allow set# to be passed as set\d+ in
addition to \d+.
I accidentally broke this while adding setname support.
Modified Files:
--------------
slony1-engine/tools/altperl:
slon-tools.pm (r1.21 -> r1.22)
-------------- next part --------------
Index: slon-tools.pm
===================================================================
RCS file: /usr/local/cvsroot/slony1/slony1-engine/tools/altperl/slon-tools.pm,v
retrieving revision 1.21
retrieving revision 1.22
diff -Ltools/altperl/slon-tools.pm -Ltools/altperl/slon-tools.pm -u -w -r1.21 -r1.22
--- tools/altperl/slon-tools.pm
+++ tools/altperl/slon-tools.pm
@@ -228,7 +228,8 @@
if ($SLONY_SETS->{$set}) {
$match = $SLONY_SETS->{$set};
}
- elsif ($set =~ /^\d+$/) {
+ elsif ($set =~ /^(?:set)?(\d+)$/) {
+ $set = $1;
my ($name) = grep { $SLONY_SETS->{$_}->{"set_id"} == $set } keys %{$SLONY_SETS};
$match = $SLONY_SETS->{$name};
}
@@ -248,7 +249,6 @@
return $match->{"set_id"};
}
-
# This function checks to see if there is a still-in-progress subscription
# It does so by looking to see if there is a SUBSCRIBE_SET event corresponding
# to a sl_subscribe entry that is not yet active.
From cvsuser Mon Mar 21 17:13:59 2005
From: cvsuser (CVS User Account)
Date: Tue Feb 13 08:58:16 2007
Subject: [Slony1-commit] By cbbrowne: Fixed problem with evaluation of
whether to use "make" or
Message-ID: <20050321171357.34203B1C8AD@gborg.postgresql.org>
Log Message:
-----------
Fixed problem with evaluation of whether to use "make" or "gmake";
running this on AIX showed off that the output of "which gmake"
can be an enormous string as opposed to the shell returning
bad return codes...
Modified Files:
--------------
slony1-engine/src/ducttape:
test_1_pgbench (r1.21 -> r1.22)
test_2_pgbench (r1.14 -> r1.15)
test_3_pgbench (r1.11 -> r1.12)
test_4_pgbench (r1.8 -> r1.9)
test_5_pgbench (r1.7 -> r1.8)
test_6_autolisten (r1.4 -> r1.5)
test_7_defines (r1.3 -> r1.4)
test_8_logship (r1.2 -> r1.3)
-------------- next part --------------
Index: test_1_pgbench
===================================================================
RCS file: /usr/local/cvsroot/slony1/slony1-engine/src/ducttape/test_1_pgbench,v
retrieving revision 1.21
retrieving revision 1.22
diff -Lsrc/ducttape/test_1_pgbench -Lsrc/ducttape/test_1_pgbench -u -w -r1.21 -r1.22
--- src/ducttape/test_1_pgbench
+++ src/ducttape/test_1_pgbench
@@ -19,6 +19,7 @@
TMPOUT=/tmp/output.$$
DB1=slony_test1
DB2=slony_test2
+DEBUG_LEVEL=2
PGBENCH_SCALE=1
PGBENCH_CLIENTS=5
@@ -50,8 +51,8 @@
#####
# Make sure the install is up to date
#####
-WGM=`which gmake`
-if [ -z $WGM ] ; then
+WGM=`which gmake | egrep '^/'`
+if [ -z "$WGM" ] ; then
MAKE=make
CGNU=`make -v | grep GNU`
if [ -z "$CGNU" ] ; then
@@ -124,7 +125,7 @@
fi
echo "**** starting the Slony-I node daemon for $DB1"
-xterm -title "Slon node 11" -e sh -c "slon -d2 -s500 -g10 T1 dbname=$DB1; echo -n 'Enter>'; read line" &
+xterm -title "Slon node 11" -e sh -c "slon -d$DEBUG_LEVEL -s500 -g10 T1 dbname=$DB1; echo -n 'Enter>'; read line" &
slon1_pid=$!
echo "slon[$slon1_pid] on dbname=$DB1"
@@ -222,7 +223,7 @@
fi
echo "**** starting the Slony-I node daemon for $DB1"
-xterm -title "Slon node 22" -e sh -c "slon -d2 -s10000 -o10000 -g10 T1 dbname=$DB2; echo -n 'Enter>'; read line" &
+xterm -title "Slon node 22" -e sh -c "slon -d$DEBUG_LEVEL -s10000 -o10000 -g10 T1 dbname=$DB2; echo -n 'Enter>'; read line" &
slon2_pid=$!
echo "slon[$slon2_pid] on dbname=$DB2"
Index: test_6_autolisten
===================================================================
RCS file: /usr/local/cvsroot/slony1/slony1-engine/src/ducttape/test_6_autolisten,v
retrieving revision 1.4
retrieving revision 1.5
diff -Lsrc/ducttape/test_6_autolisten -Lsrc/ducttape/test_6_autolisten -u -w -r1.4 -r1.5
--- src/ducttape/test_6_autolisten
+++ src/ducttape/test_6_autolisten
@@ -18,6 +18,7 @@
export PATH
TMPOUT=/tmp/output.$$
ORIGIN=slony_test1
+DEBUG_LEVEL=2
PGBENCH_SCALE=1
PGBENCH_CLIENTS=5
PGBENCH_TRANS=`expr 50000 / $PGBENCH_CLIENTS`
@@ -85,7 +86,7 @@
function slon_start () {
local NODE=$1
echo "**** starting the Slony-I node daemon for slony_test$NODE"
- xterm -title "Slon node $NODE" -e sh -c "slon -d$NODE T1 dbname=slony_test$NODE; echo -n 'Enter>'; read line" &
+ xterm -title "Slon node $NODE" -e sh -c "slon -d$DEBUG_LEVEL T1 dbname=slony_test$NODE; echo -n 'Enter>'; read line" &
PID[$NODE]=$!
pid=$PID[$NODE]
echo "slon[$pid] on dbname=slony_test$NODE"
@@ -125,8 +126,8 @@
#####
# Make sure the install is up to date
#####
-WGM=`which gmake`
-if [ -z $WGM ]; then
+WGM=`which gmake | egrep '^/'`
+if [ -z "$WGM" ] ; then
MAKE=make
CGNU=`make -v | grep GNU`
if [ -z "$CGNU" ]; then
Index: test_8_logship
===================================================================
RCS file: /usr/local/cvsroot/slony1/slony1-engine/src/ducttape/test_8_logship,v
retrieving revision 1.2
retrieving revision 1.3
diff -Lsrc/ducttape/test_8_logship -Lsrc/ducttape/test_8_logship -u -w -r1.2 -r1.3
--- src/ducttape/test_8_logship
+++ src/ducttape/test_8_logship
@@ -28,7 +28,7 @@
PGBENCH_SCALE=1
PGBENCH_CLIENTS=5
PGBENCH_TRANS=`expr 30000 / $PGBENCH_CLIENTS`
-DEBUGLEVEL=4
+DEBUG_LEVEL=2
trap '
echo ""
@@ -56,8 +56,8 @@
#####
# Make sure the install is up to date
#####
-WGM=`which gmake`
-if [ -z $WGM ] ; then
+WGM=`which gmake | egrep '^/'`
+if [ -z "$WGM" ] ; then
MAKE=make
CGNU=`make -v | grep GNU`
if [ -z "$CGNU" ] ; then
@@ -135,7 +135,7 @@
fi
echo "**** starting the Slony-I node daemon for $DB1"
-xterm -title "Slon node 11" -e sh -c "slon -d$DEBUGLEVEL -s500 -g10 $CLUSTERNAME dbname=$DB1; echo -n 'Enter>'; read line" &
+xterm -title "Slon node 11" -e sh -c "slon -d$DEBUG_LEVEL -s500 -g10 $CLUSTERNAME dbname=$DB1; echo -n 'Enter>'; read line" &
slon1_pid=$!
echo "slon[$slon1_pid] on dbname=$DB1"
@@ -232,7 +232,7 @@
fi
echo "**** starting the Slony-I node daemon for $DB1"
-xterm -title "Slon node 22" -e sh -c "slon -d$DEBUGLEVEL -s10000 -o10000 -g10 -a $LOGSHIPDIR $CLUSTERNAME dbname=$DB2; echo -n 'Enter>'; read line" &
+xterm -title "Slon node 22" -e sh -c "slon -d$DEBUG_LEVEL -s10000 -o10000 -g10 -a $LOGSHIPDIR $CLUSTERNAME dbname=$DB2; echo -n 'Enter>'; read line" &
slon2_pid=$!
echo "slon[$slon2_pid] on dbname=$DB2"
Index: test_5_pgbench
===================================================================
RCS file: /usr/local/cvsroot/slony1/slony1-engine/src/ducttape/test_5_pgbench,v
retrieving revision 1.7
retrieving revision 1.8
diff -Lsrc/ducttape/test_5_pgbench -Lsrc/ducttape/test_5_pgbench -u -w -r1.7 -r1.8
--- src/ducttape/test_5_pgbench
+++ src/ducttape/test_5_pgbench
@@ -23,6 +23,7 @@
TMPOUT=/tmp/output.$$
DB1=slony_test1
DB2=slony_test2
+DEBUG_LEVEL=2
PGBENCH_SCALE=1
PGBENCH_CLIENTS=2
@@ -54,8 +55,8 @@
#####
# Make sure the install is up to date
#####
-WGM=`which gmake`
-if [ -z $WGM ] ; then
+WGM=`which gmake | egrep '^/'`
+if [ -z "$WGM" ] ; then
MAKE=make
CGNU=`make -v | grep GNU`
if [ -z "$CGNU" ] ; then
@@ -125,7 +126,7 @@
fi
echo "**** starting the Slony-I node daemon for $DB1"
-xterm -title "Slon node 1" -e sh -c "slon -d2 T1 dbname=$DB1; echo -n 'Enter>'; read line" &
+xterm -title "Slon node 1" -e sh -c "slon -d$DEBUG_LEVEL T1 dbname=$DB1; echo -n 'Enter>'; read line" &
slon1_pid=$!
echo "slon[$slon1_pid] on dbname=$DB1"
@@ -220,7 +221,7 @@
fi
echo "**** starting the Slony-I node daemon for $DB1"
-xterm -title "Slon node 2" -e sh -c "slon -d2 T1 dbname=$DB2; echo -n 'Enter>'; read line" &
+xterm -title "Slon node 2" -e sh -c "slon -d$DEBUG_LEVEL T1 dbname=$DB2; echo -n 'Enter>'; read line" &
slon2_pid=$!
echo "slon[$slon2_pid] on dbname=$DB2"
Index: test_2_pgbench
===================================================================
RCS file: /usr/local/cvsroot/slony1/slony1-engine/src/ducttape/test_2_pgbench,v
retrieving revision 1.14
retrieving revision 1.15
diff -Lsrc/ducttape/test_2_pgbench -Lsrc/ducttape/test_2_pgbench -u -w -r1.14 -r1.15
--- src/ducttape/test_2_pgbench
+++ src/ducttape/test_2_pgbench
@@ -26,6 +26,7 @@
DB1=slony_test1
DB2=slony_test2
DB3=slony_test3
+DEBUG_LEVEL=2
PGBENCH_SCALE=10
PGBENCH_CLIENTS=5
@@ -61,8 +62,8 @@
#####
# Make sure the install is up to date
#####
-WGM=`which gmake`
-if [ -z $WGM ] ; then
+WGM=`which gmake | egrep '^/'`
+if [ -z "$WGM" ] ; then
MAKE=make
CGNU=`make -v | grep GNU`
if [ -z "$CGNU" ] ; then
@@ -135,7 +136,7 @@
fi
echo "**** starting the Slony-I node daemon for $DB1"
-xterm -title "Slon node 1" -e sh -c "slon -d2 T1 dbname=$DB1; echo -n 'Enter>'; read line" &
+xterm -title "Slon node 1" -e sh -c "slon -d$DEBUG_LEVEL T1 dbname=$DB1; echo -n 'Enter>'; read line" &
slon1_pid=$!
echo "slon[$slon1_pid] on dbname=$DB1"
@@ -231,7 +232,7 @@
fi
echo "**** starting the Slony-I node daemon for $DB2"
-xterm -title "Slon node 2" -e sh -c "slon -d2 T1 dbname=$DB2; echo -n 'Enter>'; read line" &
+xterm -title "Slon node 2" -e sh -c "slon -d$DEBUG_LEVEL T1 dbname=$DB2; echo -n 'Enter>'; read line" &
slon2_pid=$!
echo "slon[$slon2_pid] on dbname=$DB2"
@@ -307,7 +308,7 @@
fi
echo "**** starting the Slony-I node daemon for $DB3"
-xterm -title "Slon node 3" -e sh -c "slon -d2 T1 dbname=$DB3; echo -n 'Enter>'; read line" &
+xterm -title "Slon node 3" -e sh -c "slon -d$DEBUG_LEVEL T1 dbname=$DB3; echo -n 'Enter>'; read line" &
slon3_pid=$!
echo "slon[$slon3_pid] on dbname=$DB3"
Index: test_7_defines
===================================================================
RCS file: /usr/local/cvsroot/slony1/slony1-engine/src/ducttape/test_7_defines,v
retrieving revision 1.3
retrieving revision 1.4
diff -Lsrc/ducttape/test_7_defines -Lsrc/ducttape/test_7_defines -u -w -r1.3 -r1.4
--- src/ducttape/test_7_defines
+++ src/ducttape/test_7_defines
@@ -22,6 +22,7 @@
TMPOUT=/tmp/output.$$
DB1=slony_test1
DB2=slony_test2
+DEBUG_LEVEL=2
PGBENCH_SCALE=1
PGBENCH_CLIENTS=5
@@ -53,8 +54,8 @@
#####
# Make sure the install is up to date
#####
-WGM=`which gmake`
-if [ -z $WGM ] ; then
+WGM=`which gmake | egrep '^/'`
+if [ -z "$WGM" ] ; then
MAKE=make
CGNU=`make -v | grep GNU`
if [ -z "$CGNU" ] ; then
@@ -133,7 +134,7 @@
fi
echo "**** starting the Slony-I node daemon for $DB1"
-xterm -title "Slon node 11" -e sh -c "slon -d2 -s500 -g10 T1 dbname=$DB1; echo -n 'Enter>'; read line" &
+xterm -title "Slon node 11" -e sh -c "slon -d$DEBUG_LEVEL -s500 -g10 T1 dbname=$DB1; echo -n 'Enter>'; read line" &
slon1_pid=$!
echo "slon[$slon1_pid] on dbname=$DB1"
@@ -230,7 +231,7 @@
fi
echo "**** starting the Slony-I node daemon for $DB1"
-xterm -title "Slon node 22" -e sh -c "slon -d2 -s10000 -o10000 -g10 T1 dbname=$DB2; echo -n 'Enter>'; read line" &
+xterm -title "Slon node 22" -e sh -c "slon -d$DEBUG_LEVEL -s10000 -o10000 -g10 T1 dbname=$DB2; echo -n 'Enter>'; read line" &
slon2_pid=$!
echo "slon[$slon2_pid] on dbname=$DB2"
Index: test_3_pgbench
===================================================================
RCS file: /usr/local/cvsroot/slony1/slony1-engine/src/ducttape/test_3_pgbench,v
retrieving revision 1.11
retrieving revision 1.12
diff -Lsrc/ducttape/test_3_pgbench -Lsrc/ducttape/test_3_pgbench -u -w -r1.11 -r1.12
--- src/ducttape/test_3_pgbench
+++ src/ducttape/test_3_pgbench
@@ -22,6 +22,7 @@
TMPOUT=/tmp/output.$$
DB1=slony_test1
DB2=slony_test2
+DEBUG_LEVEL=2
PGBENCH_SCALE=1
PGBENCH_CLIENTS=5
@@ -50,8 +51,8 @@
# have the "application" (pgbench) running.
######################################################################
-WGM=`which gmake`
-if [ -z $WGM ] ; then
+WGM=`which gmake | egrep '^/'`
+if [ -z "$WGM" ] ; then
MAKE=make
CGNU=`make -v | grep GNU`
if [ -z "$CGNU" ] ; then
@@ -174,7 +175,7 @@
fi
echo "**** starting the Slony-I node daemon for $DB1"
-xterm -title "Slon node 1" -e sh -c "slon -d2 T1 dbname=$DB1; echo -n 'Enter>'; read line" &
+xterm -title "Slon node 1" -e sh -c "slon -d$DEBUG_LEVEL T1 dbname=$DB1; echo -n 'Enter>'; read line" &
slon1_pid=$!
echo "slon[$slon1_pid] on dbname=$DB1"
@@ -277,7 +278,7 @@
fi
echo "**** starting the Slony-I node daemon for $DB1"
-xterm -title "Slon node 2" -e sh -c "slon -d2 T1 dbname=$DB2; echo -n 'Enter>'; read line" &
+xterm -title "Slon node 2" -e sh -c "slon -d$DEBUG_LEVEL T1 dbname=$DB2; echo -n 'Enter>'; read line" &
slon2_pid=$!
echo "slon[$slon2_pid] on dbname=$DB2"
Index: test_4_pgbench
===================================================================
RCS file: /usr/local/cvsroot/slony1/slony1-engine/src/ducttape/test_4_pgbench,v
retrieving revision 1.8
retrieving revision 1.9
diff -Lsrc/ducttape/test_4_pgbench -Lsrc/ducttape/test_4_pgbench -u -w -r1.8 -r1.9
--- src/ducttape/test_4_pgbench
+++ src/ducttape/test_4_pgbench
@@ -12,6 +12,7 @@
TMPOUT=/tmp/output.$$
DB1=slony_test1
DB2=slony_test2
+DEBUG_LEVEL=2
PGBENCH_SCALE=1
PGBENCH_CLIENTS=2
@@ -43,8 +44,8 @@
#####
# Make sure the install is up to date
#####
-WGM=`which gmake`
-if [ -z $WGM ] ; then
+WGM=`which gmake | egrep '^/'`
+if [ -z "$WGM" ] ; then
MAKE=make
CGNU=`make -v | grep GNU`
if [ -z "$CGNU" ] ; then
@@ -114,7 +115,7 @@
fi
echo "**** starting the Slony-I node daemon for $DB1"
-xterm -title "Slon node 1" -e sh -c "slon -d2 T1 dbname=$DB1; echo -n 'Enter>'; read line" &
+xterm -title "Slon node 1" -e sh -c "slon -d$DEBUG_LEVEL T1 dbname=$DB1; echo -n 'Enter>'; read line" &
slon1_pid=$!
echo "slon[$slon1_pid] on dbname=$DB1"
@@ -206,7 +207,7 @@
fi
echo "**** starting the Slony-I node daemon for $DB1"
-xterm -title "Slon node 2" -e sh -c "slon -d2 T1 dbname=$DB2; echo -n 'Enter>'; read line" &
+xterm -title "Slon node 2" -e sh -c "slon -d$DEBUG_LEVEL T1 dbname=$DB2; echo -n 'Enter>'; read line" &
slon2_pid=$!
echo "slon[$slon2_pid] on dbname=$DB2"
From cvsuser Tue Mar 22 17:30:13 2005
From: cvsuser (CVS User Account)
Date: Tue Feb 13 08:58:16 2007
Subject: [Slony1-commit] By smsimms: Patch from Vivek Khera: This patch
de-bash-ifies the
Message-ID: <20050322173011.48072B1CBD9@gborg.postgresql.org>
Log Message:
-----------
Patch from Vivek Khera:
This patch de-bash-ifies the script, so it runs on /bin/sh under
freebsd. it still runs under bash, of course...
Modified Files:
--------------
slony1-engine/tools:
check_slony_cluster.sh (r1.2 -> r1.3)
-------------- next part --------------
Index: check_slony_cluster.sh
===================================================================
RCS file: /usr/local/cvsroot/slony1/slony1-engine/tools/check_slony_cluster.sh,v
retrieving revision 1.2
retrieving revision 1.3
diff -Ltools/check_slony_cluster.sh -Ltools/check_slony_cluster.sh -u -w -r1.2 -r1.3
--- tools/check_slony_cluster.sh
+++ tools/check_slony_cluster.sh
@@ -20,7 +20,7 @@
# Copyright 2005
# check parameters are valid
-if [[ $# -ne 3 ]]
+if [ $# -ne 3 ]
then
echo "Invalid parameters need CLUSTERNAME DBNAME DBHOST"
exit 2
From cvsuser Wed Mar 23 17:41:28 2005
From: cvsuser (CVS User Account)
Date: Tue Feb 13 08:58:16 2007
Subject: [Slony1-commit] By cbbrowne: Update to RPM spec file per Devrim
GUNDUZ -
Message-ID: <20050323174125.04A5BB1C06F@gborg.postgresql.org>
Log Message:
-----------
Update to RPM spec file per Devrim GUNDUZ - devrim@gunduz.org
Tags:
----
REL_1_0_STABLE
Modified Files:
--------------
slony1-engine:
postgresql-slony1-engine.spec.in (r1.3.2.1 -> r1.3.2.2)
-------------- next part --------------
Index: postgresql-slony1-engine.spec.in
===================================================================
RCS file: /usr/local/cvsroot/slony1/slony1-engine/postgresql-slony1-engine.spec.in,v
retrieving revision 1.3.2.1
retrieving revision 1.3.2.2
diff -Lpostgresql-slony1-engine.spec.in -Lpostgresql-slony1-engine.spec.in -u -w -r1.3.2.1 -r1.3.2.2
--- postgresql-slony1-engine.spec.in
+++ postgresql-slony1-engine.spec.in
@@ -1,14 +1,21 @@
+%define _unpackaged_files_terminate_build 0
+%{!?pgversion:%define pgversion 8.0.1}
+%{!?pgsourcetree:%define pgsourcetree /usr/src/redhat/SOURCES/postgresql-%pgversion}
+
Summary: A "master to multiple slaves" replication system with cascading and failover.
Name: postgresql-@PACKAGE_NAME@
Version: @PACKAGE_VERSION@
-Release: 1
+Release: 3_pgsql%pgversion
License: Berkeley/BSD
+Packager: Devrim Gunduz
Group: Applications/Databases
URL: http://slony.info/
Source0: @PACKAGE_NAME@-%{version}.tar.gz
-BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-buildroot
+Buildroot: %{_tmppath}/%{name}-%{version}-root
+
+Autoreqprov: no
BuildRequires: postgresql-devel
-Requires: postgresql
+Requires: postgresql = %pgversion
%description
Slony-I will be a "master to multiple slaves" replication
@@ -25,14 +32,24 @@
%prep
%setup -q -n @PACKAGE_NAME@-%{version}
-./configure --bindir /usr/bin --libdir /usr/lib/pgsql --includedir /usr/include/pgsql --datadir /usr/share/pgsql
%build
-make CFLAGS="-I /usr/kerberos/include"
+CFLAGS="${CFLAGS:-%optflags}" ; export CFLAGS
+CFLAGS="${CFLAGS} -I%{_includedir}/et" ; export CFLAGS
+
+export LIBNAME=%{_lib}
+./configure --bindir /usr/bin --libdir %{_libdir}/pgsql --includedir %{_includedir}/pgsql --datadir /usr/share/pgsql --with-pgsourcetree=%pgsourcetree
+make
%install
rm -rf $RPM_BUILD_ROOT
-make install DESTDIR=$RPM_BUILD_ROOT
+make DESTDIR=$RPM_BUILD_ROOT install
+
+mkdir -p $RPM_BUILD_ROOT/usr/share/pgsql
+mkdir -p $RPM_BUILD_ROOT%{_libdir}/pgsql
+install -m 755 src/backend/*.sql $RPM_BUILD_ROOT/usr/share/pgsql
+install -m 755 src/xxid/xxid.so $RPM_BUILD_ROOT%{_libdir}/pgsql
+install -m 755 src/backend/slony1_funcs.so $RPM_BUILD_ROOT%{_libdir}/pgsql
%clean
rm -rf $RPM_BUILD_ROOT
@@ -40,13 +57,19 @@
%files
%defattr(-,root,root,-)
%doc COPYRIGHT
-/usr/bin/slon
-/usr/bin/slonik
-/usr/lib/pgsql/slony1_funcs.so
-/usr/lib/pgsql/xxid.so
+%{_bindir}/slon
+%{_bindir}/slonik
/usr/share/pgsql/*.sql
-
+%{_libdir}/pgsql/slony1_funcs.so
+%{_libdir}/pgsql/xxid.so
%changelog
+* Fri Mar 19 2005 Devrim Gunduz postgresql-slony1-engine postgresql-slony1-engine 1.0.5-3
+- Added predefined value for pgsourcetree
+- Allowed slony to be installed for different versions on the same machine.
+
+* Fri Mar 18 2005 Devrim Gunduz postgresql-slony1-engine
+- Fix spec file so that Slony can be installed on a server that PostgreSQL is installed from RPMs.
+
* Thu Mar 18 2004 Daniel Berrange postgresql-slony1-engine
- Initial RPM packaging
From cvsuser Wed Mar 23 20:12:57 2005
From: cvsuser (CVS User Account)
Date: Tue Feb 13 08:58:16 2007
Subject: [Slony1-commit] By cbbrowne: Fixed makefile to build HTML output if
you just type 'make'
Message-ID: <20050323201255.5DEBEB1C243@gborg.postgresql.org>
Log Message:
-----------
Fixed makefile to build HTML output if you just type 'make'
Updates to log shipping documentation
Fixes to slon documentation, including adding in the names of internal
variables that are controlled by the slon command line parameters
Modified Files:
--------------
slony1-engine/doc/adminguide:
Makefile (r1.8 -> r1.9)
README (r1.2 -> r1.3)
logshipping.sgml (r1.4 -> r1.5)
slon.sgml (r1.13 -> r1.14)
-------------- next part --------------
Index: slon.sgml
===================================================================
RCS file: /usr/local/cvsroot/slony1/slony1-engine/doc/adminguide/slon.sgml,v
retrieving revision 1.13
retrieving revision 1.14
diff -Ldoc/adminguide/slon.sgml -Ldoc/adminguide/slon.sgml -u -w -r1.13 -r1.14
--- doc/adminguide/slon.sgml
+++ doc/adminguide/slon.sgml
@@ -44,8 +44,9 @@
debuglevel
- Specifies the level of verbosity that slon should
- use when logging its activity.
+ The log_level specifies the level of verbosity
+ that slon should use when logging its
+ activity.
The eight levels of logging are:
@@ -68,14 +69,16 @@
- Specifies the interval, in milliseconds, in which
- slon should check to see if a SYNC
- should be introduced even if none has been mandated by data
- creation. Default is 10000 ms.
+ The sync_interval, measured in milliseconds,
+ indicates how often slon should check
+ to see if a SYNC should be introduced.
+ Default is 10000 ms. The main loop in
+ sync_Thread_main() sleeps for intervals of
+ sync_interval milliseconds between iterations.
- Short sync times keep the origin on a short
+ Short sync check intervals keep the origin on a short
leash, updating its subscribers more frequently. If you
have replicated sequences that are frequently updated
without there being tables that are
@@ -85,9 +88,9 @@
- If the node is not an origin, so no updates are coming in, it
- will continue on to the timeout and generate
- a SYNC anyways.
+ If the node is not an origin for any replication set, so no
+ updates are coming in, it is somewhat wasteful for this value to
+ be much less than the sync_interval_timeout value.
@@ -99,22 +102,22 @@
- At the end of each such timeout period, a SYNC will be generated
- on the local node even if there has been no
- replicatable data updated that would have pushed out a SYNC.
-
-
- Default is 60000 ms.
-
-
- Note that SYNC events are also generated on subscriber nodes.
- Since they are not actually generating any data to replicate to
- other nodes, such SYNC events are of little value. You might
- want to increase this parameter to something quite a bit higher
- than the SYNC check interval, so that
- subscriber nodes won't generate and propagate as many SYNC
- events. The once per minute that is the default seems amply
- often.
+ At the end of each sync_interval_timeout timeout
+ period, a SYNC will be generated on the
+ local node even if there has been no replicatable
+ data updated that would have pushed out a
+ SYNC.
+
+
+ Default, and maximum, is 60000 ms, so that you can expect each
+ node to report in with a SYNC
+ once each minute.
+
+
+ Note that SYNC events are also generated on
+ subscriber nodes. Since they are not actually generating any
+ data to replicate to other nodes, these SYNC
+ events are not of terribly much value.
@@ -123,50 +126,84 @@
group size
- Maximum SYNC group size; defaults to 6. Thus, if a particular
- node is behind by 200 SYNCs, it will try to group them together
- into groups of 6. This would be expected to reduce transaction
- overhead due to having fewer transactions to COMMIT.
-
-
- The default of 6 is probably suitable for small systems
- that can devote only very limited bits of memory to slon. If you
- have plenty of memory, it would be reasonable to increase this,
- as it will increase the amount of work done in each transaction,
- and will allow a subscriber that is behind by a lot to catch up
- more quickly.
-
-
- Slon processes usually stay pretty small; even with large
- value for this option, slon would be expected to only grow to a
- few MB in size.
+ This controls the maximum SYNC group size,
+ sync_group_maxsize; defaults to 6. Thus, if a
+ particular node is behind by 200 SYNCs, it
+ will try to group them together into groups of a maximum size of
+ sync_group_maxsize. This can be expected to
+ reduce transaction overhead due to having fewer transactions to
+ COMMIT.
+
+
+ The default of 6 is probably suitable for small systems that can
+ devote only very limited bits of memory to
+ slon. If you have plenty of memory,
+ it would be reasonable to increase this, as it will increase the
+ amount of work done in each transaction, and will allow a
+ subscriber that is behind by a lot to catch up more quickly.
+
+
+ Slon processes usually stay pretty small; even with large value
+ for this option, slon would be
+ expected to only grow to a few MB in size.
The big advantage in increasing this parameter comes from
cutting down on the number of transaction
- COMMITs; moving from 1 to 2 should provide
- substantial benefit, but the benefits will progressively fall
+ COMMITs; moving from 1 to 2 will provide
+ considerable benefit, but the benefits will progressively fall
off once the transactions being processed get to be reasonably
large. There isn't likely to be a material difference in
performance between 80 and 90; at that point, whether
bigger is better will depend on whether the
- bigger set of SYNCs makes the LOG cursor behave badly due to
- consuming more memory and requiring more time to sortt.
+ bigger set of SYNCs makes the
+ LOG cursor behave badly due to consuming more
+ memory and requiring more time to sort.
+
+
+ In &slony1; version 1.0, slon will
+ always attempt to group SYNCs together to
+ this maximum, which won't be ideal if
+ replication has been somewhat destabilized by there being very
+ large updates (e.g. - a single transaction
+ that updates hundreds of thousands of rows) or by
+ SYNCs being disrupted on an origin node with
+ the result that there are a few SYNCs that
+ are very large. You might run into the problem that grouping
+ together some very large SYNCs knocks over a
+ slon process. When it picks up
+ again, it will try to process the same large grouped set of
+ SYNCs, and run into the same problem over and
+ over until an administrator interrupts this and changes the
+ value to break this deadlock.
+
+
+ In &slony1; version 1.1, the slon
+ instead adaptively ramps up from doing 1
+ SYNC at a time towards the maximum group
+ size. As a result, if there are a couple of
+ SYNCs that cause problems, the
+ slon will (with any relevant watchdog
+ assistance) always be able to get to the point where it
+ processes the troublesome SYNCs one by one,
+ hopefully making operator assistance unnecessary.
desired sync time
- A maximum time planned for grouped SYNCs.
+ A maximum time planned for grouped SYNCs. If replication is running behind, slon will gradually
- increase the numbers of SYNCs grouped together, targetting that
- (based on the time taken for the last group
- of SYNCs) they shouldn't take more than ths specified
- desired sync time.
+ increase the numbers of SYNCs grouped
+ together, targeting that (based on the time taken for the
+ last group of SYNCs) they
+ shouldn't take more than the specified
+ desired_sync_time value.
- The default value is 60000ms, equal to one minute.
+ The default value for desired_sync_time is
+ 60000ms, equal to one minute. That way, you can expect (or at least hope!) that you'll
get a COMMIT roughly once per minute.
@@ -174,13 +211,14 @@
It isn't totally predictable, as it
is entirely possible for someone to request a very
large update, all as one transaction, that can
- blow up the length of the resulting SYNC to be
- nearly arbitrarily long. In such a case, the heuristic will back
- off for the next group.
+ blow up the length of the resulting
+ SYNC to be nearly arbitrarily long. In such a
+ case, the heuristic will back off for the
+ next group.
The overall effect is to improve
Slony-I's ability to cope with
- variations in traffic. By starting with 1 SYNC, and gradually
+ variations in traffic. By starting with 1 SYNC, and gradually
moving to more, even if there turn out to be variations large
enough to cause PostgreSQL backends to
crash, Slony-I will back off down to
@@ -193,7 +231,8 @@
cleanup cycles
- How often to VACUUM in cleanup cycles.
+ The value vac_frequency indicates how often to
+ VACUUM in cleanup cycles.
Set this to zero to disable
@@ -205,15 +244,31 @@
lot of dead tuples that should be vacuumed
frequently.
+
+ In &slony1; version 1.1, this changes a little; the
+ cleanup thread tracks, from iteration to iteration, the earliest
+ transaction ID still active in the system. If this doesn't
+ change, from one iteration to the next, then an old transaction
+ is still active, and therefore a VACUUM will
+ do no good. The cleanup thread instead merely does an
+ ANALYZE on these tables to update the
+ statistics in pg_statistics.
+
-
PID filename
- Filename in which the PID (process ID) of the slon is stored.
+ pid_file contains the filename in which the PID
+ (process ID) of the slon is stored.
+
+
+
+ This may make it easier to construct scripts to monitor multiple
+ slon processes running on a single
+ host.
@@ -234,8 +289,9 @@
archive directory
- Directory in which to place a sequence of SYNC archive files for
- use in log shipping mode.
+ archive_dir indicates a directory in which to
+ place a sequence of SYNC archive files for
+ use in log shipping mode.
Index: logshipping.sgml
===================================================================
RCS file: /usr/local/cvsroot/slony1/slony1-engine/doc/adminguide/logshipping.sgml,v
retrieving revision 1.4
retrieving revision 1.5
diff -Ldoc/adminguide/logshipping.sgml -Ldoc/adminguide/logshipping.sgml -u -w -r1.4 -r1.5
--- doc/adminguide/logshipping.sgml
+++ doc/adminguide/logshipping.sgml
@@ -62,20 +62,31 @@
generate them by adding the option.
+
- What takes place when a failover/MOVE SET takes place?
- Nothing special. So long as the archiving node remains
-a subscriber, it will continue to generate logs.
+ What takes place when a failover/MOVE SET takes
+place?
+
+ Nothing too special. So long as the archiving node
+remains a subscriber, it will continue to generate
+logs. What if we run out of spool space?
- The node will stop accepting SYNCs until this problem
-is alleviated. The database being subscribed to will also fall
-behind.
+ The node will stop accepting SYNCs
+until this problem is alleviated. The database being subscribed to
+will also fall behind.
+
+ There will be the further side-effect on the cluster that
+confirmations won't be able to propagate (because the node has not
+processed the updates), which will cause and to
+grow.
+
@@ -107,28 +118,14 @@
sniffing the data applied at a particular subscriber
node. As a result, you must have at least one regular
node; you cannot have a cluster that consists solely of an origin and
-a set of log shipping nodes..
-
- The log shipping node tracks the
-entirety of the traffic going to a subscriber. You cannot separate
-things out if there are multiple replication sets.
-
- The log shipping node presently tracks
-only SYNC events. This should be sufficient to cope with
-some changes in cluster configuration, but not
-others.
+a set of log shipping nodes..
Log shipping does not process certain
additional events, with the implication that the introduction of any
of the following events can invalidate the relationship between the
SYNCs and the dump created using
-slony1_dump.sh so that you'll likely need
-to rerun slony1_dump.sh:
-
-
- SUBSCRIBE_SET
-
-
+slony1_dump.sh so that you may need to
+rerun slony1_dump.sh.
A number of event types are handled in
such a way that log shipping copes with them:
@@ -140,6 +137,8 @@
DDL_SCRIPT is handled.
+ SUBSCRIBE_SET
+
UNSUBSCRIBE_SET This event, much like SUBSCRIBE_SET is not
@@ -226,10 +225,11 @@
trying the next file.
If the setsyncTracking_offline()
- call succeeds, then you have the right next SYNC file, and
-should apply it. You should probably ROLLBACK the
-transaction, and then use psql to apply the
-entire file full of updates.
+ call succeeds, then you have the right next
+SYNC file, and should apply it. You should
+probably ROLLBACK the transaction, and then use
+psql to apply the entire file full of
+updates.
Index: Makefile
===================================================================
RCS file: /usr/local/cvsroot/slony1/slony1-engine/doc/adminguide/Makefile,v
retrieving revision 1.8
retrieving revision 1.9
diff -Ldoc/adminguide/Makefile -Ldoc/adminguide/Makefile -u -w -r1.8 -r1.9
--- doc/adminguide/Makefile
+++ doc/adminguide/Makefile
@@ -52,7 +52,7 @@
## HTML
##
-all: html
+all: html man
.PHONY: html
Index: README
===================================================================
RCS file: /usr/local/cvsroot/slony1/slony1-engine/doc/adminguide/README,v
retrieving revision 1.2
retrieving revision 1.3
diff -Ldoc/adminguide/README -Ldoc/adminguide/README -u -w -r1.2 -r1.3
--- doc/adminguide/README
+++ doc/adminguide/README
@@ -1,4 +1,5 @@
This was sourced from the Wiki at
http://cbbrowne.dyndns.info:8741/cgi-bin/twiki/view/Sandbox/SlonyIAdministration
-It has been transformed into DocBook SGML as is used for PostgreSQL documentation.
+It has been transformed into DocBook SGML as is used for PostgreSQL
+documentation.
\ No newline at end of file
From cvsuser Wed Mar 23 22:59:29 2005
From: cvsuser (CVS User Account)
Date: Tue Feb 13 08:58:16 2007
Subject: [Slony1-commit] By darcyb: Whole raft of changes.
Message-ID: <20050323225926.C35A2B1C243@gborg.postgresql.org>
Log Message:
-----------
Whole raft of changes.
A) use signal() on win32 since there is no sigaction under that platform
B) have make install now install documentation if --with-docdir= is defined, This will have to be updated to have docs installed by default.
Modified Files:
--------------
slony1-engine:
Makefile.global.in (r1.25 -> r1.26)
configure (r1.44 -> r1.45)
configure.ac (r1.43 -> r1.44)
slony1-engine/config:
docs.m4 (r1.4 -> r1.5)
libc.m4 (r1.1 -> r1.2)
slony1-engine/doc:
Makefile (r1.10 -> r1.11)
slony1-engine/doc/adminguide:
Makefile (r1.9 -> r1.10)
bookindex.sgml (r1.3 -> r1.4)
slony1-engine/doc/concept:
Makefile (r1.6 -> r1.7)
slony1-engine/doc/howto:
Makefile (r1.6 -> r1.7)
slony1-engine/doc/implementation:
Makefile (r1.5 -> r1.6)
slony1-engine/makefiles:
Makefile.win (r1.4 -> r1.5)
slony1-engine/src/slon:
slon.c (r1.47 -> r1.48)
-------------- next part --------------
Index: Makefile.global.in
===================================================================
RCS file: /usr/local/cvsroot/slony1/slony1-engine/Makefile.global.in,v
retrieving revision 1.25
retrieving revision 1.26
diff -LMakefile.global.in -LMakefile.global.in -u -w -r1.25 -r1.26
--- Makefile.global.in
+++ Makefile.global.in
@@ -60,6 +60,7 @@
DOCBOOKSTYLE= @DOCBOOKSTYLE@
COLLATEINDEX= @COLLATEINDEX@
PGAUTODOC= @PGAUTODOC@
+DOCDIR= @DOCDIR@
# support programs
PERL= @PERL@
Index: configure.ac
===================================================================
RCS file: /usr/local/cvsroot/slony1/slony1-engine/configure.ac,v
retrieving revision 1.43
retrieving revision 1.44
diff -Lconfigure.ac -Lconfigure.ac -u -w -r1.43 -r1.44
--- configure.ac
+++ configure.ac
@@ -35,7 +35,9 @@
freebsd*) template=freebsd ;;
hpux*) template=hpux ;;
irix*) template=irix ;;
- linux*) template=linux ;;
+ linux*|gnu*|k*bsd*-gnu)
+ template=linux ;;
+ mingw*) template=win ;;
netbsd*) template=netbsd ;;
nextstep*) template=nextstep ;;
openbsd*) template=openbsd ;;
@@ -53,6 +55,18 @@
ultrix*) template=ultrix4 ;;
esac
+if test x"$template" = x"" ; then
+ AC_MSG_ERROR([[
+*******************************************************************
+Slony-I has apparently not been ported to your platform yet.
+To try a manual configuration, look into the src/template directory
+for a similar platform and use the '--with-template=' option.
+
+Please also contact to see about
+rectifying this. Include the above 'checking host system type...'
+line.
+*******************************************************************]])
+fi
AC_MSG_RESULT([$template])
# Checks for programs.
@@ -116,6 +130,7 @@
AC_ARG_WITH(pgsharedir, [ --with-pgsharedir= Location of the PostgreSQL share dir. E.g. postgresql.conf.sample ])
AC_ARG_WITH(netsnmp, [ --with-netsnmp= Enable snmp support is the location of net-snmp-config. ])
AC_ARG_WITH(perltools, [ --with-perltools= Location to install the perl management tools Default $PREFIX/bin. ])
+AC_ARG_WITH(docdir, [ --with-docdir= Location to install all the documentation Default is $PREFIX/doc. ])
#Our current path
SLONYPATH=`pwd`
@@ -125,6 +140,7 @@
ACX_LIBPQ()
ACX_LIBSNMP()
ACX_SLONYTOOLS()
+ACX_SLONYDOCS()
AC_SUBST(PGINCLUDEDIR, $PG_INCLUDEDIR)
AC_SUBST(PGINCLUDESERVERDIR, $PG_INCLUDESERVERDIR)
@@ -136,6 +152,7 @@
AC_SUBST(NETSNMP_CFLAGS, $NETSNMP_CFLAGS)
AC_SUBST(NETSNMP_AGENTLIBS, $NETSNMP_AGENTLIBS)
AC_SUBST(TOOLSBIN, $TOOLSBIN)
+AC_SUBST(DOCDIR, $DOCDIR)
AC_SUBST(SLONYPATH)
AC_SUBST(HOST_OS,$host_os)
@@ -159,8 +176,7 @@
SLON_AC_CHECK_DOCBOOK(4.2)
SLON_AC_PATH_DOCBOOK_STYLESHEETS
SLON_AC_PATH_COLLATEINDEX
-AC_CHECK_PROGS(SGMLSPL, sgmlspl)
-
+AC_SUBST(SGMLSPL, $SGMLSPL)
AC_CONFIG_FILES([
Makefile.global GNUmakefile
Index: configure
===================================================================
RCS file: /usr/local/cvsroot/slony1/slony1-engine/configure,v
retrieving revision 1.44
retrieving revision 1.45
diff -Lconfigure -Lconfigure -u -w -r1.44 -r1.45
--- configure
+++ configure
@@ -1,6 +1,6 @@
#! /bin/sh
# Guess values for system-dependent variables and create Makefiles.
-# Generated by GNU Autoconf 2.59 for postgresql-slony1-engine HEAD_20050316.
+# Generated by GNU Autoconf 2.59 for postgresql-slony1-engine HEAD_20050323.
#
# Copyright (C) 2003 Free Software Foundation, Inc.
# This configure script is free software; the Free Software Foundation
@@ -267,8 +267,8 @@
# Identity of this package.
PACKAGE_NAME='postgresql-slony1-engine'
PACKAGE_TARNAME='postgresql-slony1-engine'
-PACKAGE_VERSION='HEAD_20050316'
-PACKAGE_STRING='postgresql-slony1-engine HEAD_20050316'
+PACKAGE_VERSION='HEAD_20050323'
+PACKAGE_STRING='postgresql-slony1-engine HEAD_20050323'
PACKAGE_BUGREPORT=''
ac_unique_file="src"
@@ -309,7 +309,7 @@
# include
#endif"
-ac_subst_vars='SHELL PATH_SEPARATOR PACKAGE_NAME PACKAGE_TARNAME PACKAGE_VERSION PACKAGE_STRING PACKAGE_BUGREPORT exec_prefix prefix program_transform_name bindir sbindir libexecdir datadir sysconfdir sharedstatedir localstatedir libdir includedir oldincludedir infodir mandir build_alias host_alias target_alias DEFS ECHO_C ECHO_N ECHO_T LIBS build build_cpu build_vendor build_os host host_cpu host_vendor host_os enable_debug CC CFLAGS LDFLAGS CPPFLAGS ac_ct_CC EXEEXT OBJEXT PERL TAR LEX YACC SED LD YFLAGS LEXFLAGS HEAD_20050316 with_gnu_ld acx_pthread_config PTHREAD_CC PTHREAD_LIBS PTHREAD_CFLAGS CPP EGREP HAVE_POSIX_SIGNALS PGINCLUDEDIR PGINCLUDESERVERDIR PGLIBDIR PGPKGLIBDIR PGSHAREDIR PGBINDIR HAVE_NETSNMP NETSNMP_CFLAGS NETSNMP_AGENTLIBS TOOLSBIN SLONYPATH HOST_OS PORTNAME GROFF PS2PDF DJPEG PNMTOPS PGAUTODOC NSGMLS JADE have_docbook DOCBOOKSTYLE COLLATEINDEX SGMLSPL LIBOBJS LTLIBOBJS'
+ac_subst_vars='SHELL PATH_SEPARATOR PACKAGE_NAME PACKAGE_TARNAME PACKAGE_VERSION PACKAGE_STRING PACKAGE_BUGREPORT exec_prefix prefix program_transform_name bindir sbindir libexecdir datadir sysconfdir sharedstatedir localstatedir libdir includedir oldincludedir infodir mandir build_alias host_alias target_alias DEFS ECHO_C ECHO_N ECHO_T LIBS build build_cpu build_vendor build_os host host_cpu host_vendor host_os enable_debug CC CFLAGS LDFLAGS CPPFLAGS ac_ct_CC EXEEXT OBJEXT PERL TAR LEX YACC SED LD YFLAGS LEXFLAGS HEAD_20050323 with_gnu_ld acx_pthread_config PTHREAD_CC PTHREAD_LIBS PTHREAD_CFLAGS CPP EGREP HAVE_POSIX_SIGNALS PGINCLUDEDIR PGINCLUDESERVERDIR PGLIBDIR PGPKGLIBDIR PGSHAREDIR PGBINDIR HAVE_NETSNMP NETSNMP_CFLAGS NETSNMP_AGENTLIBS TOOLSBIN DOCDIR SLONYPATH HOST_OS PORTNAME GROFF PS2PDF DJPEG PNMTOPS PGAUTODOC NSGMLS JADE have_docbook DOCBOOKSTYLE COLLATEINDEX SGMLSPL LIBOBJS LTLIBOBJS'
ac_subst_files=''
# Initialize some variables set by options.
@@ -782,7 +782,7 @@
# Omit some internal or obsolete options to make the list less imposing.
# This message is too long to be a string in the A/UX 3.1 sh.
cat <<_ACEOF
-\`configure' configures postgresql-slony1-engine HEAD_20050316 to adapt to many kinds of systems.
+\`configure' configures postgresql-slony1-engine HEAD_20050323 to adapt to many kinds of systems.
Usage: $0 [OPTION]... [VAR=VALUE]...
@@ -843,7 +843,7 @@
if test -n "$ac_init_help"; then
case $ac_init_help in
- short | recursive ) echo "Configuration of postgresql-slony1-engine HEAD_20050316:";;
+ short | recursive ) echo "Configuration of postgresql-slony1-engine HEAD_20050323:";;
esac
cat <<\_ACEOF
@@ -865,6 +865,7 @@
--with-pgsharedir= Location of the PostgreSQL share dir. E.g. postgresql.conf.sample
--with-netsnmp= Enable snmp support is the location of net-snmp-config.
--with-perltools= Location to install the perl management tools Default $PREFIX/bin.
+ --with-docdir= Location to install all the documentation Default is $PREFIX/doc.
Some influential environment variables:
CC C compiler command
@@ -975,7 +976,7 @@
test -n "$ac_init_help" && exit 0
if $ac_init_version; then
cat <<\_ACEOF
-postgresql-slony1-engine configure HEAD_20050316
+postgresql-slony1-engine configure HEAD_20050323
generated by GNU Autoconf 2.59
Copyright (C) 2003 Free Software Foundation, Inc.
@@ -989,7 +990,7 @@
This file contains any messages produced by compilers while
running configure, to aid debugging if configure makes a mistake.
-It was created by postgresql-slony1-engine $as_me HEAD_20050316, which was
+It was created by postgresql-slony1-engine $as_me HEAD_20050323, which was
generated by GNU Autoconf 2.59. Invocation command line was
$ $0 $@
@@ -1423,7 +1424,9 @@
freebsd*) template=freebsd ;;
hpux*) template=hpux ;;
irix*) template=irix ;;
- linux*) template=linux ;;
+ linux*|gnu*|k*bsd*-gnu)
+ template=linux ;;
+ mingw*) template=win ;;
netbsd*) template=netbsd ;;
nextstep*) template=nextstep ;;
openbsd*) template=openbsd ;;
@@ -1441,6 +1444,29 @@
ultrix*) template=ultrix4 ;;
esac
+if test x"$template" = x"" ; then
+ { { echo "$as_me:$LINENO: error:
+*******************************************************************
+Slony-I has apparently not been ported to your platform yet.
+To try a manual configuration, look into the src/template directory
+for a similar platform and use the '--with-template=' option.
+
+Please also contact to see about
+rectifying this. Include the above 'checking host system type...'
+line.
+*******************************************************************" >&5
+echo "$as_me: error:
+*******************************************************************
+Slony-I has apparently not been ported to your platform yet.
+To try a manual configuration, look into the src/template directory
+for a similar platform and use the '--with-template=' option.
+
+Please also contact to see about
+rectifying this. Include the above 'checking host system type...'
+line.
+*******************************************************************" >&2;}
+ { (exit 1); exit 1; }; }
+fi
echo "$as_me:$LINENO: result: $template" >&5
echo "${ECHO_T}$template" >&6
@@ -5970,12 +5996,17 @@
HAVE_POSIX_SIGNALS=$slonac_cv_func_posix_signals
else
+ if x"$template" = xwin ; then
+ echo "$as_me:$LINENO: result: \"Skipping Error on win32\"" >&5
+echo "${ECHO_T}\"Skipping Error on win32\"" >&6
+ else
echo "$as_me:$LINENO: result: \"error\"" >&5
echo "${ECHO_T}\"error\"" >&6
{ { echo "$as_me:$LINENO: error: Slony requires a POSIX compatible signal interface." >&5
echo "$as_me: error: Slony requires a POSIX compatible signal interface." >&2;}
{ (exit 1); exit 1; }; }
fi
+ fi
@@ -6037,6 +6068,12 @@
withval="$with_perltools"
fi;
+
+# Check whether --with-docdir or --without-docdir was given.
+if test "${with_docdir+set}" = set; then
+ withval="$with_docdir"
+
+fi;
#Our current path
SLONYPATH=`pwd`
@@ -9215,6 +9252,37 @@
+
+
+
+ ac_ext=c
+ac_cpp='$CPP $CPPFLAGS'
+ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
+ac_compiler_gnu=$ac_cv_c_compiler_gnu
+
+ if test -n "${with_docdir}" ; then
+ if test ${with_docdir} != "yes"; then
+ LOCATION="${with_docdir}"
+ else
+ if test ${prefix} = "NONE"; then
+ LOCATION="${ac_default_prefix}/doc"
+ else
+ LOCATION="${prefix}/doc"
+ fi
+ fi
+ fi
+ if test x"${LOCATION}" != x; then
+ DOCDIR="${LOCATION}"
+ fi
+ ac_ext=c
+ac_cpp='$CPP $CPPFLAGS'
+ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
+ac_compiler_gnu=$ac_cv_c_compiler_gnu
+
+
+
PGINCLUDEDIR=$PG_INCLUDEDIR
PGINCLUDESERVERDIR=$PG_INCLUDESERVERDIR
@@ -9235,6 +9303,8 @@
TOOLSBIN=$TOOLSBIN
+DOCDIR=$DOCDIR
+
HOST_OS=$host_os
@@ -9697,46 +9767,7 @@
done
fi
-for ac_prog in sgmlspl
-do
- # Extract the first word of "$ac_prog", so it can be a program name with args.
-set dummy $ac_prog; ac_word=$2
-echo "$as_me:$LINENO: checking for $ac_word" >&5
-echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6
-if test "${ac_cv_prog_SGMLSPL+set}" = set; then
- echo $ECHO_N "(cached) $ECHO_C" >&6
-else
- if test -n "$SGMLSPL"; then
- ac_cv_prog_SGMLSPL="$SGMLSPL" # Let the user override the test.
-else
-as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
-for as_dir in $PATH
-do
- IFS=$as_save_IFS
- test -z "$as_dir" && as_dir=.
- for ac_exec_ext in '' $ac_executable_extensions; do
- if $as_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
- ac_cv_prog_SGMLSPL="$ac_prog"
- echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5
- break 2
- fi
-done
-done
-
-fi
-fi
-SGMLSPL=$ac_cv_prog_SGMLSPL
-if test -n "$SGMLSPL"; then
- echo "$as_me:$LINENO: result: $SGMLSPL" >&5
-echo "${ECHO_T}$SGMLSPL" >&6
-else
- echo "$as_me:$LINENO: result: no" >&5
-echo "${ECHO_T}no" >&6
-fi
-
- test -n "$SGMLSPL" && break
-done
-
+SGMLSPL=$SGMLSPL
ac_config_files="$ac_config_files Makefile.global GNUmakefile"
@@ -10105,7 +10136,7 @@
} >&5
cat >&5 <<_CSEOF
-This file was extended by postgresql-slony1-engine $as_me HEAD_20050316, which was
+This file was extended by postgresql-slony1-engine $as_me HEAD_20050323, which was
generated by GNU Autoconf 2.59. Invocation command line was
CONFIG_FILES = $CONFIG_FILES
@@ -10165,7 +10196,7 @@
cat >>$CONFIG_STATUS <<_ACEOF
ac_cs_version="\\
-postgresql-slony1-engine config.status HEAD_20050316
+postgresql-slony1-engine config.status HEAD_20050323
configured by $0, generated by GNU Autoconf 2.59,
with options \\"`echo "$ac_configure_args" | sed 's/[\\""\`\$]/\\\\&/g'`\\"
@@ -10380,7 +10411,7 @@
s,@LD@,$LD,;t t
s,@YFLAGS@,$YFLAGS,;t t
s,@LEXFLAGS@,$LEXFLAGS,;t t
-s,@HEAD_20050316@,$HEAD_20050316,;t t
+s,@HEAD_20050323@,$HEAD_20050323,;t t
s,@with_gnu_ld@,$with_gnu_ld,;t t
s,@acx_pthread_config@,$acx_pthread_config,;t t
s,@PTHREAD_CC@,$PTHREAD_CC,;t t
@@ -10399,6 +10430,7 @@
s,@NETSNMP_CFLAGS@,$NETSNMP_CFLAGS,;t t
s,@NETSNMP_AGENTLIBS@,$NETSNMP_AGENTLIBS,;t t
s,@TOOLSBIN@,$TOOLSBIN,;t t
+s,@DOCDIR@,$DOCDIR,;t t
s,@SLONYPATH@,$SLONYPATH,;t t
s,@HOST_OS@,$HOST_OS,;t t
s,@PORTNAME@,$PORTNAME,;t t
Index: libc.m4
===================================================================
RCS file: /usr/local/cvsroot/slony1/slony1-engine/config/libc.m4,v
retrieving revision 1.1
retrieving revision 1.2
diff -Lconfig/libc.m4 -Lconfig/libc.m4 -u -w -r1.1 -r1.2
--- config/libc.m4
+++ config/libc.m4
@@ -11,8 +11,12 @@
AC_DEFINE(HAVE_POSIX_SIGNALS,, [Define to 1 if you have the POSIX signal interface.])
HAVE_POSIX_SIGNALS=$slonac_cv_func_posix_signals
else
+ if x"$template" = xwin ; then
+ AC_MSG_RESULT("Skipping Error on win32")
+ else
AC_MSG_RESULT("error")
AC_MSG_ERROR(Slony requires a POSIX compatible signal interface.)
fi
+ fi
AC_SUBST(HAVE_POSIX_SIGNALS)]
)# SLON_AC_FUNC_POSIX_SIGNALS
Index: docs.m4
===================================================================
RCS file: /usr/local/cvsroot/slony1/slony1-engine/config/docs.m4,v
retrieving revision 1.4
retrieving revision 1.5
diff -Lconfig/docs.m4 -Lconfig/docs.m4 -u -w -r1.4 -r1.5
--- config/docs.m4
+++ config/docs.m4
@@ -18,3 +18,25 @@
AC_DEFUN([SLON_AC_PROG_PGAUTODOC],
[AC_CHECK_PROGS([PGAUTODOC],[postgresql_autodoc pgautodoc])])
+
+AC_DEFUN([ACX_SLONYDOCS], [
+ AC_REQUIRE([AC_CANONICAL_HOST])
+ AC_LANG_SAVE
+ AC_LANG_C
+ if test -n "${with_docdir}" ; then
+ if test ${with_docdir} != "yes"; then
+ LOCATION="${with_docdir}"
+ else
+ if test ${prefix} = "NONE"; then
+ LOCATION="${ac_default_prefix}/doc"
+ else
+ LOCATION="${prefix}/doc"
+ fi
+ fi
+ fi
+ if test x"${LOCATION}" != x; then
+ DOCDIR="${LOCATION}"
+ fi
+ AC_LANG_RESTORE
+]) dnl ACX_SLONYDOCS
+
Index: bookindex.sgml
===================================================================
RCS file: /usr/local/cvsroot/slony1/slony1-engine/doc/adminguide/bookindex.sgml,v
retrieving revision 1.3
retrieving revision 1.4
diff -Ldoc/adminguide/bookindex.sgml -Ldoc/adminguide/bookindex.sgml -u -w -r1.3 -r1.4
--- doc/adminguide/bookindex.sgml
+++ doc/adminguide/bookindex.sgml
@@ -19,17 +19,136 @@
sequences of pages into a range.
-->
+C
+
+
+ cluster_name configuration parameter,
+ Connection settings
+
+
+
+
+ configuration
+
+ of the slon daemon,
+ Run-time Configuration
+
+
+
+
+ conn_info configuration parameter,
+ Connection settings
+
+
+
+
+D
+
+
+ desired_sync_time configuration parameter,
+ Event Tuning
+
+
+
+
+L
+
+
+ log_level configuration parameter,
+ Logging
+
+
+
+
+ log_pid configuration parameter,
+ Logging
+
+
+
+
+ log_timestamp configuration parameter,
+ Logging
+
+
+
+
+ log_timestamp_format configuration parameter,
+ Logging
+
+
+
+
+P
+
+
+ pid_file configuration parameter,
+ Logging
+
+
+
+Sslon,
- slon
+ slonslonik,
- slonik
+ slonik
+
+
+
+
+ sql_on_connection configuration parameter,
+ Connection settings
+
+
+
+
+ sync_group_maxsize configuration parameter,
+ Event Tuning
+
+
+
+
+ sync_interval configuration parameter,
+ Event Tuning
+
+
+
+
+ sync_interval_timeout configuration parameter,
+ Event Tuning
+
+
+
+
+ syslog configuration parameter,
+ Logging
+
+
+
+
+ syslog_facility configuration parameter,
+ Logging
+
+
+
+
+ syslog_ident configuration parameter,
+ Logging
+
+
+
+
+V
+
+
+ vac_frequency configuration parameter,
+ Event Tuning
Index: Makefile
===================================================================
RCS file: /usr/local/cvsroot/slony1/slony1-engine/doc/adminguide/Makefile,v
retrieving revision 1.9
retrieving revision 1.10
diff -Ldoc/adminguide/Makefile -Ldoc/adminguide/Makefile -u -w -r1.9 -r1.10
--- doc/adminguide/Makefile
+++ doc/adminguide/Makefile
@@ -40,13 +40,18 @@
DEFAULTSECTION := $(sqlmansect_dummy)
man: slony.sgml $(ALLSGML)
+ifdef NSGMLS
+ifdef SGMLSPL
+ifdef D2MSCRIPT
$(NSGMLS) $(NSGMLS_FLAGS) $< | $(SGMLSPL) $(D2MSCRIPT) --lowercase --section $(DEFAULTSECTION) --date "`date '+%Y-%m-%d'`"
# One more time, to resolve cross-references
$(NSGMLS) $(NSGMLS_FLAGS) $< | $(SGMLSPL) $(D2MSCRIPT) --lowercase --section $(DEFAULTSECTION) --date "`date '+%Y-%m-%d'`"
mkdir -p man1 man$(DEFAULTSECTION)
mv *.1 man1/
mv *.$(DEFAULTSECTION) man$(DEFAULTSECTION)/
-
+endif
+endif
+endif
##
## HTML
@@ -54,6 +59,30 @@
all: html man
+installdirs:
+ifdef DOCDIR
+ $(mkinstalldirs) $(DOCDIR)/sgml
+ $(mkinstalldirs) $(DOCDIR)/html
+ $(mkinstalldirs) $(DOCDIR)/man1
+ $(mkinstalldirs) $(DOCDIR)/man$(DEFAULTSECTION)
+endif
+
+install: all man installdirs
+ifdef DOCDIR
+ for file in $(ALLSGML) ; do \
+ $(INSTALL_DATA) $$file $(DOCDIR)/sgml; \
+ done
+ for file in $(wildcard man1/*) ; do \
+ $(INSTALL_DATA) $$file $(DOCDIR)/man1 ;\
+ done
+ for file in $(wildcard man$(DEFAULTSECTION)/*) ; do \
+ $(INSTALL_DATA) $$file $(DOCDIR)/man$(DEFAULTSECTION) ;\
+ done
+ for file in $(wildcard *.html) stylesheet.css ; do \
+ $(INSTALL_DATA) $$file $(DOCDIR)/html ;\
+ done
+endif
+
.PHONY: html
ifndef JADE
@@ -61,7 +90,7 @@
@echo "You must have jade installed to build the documentation." && exit;
else
html: slony.sgml $(ALLSGML) stylesheet.dsl
- @rm -f *.html
+ @rm -f *.html ;\
$(JADE) $(JADEFLAGS) $(SGMLINCLUDE) $(CATALOG) -d stylesheet.dsl -ioutput-html -t sgml $<
ifeq ($(vpath_build), yes)
@cp $(srcdir)/stylesheet.css .
Index: Makefile
===================================================================
RCS file: /usr/local/cvsroot/slony1/slony1-engine/doc/concept/Makefile,v
retrieving revision 1.6
retrieving revision 1.7
diff -Ldoc/concept/Makefile -Ldoc/concept/Makefile -u -w -r1.6 -r1.7
--- doc/concept/Makefile
+++ doc/concept/Makefile
@@ -11,12 +11,8 @@
slony_top_builddir = ../..
include $(slony_top_builddir)/Makefile.global
-GROFF = groff
GOPTS = -U -t -p -ms -mpspic
-PS2PDF = ps2pdf
-DJPEG = djpeg
-PNMTOPS = pnmtops
# IMG_WID = 1799
# IMG_HT = 1440
IMG_WID = 900
@@ -46,7 +42,17 @@
all-txt: $(TXT)
-install installdirs: all
+installdirs:
+ifdef DOCDIR
+ $(mkinstalldirs) $(DOCDIR)/concept
+endif
+
+install: installdirs all
+ifdef DOCDIR
+ for file in $(PS) $(PDF) $(TXT) Slon_$(IMG_WID)x$(IMG_HT).jpg ; do \
+ $(INSTALL_DATA) $$file $(DOCDIR)/concept ; \
+ done
+endif
clean distclean maintainer-clean:
rm -f $(PS) $(PDF) $(TXT) tmp.* Slon.eps
@@ -56,9 +62,12 @@
Slony-I-concept.txt: $(SRC_CONCEPT)
Slon.eps: Slon_$(IMG_WID)x$(IMG_HT).jpg
- djpeg $< | pnmtops -rle -noturn >$@
+ifdef DJPEG
+ $(DJPEG) $< | $(PNMTOPS) -rle -noturn >$@
+endif
%.ps: %.nr
+ifdef GROFF
@echo ".XS 1" > tmp.idx
@echo "dummy" >>tmp.idx
@echo ".XE" >>tmp.idx
@@ -73,8 +82,10 @@
$(GROFF) $(GOPTS) $< >$@ ; \
done
@rm -f tmp.*
+endif
%.txt: %.nr
+ifdef GROFF
@echo ".XS 1" > tmp.idx
@echo "dummy" >>tmp.idx
@echo ".XE" >>tmp.idx
@@ -89,9 +100,12 @@
$(GROFF) -Tlatin1 $(GOPTS) $< >$@ ; \
done
@rm -f tmp.*
+endif
%.pdf: %.ps
+ifdef PS2PDF
$(PS2PDF) $<
+endif
distdir: $(DISTFILES)
mkdir $(distdir)/$(subdir)
Index: Makefile
===================================================================
RCS file: /usr/local/cvsroot/slony1/slony1-engine/doc/howto/Makefile,v
retrieving revision 1.6
retrieving revision 1.7
diff -Ldoc/howto/Makefile -Ldoc/howto/Makefile -u -w -r1.6 -r1.7
--- doc/howto/Makefile
+++ doc/howto/Makefile
@@ -18,35 +18,16 @@
cp $$file $(distdir)/$(subdir)/$$file ; \
done
-# Here's a somewhat fiddly way of generating documentation for the set
-# of functions and tables using Rod Taylor's postgresql_autodoc tool
-# schemadoc is actually likely to be checked into CVS, so you don't
-# _always_ want to recreate it
+all:
-# Assumptions:
-# - it's safe to create database "schemadoc" on a local database
-# - my "createlang" hides in a bit of an odd place
-# - "make clean" should really drop the database
-# - you need to manually drop the database before regenning the docs
+installdirs:
+ifdef DOCDIR
+ $(mkinstalldirs) $(DOCDIR)/howto
+endif
-BASEDIR=$(slony_top_builddir)/src/backend
-BASESQL=$(BASEDIR)/slony1_base.sql
-BASEFUNS=$(BASEDIR)/slony1_funcs.sql
-XIDSQL=localxid.sql
-# Might want to add version-specific functions, too...
-TEMPDB=schemadoc
-TEMPSCHEMA=schemadoc
-CREATELANG=$(pgbindir)/createlang # That's how it is for me...
-AUTODOC=postgresql_autodoc
-
-schemadoc.html: $(BASESQL) $(BASEFUNS) $(XIDDIR)
- $(pgbindir)/createdb $(TEMPDB) && ( \
- $(CREATELANG) plpgsql $(TEMPDB) && \
- echo "drop schema $(TEMPSCHEMA);create schema $(TEMPSCHEMA);" | $(pgbindir)/psql $(TEMPDB) && \
- cat $(XIDSQL) $(BASEFUNS) $(BASESQL) | sed -e "s/@NAMESPACE@/$(TEMPSCHEMA)/g" -e "s/@CLUSTERNAME@/$(TEMPSCHEMA)/g" | $(pgbindir)/psql $(TEMPDB) && \
- $(AUTODOC) -d $(TEMPDB) -s $(TEMPSCHEMA) -t html ;\
- @$(pgbindir)/dropdb $(TEMPDB) >/dev/null 2>&1
- ) || echo "unable to createdb $(TEMPDB)"
-
-clean:
- @$(pgbindir)/dropdb $(TEMPDB) || echo "unable to dropdb $(TEMPDB)"
+install: all installdirs
+ifdef DOCDIR
+ for file in $(wildcard *.txt) $(wildcard *.html ); do \
+ $(INSTALL_DATA) $$file $(DOCDIR)/howto ; \
+ done
+endif
Index: Makefile
===================================================================
RCS file: /usr/local/cvsroot/slony1/slony1-engine/doc/implementation/Makefile,v
retrieving revision 1.5
retrieving revision 1.6
diff -Ldoc/implementation/Makefile -Ldoc/implementation/Makefile -u -w -r1.5 -r1.6
--- doc/implementation/Makefile
+++ doc/implementation/Makefile
@@ -11,12 +11,8 @@
slony_top_builddir = ../..
include $(slony_top_builddir)/Makefile.global
-GROFF = groff
GOPTS = -U -t -p -ms -mpspic
-PS2PDF = ps2pdf
-DJPEG = djpeg
-PNMTOPS = pnmtops
# IMG_WID = 1799
# IMG_HT = 1440
IMG_WID = 900
@@ -44,7 +40,17 @@
all-txt: $(TXT)
-install installdirs: all
+installdirs:
+ifdef DOCDIR
+ $(mkinstalldirs) $(DOCDIR)/implementation
+endif
+
+install: all installdirs
+ifdef DOCDIR
+ for file in $(PS) $(PDF) $(TXT) Slon.eps Slon_$(IMG_WID)x$(IMG_HT).jpg ; do \
+ $(INSTALL_DATA) $$file $(DOCDIR)/implementation ; \
+ done
+endif
clean distclean maintainer-clean:
rm -f $(PS) $(PDF) $(TXT) tmp.* Slon.eps
@@ -54,9 +60,12 @@
Slony-I-implementation.txt: $(SRC_CONCEPT)
Slon.eps: Slon_$(IMG_WID)x$(IMG_HT).jpg
- djpeg $< | pnmtops -rle -noturn >$@
+ifdef DJPEG
+ $(DJPEG) $< | $(PNMTOPS) -rle -noturn >$@
+endif
%.ps: %.nr
+ifdef GROFF
@echo ".XS 1" > tmp.idx
@echo "dummy" >>tmp.idx
@echo ".XE" >>tmp.idx
@@ -71,8 +80,10 @@
$(GROFF) $(GOPTS) $< >$@ ; \
done
@rm -f tmp.*
+endif
%.txt: %.nr
+ifdef GROFF
@echo ".XS 1" > tmp.idx
@echo "dummy" >>tmp.idx
@echo ".XE" >>tmp.idx
@@ -87,9 +98,12 @@
$(GROFF) -Tlatin1 $(GOPTS) $< >$@ ; \
done
@rm -f tmp.*
+endif
%.pdf: %.ps
+ifdef PS2PDF
$(PS2PDF) $<
+endif
distdir: $(DISTFILES)
mkdir $(distdir)/$(subdir)
Index: Makefile.win
===================================================================
RCS file: /usr/local/cvsroot/slony1/slony1-engine/makefiles/Makefile.win,v
retrieving revision 1.4
retrieving revision 1.5
diff -Lmakefiles/Makefile.win -Lmakefiles/Makefile.win -u -w -r1.4 -r1.5
--- makefiles/Makefile.win
+++ makefiles/Makefile.win
@@ -14,7 +14,7 @@
AROPT = crs
DLSUFFIX = .dll
CFLAGS_SL =
-override CFLAGS += -I/usr/include/postgresql/server -DCYGWIN=1
+override CFLAGS += -DCYGWIN=1
%.dll: %.o
$(DLLTOOL) --export-all --output-def $*.def $<
Index: Makefile
===================================================================
RCS file: /usr/local/cvsroot/slony1/slony1-engine/doc/Makefile,v
retrieving revision 1.10
retrieving revision 1.11
diff -Ldoc/Makefile -Ldoc/Makefile -u -w -r1.10 -r1.11
--- doc/Makefile
+++ doc/Makefile
@@ -11,13 +11,11 @@
slony_top_builddir = ..
include $(slony_top_builddir)/Makefile.global
-SUBDIRS = concept implementation howto adminguide support
+SUBDIRS = concept implementation howto adminguide
DISTFILES = Makefile
-all install installdirs:
-
-clean distclean maintainer-clean:
+all clean distclean maintainer-clean install installdirs:
@for subdir in $(SUBDIRS) ; do \
$(MAKE) -C $$subdir $@ ; \
done
Index: slon.c
===================================================================
RCS file: /usr/local/cvsroot/slony1/slony1-engine/src/slon/slon.c,v
retrieving revision 1.47
retrieving revision 1.48
diff -Lsrc/slon/slon.c -Lsrc/slon/slon.c -u -w -r1.47 -r1.48
--- src/slon/slon.c
+++ src/slon/slon.c
@@ -82,8 +82,10 @@
pid_t pid;
extern int optind;
extern char *optarg;
- struct sigaction act;
+#ifndef CYGWIN
+ struct sigaction act;
+#endif
InitializeConfOptions();
while ((c = getopt(argc, argv, "f:a:d:s:t:g:c:p:o:hv")) != EOF)
@@ -671,15 +673,20 @@
slon_log(SLON_DEBUG2, "slon: begin signal handler setup\n");
+#ifndef (CYGWIN)
act.sa_handler = &sighandler;
sigemptyset(&act.sa_mask);
act.sa_flags = SA_NODEFER;
if (sigaction(SIGHUP,&act,NULL) < 0)
+#else
+ if (signal(SIGHUP,sighandler) == SIG_ERR)
+#endif
{
slon_log(SLON_FATAL, "slon: SIGHUP signal handler setup failed -(%d) %s\n", errno,strerror(errno));
slon_exit(-1);
}
+
if (signal(SIGINT,sighandler) == SIG_ERR)
{
slon_log(SLON_FATAL, "slon: SIGINT signal handler setup failed -(%d) %s\n", errno,strerror(errno));
From cvsuser Wed Mar 23 23:06:53 2005
From: cvsuser (CVS User Account)
Date: Tue Feb 13 08:58:16 2007
Subject: [Slony1-commit] By darcyb: From Andreas Pflug: In
Message-ID: <20050323230651.26A88B1C243@gborg.postgresql.org>
Log Message:
-----------
>From Andreas Pflug:
In schedule.c and slon.c, pthread_self() result is compared with a
pthread_t variable, which isn't the recommended way to check for
equality; instead pthread_equal should be used. Under win32 this fails,
because pthread_t isn't a simple value. The attached file provides a
patch for this, and should make this portable for all pthread_t platforms.
Modified Files:
--------------
slony1-engine/src/slon:
slon.c (r1.48 -> r1.49)
scheduler.c (r1.19 -> r1.20)
-------------- next part --------------
Index: scheduler.c
===================================================================
RCS file: /usr/local/cvsroot/slony1/slony1-engine/src/slon/scheduler.c,v
retrieving revision 1.19
retrieving revision 1.20
diff -Lsrc/slon/scheduler.c -Lsrc/slon/scheduler.c -u -w -r1.19 -r1.20
--- src/slon/scheduler.c
+++ src/slon/scheduler.c
@@ -655,7 +655,7 @@
* Lock the master mutex and make sure that we are the main thread
*/
pthread_mutex_lock(&sched_master_lock);
- if (pthread_self() != sched_main_thread)
+ if (!pthread_equal(pthread_self(), sched_main_thread))
{
slon_log(SLON_FATAL, "sched_sighandler: called in non-main thread\n");
slon_abort();
Index: slon.c
===================================================================
RCS file: /usr/local/cvsroot/slony1/slony1-engine/src/slon/slon.c,v
retrieving revision 1.48
retrieving revision 1.49
diff -Lsrc/slon/slon.c -Lsrc/slon/slon.c -u -w -r1.48 -r1.49
--- src/slon/slon.c
+++ src/slon/slon.c
@@ -739,7 +739,7 @@
static void
main_sigalrmhandler(int signo)
{
- if (main_thread == pthread_self())
+ if (pthread_equal(main_thread, pthread_self()))
{
alarm(0);
slon_log(SLON_WARN, "main: shutdown timeout exiting\n");
From cvsuser Fri Mar 25 09:45:30 2005
From: cvsuser (CVS User Account)
Date: Tue Feb 13 08:58:16 2007
Subject: [Slony1-commit] By xfade: Fix make clean.
Message-ID: <20050325094527.E5EECB1C23B@gborg.postgresql.org>
Log Message:
-----------
Fix make clean. Added -f to the rm statements. This prevents an error from being raised if a file is not there.
Modified Files:
--------------
slony1-engine/tools/altperl:
Makefile (r1.8 -> r1.9)
-------------- next part --------------
Index: Makefile
===================================================================
RCS file: /usr/local/cvsroot/slony1/slony1-engine/tools/altperl/Makefile,v
retrieving revision 1.8
retrieving revision 1.9
diff -Ltools/altperl/Makefile -Ltools/altperl/Makefile -u -w -r1.8 -r1.9
--- tools/altperl/Makefile
+++ tools/altperl/Makefile
@@ -59,6 +59,6 @@
clean distclean maintainer-clean:
@for file in $(wildcard *.pl) $(wildcard *.pm); do \
tmpname=`echo $$file | $(SED) "s#\.p[lm]##"` ; \
- rm $$tmpname ; \
+ rm -f $$tmpname ; \
done
From cvsuser Fri Mar 25 09:49:39 2005
From: cvsuser (CVS User Account)
Date: Tue Feb 13 08:58:16 2007
Subject: [Slony1-commit] By xfade: Fix cygwin build.
Message-ID: <20050325094936.89BC7B1C23B@gborg.postgresql.org>
Log Message:
-----------
Fix cygwin build. Added a cygwin case to the pthread macro. Added the CPPFLAGS to the Makefile.win. Override the CFLAGS in slon/Makefile to ensure that config.h can be found.
Modified Files:
--------------
slony1-engine/config:
acx_pthread.m4 (r1.6 -> r1.7)
slony1-engine/makefiles:
Makefile.win (r1.5 -> r1.6)
slony1-engine/src/slon:
Makefile (r1.28 -> r1.29)
-------------- next part --------------
Index: acx_pthread.m4
===================================================================
RCS file: /usr/local/cvsroot/slony1/slony1-engine/config/acx_pthread.m4,v
retrieving revision 1.6
retrieving revision 1.7
diff -Lconfig/acx_pthread.m4 -Lconfig/acx_pthread.m4 -u -w -r1.6 -r1.7
--- config/acx_pthread.m4
+++ config/acx_pthread.m4
@@ -71,6 +71,9 @@
acx_pthread_flags="-pthread -pthreads pthread -mt $acx_pthread_flags"
;;
+ *cygwin*)
+ acx_pthread_flags="-lpthread $acx_pthread_flags"
+ ;;
esac
if test x"$acx_pthread_ok" = xno; then
Index: Makefile.win
===================================================================
RCS file: /usr/local/cvsroot/slony1/slony1-engine/makefiles/Makefile.win,v
retrieving revision 1.5
retrieving revision 1.6
diff -Lmakefiles/Makefile.win -Lmakefiles/Makefile.win -u -w -r1.5 -r1.6
--- makefiles/Makefile.win
+++ makefiles/Makefile.win
@@ -2,7 +2,7 @@
LDFLAGS+= -g
DLLTOOL= dlltool
DLLWRAP= dllwrap
-DLLLIBS= -lcygipc -lcrypt -lpostgres
+DLLLIBS= -lcygipc -lcrypt -lpostgres -lpthread
BE_DLLLIBS=-lpostgres
MK_NO_LORDER=true
MAKE_DLL=true
@@ -21,6 +21,6 @@
$(DLLWRAP) -o $@ --def $*.def $< $(DLLLIBS)
rm -f $*.def
%.o: %.c
- $(CC) -c $(CFLAGS) -o $@ $<
+ $(CC) -c $(CFLAGS) $(CPPFLAGS) -o $@ $<
sqlmansect = 7
Index: Makefile
===================================================================
RCS file: /usr/local/cvsroot/slony1/slony1-engine/src/slon/Makefile,v
retrieving revision 1.28
retrieving revision 1.29
diff -Lsrc/slon/Makefile -Lsrc/slon/Makefile -u -w -r1.28 -r1.29
--- src/slon/Makefile
+++ src/slon/Makefile
@@ -13,7 +13,7 @@
CC = $(PTHREAD_CC)
-CFLAGS+= $(PTHREAD_CFLAGS) -I$(slony_top_builddir) -I$(pgincludedir)
+override CFLAGS += $(PTHREAD_CFLAGS) -I$(slony_top_builddir) -I$(pgincludedir)
LDFLAGS+= $(rpath) $(PTHREAD_LIBS) -L$(pglibdir) -lpq
From cvsuser Fri Mar 25 13:30:57 2005
From: cvsuser (CVS User Account)
Date: Tue Feb 13 08:58:16 2007
Subject: [Slony1-commit] By xfade: Fix cygwin macro.
Message-ID: <20050325133053.20BAFB1C674@gborg.postgresql.org>
Log Message:
-----------
Fix cygwin macro.
Modified Files:
--------------
slony1-engine/src/slon:
slon.c (r1.49 -> r1.50)
-------------- next part --------------
Index: slon.c
===================================================================
RCS file: /usr/local/cvsroot/slony1/slony1-engine/src/slon/slon.c,v
retrieving revision 1.49
retrieving revision 1.50
diff -Lsrc/slon/slon.c -Lsrc/slon/slon.c -u -w -r1.49 -r1.50
--- src/slon/slon.c
+++ src/slon/slon.c
@@ -673,7 +673,7 @@
slon_log(SLON_DEBUG2, "slon: begin signal handler setup\n");
-#ifndef (CYGWIN)
+#ifndef CYGWIN
act.sa_handler = &sighandler;
sigemptyset(&act.sa_mask);
act.sa_flags = SA_NODEFER;
From cvsuser Tue Mar 29 17:24:52 2005
From: cvsuser (CVS User Account)
Date: Tue Feb 13 08:58:16 2007
Subject: [Slony1-commit] By cbbrowne: If desired sync time is zero,
then eliminate evaluation of
Message-ID: <20050329162451.3F6ADB1C733@gborg.postgresql.org>
Log Message:
-----------
If desired sync time is zero, then eliminate evaluation of the
"desired sync time" logic.
Modified Files:
--------------
slony1-engine/src/slon:
remote_worker.c (r1.78 -> r1.79)
-------------- next part --------------
Index: remote_worker.c
===================================================================
RCS file: /usr/local/cvsroot/slony1/slony1-engine/src/slon/remote_worker.c,v
retrieving revision 1.78
retrieving revision 1.79
diff -Lsrc/slon/remote_worker.c -Lsrc/slon/remote_worker.c -u -w -r1.78 -r1.79
--- src/slon/remote_worker.c
+++ src/slon/remote_worker.c
@@ -215,7 +215,7 @@
int last_sync_group_size;
int next_sync_group_size;
-int desired_sync_time = 60000;
+int desired_sync_time;
int ideal_sync ;
struct timeval sync_start;
struct timeval sync_end;
@@ -476,14 +476,18 @@
* Estimate an "ideal" number of syncs based on how long
* they took last time
*/
+ if (desired_sync_time != 0) {
ideal_sync = (last_sync_group_size * desired_sync_time) / last_sync_length;
+ } else {
+ ideal_sync = sync_group_maxsize;
+ }
max_sync = ((last_sync_group_size * 110) / 100) + 1;
next_sync_group_size = ideal_sync;
if (next_sync_group_size > max_sync)
next_sync_group_size = max_sync;
if (next_sync_group_size < 1)
next_sync_group_size = 1;
- slon_log(SLON_DEBUG2, "calc sync size - last time: %d last length: %d ideal: %d proposed size: %d\n",
+ slon_log(SLON_DEBUG3, "calc sync size - last time: %d last length: %d ideal: %d proposed size: %d\n",
last_sync_group_size, last_sync_length, ideal_sync, next_sync_group_size);
}
From cvsuser Tue Mar 29 17:28:34 2005
From: cvsuser (CVS User Account)
Date: Tue Feb 13 08:58:16 2007
Subject: [Slony1-commit] By cbbrowne: Two changes to options: 1.
Message-ID: <20050329162832.5910EB1C733@gborg.postgresql.org>
Log Message:
-----------
Two changes to options:
1. Maximum SYNC grouping changed to 10000
Rod Taylor reports a need for Big Grouping for cases where replication
falls WAY behind after copying Very Large replication sets. Query plans
can 'go pathological' such that things revert to Seq Scans, and if
you're doing a huge Seq Scan, you might as well process a bunch of groups
in view of the up-front cost of the Seq Scan
2. Maximum desired_sync_time increased to 6000000
To support the above, potentially...
Modified Files:
--------------
slony1-engine/src/slon:
confoptions.h (r1.17 -> r1.18)
-------------- next part --------------
Index: confoptions.h
===================================================================
RCS file: /usr/local/cvsroot/slony1/slony1-engine/src/slon/confoptions.h,v
retrieving revision 1.17
retrieving revision 1.18
diff -Lsrc/slon/confoptions.h -Lsrc/slon/confoptions.h -u -w -r1.17 -r1.18
--- src/slon/confoptions.h
+++ src/slon/confoptions.h
@@ -154,7 +154,7 @@
&sync_group_maxsize,
20,
0,
- 500
+ 10000
},
{
{
@@ -167,8 +167,8 @@
},
&desired_sync_time,
60000,
- 10000,
- 600000
+ 0,
+ 6000000
},
#ifdef HAVE_SYSLOG
{
From cvsuser Tue Mar 29 17:33:57 2005
From: cvsuser (CVS User Account)
Date: Tue Feb 13 08:58:16 2007
Subject: [Slony1-commit] By cbbrowne: Changes to cleanup thread 1.
Message-ID: <20050329163354.41992B1C6F3@gborg.postgresql.org>
Log Message:
-----------
Changes to cleanup thread
1. Vacuum pg_statistic
2. Change calculation of random biases (e.g. - vac_bias and the
per-iteration random value) to be proportional to SLON_CLEANUP_SLEEP.
That way, if one modifies SLON_CLEANUP_SLEEP, the random adjustments
will neither dominate nor disappear.
3. Change log message to describe the _real_ error - being unable to
process the getMinXid() query. (It was referring to the old
approach where it was looking at pg_locks.)
Modified Files:
--------------
slony1-engine/src/slon:
cleanup_thread.c (r1.23 -> r1.24)
-------------- next part --------------
Index: cleanup_thread.c
===================================================================
RCS file: /usr/local/cvsroot/slony1/slony1-engine/src/slon/cleanup_thread.c,v
retrieving revision 1.23
retrieving revision 1.24
diff -Lsrc/slon/cleanup_thread.c -Lsrc/slon/cleanup_thread.c -u -w -r1.23 -r1.24
--- src/slon/cleanup_thread.c
+++ src/slon/cleanup_thread.c
@@ -43,8 +43,12 @@
"%s.sl_log_1",
"%s.sl_log_2",
"%s.sl_seqlog",
- "pg_catalog.pg_listener"};
-static char tstring[255];
+ "pg_catalog.pg_listener",
+ "pg_catalog.pg_statistic"};
+
+#define MAX_VAC_TABLE 9 /* Add to this if additional tables are added above */
+
+static char tstring[255]; /* string used to store table names for the VACUUM statements */
/*
* ---------- cleanupThread_main
@@ -75,7 +79,7 @@
/* Want the vacuum time bias to be between 0 and 100 seconds,
* hence between 0 and 100000 */
if (vac_bias == 0) {
- vac_bias = rand() % 100000;
+ vac_bias = rand() % ( SLON_CLEANUP_SLEEP * 166 );
}
slon_log(SLON_DEBUG4, "cleanupThread: bias = %d\n", vac_bias);
@@ -105,7 +109,7 @@
* slons hitting the same cluster will run into conflicts due
* to trying to vacuum pg_listener concurrently
*/
- while (sched_wait_time(conn, SCHED_WAIT_SOCK_READ, SLON_CLEANUP_SLEEP * 1000 + vac_bias + (rand() % 100000)) == SCHED_STATUS_OK)
+ while (sched_wait_time(conn, SCHED_WAIT_SOCK_READ, SLON_CLEANUP_SLEEP * 1000 + vac_bias + (rand() % (SLON_CLEANUP_SLEEP * 166))) == SCHED_STATUS_OK)
{
/*
* Call the stored procedure cleanupEvent()
@@ -210,7 +214,7 @@
*/
dstring_init(&query3);
gettimeofday(&tv_start, NULL);
- for (t=0; t < 8; t++) {
+ for (t=0; t < MAX_VAC_TABLE; t++) {
sprintf(tstring, table_list[t], rtcfg_namespace);
slon_mkquery(&query3,
"%s %s;",
@@ -260,7 +264,13 @@
pthread_exit(NULL);
}
-/* "_T1".getMinXid(); */
+/* get_earliest_xid() reads the earliest XID that is still active.
+
+ The idea is that if, between cleanupThread iterations, this XID has
+ not changed, then an old transaction is still in progress,
+ PostgreSQL is holding onto the tuples, and there is no value in
+ doing VACUUMs of the various Slony-I tables.
+*/
static unsigned long get_earliest_xid (PGconn *dbconn) {
long long xid;
@@ -271,7 +281,7 @@
slon_mkquery(&query1, "select %s.getMinXid();", rtcfg_namespace);
res = PQexec(dbconn, dstring_data(&query1));
if (PQresultStatus(res) != PGRES_TUPLES_OK) {
- slon_log(SLON_FATAL, "cleanupThread: could not read locks from pg_locks!");
+ slon_log(SLON_FATAL, "cleanupThread: could not getMinXid()!\n");
PQclear(res);
slon_abort();
return -1;
From cvsuser Wed Mar 30 16:24:17 2005
From: cvsuser (CVS User Account)
Date: Tue Feb 13 08:58:16 2007
Subject: [Slony1-commit] By xfade: First step of implementing
slon_quote_ident.
Message-ID: <20050330152413.F28E7B1C812@gborg.postgresql.org>
Log Message:
-----------
First step of implementing slon_quote_ident. This adds code taken from PostgreSQL -HEAD. Our function now quotes all identifiers known by your version of PostgreSQL.
Modified Files:
--------------
slony1-engine/src/backend:
slony1_funcs.c (r1.28 -> r1.29)
-------------- next part --------------
Index: slony1_funcs.c
===================================================================
RCS file: /usr/local/cvsroot/slony1/slony1-engine/src/backend/slony1_funcs.c,v
retrieving revision 1.28
retrieving revision 1.29
diff -Lsrc/backend/slony1_funcs.c -Lsrc/backend/slony1_funcs.c -u -w -r1.28 -r1.29
--- src/backend/slony1_funcs.c
+++ src/backend/slony1_funcs.c
@@ -3,7 +3,7 @@
*
* The C functions and triggers portion of Slony-I.
*
- * Copyright (c) 2003-2004, PostgreSQL Global Development Group
+ * Copyright (c) 2003-2005, PostgreSQL Global Development Group
* Author: Jan Wieck, Afilias USA INC.
*
* $Id$
@@ -46,6 +46,9 @@
PG_FUNCTION_INFO_V1(_Slony_I_terminateNodeConnections);
PG_FUNCTION_INFO_V1(_Slony_I_cleanupListener);
+PG_FUNCTION_INFO_V1(_slon_quote_ident);
+
+
Datum _Slony_I_createEvent(PG_FUNCTION_ARGS);
Datum _Slony_I_getLocalNodeId(PG_FUNCTION_ARGS);
Datum _Slony_I_getModuleVersion(PG_FUNCTION_ARGS);
@@ -58,6 +61,9 @@
Datum _Slony_I_terminateNodeConnections(PG_FUNCTION_ARGS);
Datum _Slony_I_cleanupListener(PG_FUNCTION_ARGS);
+Datum _slon_quote_ident(PG_FUNCTION_ARGS);
+
+
#ifdef CYGWIN
extern DLLIMPORT Node *newNodeMacroHolder;
#endif
@@ -110,6 +116,7 @@
static Slony_I_ClusterStatus *
getClusterStatus(Name cluster_name,
int need_plan_mask);
+const char * slon_quote_identifier(const char *ident);
static char *slon_quote_literal(char *str);
@@ -501,7 +508,7 @@
if ((col_value[i] = SPI_getvalue(new_row, tupdesc, i + 1)) == NULL)
continue;
- col_ident = (char *)quote_identifier(SPI_fname(tupdesc, i + 1));
+ col_ident = (char *)slon_quote_identifier(SPI_fname(tupdesc, i + 1));
col_value[i] = slon_quote_literal(col_value[i]);
cmddata_need = (cp - (char *)(cs->cmddata_buf)) + 16 +
@@ -688,7 +695,7 @@
else
need_comma = true;
- col_ident = (char *)quote_identifier(SPI_fname(tupdesc, i + 1));
+ col_ident = (char *)slon_quote_identifier(SPI_fname(tupdesc, i + 1));
if (new_isnull)
col_value = "NULL";
else
@@ -732,7 +739,7 @@
if (attkind[attkind_idx] == 'k')
break;
}
- col_ident = (char *)quote_identifier(SPI_fname(tupdesc, i + 1));
+ col_ident = (char *)slon_quote_identifier(SPI_fname(tupdesc, i + 1));
col_value = slon_quote_literal(SPI_getvalue(old_row, tupdesc, i + 1));
cmddata_need = (cp - (char *)(cs->cmddata_buf)) + 16 +
@@ -774,7 +781,7 @@
attkind_idx++;
if (attkind[attkind_idx] != 'k')
continue;
- col_ident = (char *)quote_identifier(SPI_fname(tupdesc, i + 1));
+ col_ident = (char *)slon_quote_identifier(SPI_fname(tupdesc, i + 1));
col_value = slon_quote_literal(SPI_getvalue(old_row, tupdesc, i + 1));
cmddata_need = (cp - (char *)(cs->cmddata_buf)) + 16 +
@@ -838,7 +845,7 @@
attkind_idx++;
if (attkind[attkind_idx] != 'k')
continue;
- col_ident = (char *)quote_identifier(SPI_fname(tupdesc, i + 1));
+ col_ident = (char *)slon_quote_identifier(SPI_fname(tupdesc, i + 1));
col_value = slon_quote_literal(SPI_getvalue(old_row, tupdesc, i + 1));
cmddata_need = (cp - (char *)(cs->cmddata_buf)) + 16 +
@@ -1097,7 +1104,7 @@
cp2 = result;
*cp2++ = '\'';
- while (len > 0)
+ while (len-- > 0)
{
if ((wl = pg_mblen(cp1)) != 1)
{
@@ -1113,8 +1120,6 @@
if (*cp1 == '\\')
*cp2++ = '\\';
*cp2++ = *cp1++;
-
- len--;
}
*cp2++ = '\'';
@@ -1124,6 +1129,124 @@
}
+/*
+ * slon_quote_identifier - Quote an identifier only if needed
+ *
+ * When quotes are needed, we palloc the required space; slightly
+ * space-wasteful but well worth it for notational simplicity.
+ *
+ * Version: pgsql/src/backend/utils/adt/ruleutils.c,v 1.188 2005/01/13 17:19:10
+ */
+const char *
+slon_quote_identifier(const char *ident)
+{
+ /*
+ * Can avoid quoting if ident starts with a lowercase letter or
+ * underscore and contains only lowercase letters, digits, and
+ * underscores, *and* is not any SQL keyword. Otherwise, supply
+ * quotes.
+ */
+ int nquotes = 0;
+ bool safe;
+ const char *ptr;
+ char *result;
+ char *optr;
+
+ /*
+ * would like to use macros here, but they might yield
+ * unwanted locale-specific results...
+ */
+ safe = ((ident[0] >= 'a' && ident[0] <= 'z') || ident[0] == '_');
+
+ for (ptr = ident; *ptr; ptr++)
+ {
+ char ch = *ptr;
+
+ if ((ch >= 'a' && ch <= 'z') ||
+ (ch >= '0' && ch <= '9') ||
+ (ch == '_'))
+ {
+ /* okay */
+ }
+ else
+ {
+ safe = false;
+ if (ch == '"')
+ nquotes++;
+ }
+ }
+
+ if (safe)
+ {
+ /*
+ * Check for keyword. This test is overly strong, since many of
+ * the "keywords" known to the parser are usable as column names,
+ * but the parser doesn't provide any easy way to test for whether
+ * an identifier is safe or not... so be safe not sorry.
+ *
+ * Note: ScanKeywordLookup() does case-insensitive comparison, but
+ * that's fine, since we already know we have all-lower-case.
+ */
+ if (ScanKeywordLookup(ident) != NULL)
+ safe = false;
+ }
+
+ if (safe)
+ return ident; /* no change needed */
+
+ result = (char *) palloc(strlen(ident) + nquotes + 2 + 1);
+
+ optr = result;
+ *optr++ = '"';
+ for (ptr = ident; *ptr; ptr++)
+ {
+ char ch = *ptr;
+
+ if (ch == '"')
+ *optr++ = '"';
+ *optr++ = ch;
+ }
+ *optr++ = '"';
+ *optr = '\0';
+
+ return result;
+}
+
+
+
+/*
+ * _slon_quote_ident -
+ * returns a properly quoted identifier
+ *
+ * Version: pgsql/src/backend/utils/adt/quote.c,v 1.14.4.1 2005/03/21 16:29:31
+ */
+Datum
+_slon_quote_ident(PG_FUNCTION_ARGS)
+{
+ text *t = PG_GETARG_TEXT_P(0);
+ text *result;
+ const char *qstr;
+ char *str;
+ int len;
+
+ /* We have to convert to a C string to use quote_identifier */
+ len = VARSIZE(t) - VARHDRSZ;
+ str = (char *) palloc(len + 1);
+ memcpy(str, VARDATA(t), len);
+ str[len] = '\0';
+
+ qstr = slon_quote_identifier(str);
+
+ len = strlen(qstr);
+ result = (text *) palloc(len + VARHDRSZ);
+ VARATT_SIZEP(result) = len + VARHDRSZ;
+ memcpy(VARDATA(result), qstr, len);
+
+ PG_RETURN_TEXT_P(result);
+}
+
+
+
static Slony_I_ClusterStatus *
getClusterStatus(Name cluster_name, int need_plan_mask)
{
From cvsuser Wed Mar 30 16:32:04 2005
From: cvsuser (CVS User Account)
Date: Tue Feb 13 08:58:16 2007
Subject: [Slony1-commit] By cbbrowne: Add a link to a Russian HOWTO document
for Slony-I.
Message-ID: <20050330153202.6BC7EB1CA74@gborg.postgresql.org>
Log Message:
-----------
Add a link to a Russian HOWTO document for Slony-I.
Modified Files:
--------------
slony1-engine/doc/adminguide:
help.sgml (r1.13 -> r1.14)
-------------- next part --------------
Index: help.sgml
===================================================================
RCS file: /usr/local/cvsroot/slony1/slony1-engine/doc/adminguide/help.sgml,v
retrieving revision 1.13
retrieving revision 1.14
diff -Ldoc/adminguide/help.sgml -Ldoc/adminguide/help.sgml -u -w -r1.13 -r1.14
--- doc/adminguide/help.sgml
+++ doc/adminguide/help.sgml
@@ -40,6 +40,11 @@
KirovOpenSourceCommunity: Slony may be the place to
go.
+ A Russian Setup /
+Example / HOWTO is available for Russian readers.
+
+
pgpool