Merge lp:~percona-toolkit-dev/percona-toolkit/test-pt-osc-master-to-cluster into lp:percona-toolkit/2.2
- test-pt-osc-master-to-cluster
- Merge into 2.2
Proposed by
Daniel Nichter
Status: | Merged |
---|---|
Merged at revision: | 548 |
Proposed branch: | lp:~percona-toolkit-dev/percona-toolkit/test-pt-osc-master-to-cluster |
Merge into: | lp:percona-toolkit/2.2 |
Diff against target: |
526 lines (+182/-67) 9 files modified
bin/pt-heartbeat (+1/-1) lib/Sandbox.pm (+1/-1) sandbox/jenkins-test (+24/-6) sandbox/start-sandbox (+10/-3) sandbox/test-env (+5/-4) t/pt-heartbeat/pxc.t (+40/-31) t/pt-online-schema-change/pxc.t (+51/-1) t/pt-table-checksum/pxc.t (+35/-17) util/make-barebones (+15/-3) |
To merge this branch: | bzr merge lp:~percona-toolkit-dev/percona-toolkit/test-pt-osc-master-to-cluster |
Related bugs: | |
Related blueprints: | PXC-certified Tools (Essential) |
Reviewer | Review Type | Date Requested | Status |
---|---|---|---|
Daniel Nichter | Approve | ||
Review via email: mp+152515@code.launchpad.net |
Commit message
Description of the change
To post a comment you must log in.
Preview Diff
[H/L] Next/Prev Comment, [J/K] Next/Prev File, [N/P] Next/Prev Hunk
1 | === modified file 'bin/pt-heartbeat' |
2 | --- bin/pt-heartbeat 2013-03-04 17:28:43 +0000 |
3 | +++ bin/pt-heartbeat 2013-03-08 21:04:28 +0000 |
4 | @@ -5395,7 +5395,7 @@ |
5 | |
6 | =head1 SYNOPSIS |
7 | |
8 | -Usage: pt-heartbeat [OPTION...] [DSN] --update|--monitor|--check|--stop |
9 | +Usage: pt-heartbeat [OPTIONS] [DSN] --update|--monitor|--check|--stop |
10 | |
11 | pt-heartbeat measures replication lag on a MySQL or PostgreSQL server. You can |
12 | use it to update a master or monitor a replica. If possible, MySQL connection |
13 | |
14 | === modified file 'lib/Sandbox.pm' |
15 | --- lib/Sandbox.pm 2013-02-19 20:01:58 +0000 |
16 | +++ lib/Sandbox.pm 2013-03-08 21:04:28 +0000 |
17 | @@ -341,7 +341,7 @@ |
18 | sub { |
19 | my ($pong) = $slave2_dbh->selectrow_array( |
20 | "SELECT ping FROM percona_test.sentinel WHERE id=1 /* wait_for_slaves */"); |
21 | - return $ping eq $pong; |
22 | + return $ping eq ($pong || ''); |
23 | }, undef, 300 |
24 | ); |
25 | } |
26 | |
27 | === modified file 'sandbox/jenkins-test' |
28 | --- sandbox/jenkins-test 2013-02-19 20:01:58 +0000 |
29 | +++ sandbox/jenkins-test 2013-03-08 21:04:28 +0000 |
30 | @@ -41,11 +41,13 @@ |
31 | ARCH="32" |
32 | fi |
33 | |
34 | +APP="${FORK:-"mysql"}" |
35 | + |
36 | MYSQL_BIN_DIR="$HOME/mysql-bin" |
37 | [ -d "$MYSQL_BIN_DIR" ] || mkdir "$MYSQL_BIN_DIR" |
38 | |
39 | find_mysql_base_dir() { |
40 | - find "$MYSQL_BIN_DIR" -name "mysql-$1*" -type d | tail -n 1 |
41 | + find "$MYSQL_BIN_DIR" -name "$APP-$1*" -type d | tail -n 1 |
42 | } |
43 | |
44 | MYSQL_BASE_DIR="$(find_mysql_base_dir $MYSQL)" |
45 | @@ -59,14 +61,25 @@ |
46 | if [ -z "$MYSQL_BASE_DIR" ]; then |
47 | ( |
48 | cd $MYSQL_BIN_DIR |
49 | - wget -q -O mysql.tar.gz http://hackmysql.com/barebones/mysql/$MYSQL/$ARCH |
50 | + wget -q -O mysql.tar.gz http://hackmysql.com/barebones/$APP/$MYSQL/$ARCH \ |
51 | + || exit 1 |
52 | tar xvfz mysql.tar.gz |
53 | rm mysql.tar.gz |
54 | ) |
55 | MYSQL_BASE_DIR="$(find_mysql_base_dir $MYSQL)" |
56 | fi |
57 | |
58 | -if [ -z "$("$MYSQL_BASE_DIR/bin/mysqld" -V)" ]; then |
59 | +if [ $APP = "mysql" ]; then |
60 | + mysqld_check="$("$MYSQL_BASE_DIR/bin/mysqld" -V)" |
61 | +elif [ $APP = "pxc" ]; then |
62 | + ip="$(perl -MNet::Address::IP::Local -le 'print Net::Address::IP::Local->public')" |
63 | + mysqld_check="$("$MYSQL_BASE_DIR/bin/mysqld" -V --bind-address $ip)" |
64 | +else |
65 | + echo "Invalid FORK=$APP" >&2 |
66 | + exit 1 |
67 | +fi |
68 | + |
69 | +if [ -z "$mysqld_check" ]; then |
70 | echo "$MYSQL_BASE_DIR/bin/mysqld does not execute" >&2 |
71 | exit 1 |
72 | fi |
73 | @@ -95,7 +108,14 @@ |
74 | sandbox/test-env checkconfig || exit 1 |
75 | sandbox/test-env stop || exit 1 |
76 | sandbox/test-env kill || exit 1 |
77 | -sandbox/test-env start || exit 1 |
78 | +if [ $APP = "mysql" ]; then |
79 | + sandbox/test-env start || exit 1 |
80 | +elif [ $APP = "pxc" ]; then |
81 | + sandbox/test-env start cluster || exit 1 |
82 | +else |
83 | + echo "Invalid FORK=$app" >&2 |
84 | + exit 1 |
85 | +fi |
86 | |
87 | ####################### |
88 | # Set debug env vars. # |
89 | @@ -122,8 +142,6 @@ |
90 | ############# |
91 | # Clean up. # |
92 | ############# |
93 | -set +x |
94 | sandbox/test-env stop |
95 | -set -x |
96 | |
97 | exit $EXIT_STATUS |
98 | |
99 | === modified file 'sandbox/start-sandbox' |
100 | --- sandbox/start-sandbox 2013-01-30 16:00:03 +0000 |
101 | +++ sandbox/start-sandbox 2013-03-08 21:04:28 +0000 |
102 | @@ -50,7 +50,6 @@ |
103 | if [ "${type}" = "cluster" ]; then |
104 | cp $PERCONA_TOOLKIT_BRANCH/sandbox/servers/pxc/$version/my.sandbox.cnf /tmp/$port |
105 | |
106 | - local ip="$(perl -MNet::Address::IP::Local -le 'print Net::Address::IP::Local->public')" |
107 | local libgalera="$PERCONA_TOOLKIT_SANDBOX/lib/libgalera_smm.so" |
108 | local cluster_name="${CLUSTER_NAME:-"pt_sandbox_cluster"}" |
109 | local cluster_address="gcomm://" |
110 | @@ -143,7 +142,6 @@ |
111 | |
112 | /tmp/$port/use -e "CREATE DATABASE IF NOT EXISTS percona_test"; |
113 | /tmp/$port/use -e "CREATE TABLE IF NOT EXISTS percona_test.sentinel (id INT PRIMARY KEY, ping VARCHAR(64) NOT NULL DEFAULT '')"; |
114 | - /tmp/$port/use -e "REPLACE INTO percona_test.sentinel (id, ping) VALUES (1, '')"; |
115 | |
116 | if [ -n "${MYSQL_UPGRADE:-""}" ]; then |
117 | mysql_upgrade_on /tmp/$port/my.sandbox.cnf |
118 | @@ -225,7 +223,16 @@ |
119 | else |
120 | die "Cannot find executable mysqld in $PERCONA_TOOLKIT_SANDBOX/bin, $PERCONA_TOOLKIT_SANDBOX/sbin or $PERCONA_TOOLKIT_SANDBOX/libexec." |
121 | fi |
122 | -version=`$PERCONA_TOOLKIT_SANDBOX/$mysqld -V 2>/dev/null | awk '{print $3}' | cut -d. -f 1,2`; |
123 | + |
124 | +APP="${FORK:-"mysql"}" |
125 | + |
126 | +if [ $type = "cluster" -o $APP = "pxc" ]; then |
127 | + ip=$(perl -MNet::Address::IP::Local -le 'print Net::Address::IP::Local->public') |
128 | + version=`$PERCONA_TOOLKIT_SANDBOX/$mysqld -V --bind-address $ip 2>/dev/null | awk '{print $3}' | cut -d. -f 1,2`; |
129 | +else |
130 | + version=`$PERCONA_TOOLKIT_SANDBOX/$mysqld -V 2>/dev/null | awk '{print $3}' | cut -d. -f 1,2`; |
131 | +fi |
132 | + |
133 | if [ ! -d "$PERCONA_TOOLKIT_BRANCH/sandbox/servers/$version" ]; then |
134 | die "$PERCONA_TOOLKIT_BRANCH/sandbox/servers/$version does not exist." |
135 | fi |
136 | |
137 | === modified file 'sandbox/test-env' |
138 | --- sandbox/test-env 2013-02-21 22:59:49 +0000 |
139 | +++ sandbox/test-env 2013-03-08 21:04:28 +0000 |
140 | @@ -334,10 +334,10 @@ |
141 | ../util/check-load-data |
142 | |
143 | ping=$(/tmp/12345/use -ss -e "SELECT MD5(RAND())") |
144 | - /tmp/12345/use -e "UPDATE percona_test.sentinel SET ping='$ping' WHERE id=1"; |
145 | + /tmp/12345/use -e "REPLACE INTO percona_test.sentinel (id, ping) VALUES (1, '$ping')"; |
146 | echo -n "Waiting for replication to finish..." |
147 | for i in $(_seq 60); do |
148 | - pong=$(/tmp/12347/use -ss -e 'SELECT ping FROM percona_test.sentinel WHERE id=1' 2>/dev/null) |
149 | + pong=$(/tmp/12347/use -ss -e "SELECT ping FROM percona_test.sentinel WHERE id=1 AND ping='$ping'" 2>/dev/null) |
150 | [ "$ping" = "$pong" ] && break |
151 | echo -n '.' |
152 | sleep 1 |
153 | @@ -379,8 +379,9 @@ |
154 | done |
155 | ;; |
156 | restart) |
157 | - $0 stop |
158 | - $0 start |
159 | + shift; |
160 | + $0 stop "$@" |
161 | + $0 start "$@" |
162 | ;; |
163 | status) |
164 | sandbox_status 'master' '12345' |
165 | |
166 | === modified file 't/pt-heartbeat/pxc.t' |
167 | --- t/pt-heartbeat/pxc.t 2012-12-14 01:38:40 +0000 |
168 | +++ t/pt-heartbeat/pxc.t 2013-03-08 21:04:28 +0000 |
169 | @@ -81,7 +81,7 @@ |
170 | exit 1; |
171 | } |
172 | push @exec_pids, $pid; |
173 | - |
174 | + |
175 | PerconaTest::wait_for_files($pidfile); |
176 | ok( |
177 | -f $pidfile, |
178 | @@ -115,11 +115,13 @@ |
179 | "Sanity check: All nodes are in the heartbeat table" |
180 | ); |
181 | |
182 | +# These values may be 0 or '' depending on whether or not a previous test |
183 | +# turned 12345 into a slave or not. For this purpose 0 == undef == ''. |
184 | my $only_slave_data = { |
185 | map { |
186 | $_ => { |
187 | - relay_master_log_file => $rows->{$_}->{relay_master_log_file}, |
188 | - exec_master_log_pos => $rows->{$_}->{exec_master_log_pos}, |
189 | + relay_master_log_file => $rows->{$_}->{relay_master_log_file} || undef, |
190 | + exec_master_log_pos => $rows->{$_}->{exec_master_log_pos} || undef, |
191 | } } keys %$rows |
192 | }; |
193 | |
194 | @@ -132,7 +134,7 @@ |
195 | 12347 => $same_data, |
196 | }, |
197 | "Sanity check: No slave data (relay log or master pos) is stored" |
198 | -); |
199 | +) or diag(Dumper($rows)); |
200 | |
201 | $output = output(sub{ |
202 | pt_heartbeat::main($node1_dsn, qw(-D test --check)), |
203 | @@ -235,11 +237,11 @@ |
204 | server => 'cslave1', |
205 | type => 'slave', |
206 | master => 'node1', |
207 | - env => q/BINLOG_FORMAT="ROW"/, |
208 | + env => q/FORK="pxc" BINLOG_FORMAT="ROW"/, |
209 | ); |
210 | |
211 | $sb->create_dbs($slave_dbh, ['test']); |
212 | - |
213 | +$sb->wait_for_slaves(master => 'node1', slave => 'cslave1'); |
214 | start_update_instance($sb->port_for('cslave1')); |
215 | PerconaTest::wait_for_table($slave_dbh, "test.heartbeat", "1=1"); |
216 | |
217 | @@ -292,31 +294,37 @@ |
218 | my ($master_dbh, $master_dsn) = $sb->start_sandbox( |
219 | server => 'cmaster', |
220 | type => 'master', |
221 | - env => q/BINLOG_FORMAT="ROW"/, |
222 | + env => q/FORK="pxc" BINLOG_FORMAT="ROW"/, |
223 | ); |
224 | |
225 | my $cmaster_port = $sb->port_for('cmaster'); |
226 | |
227 | $sb->create_dbs($master_dbh, ['test']); |
228 | - |
229 | +$master_dbh->do("INSERT INTO percona_test.sentinel (id, ping) VALUES (1, '')"); |
230 | $master_dbh->do("FLUSH LOGS"); |
231 | $master_dbh->do("RESET MASTER"); |
232 | |
233 | $sb->set_as_slave('node1', 'cmaster'); |
234 | +$sb->wait_for_slaves(master => 'cmaster', slave => 'node1'); |
235 | |
236 | start_update_instance($sb->port_for('cmaster')); |
237 | PerconaTest::wait_for_table($node1, "test.heartbeat", "server_id=$cmaster_port"); |
238 | |
239 | -$output = output(sub{ |
240 | - pt_heartbeat::main($node1_dsn, qw(-D test --check --print-master-server-id)), |
241 | - }, |
242 | +# Auto-detecting the master id only works when ran on node1, the direct |
243 | +# slave of the master, because other nodes aren't slaves, but this could |
244 | +# be made to work; see the node autodiscovery branch. |
245 | +$output = output( |
246 | + sub { |
247 | + pt_heartbeat::main($node1_dsn, |
248 | + qw(-D test --check --print-master-server-id) |
249 | + )}, |
250 | stderr => 1, |
251 | ); |
252 | |
253 | like( |
254 | $output, |
255 | qr/^\d.\d{2} $cmaster_port$/, |
256 | - "--print-master-id works for master -> $node1_port, when run from $node1_port" |
257 | + "Auto-detect master ID from node1" |
258 | ); |
259 | |
260 | # Wait until node2 & node3 get cmaster in their heartbeat tables |
261 | @@ -324,38 +332,39 @@ |
262 | $sb->wait_for_slaves(master => 'node1', slave => 'node3'); |
263 | |
264 | foreach my $test ( |
265 | - [ $node2_port, $node2_dsn, $node2 ], |
266 | - [ $node3_port, $node3_dsn, $node3 ], |
267 | + [ $node2_port, $node2_dsn, $node2, 'node2' ], |
268 | + [ $node3_port, $node3_dsn, $node3, 'node3' ], |
269 | ) { |
270 | - my ($port, $dsn, $dbh) = @$test; |
271 | + my ($port, $dsn, $dbh, $name) = @$test; |
272 | |
273 | - $output = output(sub{ |
274 | - pt_heartbeat::main($dsn, qw(-D test --check --print-master-server-id)), |
275 | - }, |
276 | + $output = output( |
277 | + sub { |
278 | + pt_heartbeat::main($dsn, |
279 | + qw(-D test --check --print-master-server-id) |
280 | + )}, |
281 | stderr => 1, |
282 | ); |
283 | |
284 | - # This could be made to work, see the node autodiscovery branch |
285 | - TODO: { |
286 | - local $::TODO = "cmaster -> node1, other nodes can't autodetect the master"; |
287 | - like( |
288 | - $output, |
289 | - qr/$cmaster_port/, |
290 | - "--print-master-id works for master -> $node1_port, when run from $port" |
291 | - ); |
292 | - } |
293 | + like( |
294 | + $output, |
295 | + qr/server's master could not be automatically determined/, |
296 | + "Limitation: cannot auto-detect master id from $name" |
297 | + ); |
298 | |
299 | - $output = output(sub{ |
300 | - pt_heartbeat::main($dsn, qw(-D test --check --master-server-id), $cmaster_port), |
301 | - }, |
302 | + $output = output( |
303 | + sub { |
304 | + pt_heartbeat::main($dsn, |
305 | + qw(-D test --check --master-server-id), $cmaster_port |
306 | + )}, |
307 | stderr => 1, |
308 | ); |
309 | |
310 | $output =~ s/\d\.\d{2}/0.00/g; |
311 | + |
312 | is( |
313 | $output, |
314 | "0.00\n", |
315 | - "--check + explicit --master-server-id work for master -> node1, run from $port" |
316 | + "$name --check --master-server-id $cmaster_port" |
317 | ); |
318 | } |
319 | |
320 | |
321 | === modified file 't/pt-online-schema-change/pxc.t' |
322 | --- t/pt-online-schema-change/pxc.t 2013-03-02 17:17:23 +0000 |
323 | +++ t/pt-online-schema-change/pxc.t 2013-03-08 21:04:28 +0000 |
324 | @@ -119,7 +119,7 @@ |
325 | $exit, |
326 | "wsrep_OSU_method=RSU: non-zero exit" |
327 | ) or diag($output); |
328 | -print $output; |
329 | + |
330 | like( |
331 | $output, |
332 | qr/wsrep_OSU_method=TOI is required.+?currently set to RSU/, |
333 | @@ -134,6 +134,56 @@ |
334 | ) or BAIL_OUT("Failed to restore wsrep_OSU_method=TOI"); |
335 | |
336 | # ############################################################################# |
337 | +# master -> cluster, run on master on table with foreign keys. |
338 | +# ############################################################################# |
339 | + |
340 | +# CAREFUL: The master and the cluster are different, so don't do stuff |
341 | +# on the master that will conflict with stuff already done on the cluster. |
342 | +# And since we're using RBR, we have to do a lot of stuff on the master |
343 | +# again, manually, because REPLACE and INSERT IGNORE don't work in RBR |
344 | +# like they do SBR. |
345 | + |
346 | +my ($master_dbh, $master_dsn) = $sb->start_sandbox( |
347 | + server => 'cmaster', |
348 | + type => 'master', |
349 | + env => q/FORK="pxc" BINLOG_FORMAT="ROW"/, |
350 | +); |
351 | + |
352 | +$sb->set_as_slave('node1', 'cmaster'); |
353 | + |
354 | +$sb->load_file('cmaster', "$sample/basic_with_fks.sql", undef, no_wait => 1); |
355 | + |
356 | +$master_dbh->do("SET SESSION binlog_format=STATEMENT"); |
357 | +$master_dbh->do("REPLACE INTO percona_test.sentinel (id, ping) VALUES (1, '')"); |
358 | +$sb->wait_for_slaves(master => 'cmaster', slave => 'node1'); |
359 | + |
360 | +($output, $exit) = full_output( |
361 | + sub { pt_online_schema_change::main( |
362 | + "$master_dsn,D=pt_osc,t=city", |
363 | + qw(--print --execute --alter-foreign-keys-method drop_swap), |
364 | + '--alter', 'DROP COLUMN last_update' |
365 | + )}, |
366 | + stderr => 1, |
367 | +); |
368 | + |
369 | +my $rows = $node1->selectrow_hashref("SHOW SLAVE STATUS"); |
370 | +is( |
371 | + $rows->{last_error}, |
372 | + "", |
373 | + "Alter table with foreign keys on master replicating to cluster" |
374 | +) or diag(Dumper($rows), $output); |
375 | + |
376 | +is( |
377 | + $exit, |
378 | + 0, |
379 | + "... exit 0" |
380 | +) or diag($output); |
381 | + |
382 | +$sb->stop_sandbox(qw(cmaster)); |
383 | +$node1->do("STOP SLAVE"); |
384 | +$node1->do("RESET SLAVE"); |
385 | + |
386 | +# ############################################################################# |
387 | # Done. |
388 | # ############################################################################# |
389 | $sb->wipe_clean($node1); |
390 | |
391 | === modified file 't/pt-table-checksum/pxc.t' |
392 | --- t/pt-table-checksum/pxc.t 2013-03-02 02:02:13 +0000 |
393 | +++ t/pt-table-checksum/pxc.t 2013-03-08 21:04:28 +0000 |
394 | @@ -183,7 +183,7 @@ |
395 | server => 'cslave1', |
396 | type => 'slave', |
397 | master => 'node1', |
398 | - env => q/BINLOG_FORMAT="ROW"/, |
399 | + env => q/FORK="pxc" BINLOG_FORMAT="ROW"/, |
400 | ); |
401 | |
402 | # Add the slave to the DSN table. |
403 | @@ -214,7 +214,7 @@ |
404 | |
405 | like( |
406 | $output, |
407 | - qr/replica h=127.1,P=12348 has binlog_format ROW/, |
408 | + qr/replica h=127.1,P=12348 has binlog_format ROW/i, |
409 | "--check-binlog-format warns about slave's binlog format" |
410 | ); |
411 | |
412 | @@ -251,7 +251,7 @@ |
413 | server => 'cslave1', |
414 | type => 'slave', |
415 | master => 'node2', |
416 | - env => q/BINLOG_FORMAT="ROW"/, |
417 | + env => q/FORK="pxc" BINLOG_FORMAT="ROW"/, |
418 | ); |
419 | |
420 | # Wait for the slave to apply the binlogs from node2 (its master). |
421 | @@ -291,28 +291,46 @@ |
422 | # master -> node1 in cluster, run on master |
423 | # ############################################################################# |
424 | |
425 | +# CAREFUL: The master and the cluster are different, so don't do stuff |
426 | +# on the master that will conflict with stuff already done on the cluster. |
427 | +# And since we're using RBR, we have to do a lot of stuff on the master |
428 | +# again, manually, because REPLACE and INSERT IGNORE don't work in RBR |
429 | +# like they do SBR. |
430 | + |
431 | my ($master_dbh, $master_dsn) = $sb->start_sandbox( |
432 | server => 'cmaster', |
433 | type => 'master', |
434 | - env => q/BINLOG_FORMAT="ROW"/, |
435 | + env => q/FORK="pxc" BINLOG_FORMAT="ROW"/, |
436 | ); |
437 | |
438 | -# CAREFUL: The master and the cluster are different, so we must load dbs on |
439 | -# the master then flush the logs, else node1 will apply the master's binlogs |
440 | -# and blow up because it already had these dbs. |
441 | - |
442 | -# Remember: this DSN table only has node2 and node3 (12346 and 12347) which is |
443 | -# sufficient for this test. |
444 | -$sb->load_file('cmaster', "$sample/dsn-table.sql"); |
445 | +# Since master is new, node1 shouldn't have binlog to replay. |
446 | +$sb->set_as_slave('node1', 'cmaster'); |
447 | |
448 | # We have to load a-z-cluster.sql else the pk id won'ts match because nodes use |
449 | # auto-inc offsets but the master doesn't. |
450 | -$sb->load_file('cmaster', "$sample/a-z-cluster.sql"); |
451 | - |
452 | -$master_dbh->do("FLUSH LOGS"); |
453 | -$master_dbh->do("RESET MASTER"); |
454 | - |
455 | -$sb->set_as_slave('node1', 'cmaster'); |
456 | +$sb->load_file('cmaster', "$sample/a-z-cluster.sql", undef, no_wait => 1); |
457 | + |
458 | +# Do this stuff manually and only on the master because node1/the cluster |
459 | +# already has it, and due to RBR, we can't do it other ways. |
460 | +$master_dbh->do("SET sql_log_bin=0"); |
461 | + |
462 | +# This DSN table does not include 12345 (node1/slave) intentionally, |
463 | +# so a later test can auto-find 12345 then warn "Diffs will only be |
464 | +# detected if the cluster is consistent with h=127.1,P=12345...". |
465 | +$master_dbh->do("CREATE DATABASE dsns"); |
466 | +$master_dbh->do("CREATE TABLE dsns.dsns ( |
467 | + id int auto_increment primary key, |
468 | + parent_id int default null, |
469 | + dsn varchar(255) not null |
470 | +)"); |
471 | +$master_dbh->do("INSERT INTO dsns.dsns VALUES |
472 | + (2, 1, 'h=127.1,P=12346,u=msandbox,p=msandbox'), |
473 | + (3, 2, 'h=127.1,P=12347,u=msandbox,p=msandbox')"); |
474 | + |
475 | +$master_dbh->do("INSERT INTO percona_test.sentinel (id, ping) VALUES (1, '')"); |
476 | +$master_dbh->do("SET sql_log_bin=1"); |
477 | + |
478 | +$sb->wait_for_slaves(master => 'cmaster', slave => 'node1'); |
479 | |
480 | # Notice: no --recursion-method=dsn yet. Since node1 is a traditional slave |
481 | # of the master, ptc should auto-detect it, which we'll test later by making |
482 | |
483 | === modified file 'util/make-barebones' |
484 | --- util/make-barebones 2012-10-16 15:58:29 +0000 |
485 | +++ util/make-barebones 2013-03-08 21:04:28 +0000 |
486 | @@ -8,9 +8,18 @@ |
487 | set -x |
488 | |
489 | tarball="$1" |
490 | -version=$(echo $tarball | awk -F'-' '{print $2}') |
491 | full_dir=${tarball%".tar.gz"} |
492 | |
493 | +APP="${FORK:-"mysql"}" |
494 | +if [ $APP = "mysql" ]; then |
495 | + version=$(echo $tarball | awk -F'-' '{print $2}') |
496 | +elif [ $APP = "pxc" ]; then |
497 | + version=$(echo $tarball | awk -F'-' '{print $4}') |
498 | +else |
499 | + echo "Invalid FORK=$APP" >&2 |
500 | + exit 1 |
501 | +fi |
502 | + |
503 | tar xvfz "$tarball" \ |
504 | --wildcards \ |
505 | "$full_dir/COPYING" \ |
506 | @@ -29,7 +38,10 @@ |
507 | "$full_dir/bin/mysqldump" \ |
508 | "$full_dir/bin/mysqld" \ |
509 | "$full_dir/bin/mysqld_safe" \ |
510 | - "$full_dir/bin/safe_mysqld" |
511 | + "$full_dir/bin/safe_mysqld" \ |
512 | + "$full_dir/lib/libgalera_smm.so" \ |
513 | + "$full_dir/bin/clustercheck" \ |
514 | + "$full_dir/bin/wsrep*" |
515 | |
516 | echo "This tarball was created from $tarball. It contains only the files necessary for creating a Percona Toolkit sandbox test server." > $full_dir/README.barebones |
517 | |
518 | @@ -40,7 +52,7 @@ |
519 | arch="i386" |
520 | fi |
521 | |
522 | -bare_dir="mysql-$version-$arch-barebones" |
523 | +bare_dir="$APP-$version-$arch-barebones" |
524 | mv $full_dir $bare_dir |
525 | tar cvfz $bare_dir.tar.gz $bare_dir |
526 | rm -rf $bare_dir |
21:04:23 + attest --verbose t/pt-online-schema-change/pxc.t t/pt-archiver/pxc.t t/pt-table-checksum/pxc.t t/pt-heartbeat/pxc.t
21:04:24 pt-online-schema-change/pxc.t pass
21:04:37 pt-archiver/pxc.t pass
21:04:50 pt-table-checksum/pxc.t pass
21:06:50 pt-heartbeat/pxc.t pass
21:07:51 All tests passed.
21:07:51 https://refute.testnoir.com/percona-toolkit/jobs/percona-toolkit-pxc-5.5/14/results
21:07:51 + EXIT_STATUS=0