Merge lp:~stewart/drizzle/bug911643 into lp:~drizzle-trunk/drizzle/development

Proposed by Stewart Smith
Status: Merged
Approved by: Mark Atwood
Approved revision: 2480
Merged at revision: 2493
Proposed branch: lp:~stewart/drizzle/bug911643
Merge into: lp:~drizzle-trunk/drizzle/development
Diff against target: 140 lines (+102/-2)
5 files modified
plugin/transaction_log/hexdump_transaction_message.cc (+1/-1)
plugin/transaction_log/print_transaction_message.cc (+1/-1)
plugin/transaction_log/tests/r/bug911643.result (+71/-0)
plugin/transaction_log/tests/t/bug911643-master.opt (+1/-0)
plugin/transaction_log/tests/t/bug911643.test (+28/-0)
To merge this branch: bzr merge lp:~stewart/drizzle/bug911643
Reviewer              Review Type    Date Requested    Status
Mark Atwood                                            Pending
Drizzle Merge Team                                     Pending
David Shrewsbury                                       Pending
Review via email: mp+88628@code.launchpad.net

This proposal supersedes a proposal from 2012-01-04.

Description of the change

Yet another odd thing I've hit with the CATALOG work :)

This time it is also fixed so that "make distcheck" works: we were using test/ as the directory for the transaction log, which doesn't work when (without restarting the server) you later try to DROP SCHEMA test, as the drizzledump_restore test does a whole bunch of tests later. The failure mode is sketched below.

i.e. this should merge okay now
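
To make the directory problem concrete, here is a minimal standalone sketch (plain POSIX calls, not Drizzle code; the paths are illustrative): while the transaction log sits inside the schema's directory, that directory cannot be removed when the schema is dropped, so recreating the schema afterwards fails with errno 17 (EEXIST), which matches the "Can't create schema 'test' (errno: 17)" seen in the earlier Jenkins run.

    // Standalone sketch, not Drizzle code: why a transaction log kept inside
    // the schema directory breaks a later DROP SCHEMA / CREATE SCHEMA pair.
    #include <cerrno>
    #include <cstdio>
    #include <cstring>
    #include <fcntl.h>
    #include <sys/stat.h>
    #include <unistd.h>

    int main()
    {
      ::mkdir("test", 0755);                        // CREATE SCHEMA test
      int fd= ::open("test/transaction.log", O_CREAT | O_WRONLY, 0644);
      ::close(fd);                                  // the log now lives in the schema dir

      if (::rmdir("test") != 0)                     // DROP SCHEMA test
        printf("rmdir: %s\n", strerror(errno));     // ENOTEMPTY: the log is still there

      if (::mkdir("test", 0755) != 0)               // CREATE SCHEMA test (the restore step)
        printf("mkdir: %s (errno %d)\n", strerror(errno), errno);  // File exists, errno 17
      return 0;
    }

Pointing --transaction-log.file at ../local/transaction.log (as the new master.opt in the diff does) keeps the log out of any schema directory, so dropping and recreating the test schema can clean up properly.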

Revision history for this message
David Shrewsbury (dshrews) wrote : Posted in a previous version of this proposal

Good catch.

review: Approve
Revision history for this message
Mark Atwood (fallenpegasus) wrote : Posted in a previous version of this proposal

Fails in Jenkins build testing

http://jenkins.drizzle.org/view/Drizzle-build/job/drizzle-build-ubuntu10.10-32bit/1168/console

main.drizzledump_restore [ fail ]
ERROR 1006 (HY000) at line 11: Can't create schema 'test' (errno: 17)
drizzletest: At line 123: command "$DRIZZLE < $drizzledumpfile" failed

The result from queries just before the failure was:
< snip >
#
# Bug#550091 in Drizzle: "drizzledump dumps data_dictionary"
#
CREATE TABLE t1 (a char(100), b int);
INSERT INTO t1 values ('a',100);
INSERT INTO t1 values ('b',200);
CREATE SCHEMA drizzledump_test;
USE drizzledump_test;
CREATE TABLE t1 (a int, b char(100));
INSERT INTO t1 VALUES (1, 'abbazabba');
DROP original schemas
DROP DATABASE test;
DROP DATABASE drizzledump_test;
Ensure we don't have those tables (verify we restored something)
SELECT * FROM test.t1;
Got one of the listed errors
SELECT * FROM drizzledump_test.t1;
Got one of the listed errors
Restoring from dumpfile
exec of './tests/var/../../client/drizzle --no-defaults --host=localhost --user=root --password= --port=9090 < /home/hudson/hudson/workspace/drizzle-build-ubuntu10.10-32bit/drizzle7-2011.11.29.2488/_build/tests/var/tmp/drizzledumpfile.sql' failed, error: 256, status: 1, errno: 11

More results from queries before failure can be found in /home/hudson/hudson/workspace/drizzle-build-ubuntu10.10-32bit/drizzle7-2011.11.29.2488/_build/tests/var/log/drizzledump_restore.log

Stopping All Servers
Restoring snapshot of databases
Resuming Tests

review: Needs Fixing

Preview Diff

=== modified file 'plugin/transaction_log/hexdump_transaction_message.cc'
--- plugin/transaction_log/hexdump_transaction_message.cc 2011-08-22 19:14:08 +0000
+++ plugin/transaction_log/hexdump_transaction_message.cc 2012-01-16 00:41:25 +0000
@@ -99,7 +99,7 @@
   * a pool of TransactionLogReader objects that can be
   * re-used.
   */
-  const string &filename= transaction_log->getLogFilename();
+  const string &filename= transaction_log->getLogFilepath();
   int log_file= open(filename.c_str(), O_RDONLY);
   if (log_file == -1)
   {
=== modified file 'plugin/transaction_log/print_transaction_message.cc'
--- plugin/transaction_log/print_transaction_message.cc 2011-04-14 21:56:22 +0000
+++ plugin/transaction_log/print_transaction_message.cc 2012-01-16 00:41:25 +0000
@@ -99,7 +99,7 @@
   * a pool of TransactionLogReader objects that can be
   * re-used.
   */
-  const string &filename= transaction_log->getLogFilename();
+  const string &filename= transaction_log->getLogFilepath();
   int log_file= open(filename.c_str(), O_RDONLY);
   if (log_file == -1)
   {
=== added file 'plugin/transaction_log/tests/r/bug911643.result'
--- plugin/transaction_log/tests/r/bug911643.result 1970-01-01 00:00:00 +0000
+++ plugin/transaction_log/tests/r/bug911643.result 2012-01-16 00:41:25 +0000
@@ -0,0 +1,71 @@
+CREATE TABLE t1 (
+pk INT NOT NULL AUTO_INCREMENT,
+col_int1 INT,
+col_int2 INT,
+col_int_not_null INT NOT NULL,
+PRIMARY KEY (pk));
+INSERT INTO t1 (col_int1, col_int2, col_int_not_null) VALUES (1,1,1);
+INSERT INTO t1 (col_int1, col_int2, col_int_not_null) VALUES (NULL,1,1);
+INSERT INTO t1 (col_int1, col_int2, col_int_not_null) VALUES (2,1,3);
+SET GLOBAL transaction_log_truncate_debug= true;
+BEGIN;
+UPDATE t1 SET col_int_not_null = col_int1 WHERE col_int2 = 1;
+ERROR 23000: Column 'col_int_not_null' cannot be null
+INSERT INTO t1 (col_int1, col_int2, col_int_not_null) VALUES (5,5,5);
+COMMIT;
+
+We should have a Transaction with a single insert Statement
+SELECT PRINT_TRANSACTION_MESSAGE('test/transaction.log',(select max(entry_offset) from DATA_DICTIONARY.TRANSACTION_LOG_TRANSACTIONS));
+PRINT_TRANSACTION_MESSAGE('test/transaction.log',(select max(entry_offset) from DATA_DICTIONARY.TRANSACTION_LOG_TRANSACTIONS))
+transaction_context {
+  server_id: 1
+  TRANSACTION_ID
+  START_TIMESTAMP
+  END_TIMESTAMP
+}
+statement {
+  type: INSERT
+  START_TIMESTAMP
+  END_TIMESTAMP
+  insert_header {
+    table_metadata {
+      schema_name: "test"
+      table_name: "t1"
+    }
+    field_metadata {
+      type: INTEGER
+      name: "pk"
+    }
+    field_metadata {
+      type: INTEGER
+      name: "col_int1"
+    }
+    field_metadata {
+      type: INTEGER
+      name: "col_int2"
+    }
+    field_metadata {
+      type: INTEGER
+      name: "col_int_not_null"
+    }
+  }
+  insert_data {
+    segment_id: 1
+    end_segment: true
+    record {
+      insert_value: "4"
+      insert_value: "5"
+      insert_value: "5"
+      insert_value: "5"
+      is_null: false
+      is_null: false
+      is_null: false
+      is_null: false
+    }
+  }
+}
+segment_id: 1
+end_segment: true
+
+DROP TABLE t1;
+SET GLOBAL transaction_log_truncate_debug= true;
=== added file 'plugin/transaction_log/tests/t/bug911643-master.opt'
--- plugin/transaction_log/tests/t/bug911643-master.opt 1970-01-01 00:00:00 +0000
+++ plugin/transaction_log/tests/t/bug911643-master.opt 2012-01-16 00:41:25 +0000
@@ -0,0 +1,1 @@
+--transaction-log.enable --scheduler=multi_thread --transaction-message-threshold=131072 --allow-all-authentication.allow_anonymous=true --transaction-log.file=../local/transaction.log
=== added file 'plugin/transaction_log/tests/t/bug911643.test'
--- plugin/transaction_log/tests/t/bug911643.test 1970-01-01 00:00:00 +0000
+++ plugin/transaction_log/tests/t/bug911643.test 2012-01-16 00:41:25 +0000
@@ -0,0 +1,28 @@
+CREATE TABLE t1 (
+ pk INT NOT NULL AUTO_INCREMENT,
+ col_int1 INT,
+ col_int2 INT,
+ col_int_not_null INT NOT NULL,
+ PRIMARY KEY (pk));
+
+INSERT INTO t1 (col_int1, col_int2, col_int_not_null) VALUES (1,1,1);
+INSERT INTO t1 (col_int1, col_int2, col_int_not_null) VALUES (NULL,1,1);
+INSERT INTO t1 (col_int1, col_int2, col_int_not_null) VALUES (2,1,3);
+
+SET GLOBAL transaction_log_truncate_debug= true;
+
+--test with no previous Statement message
+BEGIN;
+--ERROR ER_BAD_NULL_ERROR
+UPDATE t1 SET col_int_not_null = col_int1 WHERE col_int2 = 1;
+INSERT INTO t1 (col_int1, col_int2, col_int_not_null) VALUES (5,5,5);
+COMMIT;
+
+--echo
+--echo We should have a Transaction with a single insert Statement
+--replace_regex /transaction_id: [0-9]+/TRANSACTION_ID/ /start_timestamp: [0-9]+/START_TIMESTAMP/g /end_timestamp: [0-9]+/END_TIMESTAMP/g /creation_timestamp: [0-9]+/CREATE_TIMESTAMP/ /update_timestamp: [0-9]+/UPDATE_TIMESTAMP/
+
+SELECT PRINT_TRANSACTION_MESSAGE('test/transaction.log',(select max(entry_offset) from DATA_DICTIONARY.TRANSACTION_LOG_TRANSACTIONS));
+
+DROP TABLE t1;
+SET GLOBAL transaction_log_truncate_debug= true;