Merge lp:~brianaker/drizzle/refactor into lp:~drizzle-trunk/drizzle/development

Proposed by Brian Aker
Status: Merged
Merged at revision: 1873
Proposed branch: lp:~brianaker/drizzle/refactor
Merge into: lp:~drizzle-trunk/drizzle/development
Diff against target: 3503 lines (+494/-489)
42 files modified
drizzled/cursor.cc (+79/-79)
drizzled/cursor.h (+11/-9)
drizzled/memory/root.cc (+4/-0)
drizzled/memory/root.h (+2/-0)
drizzled/plugin/storage_engine.cc (+2/-2)
drizzled/plugin/storage_engine.h (+2/-2)
drizzled/table.cc (+2/-2)
drizzled/table/instance.cc (+0/-1)
drizzled/table_share.cc (+4/-4)
plugin/archive/archive_engine.h (+1/-1)
plugin/archive/ha_archive.cc (+28/-28)
plugin/archive/ha_archive.h (+1/-1)
plugin/blackhole/ha_blackhole.cc (+5/-5)
plugin/blackhole/ha_blackhole.h (+1/-1)
plugin/blitzdb/blitzcmp.cc (+3/-3)
plugin/blitzdb/ha_blitz.cc (+41/-41)
plugin/blitzdb/ha_blitz.h (+1/-1)
plugin/csv/ha_tina.cc (+7/-7)
plugin/csv/ha_tina.h (+1/-1)
plugin/filesystem_engine/filesystem_engine.cc (+13/-13)
plugin/filesystem_engine/filesystem_engine.h (+1/-1)
plugin/function_engine/cursor.cc (+10/-10)
plugin/function_engine/cursor.h (+1/-1)
plugin/function_engine/function.cc (+1/-1)
plugin/function_engine/function.h (+1/-1)
plugin/haildb/haildb_engine.cc (+63/-63)
plugin/haildb/haildb_engine.h (+1/-1)
plugin/innobase/handler/ha_innodb.cc (+77/-79)
plugin/innobase/handler/ha_innodb.h (+2/-2)
plugin/memory/ha_heap.cc (+24/-24)
plugin/memory/ha_heap.h (+1/-1)
plugin/myisam/ha_myisam.cc (+40/-40)
plugin/myisam/ha_myisam.h (+1/-1)
plugin/pbms/src/ha_pbms.cc (+4/-4)
plugin/pbms/src/ha_pbms.h (+1/-1)
plugin/pbxt/src/ha_pbxt.cc (+48/-48)
plugin/pbxt/src/ha_pbxt.h (+2/-2)
plugin/pbxt/src/ha_xtsys.cc (+2/-2)
plugin/pbxt/src/ha_xtsys.h (+1/-1)
plugin/schema_engine/schema.h (+1/-1)
plugin/tableprototester/tableprototester.cc (+3/-3)
plugin/tableprototester/tableprototester.h (+1/-1)
To merge this branch: bzr merge lp:~brianaker/drizzle/refactor
Reviewer: Drizzle Merge Team (status: Pending)
Review via email: mp+39120@code.launchpad.net
lp:~brianaker/drizzle/refactor updated
1870. By Brian Aker

Merge Monty

1871. By Lee Bieber

Merge Barry - fix bug 657830: PBMS build failure in GCC 4.5

1872. By Brian Aker

Merge in Brian. Key creation should be improved after this (i.e. for share).

1873. By Brian Aker

Merge in an overhaul of how Cursor and Table are handled. Cursor now only knows
about Table, and will always have a table and engine reference.

This cleans up a number of ownership issues, the biggest being that it now
creates the space needed for the next big refactor in locks.
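
In outline, the new ownership model looks like the sketch below (stub types, not the actual Drizzle sources; the names follow the diff that follows): a Cursor is now constructed against a concrete Table instead of a TableShare, holds references to both the table and its storage engine for its whole lifetime, and ha_open() therefore loses its Table* parameter.

namespace sketch {

class StorageEngine {};
class Table {};
class TableIdentifier {};

class Cursor {
  Table &table;            // the current open table, valid for the Cursor's lifetime
  StorageEngine &engine;   // storage engine of this Cursor

public:
  Cursor(StorageEngine &engine_arg, Table &table_arg)
    : table(table_arg), engine(engine_arg) {}

  Table *getTable() const { return &table; }
  StorageEngine *getEngine() const { return &engine; }

  // The old signature was ha_open(identifier, Table *table_arg, mode, test_if_locked);
  // the Table argument is gone because the Cursor already owns a reference.
  int ha_open(const TableIdentifier &, int /* mode */, int /* test_if_locked */)
  {
    return 0;              // stub; the real method forwards to doOpen()
  }
};

} // namespace sketch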

Preview Diff

1=== modified file 'drizzled/cursor.cc'
2--- drizzled/cursor.cc 2010-10-22 00:20:21 +0000
3+++ drizzled/cursor.cc 2010-10-23 00:16:13 +0000
4@@ -54,9 +54,10 @@
5 ** General Cursor functions
6 ****************************************************************************/
7 Cursor::Cursor(plugin::StorageEngine &engine_arg,
8- TableShare &share_arg)
9- : table_share(&share_arg), table(0),
10- estimation_rows_to_insert(0), engine(&engine_arg),
11+ Table &arg)
12+ : table(arg),
13+ engine(engine_arg),
14+ estimation_rows_to_insert(0),
15 ref(0),
16 key_used_on_scan(MAX_KEY), active_index(MAX_KEY),
17 ref_length(sizeof(internal::my_off_t)),
18@@ -79,7 +80,7 @@
19 */
20 Cursor *Cursor::clone(memory::Root *mem_root)
21 {
22- Cursor *new_handler= table->getMutableShare()->db_type()->getCursor(*table->getMutableShare());
23+ Cursor *new_handler= getTable()->getMutableShare()->db_type()->getCursor(*getTable());
24
25 /*
26 Allocate Cursor->ref here because otherwise ha_open will allocate it
27@@ -89,13 +90,12 @@
28 if (!(new_handler->ref= (unsigned char*) mem_root->alloc_root(ALIGN_SIZE(ref_length)*2)))
29 return NULL;
30
31- TableIdentifier identifier(table->getShare()->getSchemaName(),
32- table->getShare()->getTableName(),
33- table->getShare()->getType());
34+ TableIdentifier identifier(getTable()->getShare()->getSchemaName(),
35+ getTable()->getShare()->getTableName(),
36+ getTable()->getShare()->getType());
37
38 if (new_handler && !new_handler->ha_open(identifier,
39- table,
40- table->getDBStat(),
41+ getTable()->getDBStat(),
42 HA_OPEN_IGNORE_IF_LOCKED))
43 return new_handler;
44 return NULL;
45@@ -111,8 +111,8 @@
46 /* works only with key prefixes */
47 assert(((keypart_map_arg + 1) & keypart_map_arg) == 0);
48
49- const KeyPartInfo *key_part_found= table->getShare()->getKeyInfo(key_position).key_part;
50- const KeyPartInfo *end_key_part_found= key_part_found + table->getShare()->getKeyInfo(key_position).key_parts;
51+ const KeyPartInfo *key_part_found= getTable()->getShare()->getKeyInfo(key_position).key_part;
52+ const KeyPartInfo *end_key_part_found= key_part_found + getTable()->getShare()->getKeyInfo(key_position).key_parts;
53 uint32_t length= 0;
54
55 while (key_part_found < end_key_part_found && keypart_map_arg)
56@@ -182,17 +182,17 @@
57
58 bool Cursor::has_transactions()
59 {
60- return (table->getShare()->db_type()->check_flag(HTON_BIT_DOES_TRANSACTIONS));
61+ return (getTable()->getShare()->db_type()->check_flag(HTON_BIT_DOES_TRANSACTIONS));
62 }
63
64 void Cursor::ha_statistic_increment(uint64_t system_status_var::*offset) const
65 {
66- (table->in_use->status_var.*offset)++;
67+ (getTable()->in_use->status_var.*offset)++;
68 }
69
70 void **Cursor::ha_data(Session *session) const
71 {
72- return session->getEngineData(engine);
73+ return session->getEngineData(getEngine());
74 }
75
76 bool Cursor::is_fatal_error(int error, uint32_t flags)
77@@ -208,7 +208,7 @@
78
79 ha_rows Cursor::records() { return stats.records; }
80 uint64_t Cursor::tableSize() { return stats.index_file_length + stats.data_file_length; }
81-uint64_t Cursor::rowSize() { return table->getRecordLength() + table->sizeFields(); }
82+uint64_t Cursor::rowSize() { return getTable()->getRecordLength() + getTable()->sizeFields(); }
83
84 int Cursor::doOpen(const TableIdentifier &identifier, int mode, uint32_t test_if_locked)
85 {
86@@ -222,21 +222,17 @@
87 Don't wait for locks if not HA_OPEN_WAIT_IF_LOCKED is set
88 */
89 int Cursor::ha_open(const TableIdentifier &identifier,
90- Table *table_arg,
91 int mode,
92 int test_if_locked)
93 {
94 int error;
95
96- table= table_arg;
97- assert(table->getShare() == table_share);
98-
99 if ((error= doOpen(identifier, mode, test_if_locked)))
100 {
101 if ((error == EACCES || error == EROFS) && mode == O_RDWR &&
102- (table->db_stat & HA_TRY_READ_ONLY))
103+ (getTable()->db_stat & HA_TRY_READ_ONLY))
104 {
105- table->db_stat|=HA_READ_ONLY;
106+ getTable()->db_stat|=HA_READ_ONLY;
107 error= doOpen(identifier, O_RDONLY,test_if_locked);
108 }
109 }
110@@ -246,12 +242,12 @@
111 }
112 else
113 {
114- if (table->getShare()->db_options_in_use & HA_OPTION_READ_ONLY_DATA)
115- table->db_stat|=HA_READ_ONLY;
116+ if (getTable()->getShare()->db_options_in_use & HA_OPTION_READ_ONLY_DATA)
117+ getTable()->db_stat|=HA_READ_ONLY;
118 (void) extra(HA_EXTRA_NO_READCHECK); // Not needed in SQL
119
120 /* ref is already allocated for us if we're called from Cursor::clone() */
121- if (!ref && !(ref= (unsigned char*) table->alloc_root(ALIGN_SIZE(ref_length)*2)))
122+ if (!ref && !(ref= (unsigned char*) getTable()->alloc_root(ALIGN_SIZE(ref_length)*2)))
123 {
124 close();
125 error=HA_ERR_OUT_OF_MEM;
126@@ -280,7 +276,7 @@
127 TODO remove the test for HA_READ_ORDER
128 */
129 if (stats.deleted < 10 || primary_key >= MAX_KEY ||
130- !(table->index_flags(primary_key) & HA_READ_ORDER))
131+ !(getTable()->index_flags(primary_key) & HA_READ_ORDER))
132 {
133 (void) startTableScan(1);
134 while ((error= rnd_next(buf)) == HA_ERR_RECORD_DELETED) ;
135@@ -328,7 +324,7 @@
136 Session::next_insert_id to be greater than the explicit value.
137 */
138 if ((next_insert_id > 0) && (nr >= next_insert_id))
139- set_next_insert_id(compute_next_insert_id(nr, &table->in_use->variables));
140+ set_next_insert_id(compute_next_insert_id(nr, &getTable()->in_use->variables));
141 }
142
143
144@@ -445,7 +441,7 @@
145 {
146 uint64_t nr, nb_reserved_values;
147 bool append= false;
148- Session *session= table->in_use;
149+ Session *session= getTable()->in_use;
150 struct system_variables *variables= &session->variables;
151
152 /*
153@@ -458,8 +454,8 @@
154 for an auto increment column, not a magic value like NULL is.
155 same as sql_mode=NO_AUTO_VALUE_ON_ZERO */
156
157- if ((nr= table->next_number_field->val_int()) != 0
158- || table->auto_increment_field_not_null)
159+ if ((nr= getTable()->next_number_field->val_int()) != 0
160+ || getTable()->auto_increment_field_not_null)
161 {
162 /*
163 Update next_insert_id if we had already generated a value in this
164@@ -537,14 +533,14 @@
165 nr= compute_next_insert_id(nr-1, variables);
166 }
167
168- if (table->getShare()->next_number_keypart == 0)
169+ if (getTable()->getShare()->next_number_keypart == 0)
170 {
171 /* We must defer the appending until "nr" has been possibly truncated */
172 append= true;
173 }
174 }
175
176- if (unlikely(table->next_number_field->store((int64_t) nr, true)))
177+ if (unlikely(getTable()->next_number_field->store((int64_t) nr, true)))
178 {
179 /*
180 first test if the query was aborted due to strict mode constraints
181@@ -560,9 +556,9 @@
182 bother shifting the right bound (anyway any other value from this
183 interval will cause a duplicate key).
184 */
185- nr= prev_insert_id(table->next_number_field->val_int(), variables);
186- if (unlikely(table->next_number_field->store((int64_t) nr, true)))
187- nr= table->next_number_field->val_int();
188+ nr= prev_insert_id(getTable()->next_number_field->val_int(), variables);
189+ if (unlikely(getTable()->next_number_field->store((int64_t) nr, true)))
190+ nr= getTable()->next_number_field->val_int();
191 }
192 if (append)
193 {
194@@ -616,7 +612,7 @@
195 this statement used forced auto_increment values if there were some,
196 wipe them away for other statements.
197 */
198- table->in_use->auto_inc_intervals_forced.empty();
199+ getTable()->in_use->auto_inc_intervals_forced.empty();
200 }
201 }
202
203@@ -662,10 +658,10 @@
204 * possible resource to gain (and if there is... then there is a bug such
205 * that in_use should have been set.
206 */
207- if (not table || not table->in_use)
208+ if (not getTable()->in_use)
209 return;
210
211- resource_context= table->in_use->getResourceContext(engine);
212+ resource_context= getTable()->in_use->getResourceContext(getEngine());
213 /*
214 When a storage engine method is called, the transaction must
215 have been started, unless it's a DDL call, for which the
216@@ -706,9 +702,9 @@
217 * @todo Make TransactionServices generic to AfterTriggerServices
218 * or similar...
219 */
220- Session *const session= table->in_use;
221+ Session *const session= getTable()->in_use;
222 TransactionServices &transaction_services= TransactionServices::singleton();
223- transaction_services.truncateTable(session, table);
224+ transaction_services.truncateTable(session, getTable());
225 }
226
227 return result;
228@@ -807,7 +803,7 @@
229 int error;
230 if (!(error=index_next(buf)))
231 {
232- ptrdiff_t ptrdiff= buf - table->getInsertRecord();
233+ ptrdiff_t ptrdiff= buf - getTable()->getInsertRecord();
234 unsigned char *save_record_0= NULL;
235 KeyInfo *key_info= NULL;
236 KeyPartInfo *key_part;
237@@ -823,9 +819,9 @@
238 */
239 if (ptrdiff)
240 {
241- save_record_0= table->getInsertRecord();
242- table->record[0]= buf;
243- key_info= table->key_info + active_index;
244+ save_record_0= getTable()->getInsertRecord();
245+ getTable()->record[0]= buf;
246+ key_info= getTable()->key_info + active_index;
247 key_part= key_info->key_part;
248 key_part_end= key_part + key_info->key_parts;
249 for (; key_part < key_part_end; key_part++)
250@@ -835,16 +831,16 @@
251 }
252 }
253
254- if (key_cmp_if_same(table, key, active_index, keylen))
255+ if (key_cmp_if_same(getTable(), key, active_index, keylen))
256 {
257- table->status=STATUS_NOT_FOUND;
258+ getTable()->status=STATUS_NOT_FOUND;
259 error=HA_ERR_END_OF_FILE;
260 }
261
262 /* Move back if necessary. */
263 if (ptrdiff)
264 {
265- table->record[0]= save_record_0;
266+ getTable()->record[0]= save_record_0;
267 for (key_part= key_info->key_part; key_part < key_part_end; key_part++)
268 key_part->field->move_field_offset(-ptrdiff);
269 }
270@@ -881,7 +877,7 @@
271 double Cursor::index_only_read_time(uint32_t keynr, double key_records)
272 {
273 uint32_t keys_per_block= (stats.block_size/2/
274- (table->key_info[keynr].key_length + ref_length) + 1);
275+ (getTable()->key_info[keynr].key_length + ref_length) + 1);
276 return ((double) (key_records + keys_per_block-1) /
277 (double) keys_per_block);
278 }
279@@ -1188,12 +1184,12 @@
280 key_compare_result_on_equal= ((end_key->flag == HA_READ_BEFORE_KEY) ? 1 :
281 (end_key->flag == HA_READ_AFTER_KEY) ? -1 : 0);
282 }
283- range_key_part= table->key_info[active_index].key_part;
284+ range_key_part= getTable()->key_info[active_index].key_part;
285
286 if (!start_key) // Read first record
287- result= index_first(table->getInsertRecord());
288+ result= index_first(getTable()->getInsertRecord());
289 else
290- result= index_read_map(table->getInsertRecord(),
291+ result= index_read_map(getTable()->getInsertRecord(),
292 start_key->key,
293 start_key->keypart_map,
294 start_key->flag);
295@@ -1226,11 +1222,11 @@
296 if (eq_range)
297 {
298 /* We trust that index_next_same always gives a row in range */
299- return(index_next_same(table->getInsertRecord(),
300+ return(index_next_same(getTable()->getInsertRecord(),
301 end_range->key,
302 end_range->length));
303 }
304- result= index_next(table->getInsertRecord());
305+ result= index_next(getTable()->getInsertRecord());
306 if (result)
307 return result;
308 return(compare_key(end_range) <= 0 ? 0 : HA_ERR_END_OF_FILE);
309@@ -1398,18 +1394,18 @@
310 {
311 if (lock_type == F_RDLCK)
312 {
313- DRIZZLE_CURSOR_RDLOCK_START(table_share->getSchemaName(),
314- table_share->getTableName());
315+ DRIZZLE_CURSOR_RDLOCK_START(getTable()->getShare()->getSchemaName(),
316+ getTable()->getShare()->getTableName());
317 }
318 else if (lock_type == F_WRLCK)
319 {
320- DRIZZLE_CURSOR_WRLOCK_START(table_share->getSchemaName(),
321- table_share->getTableName());
322+ DRIZZLE_CURSOR_WRLOCK_START(getTable()->getShare()->getSchemaName(),
323+ getTable()->getShare()->getTableName());
324 }
325 else if (lock_type == F_UNLCK)
326 {
327- DRIZZLE_CURSOR_UNLOCK_START(table_share->getSchemaName(),
328- table_share->getTableName());
329+ DRIZZLE_CURSOR_UNLOCK_START(getTable()->getShare()->getSchemaName(),
330+ getTable()->getShare()->getTableName());
331 }
332 }
333
334@@ -1448,14 +1444,14 @@
335 int Cursor::ha_reset()
336 {
337 /* Check that we have called all proper deallocation functions */
338- assert(! table->getShare()->all_set.none());
339- assert(table->key_read == 0);
340+ assert(! getTable()->getShare()->all_set.none());
341+ assert(getTable()->key_read == 0);
342 /* ensure that ha_index_end / endTableScan has been called */
343 assert(inited == NONE);
344 /* Free cache used by filesort */
345- table->free_io_cache();
346+ getTable()->free_io_cache();
347 /* reset the bitmaps to point to defaults */
348- table->default_column_bitmaps();
349+ getTable()->default_column_bitmaps();
350 return(reset());
351 }
352
353@@ -1470,22 +1466,22 @@
354 * @TODO Technically, the below two lines can be take even further out of the
355 * Cursor interface and into the fill_record() method.
356 */
357- if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_INSERT)
358+ if (getTable()->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_INSERT)
359 {
360- table->timestamp_field->set_time();
361+ getTable()->timestamp_field->set_time();
362 }
363
364- DRIZZLE_INSERT_ROW_START(table_share->getSchemaName(), table_share->getTableName());
365+ DRIZZLE_INSERT_ROW_START(getTable()->getShare()->getSchemaName(), getTable()->getShare()->getTableName());
366 setTransactionReadWrite();
367
368- if (unlikely(plugin::EventObserver::beforeInsertRecord(*table, buf)))
369+ if (unlikely(plugin::EventObserver::beforeInsertRecord(*getTable(), buf)))
370 {
371 error= ER_EVENT_OBSERVER_PLUGIN;
372 }
373 else
374 {
375 error= doInsertRecord(buf);
376- if (unlikely(plugin::EventObserver::afterInsertRecord(*table, buf, error)))
377+ if (unlikely(plugin::EventObserver::afterInsertRecord(*getTable(), buf, error)))
378 {
379 error= ER_EVENT_OBSERVER_PLUGIN;
380 }
381@@ -1500,7 +1496,7 @@
382 return error;
383 }
384
385- if (unlikely(log_row_for_replication(table, NULL, buf)))
386+ if (unlikely(log_row_for_replication(getTable(), NULL, buf)))
387 return HA_ERR_RBR_LOGGING_FAILED;
388
389 return 0;
390@@ -1515,23 +1511,23 @@
391 Some storage engines require that the new record is in getInsertRecord()
392 (and the old record is in getUpdateRecord()).
393 */
394- assert(new_data == table->getInsertRecord());
395+ assert(new_data == getTable()->getInsertRecord());
396
397- DRIZZLE_UPDATE_ROW_START(table_share->getSchemaName(), table_share->getTableName());
398+ DRIZZLE_UPDATE_ROW_START(getTable()->getShare()->getSchemaName(), getTable()->getShare()->getTableName());
399 setTransactionReadWrite();
400- if (unlikely(plugin::EventObserver::beforeUpdateRecord(*table, old_data, new_data)))
401+ if (unlikely(plugin::EventObserver::beforeUpdateRecord(*getTable(), old_data, new_data)))
402 {
403 error= ER_EVENT_OBSERVER_PLUGIN;
404 }
405 else
406 {
407- if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_UPDATE)
408+ if (getTable()->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_UPDATE)
409 {
410- table->timestamp_field->set_time();
411+ getTable()->timestamp_field->set_time();
412 }
413
414 error= doUpdateRecord(old_data, new_data);
415- if (unlikely(plugin::EventObserver::afterUpdateRecord(*table, old_data, new_data, error)))
416+ if (unlikely(plugin::EventObserver::afterUpdateRecord(*getTable(), old_data, new_data, error)))
417 {
418 error= ER_EVENT_OBSERVER_PLUGIN;
419 }
420@@ -1546,26 +1542,30 @@
421 return error;
422 }
423
424- if (unlikely(log_row_for_replication(table, old_data, new_data)))
425+ if (unlikely(log_row_for_replication(getTable(), old_data, new_data)))
426 return HA_ERR_RBR_LOGGING_FAILED;
427
428 return 0;
429 }
430+TableShare *Cursor::getShare()
431+{
432+ return getTable()->getMutableShare();
433+}
434
435 int Cursor::deleteRecord(const unsigned char *buf)
436 {
437 int error;
438
439- DRIZZLE_DELETE_ROW_START(table_share->getSchemaName(), table_share->getTableName());
440+ DRIZZLE_DELETE_ROW_START(getTable()->getShare()->getSchemaName(), getTable()->getShare()->getTableName());
441 setTransactionReadWrite();
442- if (unlikely(plugin::EventObserver::beforeDeleteRecord(*table, buf)))
443+ if (unlikely(plugin::EventObserver::beforeDeleteRecord(*getTable(), buf)))
444 {
445 error= ER_EVENT_OBSERVER_PLUGIN;
446 }
447 else
448 {
449 error= doDeleteRecord(buf);
450- if (unlikely(plugin::EventObserver::afterDeleteRecord(*table, buf, error)))
451+ if (unlikely(plugin::EventObserver::afterDeleteRecord(*getTable(), buf, error)))
452 {
453 error= ER_EVENT_OBSERVER_PLUGIN;
454 }
455@@ -1578,7 +1578,7 @@
456 if (unlikely(error))
457 return error;
458
459- if (unlikely(log_row_for_replication(table, buf, NULL)))
460+ if (unlikely(log_row_for_replication(getTable(), buf, NULL)))
461 return HA_ERR_RBR_LOGGING_FAILED;
462
463 return 0;
464
465=== modified file 'drizzled/cursor.h'
466--- drizzled/cursor.h 2010-10-22 00:20:21 +0000
467+++ drizzled/cursor.h 2010-10-23 00:16:13 +0000
468@@ -145,23 +145,25 @@
469 */
470 class Cursor
471 {
472+ Table &table; /* The current open table */
473+ plugin::StorageEngine &engine; /* storage engine of this Cursor */
474+
475 protected:
476- TableShare *table_share; /* The table definition */
477- Table *table; /* The current open table */
478-
479 ha_rows estimation_rows_to_insert;
480- plugin::StorageEngine *engine; /* storage engine of this Cursor */
481+
482 public:
483 inline plugin::StorageEngine *getEngine() const /* table_type for handler */
484 {
485- return engine;
486+ return &engine;
487 }
488 unsigned char *ref; /* Pointer to current row */
489 unsigned char *dup_ref; /* Pointer to duplicate row */
490
491- TableShare *getShare() const
492+ TableShare *getShare();
493+
494+ Table *getTable() const
495 {
496- return table_share;
497+ return &table;
498 }
499
500 ha_statistics stats;
501@@ -222,13 +224,13 @@
502 */
503 Discrete_interval auto_inc_interval_for_cur_row;
504
505- Cursor(plugin::StorageEngine &engine_arg, TableShare &share_arg);
506+ Cursor(plugin::StorageEngine &engine_arg, Table &share_arg);
507 virtual ~Cursor(void);
508 virtual Cursor *clone(memory::Root *mem_root);
509
510 /* ha_ methods: pubilc wrappers for private virtual API */
511
512- int ha_open(const TableIdentifier &identifier, Table *table, int mode, int test_if_locked);
513+ int ha_open(const TableIdentifier &identifier, int mode, int test_if_locked);
514 int startIndexScan(uint32_t idx, bool sorted);
515 int endIndexScan();
516 int startTableScan(bool scan);
517
518=== modified file 'drizzled/memory/root.cc'
519--- drizzled/memory/root.cc 2010-10-02 21:15:42 +0000
520+++ drizzled/memory/root.cc 2010-10-23 00:16:13 +0000
521@@ -61,6 +61,10 @@
522 first_block_usage= 0;
523 }
524
525+memory::Root::~Root()
526+{
527+}
528+
529
530 /**
531 * @details
532
533=== modified file 'drizzled/memory/root.h'
534--- drizzled/memory/root.h 2010-10-02 21:15:42 +0000
535+++ drizzled/memory/root.h 2010-10-23 00:16:13 +0000
536@@ -85,6 +85,8 @@
537 error_handler= 0;
538 }
539
540+ ~Root();
541+
542 /**
543 * blocks with free memory in it
544 */
545
546=== modified file 'drizzled/plugin/storage_engine.cc'
547--- drizzled/plugin/storage_engine.cc 2010-10-20 01:27:24 +0000
548+++ drizzled/plugin/storage_engine.cc 2010-10-23 00:16:13 +0000
549@@ -545,9 +545,9 @@
550 return(error != 0);
551 }
552
553-Cursor *StorageEngine::getCursor(TableShare &share)
554+Cursor *StorageEngine::getCursor(Table &arg)
555 {
556- return create(share);
557+ return create(arg);
558 }
559
560 class AddTableIdentifier :
561
562=== modified file 'drizzled/plugin/storage_engine.h'
563--- drizzled/plugin/storage_engine.h 2010-10-09 01:10:07 +0000
564+++ drizzled/plugin/storage_engine.h 2010-10-23 00:16:13 +0000
565@@ -251,7 +251,7 @@
566 {
567 return 0;
568 }
569- virtual Cursor *create(TableShare &)= 0;
570+ virtual Cursor *create(Table &)= 0;
571 /* args: path */
572 virtual bool flush_logs() { return false; }
573 virtual bool show_status(Session *, stat_print_fn *, enum ha_stat_type)
574@@ -365,7 +365,7 @@
575
576 static void removeLostTemporaryTables(Session &session, const char *directory);
577
578- Cursor *getCursor(TableShare &share);
579+ Cursor *getCursor(Table &share);
580
581 uint32_t max_record_length() const
582 { return std::min((unsigned int)HA_MAX_REC_LENGTH, max_supported_record_length()); }
583
584=== modified file 'drizzled/table.cc'
585--- drizzled/table.cc 2010-10-21 20:56:58 +0000
586+++ drizzled/table.cc 2010-10-23 00:16:13 +0000
587@@ -1063,7 +1063,7 @@
588 (select_options & (OPTION_BIG_TABLES | SELECT_SMALL_RESULT)) == OPTION_BIG_TABLES)
589 {
590 table->getMutableShare()->storage_engine= myisam_engine;
591- table->cursor= table->getMutableShare()->db_type()->getCursor(*table->getMutableShare());
592+ table->cursor= table->getMutableShare()->db_type()->getCursor(*table);
593 if (group &&
594 (param->group_parts > table->cursor->getEngine()->max_key_parts() ||
595 param->group_length > table->cursor->getEngine()->max_key_length()))
596@@ -1074,7 +1074,7 @@
597 else
598 {
599 table->getMutableShare()->storage_engine= heap_engine;
600- table->cursor= table->getMutableShare()->db_type()->getCursor(*table->getMutableShare());
601+ table->cursor= table->getMutableShare()->db_type()->getCursor(*table);
602 }
603 if (! table->cursor)
604 goto err;
605
606=== modified file 'drizzled/table/instance.cc'
607--- drizzled/table/instance.cc 2010-10-21 08:01:16 +0000
608+++ drizzled/table/instance.cc 2010-10-23 00:16:13 +0000
609@@ -42,7 +42,6 @@
610
611 TableIdentifier identifier(getShare()->getSchemaName(), getShare()->getTableName(), getShare()->getPath());
612 if ((error=cursor->ha_open(identifier,
613- this,
614 O_RDWR,
615 HA_OPEN_TMP_TABLE | HA_OPEN_INTERNAL_TABLE)))
616 {
617
618=== modified file 'drizzled/table_share.cc'
619--- drizzled/table_share.cc 2010-10-22 00:26:57 +0000
620+++ drizzled/table_share.cc 2010-10-23 00:16:13 +0000
621@@ -1873,7 +1873,7 @@
622 outparam.setAlias(alias);
623
624 /* Allocate Cursor */
625- if (not (outparam.cursor= db_type()->getCursor(*this)))
626+ if (not (outparam.cursor= db_type()->getCursor(outparam)))
627 return local_error;
628
629 local_error= 4;
630@@ -2011,9 +2011,9 @@
631 assert(!(db_stat & HA_WAIT_IF_LOCKED));
632 int ha_err;
633
634- if ((ha_err= (outparam.cursor->ha_open(identifier, &outparam,
635- (db_stat & HA_READ_ONLY ? O_RDONLY : O_RDWR),
636- (db_stat & HA_OPEN_TEMPORARY ? HA_OPEN_TMP_TABLE : HA_OPEN_IGNORE_IF_LOCKED) | ha_open_flags))))
637+ if ((ha_err= (outparam.cursor->ha_open(identifier,
638+ (db_stat & HA_READ_ONLY ? O_RDONLY : O_RDWR),
639+ (db_stat & HA_OPEN_TEMPORARY ? HA_OPEN_TMP_TABLE : HA_OPEN_IGNORE_IF_LOCKED) | ha_open_flags))))
640 {
641 switch (ha_err)
642 {
643
644=== modified file 'plugin/archive/archive_engine.h'
645--- plugin/archive/archive_engine.h 2010-10-02 21:15:42 +0000
646+++ plugin/archive/archive_engine.h 2010-10-23 00:16:13 +0000
647@@ -87,7 +87,7 @@
648 return _mutex;
649 }
650
651- virtual drizzled::Cursor *create(drizzled::TableShare &table)
652+ virtual drizzled::Cursor *create(drizzled::Table &table)
653 {
654 return new ha_archive(*this, table);
655 }
656
657=== modified file 'plugin/archive/ha_archive.cc'
658--- plugin/archive/ha_archive.cc 2010-10-02 21:15:42 +0000
659+++ plugin/archive/ha_archive.cc 2010-10-23 00:16:13 +0000
660@@ -196,7 +196,7 @@
661
662
663 ha_archive::ha_archive(drizzled::plugin::StorageEngine &engine_arg,
664- TableShare &table_arg)
665+ Table &table_arg)
666 :Cursor(engine_arg, table_arg), delayed_insert(0), bulk_insert(0)
667 {
668 /* Set our original buffer from pre-allocated memory */
669@@ -294,7 +294,7 @@
670 */
671 ArchiveShare *ha_archive::get_share(const char *table_name, int *rc)
672 {
673- ArchiveEngine *a_engine= static_cast<ArchiveEngine *>(engine);
674+ ArchiveEngine *a_engine= static_cast<ArchiveEngine *>(getEngine());
675
676 pthread_mutex_lock(&a_engine->mutex());
677
678@@ -339,7 +339,7 @@
679 */
680 int ha_archive::free_share()
681 {
682- ArchiveEngine *a_engine= static_cast<ArchiveEngine *>(engine);
683+ ArchiveEngine *a_engine= static_cast<ArchiveEngine *>(getEngine());
684
685 pthread_mutex_lock(&a_engine->mutex());
686 if (!--share->use_count)
687@@ -438,7 +438,7 @@
688
689 assert(share);
690
691- record_buffer.resize(table->getShare()->getRecordLength() + ARCHIVE_ROW_HEADER_SIZE);
692+ record_buffer.resize(getTable()->getShare()->getRecordLength() + ARCHIVE_ROW_HEADER_SIZE);
693
694 lock.init(&share->_lock);
695
696@@ -624,15 +624,15 @@
697
698 uint32_t ha_archive::max_row_length(const unsigned char *)
699 {
700- uint32_t length= (uint32_t)(table->getRecordLength() + table->sizeFields()*2);
701+ uint32_t length= (uint32_t)(getTable()->getRecordLength() + getTable()->sizeFields()*2);
702 length+= ARCHIVE_ROW_HEADER_SIZE;
703
704 uint32_t *ptr, *end;
705- for (ptr= table->getBlobField(), end=ptr + table->sizeBlobFields();
706+ for (ptr= getTable()->getBlobField(), end=ptr + getTable()->sizeBlobFields();
707 ptr != end ;
708 ptr++)
709 {
710- length += 2 + ((Field_blob*)table->getField(*ptr))->get_length();
711+ length += 2 + ((Field_blob*)getTable()->getField(*ptr))->get_length();
712 }
713
714 return length;
715@@ -647,10 +647,10 @@
716 return(HA_ERR_OUT_OF_MEM);
717
718 /* Copy null bits */
719- memcpy(&record_buffer[0], record, table->getShare()->null_bytes);
720- ptr= &record_buffer[0] + table->getShare()->null_bytes;
721+ memcpy(&record_buffer[0], record, getTable()->getShare()->null_bytes);
722+ ptr= &record_buffer[0] + getTable()->getShare()->null_bytes;
723
724- for (Field **field=table->getFields() ; *field ; field++)
725+ for (Field **field=getTable()->getFields() ; *field ; field++)
726 {
727 if (!((*field)->is_null()))
728 ptr= (*field)->pack(ptr, record + (*field)->offset(record));
729@@ -674,7 +674,7 @@
730 int rc;
731 unsigned char *read_buf= NULL;
732 uint64_t temp_auto;
733- unsigned char *record= table->getInsertRecord();
734+ unsigned char *record= getTable()->getInsertRecord();
735
736 if (share->crashed)
737 return(HA_ERR_CRASHED_ON_USAGE);
738@@ -686,17 +686,17 @@
739 return(HA_ERR_CRASHED_ON_USAGE);
740
741
742- if (table->next_number_field && record == table->getInsertRecord())
743+ if (getTable()->next_number_field && record == getTable()->getInsertRecord())
744 {
745 update_auto_increment();
746- temp_auto= table->next_number_field->val_int();
747+ temp_auto= getTable()->next_number_field->val_int();
748
749 /*
750 We don't support decremening auto_increment. They make the performance
751 just cry.
752 */
753 if (temp_auto <= share->archive_write.auto_increment &&
754- table->getShare()->getKeyInfo(0).flags & HA_NOSAME)
755+ getTable()->getShare()->getKeyInfo(0).flags & HA_NOSAME)
756 {
757 rc= HA_ERR_FOUND_DUPP_KEY;
758 goto error;
759@@ -748,7 +748,7 @@
760 {
761 int rc;
762 bool found= 0;
763- current_k_offset= table->getShare()->getKeyInfo(0).key_part->offset;
764+ current_k_offset= getTable()->getShare()->getKeyInfo(0).key_part->offset;
765 current_key= key;
766 current_key_len= key_len;
767
768@@ -853,13 +853,13 @@
769 }
770
771 /* Copy null bits */
772- memcpy(record, ptr, table->getNullBytes());
773- ptr+= table->getNullBytes();
774- for (Field **field= table->getFields() ; *field ; field++)
775+ memcpy(record, ptr, getTable()->getNullBytes());
776+ ptr+= getTable()->getNullBytes();
777+ for (Field **field= getTable()->getFields() ; *field ; field++)
778 {
779 if (!((*field)->is_null()))
780 {
781- ptr= (*field)->unpack(record + (*field)->offset(table->getInsertRecord()), ptr);
782+ ptr= (*field)->unpack(record + (*field)->offset(getTable()->getInsertRecord()), ptr);
783 }
784 }
785 return(0);
786@@ -894,7 +894,7 @@
787 current_position= aztell(&archive);
788 rc= get_row(&archive, buf);
789
790- table->status=rc ? STATUS_NOT_FOUND: 0;
791+ getTable()->status=rc ? STATUS_NOT_FOUND: 0;
792
793 return(rc);
794 }
795@@ -1014,26 +1014,26 @@
796
797 for (uint64_t x= 0; x < rows_restored ; x++)
798 {
799- rc= get_row(&archive, table->getInsertRecord());
800+ rc= get_row(&archive, getTable()->getInsertRecord());
801
802 if (rc != 0)
803 break;
804
805- real_write_row(table->getInsertRecord(), &writer);
806+ real_write_row(getTable()->getInsertRecord(), &writer);
807 /*
808 Long term it should be possible to optimize this so that
809 it is not called on each row.
810 */
811- if (table->found_next_number_field)
812+ if (getTable()->found_next_number_field)
813 {
814- Field *field= table->found_next_number_field;
815+ Field *field= getTable()->found_next_number_field;
816
817 /* Since we will need to use field to translate, we need to flip its read bit */
818 field->setReadSet();
819
820 uint64_t auto_value=
821- (uint64_t) field->val_int(table->getInsertRecord() +
822- field->offset(table->getInsertRecord()));
823+ (uint64_t) field->val_int(getTable()->getInsertRecord() +
824+ field->offset(getTable()->getInsertRecord()));
825 if (share->archive_write.auto_increment < auto_value)
826 stats.auto_increment_value=
827 (share->archive_write.auto_increment= auto_value) + 1;
828@@ -1147,7 +1147,7 @@
829
830 stat(share->data_file_name.c_str(), &file_stat);
831
832- stats.mean_rec_length= table->getRecordLength()+ buffer.alloced_length();
833+ stats.mean_rec_length= getTable()->getRecordLength()+ buffer.alloced_length();
834 stats.data_file_length= file_stat.st_size;
835 stats.create_time= file_stat.st_ctime;
836 stats.update_time= file_stat.st_mtime;
837@@ -1229,7 +1229,7 @@
838 read_data_header(&archive);
839 for (uint64_t x= 0; x < share->archive_write.rows; x++)
840 {
841- rc= get_row(&archive, table->getInsertRecord());
842+ rc= get_row(&archive, getTable()->getInsertRecord());
843
844 if (rc != 0)
845 break;
846
847=== modified file 'plugin/archive/ha_archive.h'
848--- plugin/archive/ha_archive.h 2010-10-02 21:15:42 +0000
849+++ plugin/archive/ha_archive.h 2010-10-23 00:16:13 +0000
850@@ -79,7 +79,7 @@
851
852 public:
853 ha_archive(drizzled::plugin::StorageEngine &engine_arg,
854- drizzled::TableShare &table_arg);
855+ drizzled::Table &table_arg);
856 ~ha_archive()
857 { }
858
859
860=== modified file 'plugin/blackhole/ha_blackhole.cc'
861--- plugin/blackhole/ha_blackhole.cc 2010-10-02 21:15:42 +0000
862+++ plugin/blackhole/ha_blackhole.cc 2010-10-23 00:16:13 +0000
863@@ -68,7 +68,7 @@
864 pthread_mutex_destroy(&blackhole_mutex);
865 }
866
867- virtual Cursor *create(TableShare &table)
868+ virtual Cursor *create(Table &table)
869 {
870 return new ha_blackhole(*this, table);
871 }
872@@ -191,7 +191,7 @@
873 *****************************************************************************/
874
875 ha_blackhole::ha_blackhole(drizzled::plugin::StorageEngine &engine_arg,
876- TableShare &table_arg)
877+ Table &table_arg)
878 :Cursor(engine_arg, table_arg), share(NULL)
879 { }
880
881@@ -318,7 +318,7 @@
882
883 int ha_blackhole::doInsertRecord(unsigned char *)
884 {
885- return(table->next_number_field ? update_auto_increment() : 0);
886+ return(getTable()->next_number_field ? update_auto_increment() : 0);
887 }
888
889 int ha_blackhole::doStartTableScan(bool)
890@@ -404,7 +404,7 @@
891 {
892 pthread_mutex_lock(&blackhole_mutex);
893
894- BlackholeEngine *a_engine= static_cast<BlackholeEngine *>(engine);
895+ BlackholeEngine *a_engine= static_cast<BlackholeEngine *>(getEngine());
896 share= a_engine->findOpenTable(table_name);
897
898 if (share == NULL)
899@@ -429,7 +429,7 @@
900 pthread_mutex_lock(&blackhole_mutex);
901 if (!--share->use_count)
902 {
903- BlackholeEngine *a_engine= static_cast<BlackholeEngine *>(engine);
904+ BlackholeEngine *a_engine= static_cast<BlackholeEngine *>(getEngine());
905 a_engine->deleteOpenTable(share->table_name);
906 delete share;
907 }
908
909=== modified file 'plugin/blackhole/ha_blackhole.h'
910--- plugin/blackhole/ha_blackhole.h 2010-10-02 21:15:42 +0000
911+++ plugin/blackhole/ha_blackhole.h 2010-10-23 00:16:13 +0000
912@@ -51,7 +51,7 @@
913
914 public:
915 ha_blackhole(drizzled::plugin::StorageEngine &engine,
916- drizzled::TableShare &table_arg);
917+ drizzled::Table &table_arg);
918 ~ha_blackhole()
919 {}
920
921
922=== modified file 'plugin/blitzdb/blitzcmp.cc'
923--- plugin/blitzdb/blitzcmp.cc 2010-10-12 02:56:36 +0000
924+++ plugin/blitzdb/blitzcmp.cc 2010-10-23 00:16:13 +0000
925@@ -35,7 +35,7 @@
926 /* For now, we are only interested in supporting a PRIMARY KEY. In the
927 next phase of BlitzDB, this should loop through the key array. */
928 if (share->primary_key_exists) {
929- KeyInfo *pk = &table->key_info[table->getShare()->getPrimaryKey()];
930+ KeyInfo *pk = &getTable()->key_info[getTable()->getShare()->getPrimaryKey()];
931 KeyPartInfo *key_part = pk->key_part;
932 KeyPartInfo *key_part_end = key_part + pk->key_parts;
933 int key_changed = 0;
934@@ -67,13 +67,13 @@
935 would violate the unique contraint. */
936 if (key_changed) {
937 key = key_buffer;
938- key_len = make_index_key(key, table->getMutableShare()->getPrimaryKey(), new_row);
939+ key_len = make_index_key(key, getTable()->getMutableShare()->getPrimaryKey(), new_row);
940 fetched = share->dict.get_row(key, key_len, &fetched_len);
941
942 /* Key Exists. It's a violation. */
943 if (fetched != NULL) {
944 free(fetched);
945- this->errkey_id = table->getShare()->getPrimaryKey();
946+ this->errkey_id = getTable()->getShare()->getPrimaryKey();
947 return HA_ERR_FOUND_DUPP_KEY;
948 }
949 }
950
951=== modified file 'plugin/blitzdb/ha_blitz.cc'
952--- plugin/blitzdb/ha_blitz.cc 2010-10-12 02:56:36 +0000
953+++ plugin/blitzdb/ha_blitz.cc 2010-10-23 00:16:13 +0000
954@@ -54,7 +54,7 @@
955 tcmapdel(blitz_table_cache);
956 }
957
958- virtual drizzled::Cursor *create(drizzled::TableShare &table) {
959+ virtual drizzled::Cursor *create(drizzled::Table &table) {
960 return new ha_blitz(*this, table);
961 }
962
963@@ -365,7 +365,7 @@
964 }
965
966 ha_blitz::ha_blitz(drizzled::plugin::StorageEngine &engine_arg,
967- TableShare &table_arg) : Cursor(engine_arg, table_arg),
968+ Table &table_arg) : Cursor(engine_arg, table_arg),
969 btree_cursor(NULL),
970 table_scan(false),
971 table_based(false),
972@@ -421,7 +421,7 @@
973 will use to uniquely identify a row. The actual allocation is
974 done by the kernel so all we do here is specify the size of it.*/
975 if (share->primary_key_exists) {
976- ref_length = table->key_info[table->getShare()->getPrimaryKey()].key_length;
977+ ref_length = getTable()->key_info[getTable()->getShare()->getPrimaryKey()].key_length;
978 } else {
979 ref_length = sizeof(held_key_len) + sizeof(uint64_t);
980 }
981@@ -460,7 +460,7 @@
982
983 int ha_blitz::doStartTableScan(bool scan) {
984 /* Obtain the query type for this scan */
985- sql_command_type = session_sql_command(table->getSession());
986+ sql_command_type = session_sql_command(getTable()->getSession());
987 table_scan = scan;
988 table_based = true;
989
990@@ -488,7 +488,7 @@
991 held_key = NULL;
992
993 if (current_key == NULL) {
994- table->status = STATUS_NOT_FOUND;
995+ getTable()->status = STATUS_NOT_FOUND;
996 return HA_ERR_END_OF_FILE;
997 }
998
999@@ -519,7 +519,7 @@
1000 /* It is now memory-leak-safe to point current_key to next_key. */
1001 current_key = next_key;
1002 current_key_len = next_key_len;
1003- table->status = 0;
1004+ getTable()->status = 0;
1005 return 0;
1006 }
1007
1008@@ -585,7 +585,7 @@
1009
1010 int ha_blitz::doStartIndexScan(uint32_t key_num, bool) {
1011 active_index = key_num;
1012- sql_command_type = session_sql_command(table->getSession());
1013+ sql_command_type = session_sql_command(getTable()->getSession());
1014
1015 /* This is unlikely to happen but just for assurance, re-obtain
1016 the lock if this thread already has a certain lock. This makes
1017@@ -630,7 +630,7 @@
1018 bt_key = btree_cursor[active_index].next_key(&bt_klen);
1019
1020 if (bt_key == NULL) {
1021- table->status = STATUS_NOT_FOUND;
1022+ getTable()->status = STATUS_NOT_FOUND;
1023 return HA_ERR_END_OF_FILE;
1024 }
1025
1026@@ -639,7 +639,7 @@
1027
1028 if ((row = share->dict.get_row(dict_key, dict_klen, &rlen)) == NULL) {
1029 free(bt_key);
1030- table->status = STATUS_NOT_FOUND;
1031+ getTable()->status = STATUS_NOT_FOUND;
1032 return HA_ERR_KEY_NOT_FOUND;
1033 }
1034
1035@@ -794,14 +794,14 @@
1036 ha_statistic_increment(&system_status_var::ha_write_count);
1037
1038 /* Prepare Auto Increment field if one exists. */
1039- if (table->next_number_field && drizzle_row == table->getInsertRecord()) {
1040+ if (getTable()->next_number_field && drizzle_row == getTable()->getInsertRecord()) {
1041 pthread_mutex_lock(&blitz_utility_mutex);
1042 if ((rv = update_auto_increment()) != 0) {
1043 pthread_mutex_unlock(&blitz_utility_mutex);
1044 return rv;
1045 }
1046
1047- uint64_t next_val = table->next_number_field->val_int();
1048+ uint64_t next_val = getTable()->next_number_field->val_int();
1049
1050 if (next_val > share->auto_increment_value) {
1051 share->auto_increment_value = next_val;
1052@@ -932,7 +932,7 @@
1053 /* Now write the new key. */
1054 prefix_len = make_index_key(key_buffer, i, new_row);
1055
1056- if (i == table->getShare()->getPrimaryKey()) {
1057+ if (i == getTable()->getShare()->getPrimaryKey()) {
1058 key = merge_key(key_buffer, prefix_len, key_buffer, prefix_len, &klen);
1059 rv = share->btrees[i].write(key, klen);
1060 } else {
1061@@ -959,13 +959,13 @@
1062 if (table_based) {
1063 rv = share->dict.write_row(held_key, held_key_len, row_buf, row_len);
1064 } else {
1065- int klen = make_index_key(key_buffer, table->getShare()->getPrimaryKey(), old_row);
1066+ int klen = make_index_key(key_buffer, getTable()->getShare()->getPrimaryKey(), old_row);
1067
1068 /* Delete with the old key. */
1069 share->dict.delete_row(key_buffer, klen);
1070
1071 /* Write with the new key. */
1072- klen = make_index_key(key_buffer, table->getShare()->getPrimaryKey(), new_row);
1073+ klen = make_index_key(key_buffer, getTable()->getShare()->getPrimaryKey(), new_row);
1074 rv = share->dict.write_row(key_buffer, klen, row_buf, row_len);
1075 }
1076
1077@@ -1057,12 +1057,12 @@
1078 }
1079
1080 uint32_t ha_blitz::max_row_length(void) {
1081- uint32_t length = (table->getRecordLength() + table->sizeFields() * 2);
1082- uint32_t *pos = table->getBlobField();
1083- uint32_t *end = pos + table->sizeBlobFields();
1084+ uint32_t length = (getTable()->getRecordLength() + getTable()->sizeFields() * 2);
1085+ uint32_t *pos = getTable()->getBlobField();
1086+ uint32_t *end = pos + getTable()->sizeBlobFields();
1087
1088 while (pos != end) {
1089- length += 2 + ((Field_blob *)table->getField(*pos))->get_length();
1090+ length += 2 + ((Field_blob *)getTable()->getField(*pos))->get_length();
1091 pos++;
1092 }
1093
1094@@ -1079,12 +1079,12 @@
1095 /* Getting here means that there is a PK in this table. Get the
1096 binary representation of the PK, pack it to BlitzDB's key buffer
1097 and return the size of it. */
1098- return make_index_key(pack_to, table->getShare()->getPrimaryKey(), row);
1099+ return make_index_key(pack_to, getTable()->getShare()->getPrimaryKey(), row);
1100 }
1101
1102 size_t ha_blitz::make_index_key(char *pack_to, int key_num,
1103 const unsigned char *row) {
1104- KeyInfo *key = &table->key_info[key_num];
1105+ KeyInfo *key = &getTable()->key_info[key_num];
1106 KeyPartInfo *key_part = key->key_part;
1107 KeyPartInfo *key_part_end = key_part + key->key_parts;
1108
1109@@ -1145,7 +1145,7 @@
1110 }
1111
1112 size_t ha_blitz::btree_key_length(const char *key, const int key_num) {
1113- KeyInfo *key_info = &table->key_info[key_num];
1114+ KeyInfo *key_info = &getTable()->key_info[key_num];
1115 KeyPartInfo *key_part = key_info->key_part;
1116 KeyPartInfo *key_part_end = key_part + key_info->key_parts;
1117 char *pos = (char *)key;
1118@@ -1185,7 +1185,7 @@
1119 /* Converts a native Drizzle index key to BlitzDB's format. */
1120 char *ha_blitz::native_to_blitz_key(const unsigned char *native_key,
1121 const int key_num, int *return_key_len) {
1122- KeyInfo *key = &table->key_info[key_num];
1123+ KeyInfo *key = &getTable()->key_info[key_num];
1124 KeyPartInfo *key_part = key->key_part;
1125 KeyPartInfo *key_part_end = key_part + key->key_parts;
1126
1127@@ -1240,16 +1240,16 @@
1128
1129 /* Nothing special to do if the table is fixed length */
1130 if (share->fixed_length_table) {
1131- memcpy(row_buffer, row_to_pack, table->getShare()->getRecordLength());
1132- return (size_t)table->getShare()->getRecordLength();
1133+ memcpy(row_buffer, row_to_pack, getTable()->getShare()->getRecordLength());
1134+ return (size_t)getTable()->getShare()->getRecordLength();
1135 }
1136
1137 /* Copy NULL bits */
1138- memcpy(row_buffer, row_to_pack, table->getShare()->null_bytes);
1139- pos = row_buffer + table->getShare()->null_bytes;
1140+ memcpy(row_buffer, row_to_pack, getTable()->getShare()->null_bytes);
1141+ pos = row_buffer + getTable()->getShare()->null_bytes;
1142
1143 /* Pack each field into the buffer */
1144- for (Field **field = table->getFields(); *field; field++) {
1145+ for (Field **field = getTable()->getFields(); *field; field++) {
1146 if (!((*field)->is_null()))
1147 pos = (*field)->pack(pos, row_to_pack + (*field)->offset(row_to_pack));
1148 }
1149@@ -1270,13 +1270,13 @@
1150 /* Start by copying NULL bits which is the beginning block
1151 of a Drizzle row. */
1152 pos = (const unsigned char *)from;
1153- memcpy(to, pos, table->getShare()->null_bytes);
1154- pos += table->getShare()->null_bytes;
1155+ memcpy(to, pos, getTable()->getShare()->null_bytes);
1156+ pos += getTable()->getShare()->null_bytes;
1157
1158 /* Unpack all fields in the provided row. */
1159- for (Field **field = table->getFields(); *field; field++) {
1160+ for (Field **field = getTable()->getFields(); *field; field++) {
1161 if (!((*field)->is_null())) {
1162- pos = (*field)->unpack(to + (*field)->offset(table->getInsertRecord()), pos);
1163+ pos = (*field)->unpack(to + (*field)->offset(getTable()->getInsertRecord()), pos);
1164 }
1165 }
1166
1167@@ -1308,7 +1308,7 @@
1168
1169 BlitzShare *ha_blitz::get_share(const char *name) {
1170 BlitzShare *share_ptr;
1171- BlitzEngine *bz_engine = (BlitzEngine *)engine;
1172+ BlitzEngine *bz_engine = (BlitzEngine *)getEngine();
1173 std::string table_path(name);
1174
1175 pthread_mutex_lock(&blitz_utility_mutex);
1176@@ -1333,14 +1333,14 @@
1177 }
1178
1179 /* Prepare Index Structure(s) */
1180- KeyInfo *curr = &table->getMutableShare()->getKeyInfo(0);
1181- share_ptr->btrees = new BlitzTree[table->getShare()->keys];
1182+ KeyInfo *curr = &getTable()->getMutableShare()->getKeyInfo(0);
1183+ share_ptr->btrees = new BlitzTree[getTable()->getShare()->keys];
1184
1185- for (uint32_t i = 0; i < table->getShare()->keys; i++, curr++) {
1186+ for (uint32_t i = 0; i < getTable()->getShare()->keys; i++, curr++) {
1187 share_ptr->btrees[i].open(table_path.c_str(), i, BDBOWRITER);
1188 share_ptr->btrees[i].parts = new BlitzKeyPart[curr->key_parts];
1189
1190- if (table->key_info[i].flags & HA_NOSAME)
1191+ if (getTable()->key_info[i].flags & HA_NOSAME)
1192 share_ptr->btrees[i].unique = true;
1193
1194 share_ptr->btrees[i].length = curr->key_length;
1195@@ -1353,7 +1353,7 @@
1196 if (f->null_ptr) {
1197 share_ptr->btrees[i].parts[j].null_bitmask = f->null_bit;
1198 share_ptr->btrees[i].parts[j].null_pos
1199- = (uint32_t)(f->null_ptr - (unsigned char *)table->getInsertRecord());
1200+ = (uint32_t)(f->null_ptr - (unsigned char *)getTable()->getInsertRecord());
1201 }
1202
1203 share_ptr->btrees[i].parts[j].flag = curr->key_part[j].key_part_flag;
1204@@ -1371,13 +1371,13 @@
1205 /* Set Meta Data */
1206 share_ptr->auto_increment_value = share_ptr->dict.read_meta_autoinc();
1207 share_ptr->table_name = table_path;
1208- share_ptr->nkeys = table->getShare()->keys;
1209+ share_ptr->nkeys = getTable()->getShare()->keys;
1210 share_ptr->use_count = 1;
1211
1212- share_ptr->fixed_length_table = !(table->getShare()->db_create_options
1213+ share_ptr->fixed_length_table = !(getTable()->getShare()->db_create_options
1214 & HA_OPTION_PACK_RECORD);
1215
1216- if (table->getShare()->getPrimaryKey() >= MAX_KEY)
1217+ if (getTable()->getShare()->getPrimaryKey() >= MAX_KEY)
1218 share_ptr->primary_key_exists = false;
1219 else
1220 share_ptr->primary_key_exists = true;
1221@@ -1408,7 +1408,7 @@
1222 share->btrees[i].close();
1223 }
1224
1225- BlitzEngine *bz_engine = (BlitzEngine *)engine;
1226+ BlitzEngine *bz_engine = (BlitzEngine *)getEngine();
1227 bz_engine->deleteTableShare(share->table_name);
1228
1229 delete[] share->btrees;
1230
1231=== modified file 'plugin/blitzdb/ha_blitz.h'
1232--- plugin/blitzdb/ha_blitz.h 2010-07-08 10:20:38 +0000
1233+++ plugin/blitzdb/ha_blitz.h 2010-10-23 00:16:13 +0000
1234@@ -301,7 +301,7 @@
1235
1236 public:
1237 ha_blitz(drizzled::plugin::StorageEngine &engine_arg,
1238- drizzled::TableShare &table_arg);
1239+ drizzled::Table &table_arg);
1240 ~ha_blitz() {}
1241
1242 /* TABLE CONTROL RELATED FUNCTIONS */
1243
1244=== modified file 'plugin/csv/ha_tina.cc'
1245--- plugin/csv/ha_tina.cc 2010-10-10 08:28:30 +0000
1246+++ plugin/csv/ha_tina.cc 2010-10-23 00:16:13 +0000
1247@@ -112,7 +112,7 @@
1248 pthread_mutex_destroy(&tina_mutex);
1249 }
1250
1251- virtual Cursor *create(TableShare &table)
1252+ virtual Cursor *create(Table &table)
1253 {
1254 return new ha_tina(*this, table);
1255 }
1256@@ -279,7 +279,7 @@
1257 {
1258 pthread_mutex_lock(&tina_mutex);
1259
1260- Tina *a_tina= static_cast<Tina *>(engine);
1261+ Tina *a_tina= static_cast<Tina *>(getEngine());
1262 share= a_tina->findOpenTable(table_name);
1263
1264 std::string meta_file_name;
1265@@ -481,7 +481,7 @@
1266 share->tina_write_opened= false;
1267 }
1268
1269- Tina *a_tina= static_cast<Tina *>(engine);
1270+ Tina *a_tina= static_cast<Tina *>(getEngine());
1271 a_tina->deleteOpenTable(share->table_name);
1272 delete share;
1273 }
1274@@ -530,7 +530,7 @@
1275
1276
1277
1278-ha_tina::ha_tina(drizzled::plugin::StorageEngine &engine_arg, TableShare &table_arg)
1279+ha_tina::ha_tina(drizzled::plugin::StorageEngine &engine_arg, Table &table_arg)
1280 :Cursor(engine_arg, table_arg),
1281 /*
1282 These definitions are found in Cursor.h
1283@@ -557,7 +557,7 @@
1284
1285 buffer.length(0);
1286
1287- for (Field **field= table->getFields() ; *field ; field++)
1288+ for (Field **field= getTable()->getFields() ; *field ; field++)
1289 {
1290 const char *ptr;
1291 const char *end_ptr;
1292@@ -675,9 +675,9 @@
1293
1294 error= HA_ERR_CRASHED_ON_USAGE;
1295
1296- memset(buf, 0, table->getShare()->null_bytes);
1297+ memset(buf, 0, getTable()->getShare()->null_bytes);
1298
1299- for (Field **field=table->getFields() ; *field ; field++)
1300+ for (Field **field= getTable()->getFields() ; *field ; field++)
1301 {
1302 char curr_char;
1303
1304
1305=== modified file 'plugin/csv/ha_tina.h'
1306--- plugin/csv/ha_tina.h 2010-10-02 21:15:42 +0000
1307+++ plugin/csv/ha_tina.h 2010-10-23 00:16:13 +0000
1308@@ -88,7 +88,7 @@
1309 int init_data_file();
1310
1311 public:
1312- ha_tina(drizzled::plugin::StorageEngine &engine, drizzled::TableShare &table_arg);
1313+ ha_tina(drizzled::plugin::StorageEngine &engine, drizzled::Table &table_arg);
1314 ~ha_tina()
1315 {
1316 if (file_buff)
1317
1318=== modified file 'plugin/filesystem_engine/filesystem_engine.cc'
1319--- plugin/filesystem_engine/filesystem_engine.cc 2010-09-25 22:17:18 +0000
1320+++ plugin/filesystem_engine/filesystem_engine.cc 2010-10-23 00:16:13 +0000
1321@@ -74,7 +74,7 @@
1322 pthread_mutex_destroy(&filesystem_mutex);
1323 }
1324
1325- virtual Cursor *create(TableShare &table)
1326+ virtual Cursor *create(Table &table)
1327 {
1328 return new FilesystemCursor(*this, table);
1329 }
1330@@ -369,7 +369,7 @@
1331 {
1332 Guard g(filesystem_mutex);
1333
1334- FilesystemEngine *a_engine= static_cast<FilesystemEngine *>(engine);
1335+ FilesystemEngine *a_engine= static_cast<FilesystemEngine *>(getEngine());
1336 share= a_engine->findOpenTable(table_name);
1337
1338 /*
1339@@ -384,7 +384,7 @@
1340 return NULL;
1341 }
1342
1343- share->format.parseFromTable(table->getShare()->getTableProto());
1344+ share->format.parseFromTable(getTable()->getShare()->getTableProto());
1345 if (!share->format.isFileGiven())
1346 {
1347 return NULL;
1348@@ -414,7 +414,7 @@
1349 Guard g(filesystem_mutex);
1350
1351 if (!--share->use_count){
1352- FilesystemEngine *a_engine= static_cast<FilesystemEngine *>(engine);
1353+ FilesystemEngine *a_engine= static_cast<FilesystemEngine *>(getEngine());
1354 a_engine->deleteOpenTable(share->table_name);
1355 pthread_mutex_destroy(&share->mutex);
1356 delete share;
1357@@ -453,7 +453,7 @@
1358 thread_locked = false;
1359 }
1360
1361-FilesystemCursor::FilesystemCursor(drizzled::plugin::StorageEngine &engine_arg, TableShare &table_arg)
1362+FilesystemCursor::FilesystemCursor(drizzled::plugin::StorageEngine &engine_arg, Table &table_arg)
1363 : Cursor(engine_arg, table_arg),
1364 file_buff(new TransparentFile),
1365 thread_locked(false)
1366@@ -487,7 +487,7 @@
1367
1368 int FilesystemCursor::doStartTableScan(bool)
1369 {
1370- sql_command_type = session_sql_command(table->getSession());
1371+ sql_command_type = session_sql_command(getTable()->getSession());
1372
1373 if (thread_locked)
1374 critical_section_exit();
1375@@ -515,14 +515,14 @@
1376
1377 int FilesystemCursor::find_current_row(unsigned char *buf)
1378 {
1379- ptrdiff_t row_offset= buf - table->record[0];
1380+ ptrdiff_t row_offset= buf - getTable()->record[0];
1381
1382 next_position= current_position;
1383
1384 string content;
1385 bool line_done= false;
1386 bool line_blank= true;
1387- Field **field= table->getFields();
1388+ Field **field= getTable()->getFields();
1389 for (; !line_done && *field; ++next_position)
1390 {
1391 char ch= file_buff->get_value(next_position);
1392@@ -616,8 +616,8 @@
1393 if (tag_depth >= share->vm.size())
1394 return HA_ERR_END_OF_FILE;
1395
1396- ptrdiff_t row_offset= buf - table->record[0];
1397- for (Field **field= table->getFields(); *field; field++)
1398+ ptrdiff_t row_offset= buf - getTable()->record[0];
1399+ for (Field **field= getTable()->getFields(); *field; field++)
1400 {
1401 string key((*field)->field_name);
1402 string content= share->vm[tag_depth][key];
1403@@ -696,7 +696,7 @@
1404
1405 int FilesystemCursor::doEndTableScan()
1406 {
1407- sql_command_type = session_sql_command(table->getSession());
1408+ sql_command_type = session_sql_command(getTable()->getSession());
1409
1410 if (share->format.isTagFormat())
1411 {
1412@@ -784,7 +784,7 @@
1413 {
1414 bool first= true;
1415 drizzled::String attribute;
1416- for (Field **field= table->getFields(); *field; ++field)
1417+ for (Field **field= getTable()->getFields(); *field; ++field)
1418 {
1419 if (first == true)
1420 {
1421@@ -817,7 +817,7 @@
1422 if (share->format.isTagFormat())
1423 return 0;
1424
1425- sql_command_type = session_sql_command(table->getSession());
1426+ sql_command_type = session_sql_command(getTable()->getSession());
1427
1428 critical_section_enter();
1429
1430
1431=== modified file 'plugin/filesystem_engine/filesystem_engine.h'
1432--- plugin/filesystem_engine/filesystem_engine.h 2010-07-30 12:39:39 +0000
1433+++ plugin/filesystem_engine/filesystem_engine.h 2010-10-23 00:16:13 +0000
1434@@ -67,7 +67,7 @@
1435 std::vector< std::pair<off_t, off_t> > slots;
1436
1437 public:
1438- FilesystemCursor(drizzled::plugin::StorageEngine &engine, drizzled::TableShare &table_arg);
1439+ FilesystemCursor(drizzled::plugin::StorageEngine &engine, drizzled::Table &table_arg);
1440 ~FilesystemCursor()
1441 {
1442 }
1443
1444=== modified file 'plugin/function_engine/cursor.cc'
1445--- plugin/function_engine/cursor.cc 2010-07-29 00:35:25 +0000
1446+++ plugin/function_engine/cursor.cc 2010-10-23 00:16:13 +0000
1447@@ -37,7 +37,7 @@
1448 *****************************************************************************/
1449
1450 FunctionCursor::FunctionCursor(plugin::StorageEngine &engine_arg,
1451- TableShare &table_arg) :
1452+ Table &table_arg) :
1453 Cursor(engine_arg, table_arg),
1454 estimate_of_rows(100), // Completely fabricated, I used to use the value 2.
1455 rows_returned(0)
1456@@ -45,7 +45,7 @@
1457
1458 int FunctionCursor::open(const char *name, int, uint32_t)
1459 {
1460- tool= static_cast<Function *>(engine)->getFunction(name);
1461+ tool= static_cast<Function *>(getEngine())->getFunction(name);
1462 // assert(tool);
1463
1464 record_id= 0;
1465@@ -67,7 +67,7 @@
1466 int FunctionCursor::doStartTableScan(bool)
1467 {
1468 rows_returned= 0;
1469- generator= tool->generator(table->getFields());
1470+ generator= tool->generator(getTable()->getFields());
1471
1472 return 0;
1473 }
1474@@ -79,12 +79,12 @@
1475 ha_statistic_increment(&system_status_var::ha_read_rnd_next_count);
1476
1477 /* Fix bug in the debug logic for field */
1478- for (Field **field= table->getFields() ; *field ; field++)
1479+ for (Field **field= getTable()->getFields() ; *field ; field++)
1480 {
1481 (*field)->setWriteSet();
1482 }
1483
1484- more_rows= generator->sub_populate(table->getShare()->sizeFields());
1485+ more_rows= generator->sub_populate(getTable()->getShare()->sizeFields());
1486
1487 if (more_rows)
1488 {
1489@@ -102,11 +102,11 @@
1490
1491 void FunctionCursor::position(const unsigned char *record)
1492 {
1493- if (row_cache.size() <= record_id * table->getShare()->getRecordLength())
1494+ if (row_cache.size() <= record_id * getTable()->getShare()->getRecordLength())
1495 {
1496- row_cache.resize(row_cache.size() + table->getShare()->getRecordLength() * 100); // Hardwired at adding an additional 100 rows of storage
1497+ row_cache.resize(row_cache.size() + getTable()->getShare()->getRecordLength() * 100); // Hardwired at adding an additional 100 rows of storage
1498 }
1499- memcpy(&row_cache[record_id * table->getShare()->getRecordLength()], record, table->getShare()->getRecordLength());
1500+ memcpy(&row_cache[record_id * getTable()->getShare()->getRecordLength()], record, getTable()->getShare()->getRecordLength());
1501 internal::my_store_ptr(ref, ref_length, record_id);
1502 record_id++;
1503 }
1504@@ -151,8 +151,8 @@
1505 ha_statistic_increment(&system_status_var::ha_read_rnd_count);
1506 size_t position_id= (size_t)internal::my_get_ptr(pos, ref_length);
1507
1508- assert(position_id * table->getShare()->getRecordLength() < row_cache.size());
1509- memcpy(buf, &row_cache[position_id * table->getShare()->getRecordLength()], table->getShare()->getRecordLength());
1510+ assert(position_id * getTable()->getShare()->getRecordLength() < row_cache.size());
1511+ memcpy(buf, &row_cache[position_id * getTable()->getShare()->getRecordLength()], getTable()->getShare()->getRecordLength());
1512
1513 return 0;
1514 }
1515
1516=== modified file 'plugin/function_engine/cursor.h'
1517--- plugin/function_engine/cursor.h 2010-05-20 17:33:40 +0000
1518+++ plugin/function_engine/cursor.h 2010-10-23 00:16:13 +0000
1519@@ -39,7 +39,7 @@
1520
1521 public:
1522 FunctionCursor(drizzled::plugin::StorageEngine &engine,
1523- drizzled::TableShare &table_arg);
1524+ drizzled::Table &table_arg);
1525 ~FunctionCursor() {}
1526
1527 int open(const char *name, int mode, uint32_t test_if_locked);
1528
1529=== modified file 'plugin/function_engine/function.cc'
1530--- plugin/function_engine/function.cc 2010-10-10 11:41:42 +0000
1531+++ plugin/function_engine/function.cc 2010-10-23 00:16:13 +0000
1532@@ -49,7 +49,7 @@
1533 }
1534
1535
1536-Cursor *Function::create(TableShare &table)
1537+Cursor *Function::create(Table &table)
1538 {
1539 return new FunctionCursor(*this, table);
1540 }
1541
1542=== modified file 'plugin/function_engine/function.h'
1543--- plugin/function_engine/function.h 2010-10-09 01:10:07 +0000
1544+++ plugin/function_engine/function.h 2010-10-23 00:16:13 +0000
1545@@ -59,7 +59,7 @@
1546 return EPERM;
1547 }
1548
1549- virtual drizzled::Cursor *create(drizzled::TableShare &table);
1550+ virtual drizzled::Cursor *create(drizzled::Table &table);
1551
1552 const char **bas_ext() const
1553 {
1554
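The function_engine hunks above capture the whole shape of the refactor: a storage engine's create() factory and its Cursor constructor now take a Table reference instead of a TableShare, and cursor code reaches the fields, share, and engine through the getTable()/getEngine() accessors. A minimal sketch of a cursor and factory following that pattern is below; ExampleCursor, ExampleEngine, and the body of doStartTableScan are illustrative only, not code from this branch.

  class ExampleCursor : public drizzled::Cursor
  {
  public:
    ExampleCursor(drizzled::plugin::StorageEngine &engine_arg,
                  drizzled::Table &table_arg) :
      drizzled::Cursor(engine_arg, table_arg)  // the Cursor now always holds a Table and engine reference
    {}

    int doStartTableScan(bool)
    {
      // field and share metadata are reached through the owning Table
      for (drizzled::Field **field= getTable()->getFields(); *field; field++)
        (*field)->setWriteSet();

      return 0;
    }
  };

  // the engine-side factory hands the concrete Table to the cursor
  drizzled::Cursor *ExampleEngine::create(drizzled::Table &table)
  {
    return new ExampleCursor(*this, table);
  }
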
1555=== modified file 'plugin/haildb/haildb_engine.cc'
1556--- plugin/haildb/haildb_engine.cc 2010-10-12 14:11:44 +0000
1557+++ plugin/haildb/haildb_engine.cc 2010-10-23 00:16:13 +0000
1558@@ -150,7 +150,7 @@
1559
1560 ~HailDBEngine();
1561
1562- virtual Cursor *create(TableShare &table)
1563+ virtual Cursor *create(Table &table)
1564 {
1565 return new HailDBCursor(*this, table);
1566 }
1567@@ -474,7 +474,7 @@
1568 {
1569 uint64_t nr;
1570 ib_err_t err;
1571- ib_trx_t transaction= *get_trx(table->in_use);
1572+ ib_trx_t transaction= *get_trx(getTable()->in_use);
1573 ib_cursor_attach_trx(cursor, transaction);
1574 tuple= ib_clust_read_tuple_create(cursor);
1575 err= ib_cursor_last(cursor);
1576@@ -485,7 +485,7 @@
1577 else
1578 {
1579 assert (err == DB_SUCCESS);
1580- err= ib_tuple_read_u64(tuple, table->getShare()->fields, &nr);
1581+ err= ib_tuple_read_u64(tuple, getTable()->getShare()->fields, &nr);
1582 nr++;
1583 }
1584 ib_tuple_delete(tuple);
1585@@ -500,33 +500,33 @@
1586 int error;
1587
1588 (void) extra(HA_EXTRA_KEYREAD);
1589- table->mark_columns_used_by_index_no_reset(table->getShare()->next_number_index);
1590- doStartIndexScan(table->getShare()->next_number_index, 1);
1591- if (table->getShare()->next_number_keypart == 0)
1592+ getTable()->mark_columns_used_by_index_no_reset(getTable()->getShare()->next_number_index);
1593+ doStartIndexScan(getTable()->getShare()->next_number_index, 1);
1594+ if (getTable()->getShare()->next_number_keypart == 0)
1595 { // Autoincrement at key-start
1596- error=index_last(table->getUpdateRecord());
1597+ error=index_last(getTable()->getUpdateRecord());
1598 }
1599 else
1600 {
1601 unsigned char key[MAX_KEY_LENGTH];
1602- key_copy(key, table->getInsertRecord(),
1603- table->key_info + table->getShare()->next_number_index,
1604- table->getShare()->next_number_key_offset);
1605- error= index_read_map(table->getUpdateRecord(), key,
1606- make_prev_keypart_map(table->getShare()->next_number_keypart),
1607+ key_copy(key, getTable()->getInsertRecord(),
1608+ getTable()->key_info + getTable()->getShare()->next_number_index,
1609+ getTable()->getShare()->next_number_key_offset);
1610+ error= index_read_map(getTable()->getUpdateRecord(), key,
1611+ make_prev_keypart_map(getTable()->getShare()->next_number_keypart),
1612 HA_READ_PREFIX_LAST);
1613 }
1614
1615 if (error)
1616 nr=1;
1617 else
1618- nr= ((uint64_t) table->found_next_number_field->
1619- val_int_offset(table->getShare()->rec_buff_length)+1);
1620+ nr= ((uint64_t) getTable()->found_next_number_field->
1621+ val_int_offset(getTable()->getShare()->rec_buff_length)+1);
1622 doEndIndexScan();
1623 (void) extra(HA_EXTRA_NO_KEYREAD);
1624
1625- if (table->getShare()->getTableProto()->options().auto_increment_value() > nr)
1626- nr= table->getShare()->getTableProto()->options().auto_increment_value();
1627+ if (getTable()->getShare()->getTableProto()->options().auto_increment_value() > nr)
1628+ nr= getTable()->getShare()->getTableProto()->options().auto_increment_value();
1629
1630 return nr;
1631 }
1632@@ -559,7 +559,7 @@
1633 {
1634 pthread_mutex_lock(&haildb_mutex);
1635
1636- HailDBEngine *a_engine= static_cast<HailDBEngine *>(engine);
1637+ HailDBEngine *a_engine= static_cast<HailDBEngine *>(getEngine());
1638 share= a_engine->findOpenTable(table_name);
1639
1640 if (!share)
1641@@ -573,7 +573,7 @@
1642 return(NULL);
1643 }
1644
1645- if (table->found_next_number_field)
1646+ if (getTable()->found_next_number_field)
1647 {
1648 share->auto_increment_value.fetch_and_store(
1649 a_engine->getInitialAutoIncrementValue(this));
1650@@ -602,7 +602,7 @@
1651 pthread_mutex_lock(&haildb_mutex);
1652 if (!--share->use_count)
1653 {
1654- HailDBEngine *a_engine= static_cast<HailDBEngine *>(engine);
1655+ HailDBEngine *a_engine= static_cast<HailDBEngine *>(getEngine());
1656 a_engine->deleteOpenTable(share->table_name);
1657 delete share;
1658 }
1659@@ -796,7 +796,7 @@
1660 }
1661
1662 HailDBCursor::HailDBCursor(drizzled::plugin::StorageEngine &engine_arg,
1663- TableShare &table_arg)
1664+ Table &table_arg)
1665 :Cursor(engine_arg, table_arg),
1666 ib_lock_mode(IB_LOCK_NONE),
1667 write_can_replace(false),
1668@@ -836,14 +836,14 @@
1669 lock.init(&share->lock);
1670
1671
1672- if (table->getShare()->getPrimaryKey() != MAX_KEY)
1673- ref_length= table->key_info[table->getShare()->getPrimaryKey()].key_length;
1674+ if (getTable()->getShare()->getPrimaryKey() != MAX_KEY)
1675+ ref_length= getTable()->key_info[getTable()->getShare()->getPrimaryKey()].key_length;
1676 else if (share->has_hidden_primary_key)
1677 ref_length= sizeof(uint64_t);
1678 else
1679 {
1680- unsigned int keynr= get_first_unique_index(*table);
1681- ref_length= table->key_info[keynr].key_length;
1682+ unsigned int keynr= get_first_unique_index(*getTable());
1683+ ref_length= getTable()->key_info[keynr].key_length;
1684 }
1685
1686 in_table_scan= false;
1687@@ -1872,7 +1872,7 @@
1688 ib_err_t err;
1689 int ret= 0;
1690
1691- ib_trx_t transaction= *get_trx(table->in_use);
1692+ ib_trx_t transaction= *get_trx(getTable()->in_use);
1693
1694 tuple= ib_clust_read_tuple_create(cursor);
1695
1696@@ -1894,11 +1894,11 @@
1697 * yuck.
1698 */
1699
1700- HailDBEngine *storage_engine= static_cast<HailDBEngine*>(engine);
1701+ HailDBEngine *storage_engine= static_cast<HailDBEngine*>(getEngine());
1702 err= ib_cursor_reset(cursor);
1703 storage_engine->doCommit(current_session, true);
1704 storage_engine->doStartTransaction(current_session, START_TRANS_NO_OPTIONS);
1705- transaction= *get_trx(table->in_use);
1706+ transaction= *get_trx(getTable()->in_use);
1707 assert(err == DB_SUCCESS);
1708 ib_cursor_attach_trx(cursor, transaction);
1709 err= ib_cursor_first(cursor);
1710@@ -1907,13 +1907,13 @@
1711 assert(err == DB_SUCCESS || err == DB_END_OF_INDEX);
1712
1713
1714- if (table->next_number_field)
1715+ if (getTable()->next_number_field)
1716 {
1717 update_auto_increment();
1718
1719- uint64_t temp_auto= table->next_number_field->val_int();
1720+ uint64_t temp_auto= getTable()->next_number_field->val_int();
1721
1722- if (temp_auto <= innobase_get_int_col_max_value(table->next_number_field))
1723+ if (temp_auto <= innobase_get_int_col_max_value(getTable()->next_number_field))
1724 {
1725 while (true)
1726 {
1727@@ -1935,11 +1935,11 @@
1728
1729 }
1730
1731- write_row_to_haildb_tuple(table->getFields(), tuple);
1732+ write_row_to_haildb_tuple(getTable()->getFields(), tuple);
1733
1734 if (share->has_hidden_primary_key)
1735 {
1736- err= ib_tuple_write_u64(tuple, table->getShare()->fields, share->hidden_pkey_auto_increment_value.fetch_and_increment());
1737+ err= ib_tuple_write_u64(tuple, getTable()->getShare()->fields, share->hidden_pkey_auto_increment_value.fetch_and_increment());
1738 }
1739
1740 err= ib_cursor_insert_row(cursor, tuple);
1741@@ -1948,13 +1948,13 @@
1742 {
1743 if (write_can_replace)
1744 {
1745- store_key_value_from_haildb(table->key_info + table->getShare()->getPrimaryKey(),
1746+ store_key_value_from_haildb(getTable()->key_info + getTable()->getShare()->getPrimaryKey(),
1747 ref, ref_length, record);
1748
1749 ib_tpl_t search_tuple= ib_clust_search_tuple_create(cursor);
1750
1751 fill_ib_search_tpl_from_drizzle_key(search_tuple,
1752- table->key_info + 0,
1753+ getTable()->key_info + 0,
1754 ref, ref_length);
1755
1756 int res;
1757@@ -1968,7 +1968,7 @@
1758 err= ib_cursor_first(cursor);
1759 assert(err == DB_SUCCESS || err == DB_END_OF_INDEX);
1760
1761- write_row_to_haildb_tuple(table->getFields(), tuple);
1762+ write_row_to_haildb_tuple(getTable()->getFields(), tuple);
1763
1764 err= ib_cursor_insert_row(cursor, tuple);
1765 assert(err==DB_SUCCESS); // probably be nice and process errors
1766@@ -1997,7 +1997,7 @@
1767 err= ib_tuple_copy(update_tuple, tuple);
1768 assert(err == DB_SUCCESS);
1769
1770- write_row_to_haildb_tuple(table->getFields(), update_tuple);
1771+ write_row_to_haildb_tuple(getTable()->getFields(), update_tuple);
1772
1773 err= ib_cursor_update_row(cursor, tuple, update_tuple);
1774
1775@@ -2031,7 +2031,7 @@
1776 so only support TRUNCATE and not DELETE FROM t;
1777 (this is what ha_haildb does)
1778 */
1779- if (session_sql_command(table->in_use) != SQLCOM_TRUNCATE)
1780+ if (session_sql_command(getTable()->in_use) != SQLCOM_TRUNCATE)
1781 return HA_ERR_WRONG_COMMAND;
1782
1783 ib_id_t id;
1784@@ -2046,7 +2046,7 @@
1785 {
1786 ib_err_t rollback_err= ib_trx_rollback(transaction);
1787
1788- push_warning_printf(table->in_use, DRIZZLE_ERROR::WARN_LEVEL_ERROR,
1789+ push_warning_printf(getTable()->in_use, DRIZZLE_ERROR::WARN_LEVEL_ERROR,
1790 ER_CANT_DELETE_FILE,
1791 _("Cannot Lock HailDB Data Dictionary. HailDB Error %d (%s)\n"),
1792 err, ib_strerror(err));
1793@@ -2087,7 +2087,7 @@
1794 doEndTableScan();
1795 in_table_scan= true;
1796
1797- transaction= *get_trx(table->in_use);
1798+ transaction= *get_trx(getTable()->in_use);
1799
1800 assert(transaction != NULL);
1801
1802@@ -2206,7 +2206,7 @@
1803 err= ib_cursor_next(cursor);
1804
1805 tuple= ib_tuple_clear(tuple);
1806- ret= read_row_from_haildb(buf, cursor, tuple, table,
1807+ ret= read_row_from_haildb(buf, cursor, tuple, getTable(),
1808 share->has_hidden_primary_key,
1809 &hidden_autoinc_pkey_position);
1810
1811@@ -2241,13 +2241,13 @@
1812 else
1813 {
1814 unsigned int keynr;
1815- if (table->getShare()->getPrimaryKey() != MAX_KEY)
1816- keynr= table->getShare()->getPrimaryKey();
1817+ if (getTable()->getShare()->getPrimaryKey() != MAX_KEY)
1818+ keynr= getTable()->getShare()->getPrimaryKey();
1819 else
1820- keynr= get_first_unique_index(*table);
1821+ keynr= get_first_unique_index(*getTable());
1822
1823 fill_ib_search_tpl_from_drizzle_key(search_tuple,
1824- table->key_info + keynr,
1825+ getTable()->key_info + keynr,
1826 pos, ref_length);
1827 }
1828
1829@@ -2263,7 +2263,7 @@
1830 tuple= ib_tuple_clear(tuple);
1831
1832 if (ret == 0)
1833- ret= read_row_from_haildb(buf, cursor, tuple, table,
1834+ ret= read_row_from_haildb(buf, cursor, tuple, getTable(),
1835 share->has_hidden_primary_key,
1836 &hidden_autoinc_pkey_position);
1837
1838@@ -2334,12 +2334,12 @@
1839 else
1840 {
1841 unsigned int keynr;
1842- if (table->getShare()->getPrimaryKey() != MAX_KEY)
1843- keynr= table->getShare()->getPrimaryKey();
1844+ if (getTable()->getShare()->getPrimaryKey() != MAX_KEY)
1845+ keynr= getTable()->getShare()->getPrimaryKey();
1846 else
1847- keynr= get_first_unique_index(*table);
1848+ keynr= get_first_unique_index(*getTable());
1849
1850- store_key_value_from_haildb(table->key_info + keynr,
1851+ store_key_value_from_haildb(getTable()->key_info + keynr,
1852 ref, ref_length, record);
1853 }
1854
1855@@ -2385,7 +2385,7 @@
1856
1857 int HailDBCursor::doStartIndexScan(uint32_t keynr, bool)
1858 {
1859- ib_trx_t transaction= *get_trx(table->in_use);
1860+ ib_trx_t transaction= *get_trx(getTable()->in_use);
1861
1862 active_index= keynr;
1863
1864@@ -2399,8 +2399,8 @@
1865 {
1866 ib_err_t err;
1867 ib_id_t index_id;
1868- err= ib_index_get_id(table_path_to_haildb_name(table_share->getPath()),
1869- table_share->getKeyInfo(keynr).name,
1870+ err= ib_index_get_id(table_path_to_haildb_name(getShare()->getPath()),
1871+ getShare()->getKeyInfo(keynr).name,
1872 &index_id);
1873 if (err != DB_SUCCESS)
1874 return -1;
1875@@ -2547,7 +2547,7 @@
1876 search_tuple= ib_sec_search_tuple_create(cursor);
1877
1878 fill_ib_search_tpl_from_drizzle_key(search_tuple,
1879- table->key_info + active_index,
1880+ getTable()->key_info + active_index,
1881 key_ptr, key_len);
1882
1883 err= ib_cursor_moveto(cursor, search_tuple, search_mode, &res);
1884@@ -2555,7 +2555,7 @@
1885
1886 if ((err == DB_RECORD_NOT_FOUND || err == DB_END_OF_INDEX))
1887 {
1888- table->status= STATUS_NOT_FOUND;
1889+ getTable()->status= STATUS_NOT_FOUND;
1890 return HA_ERR_KEY_NOT_FOUND;
1891 }
1892
1893@@ -2565,14 +2565,14 @@
1894 }
1895
1896 tuple= ib_tuple_clear(tuple);
1897- ret= read_row_from_haildb(buf, cursor, tuple, table,
1898+ ret= read_row_from_haildb(buf, cursor, tuple, getTable(),
1899 share->has_hidden_primary_key,
1900 &hidden_autoinc_pkey_position,
1901 (allocate_blobs)? &blobroot : NULL);
1902 if (ret == 0)
1903- table->status= 0;
1904+ getTable()->status= 0;
1905 else
1906- table->status= STATUS_NOT_FOUND;
1907+ getTable()->status= STATUS_NOT_FOUND;
1908
1909 advance_cursor= true;
1910
1911@@ -2594,8 +2594,8 @@
1912 /* works only with key prefixes */
1913 assert(((keypart_map_arg + 1) & keypart_map_arg) == 0);
1914
1915- KeyPartInfo *key_part_found= table->getShare()->getKeyInfo(key_position).key_part;
1916- KeyPartInfo *end_key_part_found= key_part_found + table->getShare()->getKeyInfo(key_position).key_parts;
1917+ KeyPartInfo *key_part_found= getTable()->getShare()->getKeyInfo(key_position).key_part;
1918+ KeyPartInfo *end_key_part_found= key_part_found + getTable()->getShare()->getKeyInfo(key_position).key_parts;
1919 uint32_t length= 0;
1920
1921 while (key_part_found < end_key_part_found && keypart_map_arg)
1922@@ -2654,7 +2654,7 @@
1923 }
1924
1925 tuple= ib_tuple_clear(tuple);
1926- ret= read_row_from_haildb(buf, cursor, tuple, table,
1927+ ret= read_row_from_haildb(buf, cursor, tuple, getTable(),
1928 share->has_hidden_primary_key,
1929 &hidden_autoinc_pkey_position);
1930
1931@@ -2687,7 +2687,7 @@
1932 }
1933
1934 tuple= ib_tuple_clear(tuple);
1935- ret= read_row_from_haildb(buf, cursor, tuple, table,
1936+ ret= read_row_from_haildb(buf, cursor, tuple, getTable(),
1937 share->has_hidden_primary_key,
1938 &hidden_autoinc_pkey_position);
1939
1940@@ -2707,7 +2707,7 @@
1941 return ib_err_t_to_drizzle_error(err);
1942
1943 tuple= ib_tuple_clear(tuple);
1944- ret= read_row_from_haildb(buf, cursor, tuple, table,
1945+ ret= read_row_from_haildb(buf, cursor, tuple, getTable(),
1946 share->has_hidden_primary_key,
1947 &hidden_autoinc_pkey_position);
1948
1949@@ -2727,7 +2727,7 @@
1950 return ib_err_t_to_drizzle_error(err);
1951
1952 tuple= ib_tuple_clear(tuple);
1953- ret= read_row_from_haildb(buf, cursor, tuple, table,
1954+ ret= read_row_from_haildb(buf, cursor, tuple, getTable(),
1955 share->has_hidden_primary_key,
1956 &hidden_autoinc_pkey_position);
1957 advance_cursor= true;
1958
1959=== modified file 'plugin/haildb/haildb_engine.h'
1960--- plugin/haildb/haildb_engine.h 2010-09-30 05:47:49 +0000
1961+++ plugin/haildb/haildb_engine.h 2010-10-23 00:16:13 +0000
1962@@ -39,7 +39,7 @@
1963 class HailDBCursor: public drizzled::Cursor
1964 {
1965 public:
1966- HailDBCursor(drizzled::plugin::StorageEngine &engine, drizzled::TableShare &table_arg);
1967+ HailDBCursor(drizzled::plugin::StorageEngine &engine, drizzled::Table &table_arg);
1968 ~HailDBCursor()
1969 {}
1970
1971
1972=== modified file 'plugin/innobase/handler/ha_innodb.cc'
1973--- plugin/innobase/handler/ha_innodb.cc 2010-10-14 12:36:24 +0000
1974+++ plugin/innobase/handler/ha_innodb.cc 2010-10-23 00:16:13 +0000
1975@@ -391,7 +391,7 @@
1976 /* out: 0 or error number */
1977 ::drizzled::XID *xid); /* in: X/Open XA transaction identification */
1978
1979- virtual Cursor *create(TableShare &table)
1980+ virtual Cursor *create(Table &table)
1981 {
1982 return new ha_innobase(*this, table);
1983 }
1984@@ -1612,7 +1612,7 @@
1985 Construct ha_innobase Cursor. */
1986 UNIV_INTERN
1987 ha_innobase::ha_innobase(plugin::StorageEngine &engine_arg,
1988- TableShare &table_arg)
1989+ Table &table_arg)
1990 :Cursor(engine_arg, table_arg),
1991 primary_key(0), /* needs initialization because index_flags() may be called
1992 before this is set to the real value. It's ok to have any
1993@@ -2897,8 +2897,8 @@
1994 const char* col_name;
1995 ulint error;
1996
1997- col_name = table->found_next_number_field->field_name;
1998- index = innobase_get_index(table->getShare()->next_number_index);
1999+ col_name = getTable()->found_next_number_field->field_name;
2000+ index = innobase_get_index(getTable()->getShare()->next_number_index);
2001
2002 /* Execute SELECT MAX(col_name) FROM TABLE; */
2003 error = row_search_max_autoinc(index, col_name, &auto_inc);
2004@@ -2956,7 +2956,7 @@
2005 UT_NOT_USED(mode);
2006 UT_NOT_USED(test_if_locked);
2007
2008- session= table->in_use;
2009+ session= getTable()->in_use;
2010
2011 /* Under some cases Drizzle seems to call this function while
2012 holding btr_search_latch. This breaks the latching order as
2013@@ -2980,8 +2980,8 @@
2014 stored the string length as the first byte. */
2015
2016 upd_and_key_val_buff_len =
2017- table->getShare()->stored_rec_length
2018- + table->getShare()->max_key_length
2019+ getTable()->getShare()->stored_rec_length
2020+ + getTable()->getShare()->max_key_length
2021 + MAX_REF_PARTS * 3;
2022
2023 upd_buff.resize(upd_and_key_val_buff_len);
2024@@ -3044,13 +3044,13 @@
2025
2026 prebuilt = row_create_prebuilt(ib_table);
2027
2028- prebuilt->mysql_row_len = table->getShare()->stored_rec_length;
2029- prebuilt->default_rec = table->getDefaultValues();
2030+ prebuilt->mysql_row_len = getTable()->getShare()->stored_rec_length;
2031+ prebuilt->default_rec = getTable()->getDefaultValues();
2032 ut_ad(prebuilt->default_rec);
2033
2034 /* Looks like MySQL-3.23 sometimes has primary key number != 0 */
2035
2036- primary_key = table->getShare()->getPrimaryKey();
2037+ primary_key = getTable()->getShare()->getPrimaryKey();
2038 key_used_on_scan = primary_key;
2039
2040 /* Allocate a buffer for a 'row reference'. A row reference is
2041@@ -3073,7 +3073,7 @@
2042 save space, because all row reference buffers are allocated
2043 based on ref_length. */
2044
2045- ref_length = table->key_info[primary_key].key_length;
2046+ ref_length = getTable()->key_info[primary_key].key_length;
2047 } else {
2048 if (primary_key != MAX_KEY) {
2049 errmsg_printf(ERRMSG_LVL_ERROR, "Table %s has no primary key in InnoDB data "
2050@@ -3125,7 +3125,7 @@
2051 info(HA_STATUS_NO_LOCK | HA_STATUS_VARIABLE | HA_STATUS_CONST);
2052
2053 /* Only if the table has an AUTOINC column. */
2054- if (prebuilt->table != NULL && table->found_next_number_field != NULL) {
2055+ if (prebuilt->table != NULL && getTable()->found_next_number_field != NULL) {
2056 ulint error;
2057
2058 dict_table_autoinc_lock(prebuilt->table);
2059@@ -3163,7 +3163,7 @@
2060 {
2061 Session* session;
2062
2063- session= table->in_use;
2064+ session= getTable()->in_use;
2065 if (session != NULL) {
2066 getTransactionalEngine()->releaseTemporaryLatches(session);
2067 }
2068@@ -3444,7 +3444,7 @@
2069 uint buff_len,/*!< in: buffer length */
2070 const unsigned char* record)/*!< in: row in MySQL format */
2071 {
2072- KeyInfo* key_info = &table->key_info[keynr];
2073+ KeyInfo* key_info = &getTable()->key_info[keynr];
2074 KeyPartInfo* key_part = key_info->key_part;
2075 KeyPartInfo* end = key_part + key_info->key_parts;
2076 char* buff_start = buff;
2077@@ -3520,7 +3520,7 @@
2078
2079 data = row_mysql_read_true_varchar(&len,
2080 (byte*) (record
2081- + (ulint)get_field_offset(table, field)),
2082+ + (ulint)get_field_offset(getTable(), field)),
2083 lenlen);
2084
2085 true_len = len;
2086@@ -3583,12 +3583,12 @@
2087
2088 blob_data = row_mysql_read_blob_ref(&blob_len,
2089 (byte*) (record
2090- + (ulint)get_field_offset(table, field)),
2091+ + (ulint)get_field_offset(getTable(), field)),
2092 (ulint) field->pack_length());
2093
2094 true_len = blob_len;
2095
2096- ut_a(get_field_offset(table, field)
2097+ ut_a(get_field_offset(getTable(), field)
2098 == key_part->offset);
2099
2100 /* For multi byte character sets we need to calculate
2101@@ -4079,7 +4079,7 @@
2102 num_write_row++;
2103
2104 /* This is the case where the table has an auto-increment column */
2105- if (table->next_number_field && record == table->getInsertRecord()) {
2106+ if (getTable()->next_number_field && record == getTable()->getInsertRecord()) {
2107
2108 /* Reset the error code before calling
2109 innobase_get_auto_increment(). */
2110@@ -4108,8 +4108,7 @@
2111 /* Build the template used in converting quickly between
2112 the two database formats */
2113
2114- build_template(prebuilt, NULL, table,
2115- ROW_MYSQL_WHOLE_ROW);
2116+ build_template(prebuilt, NULL, getTable(), ROW_MYSQL_WHOLE_ROW);
2117 }
2118
2119 innodb_srv_conc_enter_innodb(prebuilt->trx);
2120@@ -4133,10 +4132,9 @@
2121 /* We need the upper limit of the col type to check for
2122 whether we update the table autoinc counter or not. */
2123 col_max_value = innobase_get_int_col_max_value(
2124- table->next_number_field);
2125-
2126+ getTable()->next_number_field);
2127 /* Get the value that MySQL attempted to store in the table.*/
2128- auto_inc = table->next_number_field->val_int();
2129+ auto_inc = getTable()->next_number_field->val_int();
2130
2131 switch (error) {
2132 case DB_DUPLICATE_KEY:
2133@@ -4386,7 +4384,7 @@
2134 /* Build an update vector from the modified fields in the rows
2135 (uses upd_buff of the handle) */
2136
2137- calc_row_difference(uvect, (unsigned char*) old_row, new_row, table,
2138+ calc_row_difference(uvect, (unsigned char*) old_row, new_row, getTable(),
2139 &upd_buff[0], (ulint)upd_and_key_val_buff_len,
2140 prebuilt, user_session);
2141
2142@@ -4395,17 +4393,17 @@
2143
2144 ut_a(prebuilt->template_type == ROW_MYSQL_WHOLE_ROW);
2145
2146- if (table->found_next_number_field)
2147+ if (getTable()->found_next_number_field)
2148 {
2149 uint64_t auto_inc;
2150 uint64_t col_max_value;
2151
2152- auto_inc = table->found_next_number_field->val_int();
2153+ auto_inc = getTable()->found_next_number_field->val_int();
2154
2155 /* We need the upper limit of the col type to check for
2156 whether we update the table autoinc counter or not. */
2157 col_max_value = innobase_get_int_col_max_value(
2158- table->found_next_number_field);
2159+ getTable()->found_next_number_field);
2160
2161 uint64_t current_autoinc;
2162 ulint autoinc_error= innobase_get_autoinc(&current_autoinc);
2163@@ -4442,8 +4440,8 @@
2164 value used in the INSERT statement.*/
2165
2166 if (error == DB_SUCCESS
2167- && table->next_number_field
2168- && new_row == table->getInsertRecord()
2169+ && getTable()->next_number_field
2170+ && new_row == getTable()->getInsertRecord()
2171 && session_sql_command(user_session) == SQLCOM_INSERT
2172 && (trx->duplicates & (TRX_DUP_IGNORE | TRX_DUP_REPLACE))
2173 == TRX_DUP_IGNORE) {
2174@@ -4451,12 +4449,12 @@
2175 uint64_t auto_inc;
2176 uint64_t col_max_value;
2177
2178- auto_inc = table->next_number_field->val_int();
2179+ auto_inc = getTable()->next_number_field->val_int();
2180
2181 /* We need the upper limit of the col type to check for
2182 whether we update the table autoinc counter or not. */
2183 col_max_value = innobase_get_int_col_max_value(
2184- table->next_number_field);
2185+ getTable()->next_number_field);
2186
2187 if (auto_inc <= col_max_value && auto_inc != 0) {
2188
2189@@ -4586,7 +4584,7 @@
2190 ha_innobase::try_semi_consistent_read(bool yes)
2191 /*===========================================*/
2192 {
2193- ut_a(prebuilt->trx == session_to_trx(table->in_use));
2194+ ut_a(prebuilt->trx == session_to_trx(getTable()->in_use));
2195
2196 /* Row read type is set to semi consistent read if this was
2197 requested by the MySQL and either innodb_locks_unsafe_for_binlog
2198@@ -4786,7 +4784,7 @@
2199 necessarily prebuilt->index, but can also be the clustered index */
2200
2201 if (prebuilt->sql_stat_start) {
2202- build_template(prebuilt, user_session, table,
2203+ build_template(prebuilt, user_session, getTable(),
2204 ROW_MYSQL_REC_FIELDS);
2205 }
2206
2207@@ -4841,21 +4839,21 @@
2208 switch (ret) {
2209 case DB_SUCCESS:
2210 error = 0;
2211- table->status = 0;
2212+ getTable()->status = 0;
2213 break;
2214 case DB_RECORD_NOT_FOUND:
2215 error = HA_ERR_KEY_NOT_FOUND;
2216- table->status = STATUS_NOT_FOUND;
2217+ getTable()->status = STATUS_NOT_FOUND;
2218 break;
2219 case DB_END_OF_INDEX:
2220 error = HA_ERR_KEY_NOT_FOUND;
2221- table->status = STATUS_NOT_FOUND;
2222+ getTable()->status = STATUS_NOT_FOUND;
2223 break;
2224 default:
2225 error = convert_error_code_to_mysql((int) ret,
2226 prebuilt->table->flags,
2227 user_session);
2228- table->status = STATUS_NOT_FOUND;
2229+ getTable()->status = STATUS_NOT_FOUND;
2230 break;
2231 }
2232
2233@@ -4897,10 +4895,10 @@
2234 ut_ad(user_session == table->in_use);
2235 ut_a(prebuilt->trx == session_to_trx(user_session));
2236
2237- if (keynr != MAX_KEY && table->getShare()->sizeKeys() > 0)
2238+ if (keynr != MAX_KEY && getTable()->getShare()->sizeKeys() > 0)
2239 {
2240 index = dict_table_get_index_on_name(prebuilt->table,
2241- table->getShare()->getTableProto()->indexes(keynr).name().c_str());
2242+ getTable()->getShare()->getTableProto()->indexes(keynr).name().c_str());
2243 } else {
2244 index = dict_table_get_first_index(prebuilt->table);
2245 }
2246@@ -4909,7 +4907,7 @@
2247 errmsg_printf(ERRMSG_LVL_ERROR,
2248 "Innodb could not find key n:o %u with name %s "
2249 "from dict cache for table %s",
2250- keynr, table->getShare()->getTableProto()->indexes(keynr).name().c_str(),
2251+ keynr, getTable()->getShare()->getTableProto()->indexes(keynr).name().c_str(),
2252 prebuilt->table->name);
2253 }
2254
2255@@ -4967,7 +4965,7 @@
2256 the flag ROW_MYSQL_WHOLE_ROW below, but that caused unnecessary
2257 copying. Starting from MySQL-4.1 we use a more efficient flag here. */
2258
2259- build_template(prebuilt, user_session, table, ROW_MYSQL_REC_FIELDS);
2260+ build_template(prebuilt, user_session, getTable(), ROW_MYSQL_REC_FIELDS);
2261
2262 return(0);
2263 }
2264@@ -5027,20 +5025,20 @@
2265 switch (ret) {
2266 case DB_SUCCESS:
2267 error = 0;
2268- table->status = 0;
2269+ getTable()->status = 0;
2270 break;
2271 case DB_RECORD_NOT_FOUND:
2272 error = HA_ERR_END_OF_FILE;
2273- table->status = STATUS_NOT_FOUND;
2274+ getTable()->status = STATUS_NOT_FOUND;
2275 break;
2276 case DB_END_OF_INDEX:
2277 error = HA_ERR_END_OF_FILE;
2278- table->status = STATUS_NOT_FOUND;
2279+ getTable()->status = STATUS_NOT_FOUND;
2280 break;
2281 default:
2282 error = convert_error_code_to_mysql(
2283 (int) ret, prebuilt->table->flags, user_session);
2284- table->status = STATUS_NOT_FOUND;
2285+ getTable()->status = STATUS_NOT_FOUND;
2286 break;
2287 }
2288
2289@@ -5235,7 +5233,7 @@
2290
2291 ha_statistic_increment(&system_status_var::ha_read_rnd_count);
2292
2293- ut_a(prebuilt->trx == session_to_trx(table->in_use));
2294+ ut_a(prebuilt->trx == session_to_trx(getTable()->in_use));
2295
2296 if (prebuilt->clust_index_was_generated) {
2297 /* No primary key was defined for the table and we
2298@@ -5281,7 +5279,7 @@
2299 {
2300 uint len;
2301
2302- ut_a(prebuilt->trx == session_to_trx(table->in_use));
2303+ ut_a(prebuilt->trx == session_to_trx(getTable()->in_use));
2304
2305 if (prebuilt->clust_index_was_generated) {
2306 /* No primary key was defined for the table and we
2307@@ -5950,7 +5948,7 @@
2308
2309 ut_a(prebuilt->trx);
2310 ut_a(prebuilt->trx->magic_n == TRX_MAGIC_N);
2311- ut_a(prebuilt->trx == session_to_trx(table->in_use));
2312+ ut_a(prebuilt->trx == session_to_trx(getTable()->in_use));
2313
2314 dict_table = prebuilt->table;
2315 trx = prebuilt->trx;
2316@@ -5979,7 +5977,7 @@
2317 /* Get the transaction associated with the current session, or create one
2318 if not yet created, and update prebuilt->trx */
2319
2320- update_session(table->in_use);
2321+ update_session(getTable()->in_use);
2322
2323 if (session_sql_command(user_session) != SQLCOM_TRUNCATE) {
2324 fallback:
2325@@ -6321,10 +6319,10 @@
2326 KeyInfo* key;
2327 dict_index_t* index;
2328 unsigned char* key_val_buff2 = (unsigned char*) malloc(
2329- table->getShare()->stored_rec_length
2330- + table->getShare()->max_key_length + 100);
2331- ulint buff2_len = table->getShare()->stored_rec_length
2332- + table->getShare()->max_key_length + 100;
2333+ getTable()->getShare()->stored_rec_length
2334+ + getTable()->getShare()->max_key_length + 100);
2335+ ulint buff2_len = getTable()->getShare()->stored_rec_length
2336+ + getTable()->getShare()->max_key_length + 100;
2337 dtuple_t* range_start;
2338 dtuple_t* range_end;
2339 ib_int64_t n_rows;
2340@@ -6332,7 +6330,7 @@
2341 ulint mode2;
2342 mem_heap_t* heap;
2343
2344- ut_a(prebuilt->trx == session_to_trx(table->in_use));
2345+ ut_a(prebuilt->trx == session_to_trx(getTable()->in_use));
2346
2347 prebuilt->trx->op_info = (char*)"estimating records in index range";
2348
2349@@ -6343,9 +6341,9 @@
2350
2351 active_index = keynr;
2352
2353- key = &table->key_info[active_index];
2354+ key = &getTable()->key_info[active_index];
2355
2356- index = dict_table_get_index_on_name(prebuilt->table, table->getShare()->getTableProto()->indexes(active_index).name().c_str());
2357+ index = dict_table_get_index_on_name(prebuilt->table, getTable()->getShare()->getTableProto()->indexes(active_index).name().c_str());
2358
2359 /* MySQL knows about this index and so we must be able to find it.*/
2360 ut_a(index);
2361@@ -6427,7 +6425,7 @@
2362 external_lock(). To be safe, update the session of the current table
2363 handle. */
2364
2365- update_session(table->in_use);
2366+ update_session(getTable()->in_use);
2367
2368 prebuilt->trx->op_info = (char*)
2369 "calculating upper bound for table rows";
2370@@ -6491,7 +6489,7 @@
2371 ha_rows total_rows;
2372 double time_for_scan;
2373
2374- if (index != table->getShare()->getPrimaryKey()) {
2375+ if (index != getTable()->getShare()->getPrimaryKey()) {
2376 /* Not clustered */
2377 return(Cursor::read_time(index, ranges, rows));
2378 }
2379@@ -6549,7 +6547,7 @@
2380 external_lock(). To be safe, update the session of the current table
2381 handle. */
2382
2383- update_session(table->in_use);
2384+ update_session(getTable()->in_use);
2385
2386 /* In case MySQL calls this in the middle of a SELECT query, release
2387 possible adaptive hash latch to avoid deadlocks of threads */
2388@@ -6657,7 +6655,7 @@
2389
2390 Session* session;
2391
2392- session= table->in_use;
2393+ session= getTable()->in_use;
2394 assert(session);
2395
2396 push_warning_printf(
2397@@ -6693,7 +6691,7 @@
2398 index = dict_table_get_next_index(index);
2399 }
2400
2401- for (i = 0; i < table->getShare()->sizeKeys(); i++) {
2402+ for (i = 0; i < getTable()->getShare()->sizeKeys(); i++) {
2403 if (index == NULL) {
2404 errmsg_printf(ERRMSG_LVL_ERROR, "Table %s contains fewer "
2405 "indexes inside InnoDB than "
2406@@ -6707,7 +6705,7 @@
2407 break;
2408 }
2409
2410- for (j = 0; j < table->key_info[i].key_parts; j++) {
2411+ for (j = 0; j < getTable()->key_info[i].key_parts; j++) {
2412
2413 if (j + 1 > index->n_uniq) {
2414 errmsg_printf(ERRMSG_LVL_ERROR,
2415@@ -6741,7 +6739,7 @@
2416 rec_per_key = 1;
2417 }
2418
2419- table->key_info[i].rec_per_key[j]=
2420+ getTable()->key_info[i].rec_per_key[j]=
2421 rec_per_key >= ~(ulong) 0 ? ~(ulong) 0 :
2422 (ulong) rec_per_key;
2423 }
2424@@ -6766,7 +6764,7 @@
2425 }
2426 }
2427
2428- if ((flag & HA_STATUS_AUTO) && table->found_next_number_field) {
2429+ if ((flag & HA_STATUS_AUTO) && getTable()->found_next_number_field) {
2430 stats.auto_increment_value = innobase_peek_autoinc();
2431 }
2432
2433@@ -6810,7 +6808,7 @@
2434 {
2435 ulint ret;
2436
2437- assert(session == table->in_use);
2438+ assert(session == getTable()->in_use);
2439 ut_a(prebuilt->trx);
2440 ut_a(prebuilt->trx->magic_n == TRX_MAGIC_N);
2441 ut_a(prebuilt->trx == session_to_trx(session));
2442@@ -6819,7 +6817,7 @@
2443 /* Build the template; we will use a dummy template
2444 in index scans done in checking */
2445
2446- build_template(prebuilt, NULL, table, ROW_MYSQL_WHOLE_ROW);
2447+ build_template(prebuilt, NULL, getTable(), ROW_MYSQL_WHOLE_ROW);
2448 }
2449
2450 ret = row_check_table_for_mysql(prebuilt);
2451@@ -6858,7 +6856,7 @@
2452 return((char*)comment); /* string too long */
2453 }
2454
2455- update_session(table->in_use);
2456+ update_session(getTable()->in_use);
2457
2458 prebuilt->trx->op_info = (char*)"returning table comment";
2459
2460@@ -6929,7 +6927,7 @@
2461 external_lock(). To be safe, update the session of the current table
2462 handle. */
2463
2464- update_session(table->in_use);
2465+ update_session(getTable()->in_use);
2466
2467 prebuilt->trx->op_info = (char*)"getting info on foreign keys";
2468
2469@@ -6978,7 +6976,7 @@
2470 dict_foreign_t* foreign;
2471
2472 ut_a(prebuilt != NULL);
2473- update_session(table->in_use);
2474+ update_session(getTable()->in_use);
2475 prebuilt->trx->op_info = (char*)"getting list of foreign keys";
2476 trx_search_latch_release_if_reserved(prebuilt->trx);
2477 mutex_enter(&(dict_sys->mutex));
2478@@ -7116,7 +7114,7 @@
2479 {
2480 bool can_switch;
2481
2482- ut_a(prebuilt->trx == session_to_trx(table->in_use));
2483+ ut_a(prebuilt->trx == session_to_trx(getTable()->in_use));
2484
2485 prebuilt->trx->op_info =
2486 "determining if there are foreign key constraints";
2487@@ -7204,16 +7202,16 @@
2488 either, because the calling threads may change.
2489 CAREFUL HERE, OR MEMORY CORRUPTION MAY OCCUR! */
2490 case HA_EXTRA_IGNORE_DUP_KEY:
2491- session_to_trx(table->in_use)->duplicates |= TRX_DUP_IGNORE;
2492+ session_to_trx(getTable()->in_use)->duplicates |= TRX_DUP_IGNORE;
2493 break;
2494 case HA_EXTRA_WRITE_CAN_REPLACE:
2495- session_to_trx(table->in_use)->duplicates |= TRX_DUP_REPLACE;
2496+ session_to_trx(getTable()->in_use)->duplicates |= TRX_DUP_REPLACE;
2497 break;
2498 case HA_EXTRA_WRITE_CANNOT_REPLACE:
2499- session_to_trx(table->in_use)->duplicates &= ~TRX_DUP_REPLACE;
2500+ session_to_trx(getTable()->in_use)->duplicates &= ~TRX_DUP_REPLACE;
2501 break;
2502 case HA_EXTRA_NO_IGNORE_DUP_KEY:
2503- session_to_trx(table->in_use)->duplicates &=
2504+ session_to_trx(getTable()->in_use)->duplicates &=
2505 ~(TRX_DUP_IGNORE | TRX_DUP_REPLACE);
2506 break;
2507 default:/* Do nothing */
2508@@ -7854,7 +7852,7 @@
2509 uint64_t autoinc = 0;
2510
2511 /* Prepare prebuilt->trx in the table handle */
2512- update_session(table->in_use);
2513+ update_session(getTable()->in_use);
2514
2515 error = innobase_get_autoinc(&autoinc);
2516
2517@@ -7908,7 +7906,7 @@
2518 /* We need the upper limit of the col type to check for
2519 whether we update the table autoinc counter or not. */
2520 col_max_value = innobase_get_int_col_max_value(
2521- table->next_number_field);
2522+ getTable()->next_number_field);
2523
2524 current = *first_value > col_max_value ? autoinc : *first_value;
2525 need = *nb_reserved_values * increment;
2526@@ -7951,7 +7949,7 @@
2527 {
2528 int error;
2529
2530- update_session(table->in_use);
2531+ update_session(getTable()->in_use);
2532
2533 error = row_lock_table_autoinc_for_mysql(prebuilt);
2534
2535@@ -8017,10 +8015,10 @@
2536 /* Do a type-aware comparison of primary key fields. PK fields
2537 are always NOT NULL, so no checks for NULL are performed. */
2538
2539- key_part = table->key_info[table->getShare()->getPrimaryKey()].key_part;
2540+ key_part = getTable()->key_info[getTable()->getShare()->getPrimaryKey()].key_part;
2541
2542 key_part_end = key_part
2543- + table->key_info[table->getShare()->getPrimaryKey()].key_parts;
2544+ + getTable()->key_info[getTable()->getShare()->getPrimaryKey()].key_parts;
2545
2546 for (; key_part != key_part_end; ++key_part) {
2547 field = key_part->field;
2548
2549=== modified file 'plugin/innobase/handler/ha_innodb.h'
2550--- plugin/innobase/handler/ha_innodb.h 2010-10-12 05:42:04 +0000
2551+++ plugin/innobase/handler/ha_innodb.h 2010-10-23 00:16:13 +0000
2552@@ -107,7 +107,7 @@
2553 /* Init values for the class: */
2554 public:
2555 UNIV_INTERN ha_innobase(plugin::StorageEngine &engine,
2556- TableShare &table_arg);
2557+ Table &table_arg);
2558 UNIV_INTERN ~ha_innobase();
2559 /**
2560 * Returns the plugin::TransactionStorageEngine pointer
2561@@ -119,7 +119,7 @@
2562 */
2563 UNIV_INTERN plugin::TransactionalStorageEngine *getTransactionalEngine()
2564 {
2565- return static_cast<plugin::TransactionalStorageEngine *>(engine);
2566+ return static_cast<plugin::TransactionalStorageEngine *>(getEngine());
2567 }
2568
2569 UNIV_INTERN const char* index_type(uint key_number);
2570
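The memory and MyISAM hunks that follow repeat one mechanical change: every bare table-> member access on the cursor becomes a call through getTable(), since the cursor no longer carries its own table pointer. The row-status idiom ends up looking like the sketch below; the method name, example_scan(), and file are invented stand-ins for an engine-specific scan call (heap_scan, mi_scan, ...) and its handle, not real Drizzle API.

  // illustrative wrapper, not part of the diff
  int ExampleCursor::scanNext(unsigned char *buf)
  {
    ha_statistic_increment(&system_status_var::ha_read_rnd_next_count);

    int error= example_scan(file, buf);               // stand-in for heap_scan()/mi_scan()
    getTable()->status= error ? STATUS_NOT_FOUND : 0; // status is set on the Table through the accessor

    return error;
  }
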
2571=== modified file 'plugin/memory/ha_heap.cc'
2572--- plugin/memory/ha_heap.cc 2010-10-02 21:15:42 +0000
2573+++ plugin/memory/ha_heap.cc 2010-10-23 00:16:13 +0000
2574@@ -60,7 +60,7 @@
2575 hp_panic(HA_PANIC_CLOSE);
2576 }
2577
2578- virtual Cursor *create(TableShare &table)
2579+ virtual Cursor *create(Table &table)
2580 {
2581 return new ha_heap(*this, table);
2582 }
2583@@ -158,7 +158,7 @@
2584 *****************************************************************************/
2585
2586 ha_heap::ha_heap(plugin::StorageEngine &engine_arg,
2587- TableShare &table_arg)
2588+ Table &table_arg)
2589 :Cursor(engine_arg, table_arg), file(0), records_changed(0), key_stat_version(0),
2590 internal_table(0)
2591 {}
2592@@ -185,9 +185,9 @@
2593 HP_SHARE *internal_share= NULL;
2594 message::Table create_proto;
2595
2596- if (not heap_storage_engine->heap_create_table(table->in_use,
2597+ if (not heap_storage_engine->heap_create_table(getTable()->in_use,
2598 identifier.getPath().c_str(),
2599- table,
2600+ getTable(),
2601 internal_table,
2602 create_proto,
2603 &internal_share))
2604@@ -240,12 +240,12 @@
2605
2606 Cursor *ha_heap::clone(memory::Root *)
2607 {
2608- Cursor *new_handler= table->getMutableShare()->db_type()->getCursor(*(table->getMutableShare()));
2609- TableIdentifier identifier(table->getShare()->getSchemaName(),
2610- table->getShare()->getTableName(),
2611- table->getShare()->getPath());
2612+ Cursor *new_handler= getTable()->getMutableShare()->db_type()->getCursor(*getTable());
2613+ TableIdentifier identifier(getTable()->getShare()->getSchemaName(),
2614+ getTable()->getShare()->getTableName(),
2615+ getTable()->getShare()->getPath());
2616
2617- if (new_handler && !new_handler->ha_open(identifier, table, table->db_stat,
2618+ if (new_handler && !new_handler->ha_open(identifier, getTable()->db_stat,
2619 HA_OPEN_IGNORE_IF_LOCKED))
2620 return new_handler;
2621 return NULL;
2622@@ -281,9 +281,9 @@
2623
2624 void ha_heap::update_key_stats()
2625 {
2626- for (uint32_t i= 0; i < table->getShare()->sizeKeys(); i++)
2627+ for (uint32_t i= 0; i < getTable()->getShare()->sizeKeys(); i++)
2628 {
2629- KeyInfo *key= &table->key_info[i];
2630+ KeyInfo *key= &getTable()->key_info[i];
2631
2632 if (!key->rec_per_key)
2633 continue;
2634@@ -310,7 +310,7 @@
2635 int ha_heap::doInsertRecord(unsigned char * buf)
2636 {
2637 int res;
2638- if (table->next_number_field && buf == table->getInsertRecord())
2639+ if (getTable()->next_number_field && buf == getTable()->getInsertRecord())
2640 {
2641 if ((res= update_auto_increment()))
2642 return res;
2643@@ -350,7 +350,7 @@
2644 int res;
2645
2646 res= heap_delete(file,buf);
2647- if (!res && table->getShare()->getType() == message::Table::STANDARD &&
2648+ if (!res && getTable()->getShare()->getType() == message::Table::STANDARD &&
2649 ++records_changed*MEMORY_STATS_UPDATE_THRESHOLD > file->getShare()->records)
2650 {
2651 /*
2652@@ -369,7 +369,7 @@
2653 assert(inited==INDEX);
2654 ha_statistic_increment(&system_status_var::ha_read_key_count);
2655 int error = heap_rkey(file,buf,active_index, key, keypart_map, find_flag);
2656- table->status = error ? STATUS_NOT_FOUND : 0;
2657+ getTable()->status = error ? STATUS_NOT_FOUND : 0;
2658 return error;
2659 }
2660
2661@@ -380,7 +380,7 @@
2662 ha_statistic_increment(&system_status_var::ha_read_key_count);
2663 int error= heap_rkey(file, buf, active_index, key, keypart_map,
2664 HA_READ_PREFIX_LAST);
2665- table->status= error ? STATUS_NOT_FOUND : 0;
2666+ getTable()->status= error ? STATUS_NOT_FOUND : 0;
2667 return error;
2668 }
2669
2670@@ -390,7 +390,7 @@
2671 {
2672 ha_statistic_increment(&system_status_var::ha_read_key_count);
2673 int error = heap_rkey(file, buf, index, key, keypart_map, find_flag);
2674- table->status = error ? STATUS_NOT_FOUND : 0;
2675+ getTable()->status = error ? STATUS_NOT_FOUND : 0;
2676 return error;
2677 }
2678
2679@@ -399,7 +399,7 @@
2680 assert(inited==INDEX);
2681 ha_statistic_increment(&system_status_var::ha_read_next_count);
2682 int error=heap_rnext(file,buf);
2683- table->status=error ? STATUS_NOT_FOUND: 0;
2684+ getTable()->status=error ? STATUS_NOT_FOUND: 0;
2685 return error;
2686 }
2687
2688@@ -408,7 +408,7 @@
2689 assert(inited==INDEX);
2690 ha_statistic_increment(&system_status_var::ha_read_prev_count);
2691 int error=heap_rprev(file,buf);
2692- table->status=error ? STATUS_NOT_FOUND: 0;
2693+ getTable()->status=error ? STATUS_NOT_FOUND: 0;
2694 return error;
2695 }
2696
2697@@ -417,7 +417,7 @@
2698 assert(inited==INDEX);
2699 ha_statistic_increment(&system_status_var::ha_read_first_count);
2700 int error=heap_rfirst(file, buf, active_index);
2701- table->status=error ? STATUS_NOT_FOUND: 0;
2702+ getTable()->status=error ? STATUS_NOT_FOUND: 0;
2703 return error;
2704 }
2705
2706@@ -426,7 +426,7 @@
2707 assert(inited==INDEX);
2708 ha_statistic_increment(&system_status_var::ha_read_last_count);
2709 int error=heap_rlast(file, buf, active_index);
2710- table->status=error ? STATUS_NOT_FOUND: 0;
2711+ getTable()->status=error ? STATUS_NOT_FOUND: 0;
2712 return error;
2713 }
2714
2715@@ -439,7 +439,7 @@
2716 {
2717 ha_statistic_increment(&system_status_var::ha_read_rnd_next_count);
2718 int error=heap_scan(file, buf);
2719- table->status=error ? STATUS_NOT_FOUND: 0;
2720+ getTable()->status=error ? STATUS_NOT_FOUND: 0;
2721 return error;
2722 }
2723
2724@@ -450,7 +450,7 @@
2725 ha_statistic_increment(&system_status_var::ha_read_rnd_count);
2726 memcpy(&heap_position, pos, sizeof(HEAP_PTR));
2727 error=heap_rrnd(file, buf, heap_position);
2728- table->status=error ? STATUS_NOT_FOUND: 0;
2729+ getTable()->status=error ? STATUS_NOT_FOUND: 0;
2730 return error;
2731 }
2732
2733@@ -499,7 +499,7 @@
2734 int ha_heap::delete_all_rows()
2735 {
2736 heap_clear(file);
2737- if (table->getShare()->getType() == message::Table::STANDARD)
2738+ if (getTable()->getShare()->getType() == message::Table::STANDARD)
2739 {
2740 /*
2741 We can perform this safely since only one writer at the time is
2742@@ -634,7 +634,7 @@
2743 ha_rows ha_heap::records_in_range(uint32_t inx, key_range *min_key,
2744 key_range *max_key)
2745 {
2746- KeyInfo *key= &table->key_info[inx];
2747+ KeyInfo *key= &getTable()->key_info[inx];
2748
2749 if (!min_key || !max_key ||
2750 min_key->length != max_key->length ||
2751
2752=== modified file 'plugin/memory/ha_heap.h'
2753--- plugin/memory/ha_heap.h 2010-10-02 21:15:42 +0000
2754+++ plugin/memory/ha_heap.h 2010-10-23 00:16:13 +0000
2755@@ -35,7 +35,7 @@
2756 uint32_t key_stat_version;
2757 bool internal_table;
2758 public:
2759- ha_heap(drizzled::plugin::StorageEngine &engine, drizzled::TableShare &table);
2760+ ha_heap(drizzled::plugin::StorageEngine &engine, drizzled::Table &table);
2761 ~ha_heap() {}
2762 Cursor *clone(drizzled::memory::Root *mem_root);
2763
2764
2765=== modified file 'plugin/myisam/ha_myisam.cc'
2766--- plugin/myisam/ha_myisam.cc 2010-10-02 21:15:42 +0000
2767+++ plugin/myisam/ha_myisam.cc 2010-10-23 00:16:13 +0000
2768@@ -104,7 +104,7 @@
2769 mi_panic(HA_PANIC_CLOSE);
2770 }
2771
2772- virtual Cursor *create(TableShare &table)
2773+ virtual Cursor *create(Table &table)
2774 {
2775 return new ha_myisam(*this, table);
2776 }
2777@@ -544,7 +544,7 @@
2778 }
2779
2780 ha_myisam::ha_myisam(plugin::StorageEngine &engine_arg,
2781- TableShare &table_arg)
2782+ Table &table_arg)
2783 : Cursor(engine_arg, table_arg),
2784 file(0),
2785 can_enable_indexes(true),
2786@@ -590,13 +590,13 @@
2787 if (!(file= mi_open(identifier, mode, test_if_locked)))
2788 return (errno ? errno : -1);
2789
2790- if (!table->getShare()->getType()) /* No need to perform a check for tmp table */
2791+ if (!getTable()->getShare()->getType()) /* No need to perform a check for tmp table */
2792 {
2793- if ((errno= table2myisam(table, &keyinfo, &recinfo, &recs)))
2794+ if ((errno= table2myisam(getTable(), &keyinfo, &recinfo, &recs)))
2795 {
2796 goto err;
2797 }
2798- if (check_definition(keyinfo, recinfo, table->getShare()->sizeKeys(), recs,
2799+ if (check_definition(keyinfo, recinfo, getTable()->getShare()->sizeKeys(), recs,
2800 file->s->keyinfo, file->s->rec,
2801 file->s->base.keys, file->s->base.fields, true))
2802 {
2803@@ -612,17 +612,17 @@
2804 info(HA_STATUS_NO_LOCK | HA_STATUS_VARIABLE | HA_STATUS_CONST);
2805 if (!(test_if_locked & HA_OPEN_WAIT_IF_LOCKED))
2806 mi_extra(file, HA_EXTRA_WAIT_LOCK, 0);
2807- if (!table->getShare()->db_record_offset)
2808+ if (!getTable()->getShare()->db_record_offset)
2809 is_ordered= false;
2810
2811
2812 keys_with_parts.reset();
2813- for (i= 0; i < table->getShare()->sizeKeys(); i++)
2814+ for (i= 0; i < getTable()->getShare()->sizeKeys(); i++)
2815 {
2816- table->key_info[i].block_size= file->s->keyinfo[i].block_length;
2817+ getTable()->key_info[i].block_size= file->s->keyinfo[i].block_length;
2818
2819- KeyPartInfo *kp= table->key_info[i].key_part;
2820- KeyPartInfo *kp_end= kp + table->key_info[i].key_parts;
2821+ KeyPartInfo *kp= getTable()->key_info[i].key_part;
2822+ KeyPartInfo *kp_end= kp + getTable()->key_info[i].key_parts;
2823 for (; kp != kp_end; kp++)
2824 {
2825 if (!kp->field->part_of_key.test(i))
2826@@ -659,7 +659,7 @@
2827 If we have an auto_increment column and we are writing a changed row
2828 or a new row, then update the auto_increment value in the record.
2829 */
2830- if (table->next_number_field && buf == table->getInsertRecord())
2831+ if (getTable()->next_number_field && buf == getTable()->getInsertRecord())
2832 {
2833 int error;
2834 if ((error= update_auto_increment()))
2835@@ -691,12 +691,12 @@
2836 {
2837 errmsg_printf(ERRMSG_LVL_INFO, "Retrying repair of: '%s' failed. "
2838 "Please try REPAIR EXTENDED or myisamchk",
2839- table->getShare()->getPath());
2840+ getTable()->getShare()->getPath());
2841 return(HA_ADMIN_FAILED);
2842 }
2843
2844- param.db_name= table->getShare()->getSchemaName();
2845- param.table_name= table->getAlias();
2846+ param.db_name= getTable()->getShare()->getSchemaName();
2847+ param.table_name= getTable()->getAlias();
2848 param.tmpfile_createflag = O_RDWR | O_TRUNC;
2849 param.using_global_keycache = 1;
2850 param.session= session;
2851@@ -705,7 +705,7 @@
2852 strcpy(fixed_name,file->filename);
2853
2854 // Don't lock tables if we have used LOCK Table
2855- if (mi_lock_database(file, table->getShare()->getType() ? F_EXTRA_LCK : F_WRLCK))
2856+ if (mi_lock_database(file, getTable()->getShare()->getType() ? F_EXTRA_LCK : F_WRLCK))
2857 {
2858 mi_check_print_error(&param,ER(ER_CANT_LOCK),errno);
2859 return(HA_ADMIN_FAILED);
2860@@ -902,7 +902,7 @@
2861 }
2862 else if (mode == HA_KEY_SWITCH_NONUNIQ_SAVE)
2863 {
2864- Session *session= table->in_use;
2865+ Session *session= getTable()->in_use;
2866 MI_CHECK param;
2867 const char *save_proc_info= session->get_proc_info();
2868 session->set_proc_info("Creating index");
2869@@ -978,7 +978,7 @@
2870
2871 void ha_myisam::start_bulk_insert(ha_rows rows)
2872 {
2873- Session *session= table->in_use;
2874+ Session *session= getTable()->in_use;
2875 ulong size= session->variables.read_buff_size;
2876
2877 /* don't enable row cache if too few rows */
2878@@ -1063,7 +1063,7 @@
2879 assert(inited==INDEX);
2880 ha_statistic_increment(&system_status_var::ha_read_key_count);
2881 int error=mi_rkey(file, buf, active_index, key, keypart_map, find_flag);
2882- table->status=error ? STATUS_NOT_FOUND: 0;
2883+ getTable()->status=error ? STATUS_NOT_FOUND: 0;
2884 return error;
2885 }
2886
2887@@ -1073,7 +1073,7 @@
2888 {
2889 ha_statistic_increment(&system_status_var::ha_read_key_count);
2890 int error=mi_rkey(file, buf, index, key, keypart_map, find_flag);
2891- table->status=error ? STATUS_NOT_FOUND: 0;
2892+ getTable()->status=error ? STATUS_NOT_FOUND: 0;
2893 return error;
2894 }
2895
2896@@ -1084,7 +1084,7 @@
2897 ha_statistic_increment(&system_status_var::ha_read_key_count);
2898 int error=mi_rkey(file, buf, active_index, key, keypart_map,
2899 HA_READ_PREFIX_LAST);
2900- table->status=error ? STATUS_NOT_FOUND: 0;
2901+ getTable()->status=error ? STATUS_NOT_FOUND: 0;
2902 return(error);
2903 }
2904
2905@@ -1093,7 +1093,7 @@
2906 assert(inited==INDEX);
2907 ha_statistic_increment(&system_status_var::ha_read_next_count);
2908 int error=mi_rnext(file,buf,active_index);
2909- table->status=error ? STATUS_NOT_FOUND: 0;
2910+ getTable()->status=error ? STATUS_NOT_FOUND: 0;
2911 return error;
2912 }
2913
2914@@ -1102,7 +1102,7 @@
2915 assert(inited==INDEX);
2916 ha_statistic_increment(&system_status_var::ha_read_prev_count);
2917 int error=mi_rprev(file,buf, active_index);
2918- table->status=error ? STATUS_NOT_FOUND: 0;
2919+ getTable()->status=error ? STATUS_NOT_FOUND: 0;
2920 return error;
2921 }
2922
2923@@ -1111,7 +1111,7 @@
2924 assert(inited==INDEX);
2925 ha_statistic_increment(&system_status_var::ha_read_first_count);
2926 int error=mi_rfirst(file, buf, active_index);
2927- table->status=error ? STATUS_NOT_FOUND: 0;
2928+ getTable()->status=error ? STATUS_NOT_FOUND: 0;
2929 return error;
2930 }
2931
2932@@ -1120,7 +1120,7 @@
2933 assert(inited==INDEX);
2934 ha_statistic_increment(&system_status_var::ha_read_last_count);
2935 int error=mi_rlast(file, buf, active_index);
2936- table->status=error ? STATUS_NOT_FOUND: 0;
2937+ getTable()->status=error ? STATUS_NOT_FOUND: 0;
2938 return error;
2939 }
2940
2941@@ -1135,7 +1135,7 @@
2942 {
2943 error= mi_rnext_same(file,buf);
2944 } while (error == HA_ERR_RECORD_DELETED);
2945- table->status=error ? STATUS_NOT_FOUND: 0;
2946+ getTable()->status=error ? STATUS_NOT_FOUND: 0;
2947 return error;
2948 }
2949
2950@@ -1176,7 +1176,7 @@
2951 {
2952 ha_statistic_increment(&system_status_var::ha_read_rnd_next_count);
2953 int error=mi_scan(file, buf);
2954- table->status=error ? STATUS_NOT_FOUND: 0;
2955+ getTable()->status=error ? STATUS_NOT_FOUND: 0;
2956 return error;
2957 }
2958
2959@@ -1184,7 +1184,7 @@
2960 {
2961 ha_statistic_increment(&system_status_var::ha_read_rnd_count);
2962 int error=mi_rrnd(file, buf, internal::my_get_ptr(pos,ref_length));
2963- table->status=error ? STATUS_NOT_FOUND: 0;
2964+ getTable()->status=error ? STATUS_NOT_FOUND: 0;
2965 return error;
2966 }
2967
2968@@ -1213,7 +1213,7 @@
2969 }
2970 if (flag & HA_STATUS_CONST)
2971 {
2972- TableShare *share= table->getMutableShare();
2973+ TableShare *share= getTable()->getMutableShare();
2974 stats.max_data_file_length= misam_info.max_data_file_length;
2975 stats.max_index_file_length= misam_info.max_index_file_length;
2976 stats.create_time= misam_info.create_time;
2977@@ -1270,9 +1270,9 @@
2978 share->keys_for_keyread&= share->keys_in_use;
2979 share->db_record_offset= misam_info.record_offset;
2980 if (share->key_parts)
2981- memcpy(table->key_info[0].rec_per_key,
2982+ memcpy(getTable()->key_info[0].rec_per_key,
2983 misam_info.rec_per_key,
2984- sizeof(table->key_info[0].rec_per_key)*share->key_parts);
2985+ sizeof(getTable()->key_info[0].rec_per_key)*share->key_parts);
2986 assert(share->getType() != message::Table::STANDARD);
2987
2988 /*
2989@@ -1337,7 +1337,7 @@
2990 int ha_myisam::external_lock(Session *session, int lock_type)
2991 {
2992 file->in_use= session;
2993- return mi_lock_database(file, !table->getShare()->getType() ?
2994+ return mi_lock_database(file, !getTable()->getShare()->getType() ?
2995 lock_type : ((lock_type == F_UNLCK) ?
2996 F_UNLCK : F_EXTRA_LCK));
2997 }
2998@@ -1408,7 +1408,7 @@
2999 int error;
3000 unsigned char key[MI_MAX_KEY_LENGTH];
3001
3002- if (!table->getShare()->next_number_key_offset)
3003+ if (!getTable()->getShare()->next_number_key_offset)
3004 { // Autoincrement at key-start
3005 ha_myisam::info(HA_STATUS_AUTO);
3006 *first_value= stats.auto_increment_value;
3007@@ -1418,22 +1418,22 @@
3008 }
3009
3010 /* it's safe to call the following if bulk_insert isn't on */
3011- mi_flush_bulk_insert(file, table->getShare()->next_number_index);
3012+ mi_flush_bulk_insert(file, getTable()->getShare()->next_number_index);
3013
3014 (void) extra(HA_EXTRA_KEYREAD);
3015- key_copy(key, table->getInsertRecord(),
3016- &table->key_info[table->getShare()->next_number_index],
3017- table->getShare()->next_number_key_offset);
3018- error= mi_rkey(file, table->getUpdateRecord(), (int) table->getShare()->next_number_index,
3019- key, make_prev_keypart_map(table->getShare()->next_number_keypart),
3020+ key_copy(key, getTable()->getInsertRecord(),
3021+ &getTable()->key_info[getTable()->getShare()->next_number_index],
3022+ getTable()->getShare()->next_number_key_offset);
3023+ error= mi_rkey(file, getTable()->getUpdateRecord(), (int) getTable()->getShare()->next_number_index,
3024+ key, make_prev_keypart_map(getTable()->getShare()->next_number_keypart),
3025 HA_READ_PREFIX_LAST);
3026 if (error)
3027 nr= 1;
3028 else
3029 {
3030 /* Get data from getUpdateRecord() */
3031- nr= ((uint64_t) table->next_number_field->
3032- val_int_offset(table->getShare()->rec_buff_length)+1);
3033+ nr= ((uint64_t) getTable()->next_number_field->
3034+ val_int_offset(getTable()->getShare()->rec_buff_length)+1);
3035 }
3036 extra(HA_EXTRA_NO_KEYREAD);
3037 *first_value= nr;
3038
3039=== modified file 'plugin/myisam/ha_myisam.h'
3040--- plugin/myisam/ha_myisam.h 2010-10-02 21:15:42 +0000
3041+++ plugin/myisam/ha_myisam.h 2010-10-23 00:16:13 +0000
3042@@ -34,7 +34,7 @@
3043
3044 public:
3045 ha_myisam(drizzled::plugin::StorageEngine &engine,
3046- drizzled::TableShare &table_arg);
3047+ drizzled::Table &table_arg);
3048 ~ha_myisam() {}
3049 Cursor *clone(drizzled::memory::Root *mem_root);
3050 const char *index_type(uint32_t key_number);
3051
3052=== modified file 'plugin/pbms/src/ha_pbms.cc'
3053--- plugin/pbms/src/ha_pbms.cc 2010-10-18 21:21:20 +0000
3054+++ plugin/pbms/src/ha_pbms.cc 2010-10-23 00:16:13 +0000
3055@@ -114,7 +114,7 @@
3056 int doStartTransaction(Session *session, start_transaction_option_t options);
3057 int doCommit(Session *, bool);
3058 int doRollback(Session *, bool);
3059- Cursor *create(TableShare& table);
3060+ Cursor *create(Table& table);
3061 bool doDropSchema(const drizzled::SchemaIdentifier&);
3062
3063 /*
3064@@ -257,7 +257,7 @@
3065
3066
3067 #ifdef DRIZZLED
3068-Cursor *PBMSStorageEngine::create(TableShare& table)
3069+Cursor *PBMSStorageEngine::create(Table& table)
3070 {
3071 PBMSStorageEngine * const hton = this;
3072 return new ha_pbms(hton, table);
3073@@ -655,7 +655,7 @@
3074 }
3075
3076 #ifdef DRIZZLED
3077-ha_pbms::ha_pbms(handlerton *hton, TableShare& table_arg) : handler(*hton, table_arg),
3078+ha_pbms::ha_pbms(handlerton *hton, Table& table_arg) : handler(*hton, table_arg),
3079 #else
3080 ha_pbms::ha_pbms(handlerton *hton, TABLE_SHARE *table_arg) : handler(hton, table_arg),
3081 #endif
3082@@ -696,7 +696,7 @@
3083
3084 inner_();
3085 try_(a) {
3086- ha_open_tab = MSSystemTableShare::openSystemTable(table_path, table);
3087+ ha_open_tab = MSSystemTableShare::openSystemTable(table_path, getTable());
3088 #ifdef DRIZZLED
3089 ha_lock.init(&ha_open_tab->myShare->myThrLock);
3090 #else
3091
3092=== modified file 'plugin/pbms/src/ha_pbms.h'
3093--- plugin/pbms/src/ha_pbms.h 2010-10-02 21:15:42 +0000
3094+++ plugin/pbms/src/ha_pbms.h 2010-10-23 00:16:13 +0000
3095@@ -66,7 +66,7 @@
3096
3097 public:
3098 #ifdef DRIZZLED
3099- ha_pbms(handlerton *hton, TableShare& table_arg);
3100+ ha_pbms(handlerton *hton, Table& table_arg);
3101 #else
3102 ha_pbms(handlerton *hton, TABLE_SHARE *table_arg);
3103 #endif
3104
3105=== modified file 'plugin/pbxt/src/ha_pbxt.cc'
3106--- plugin/pbxt/src/ha_pbxt.cc 2010-10-09 10:57:54 +0000
3107+++ plugin/pbxt/src/ha_pbxt.cc 2010-10-23 00:16:13 +0000
3108@@ -1482,7 +1482,7 @@
3109 return 0;
3110 }
3111
3112-Cursor *PBXTStorageEngine::create(TableShare& table)
3113+Cursor *PBXTStorageEngine::create(Table& table)
3114 {
3115 return new ha_pbxt(*this, table);
3116 }
3117@@ -2020,7 +2020,7 @@
3118 *
3119 */
3120
3121-ha_pbxt::ha_pbxt(plugin::StorageEngine &engine_arg, TableShare &table_arg) : Cursor(engine_arg, table_arg)
3122+ha_pbxt::ha_pbxt(plugin::StorageEngine &engine_arg, Table &table_arg) : Cursor(engine_arg, table_arg)
3123 {
3124 pb_share = NULL;
3125 pb_open_tab = NULL;
3126@@ -2351,9 +2351,9 @@
3127 return;
3128
3129 xt_spinlock_lock(&tab->tab_ainc_lock);
3130- if (table->found_next_number_field && !tab->tab_auto_inc) {
3131- Field *tmp_fie = table->next_number_field;
3132- THD *tmp_thd = table->in_use;
3133+ if (getTable()->found_next_number_field && !tab->tab_auto_inc) {
3134+ Field *tmp_fie = getTable()->next_number_field;
3135+ THD *tmp_thd = getTable()->in_use;
3136 xtBool xn_started = FALSE;
3137 XTThreadPtr self = pb_open_tab->ot_thread;
3138
3139@@ -2381,19 +2381,19 @@
3140 }
3141
3142 /* Setup the conditions for the next call! */
3143- table->in_use = current_thd;
3144- table->next_number_field = table->found_next_number_field;
3145+ getTable()->in_use = current_thd;
3146+ getTable()->next_number_field = getTable()->found_next_number_field;
3147
3148 extra(HA_EXTRA_KEYREAD);
3149- table->mark_columns_used_by_index_no_reset(table->getShare()->next_number_index, *table->read_set);
3150+ getTable()->mark_columns_used_by_index_no_reset(getTable()->getShare()->next_number_index, *getTable()->read_set);
3151 column_bitmaps_signal();
3152- doStartIndexScan(table->getShare()->next_number_index, 0);
3153- if (!table->getShare()->next_number_key_offset) {
3154+ doStartIndexScan(getTable()->getShare()->next_number_index, 0);
3155+ if (!getTable()->getShare()->next_number_key_offset) {
3156 // Autoincrement at key-start
3157- err = index_last(table->getUpdateRecord());
3158- if (!err && !table->next_number_field->is_null(table->getShare()->rec_buff_length)) {
3159+ err = index_last(getTable()->getUpdateRecord());
3160+ if (!err && !getTable()->next_number_field->is_null(getTable()->getShare()->rec_buff_length)) {
3161 /* {PRE-INC} */
3162- nr = (xtWord8) table->next_number_field->val_int_offset(table->getShare()->rec_buff_length);
3163+ nr = (xtWord8) getTable()->next_number_field->val_int_offset(getTable()->getShare()->rec_buff_length);
3164 }
3165 }
3166 else {
3167@@ -2403,13 +2403,13 @@
3168 */
3169 xtWord8 val;
3170
3171- err = index_first(table->getUpdateRecord());
3172+ err = index_first(getTable()->getUpdateRecord());
3173 while (!err) {
3174 /* {PRE-INC} */
3175- val = (xtWord8) table->next_number_field->val_int_offset(table->getShare()->rec_buff_length);
3176+ val = (xtWord8) getTable()->next_number_field->val_int_offset(getTable()->getShare()->rec_buff_length);
3177 if (val > nr)
3178 nr = val;
3179- err = index_next(table->getUpdateRecord());
3180+ err = index_next(getTable()->getUpdateRecord());
3181 }
3182 }
3183
3184@@ -2444,8 +2444,8 @@
3185 tab->tab_auto_inc = min_auto_inc-1;
3186
3187 /* Restore the changed values: */
3188- table->next_number_field = tmp_fie;
3189- table->in_use = tmp_thd;
3190+ getTable()->next_number_field = tmp_fie;
3191+ getTable()->in_use = tmp_thd;
3192
3193 if (xn_started) {
3194 XT_PRINT0(self, "xt_xn_commit in init_auto_increment\n");
3195@@ -2480,7 +2480,7 @@
3196 nr += increment - ((nr - offset) % increment);
3197 else
3198 nr += increment;
3199- if (table->next_number_field->cmp((const unsigned char *)&nr_less_inc, (const unsigned char *)&nr) < 0)
3200+ if (getTable()->next_number_field->cmp((const unsigned char *)&nr_less_inc, (const unsigned char *)&nr) < 0)
3201 tab->tab_auto_inc = (xtWord8) (nr);
3202 else
3203 nr = ~0; /* indicate error to the caller */
3204@@ -2610,14 +2610,14 @@
3205 pb_import_row_count++;
3206 }
3207
3208- if (table->next_number_field && buf == table->getInsertRecord()) {
3209+ if (getTable()->next_number_field && buf == getTable()->getInsertRecord()) {
3210 int update_err = update_auto_increment();
3211 if (update_err) {
3212 ha_log_pbxt_thread_error_for_mysql(pb_ignore_dup_key);
3213 err = update_err;
3214 goto done;
3215 }
3216- ha_set_auto_increment(pb_open_tab, table->next_number_field);
3217+ ha_set_auto_increment(pb_open_tab, getTable()->next_number_field);
3218 }
3219
3220 if (!xt_tab_new_record(pb_open_tab, (xtWord1 *) buf)) {
3221@@ -2735,12 +2735,12 @@
3222 * update t1 set a=2 where a=1;
3223 * insert into t1 (val) values (1);
3224 */
3225- if (table->found_next_number_field && new_data == table->getInsertRecord()) {
3226+ if (getTable()->found_next_number_field && new_data == getTable()->getInsertRecord()) {
3227 MX_LONGLONG_T nr;
3228- const boost::dynamic_bitset<>& old_bitmap= table->use_all_columns(*table->read_set);
3229- nr = table->found_next_number_field->val_int();
3230- ha_set_auto_increment(pb_open_tab, table->found_next_number_field);
3231- table->restore_column_map(old_bitmap);
3232+ const boost::dynamic_bitset<>& old_bitmap= getTable()->use_all_columns(*getTable()->read_set);
3233+ nr = getTable()->found_next_number_field->val_int();
3234+ ha_set_auto_increment(pb_open_tab, getTable()->found_next_number_field);
3235+ getTable()->restore_column_map(old_bitmap);
3236 }
3237
3238 if (!xt_tab_update_record(pb_open_tab, (xtWord1 *) old_data, (xtWord1 *) new_data))
3239@@ -3134,7 +3134,7 @@
3240 /* The number of columns required: */
3241 if (pb_open_tab->ot_is_modify) {
3242
3243- pb_open_tab->ot_cols_req = table->read_set->MX_BIT_SIZE();
3244+ pb_open_tab->ot_cols_req = getTable()->read_set->MX_BIT_SIZE();
3245 #ifdef XT_PRINT_INDEX_OPT
3246 ind = (XTIndexPtr) pb_share->sh_dic_keys[idx];
3247
3248@@ -3149,7 +3149,7 @@
3249 }
3250 else {
3251 //pb_open_tab->ot_cols_req = ha_get_max_bit(table->read_set);
3252- pb_open_tab->ot_cols_req = table->read_set->MX_BIT_SIZE();
3253+ pb_open_tab->ot_cols_req = getTable()->read_set->MX_BIT_SIZE();
3254
3255 /* Check for index coverage!
3256 *
3257@@ -3200,9 +3200,9 @@
3258 */
3259 std::string bitmap_str= convert_long_to_bit_string(ind->mi_col_map, ind->mi_col_map_size);
3260 MX_BITMAP tmp(bitmap_str);
3261- if (MX_BIT_IS_SUBSET(table->read_set, tmp))
3262+ if (MX_BIT_IS_SUBSET(getTable()->read_set, tmp))
3263 #else
3264- if (MX_BIT_IS_SUBSET(table->read_set, ind->mi_col_map))
3265+ if (MX_BIT_IS_SUBSET(getTable()->read_set, ind->mi_col_map))
3266 #endif
3267 pb_key_read = TRUE;
3268 #ifdef XT_PRINT_INDEX_OPT
3269@@ -3347,10 +3347,10 @@
3270 XT_DISABLED_TRACE(("search tx=%d val=%d err=%d\n", (int) pb_open_tab->ot_thread->st_xact_data->xd_start_xn_id, (int) XT_GET_DISK_4(key), err));
3271 done:
3272 if (err)
3273- table->status = STATUS_NOT_FOUND;
3274+ getTable()->status = STATUS_NOT_FOUND;
3275 else {
3276 pb_open_tab->ot_thread->st_statistics.st_row_select++;
3277- table->status = 0;
3278+ getTable()->status = 0;
3279 }
3280 return err;
3281 }
3282@@ -3408,10 +3408,10 @@
3283 #endif
3284 done:
3285 if (err)
3286- table->status = STATUS_NOT_FOUND;
3287+ getTable()->status = STATUS_NOT_FOUND;
3288 else {
3289 pb_open_tab->ot_thread->st_statistics.st_row_select++;
3290- table->status = 0;
3291+ getTable()->status = 0;
3292 }
3293 XT_RETURN(err);
3294 }
3295@@ -3462,10 +3462,10 @@
3296 #endif
3297 done:
3298 if (err)
3299- table->status = STATUS_NOT_FOUND;
3300+ getTable()->status = STATUS_NOT_FOUND;
3301 else {
3302 pb_open_tab->ot_thread->st_statistics.st_row_select++;
3303- table->status = 0;
3304+ getTable()->status = 0;
3305 }
3306 XT_RETURN(err);
3307 }
3308@@ -3500,10 +3500,10 @@
3309 #endif
3310 done:
3311 if (err)
3312- table->status = STATUS_NOT_FOUND;
3313+ getTable()->status = STATUS_NOT_FOUND;
3314 else {
3315 pb_open_tab->ot_thread->st_statistics.st_row_select++;
3316- table->status = 0;
3317+ getTable()->status = 0;
3318 }
3319 XT_RETURN(err);
3320 }
3321@@ -3553,10 +3553,10 @@
3322 #endif
3323 done:
3324 if (err)
3325- table->status = STATUS_NOT_FOUND;
3326+ getTable()->status = STATUS_NOT_FOUND;
3327 else {
3328 pb_open_tab->ot_thread->st_statistics.st_row_select++;
3329- table->status = 0;
3330+ getTable()->status = 0;
3331 }
3332 XT_RETURN(err);
3333 }
3334@@ -3599,10 +3599,10 @@
3335 #endif
3336 done:
3337 if (err)
3338- table->status = STATUS_NOT_FOUND;
3339+ getTable()->status = STATUS_NOT_FOUND;
3340 else {
3341 pb_open_tab->ot_thread->st_statistics.st_row_select++;
3342- table->status = 0;
3343+ getTable()->status = 0;
3344 }
3345 XT_RETURN(err);
3346 }
3347@@ -3646,7 +3646,7 @@
3348
3349 /* The number of columns required: */
3350 if (pb_open_tab->ot_is_modify) {
3351- pb_open_tab->ot_cols_req = table->read_set->MX_BIT_SIZE();
3352+ pb_open_tab->ot_cols_req = getTable()->read_set->MX_BIT_SIZE();
3353 /* {START-STAT-HACK} previously position of start statement hack,
3354 * previous comment to code below: */
3355 /* Start a statement based transaction as soon
3356@@ -3656,7 +3656,7 @@
3357 }
3358 else {
3359 //pb_open_tab->ot_cols_req = ha_get_max_bit(table->read_set);
3360- pb_open_tab->ot_cols_req = table->read_set->MX_BIT_SIZE();
3361+ pb_open_tab->ot_cols_req = getTable()->read_set->MX_BIT_SIZE();
3362
3363 /*
3364 * in case of queries like SELECT COUNT(*) FROM t
3365@@ -3726,10 +3726,10 @@
3366 err = HA_ERR_END_OF_FILE;
3367
3368 if (err)
3369- table->status = STATUS_NOT_FOUND;
3370+ getTable()->status = STATUS_NOT_FOUND;
3371 else {
3372 pb_open_tab->ot_thread->st_statistics.st_row_select++;
3373- table->status = 0;
3374+ getTable()->status = 0;
3375 }
3376 XT_RETURN(err);
3377 }
3378@@ -3801,10 +3801,10 @@
3379 }
3380
3381 if (err)
3382- table->status = STATUS_NOT_FOUND;
3383+ getTable()->status = STATUS_NOT_FOUND;
3384 else {
3385 pb_open_tab->ot_thread->st_statistics.st_row_select++;
3386- table->status = 0;
3387+ getTable()->status = 0;
3388 }
3389 XT_RETURN(err);
3390 }
3391
3392=== modified file 'plugin/pbxt/src/ha_pbxt.h'
3393--- plugin/pbxt/src/ha_pbxt.h 2010-10-06 18:22:02 +0000
3394+++ plugin/pbxt/src/ha_pbxt.h 2010-10-23 00:16:13 +0000
3395@@ -80,7 +80,7 @@
3396 /* override */ int close_connection(Session *);
3397 /* override */ int commit(Session *, bool);
3398 /* override */ int rollback(Session *, bool);
3399- /* override */ Cursor *create(TableShare&);
3400+ /* override */ Cursor *create(Table&);
3401 /* override */ void drop_database(char *);
3402 /* override */ bool show_status(Session *, stat_print_fn *, enum ha_stat_type);
3403 /* override */ const char **bas_ext() const;
3404@@ -174,7 +174,7 @@
3405 THD *pb_mysql_thd; /* A pointer to the MySQL thread. */
3406 xtBool pb_in_stat; /* TRUE of start_stmt() was issued */
3407
3408- ha_pbxt(plugin::StorageEngine &engine_arg, TableShare &table_arg);
3409+ ha_pbxt(plugin::StorageEngine &engine_arg, Table &table_arg);
3410 virtual ~ha_pbxt() { }
3411
3412 /* The name that will be used for display purposes */
3413
3414=== modified file 'plugin/pbxt/src/ha_xtsys.cc'
3415--- plugin/pbxt/src/ha_xtsys.cc 2010-10-02 21:15:42 +0000
3416+++ plugin/pbxt/src/ha_xtsys.cc 2010-10-23 00:16:13 +0000
3417@@ -59,7 +59,7 @@
3418 */
3419
3420 #ifdef DRIZZLED
3421-ha_xtsys::ha_xtsys(handlerton *hton, TableShare& table_arg):
3422+ha_xtsys::ha_xtsys(handlerton *hton, Table& table_arg):
3423 handler(*hton, table_arg),
3424 ha_open_tab(NULL)
3425 {
3426@@ -96,7 +96,7 @@
3427 try_(a) {
3428 xt_ha_open_database_of_table(self, (XTPathStrPtr) table_path);
3429
3430- ha_open_tab = XTSystemTableShare::openSystemTable(self, table_path, table);
3431+ ha_open_tab = XTSystemTableShare::openSystemTable(self, table_path, getTable());
3432 MYSQL_INIT_LOCK(ha_lock, ha_open_tab->ost_share->sts_my_lock);
3433 ref_length = ha_open_tab->getRefLen();
3434 }
3435
3436=== modified file 'plugin/pbxt/src/ha_xtsys.h'
3437--- plugin/pbxt/src/ha_xtsys.h 2010-10-02 21:15:42 +0000
3438+++ plugin/pbxt/src/ha_xtsys.h 2010-10-23 00:16:13 +0000
3439@@ -59,7 +59,7 @@
3440
3441 public:
3442 #ifdef DRIZZLED
3443- ha_xtsys(handlerton *hton, TableShare& table_arg);
3444+ ha_xtsys(handlerton *hton, Table& table_arg);
3445 #else
3446 ha_xtsys(handlerton *hton, TABLE_SHARE *table_arg);
3447 #endif
3448
3449=== modified file 'plugin/schema_engine/schema.h'
3450--- plugin/schema_engine/schema.h 2010-10-09 01:10:07 +0000
3451+++ plugin/schema_engine/schema.h 2010-10-23 00:16:13 +0000
3452@@ -53,7 +53,7 @@
3453
3454 bool doCanCreateTable(const drizzled::TableIdentifier &identifier);
3455
3456- drizzled::Cursor *create(drizzled::TableShare &)
3457+ drizzled::Cursor *create(drizzled::Table &)
3458 {
3459 return NULL;
3460 }
3461
3462=== modified file 'plugin/tableprototester/tableprototester.cc'
3463--- plugin/tableprototester/tableprototester.cc 2010-09-23 01:53:13 +0000
3464+++ plugin/tableprototester/tableprototester.cc 2010-10-23 00:16:13 +0000
3465@@ -57,7 +57,7 @@
3466 table_definition_ext= TABLEPROTOTESTER_EXT;
3467 }
3468
3469- virtual Cursor *create(TableShare &table)
3470+ virtual Cursor *create(Table &table)
3471 {
3472 return new TableProtoTesterCursor(*this, table);
3473 }
3474@@ -128,7 +128,7 @@
3475 }
3476
3477 TableProtoTesterCursor::TableProtoTesterCursor(drizzled::plugin::StorageEngine &engine_arg,
3478- TableShare &table_arg) :
3479+ Table &table_arg) :
3480 Cursor(engine_arg, table_arg)
3481 { }
3482
3483@@ -263,7 +263,7 @@
3484
3485 int TableProtoTesterCursor::doInsertRecord(unsigned char *)
3486 {
3487- return(table->next_number_field ? update_auto_increment() : 0);
3488+ return(getTable()->next_number_field ? update_auto_increment() : 0);
3489 }
3490
3491 int TableProtoTesterCursor::doStartTableScan(bool)
3492
3493=== modified file 'plugin/tableprototester/tableprototester.h'
3494--- plugin/tableprototester/tableprototester.h 2010-04-23 17:43:11 +0000
3495+++ plugin/tableprototester/tableprototester.h 2010-10-23 00:16:13 +0000
3496@@ -24,7 +24,7 @@
3497 class TableProtoTesterCursor: public drizzled::Cursor
3498 {
3499 public:
3500- TableProtoTesterCursor(drizzled::plugin::StorageEngine &engine, drizzled::TableShare &table_arg);
3501+ TableProtoTesterCursor(drizzled::plugin::StorageEngine &engine, drizzled::Table &table_arg);
3502 ~TableProtoTesterCursor()
3503 {}
3504
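The hunks above all apply one mechanical change: a Cursor is now constructed from a Table rather than a TableShare, engines hand that Table to create(), and every former direct use of the table member goes through the getTable() accessor (with the share reached via getTable()->getShare()). A minimal sketch of an engine cursor after this refactor, assuming only headers already touched by this diff; the class name is hypothetical and the remaining pure-virtual Cursor methods are omitted:

#include <drizzled/cursor.h>

class ExampleCursor : public drizzled::Cursor
{
public:
  ExampleCursor(drizzled::plugin::StorageEngine &engine_arg,
                drizzled::Table &table_arg) :
    drizzled::Cursor(engine_arg, table_arg)  // binds to a Table, not a TableShare
  {}

  int doInsertRecord(unsigned char *)
  {
    // Field and share access go through getTable() rather than touching the member directly.
    return getTable()->next_number_field ? update_auto_increment() : 0;
  }
};

The matching engine-side change is the create() override, which now takes Table& and forwards it unchanged, e.g. return new ExampleCursor(*this, table); as the tableprototester and pbxt hunks above do.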