1
# See the file LICENSE for redistribution information.
3
# Copyright (c) 1996-2002
4
# Sleepycat Software. All rights reserved.
9
# TEST Per-operation recovery tests for non-duplicate, non-split
10
# TEST messages. Makes sure that we exercise redo, undo, and do-nothing
11
# TEST condition. Any test that appears with the message (change state)
12
# TEST indicates that we've already run the particular test, but we are
13
# TEST running it again so that we can change the state of the database
14
# TEST to prepare for the next test (this applies to all other recovery
15
# TEST tests as well).
17
# TEST These are the most basic recovery tests. We do individual recovery
18
# TEST tests for each operation in the access method interface. First we
19
# TEST create a file and capture the state of the database (i.e., we copy
20
# TEST it). Then we run a transaction containing a single operation. In
21
# TEST one test, we abort the transaction and compare the outcome to the
22
# TEST original copy of the file. In the second test, we restore the
23
# TEST original copy of the database and then run recovery and compare
24
# TEST this against the actual database.
25
proc recd001 { method {select 0} args} {
29
set orig_fixed_len $fixed_len
30
set opts [convert_args $method $args]
31
set omethod [convert_method $method]
33
puts "Recd001: $method operation/transaction tests"
35
# Create the database and environment.
38
# The recovery tests were originally written to
39
# do a command, abort, do it again, commit, and then
40
# repeat the sequence with another command. Each command
41
# tends to require that the previous command succeeded and
42
# left the database a certain way. To avoid cluttering up the
43
# op_recover interface as well as the test code, we create two
44
# databases; one does abort and then commit for each op, the
45
# other does prepare, prepare-abort, and prepare-commit for each
46
# op. If all goes well, this allows each command to depend on
47
# exactly one successful iteration of the previous command.
48
set testfile recd001.db
49
set testfile2 recd001-2.db
51
set flags "-create -txn -home $testdir"
53
puts "\tRecd001.a.0: creating environment"
54
set env_cmd "berkdb_env $flags"
55
set dbenv [eval $env_cmd]
56
error_check_good dbenv [is_valid_env $dbenv] TRUE
59
# We need to create a database to get the pagesize (either
60
# the default or whatever might have been specified).
61
# Then remove it so we can compute fixed_len and create the
63
set oflags "-create $omethod -mode 0644 \
64
-env $dbenv $opts $testfile"
65
set db [eval {berkdb_open} $oflags]
66
error_check_good db_open [is_valid_db $db] TRUE
69
# Compute the fixed_len based on the pagesize being used.
70
# We want the fixed_len to be 1/4 the pagesize.
72
set pg [get_pagesize $stat]
73
error_check_bad get_pagesize $pg -1
74
set fixed_len [expr $pg / 4]
75
error_check_good db_close [$db close] 0
76
error_check_good dbremove [berkdb dbremove -env $dbenv $testfile] 0
78
# Convert the args again because fixed_len is now real.
79
# Create the databases and close the environment.
80
# cannot specify db truncate in txn protected env!!!
81
set opts [convert_args $method ""]
82
set omethod [convert_method $method]
83
set oflags "-create $omethod -mode 0644 \
84
-env $dbenv $opts $testfile"
85
set db [eval {berkdb_open} $oflags]
86
error_check_good db_open [is_valid_db $db] TRUE
87
error_check_good db_close [$db close] 0
89
set oflags "-create $omethod -mode 0644 \
90
-env $dbenv $opts $testfile2"
91
set db [eval {berkdb_open} $oflags]
92
error_check_good db_open [is_valid_db $db] TRUE
93
error_check_good db_close [$db close] 0
95
error_check_good env_close [$dbenv close] 0
97
puts "\tRecd001.a.1: Verify db_printlog can read logfile"
98
set tmpfile $testdir/printlog.out
99
set stat [catch {exec $util_path/db_printlog -h $testdir \
101
error_check_good db_printlog $stat 0
104
# List of recovery tests: {CMD MSG} pairs.
106
{ {DB put -txn TXNID $key $data} "Recd001.b: put"}
107
{ {DB del -txn TXNID $key} "Recd001.c: delete"}
108
{ {DB put -txn TXNID $bigkey $data} "Recd001.d: big key put"}
109
{ {DB del -txn TXNID $bigkey} "Recd001.e: big key delete"}
110
{ {DB put -txn TXNID $key $bigdata} "Recd001.f: big data put"}
111
{ {DB del -txn TXNID $key} "Recd001.g: big data delete"}
112
{ {DB put -txn TXNID $key $data} "Recd001.h: put (change state)"}
113
{ {DB put -txn TXNID $key $newdata} "Recd001.i: overwrite"}
114
{ {DB put -txn TXNID -partial {$off $len} $key $partial_grow}
115
"Recd001.j: partial put growing"}
116
{ {DB put -txn TXNID $key $newdata} "Recd001.k: overwrite (fix)"}
117
{ {DB put -txn TXNID -partial {$off $len} $key $partial_shrink}
118
"Recd001.l: partial put shrinking"}
119
{ {DB put -txn TXNID -append $data} "Recd001.m: put -append"}
120
{ {DB get -txn TXNID -consume} "Recd001.n: db get -consume"}
123
# These are all the data values that we're going to need to read
124
# through the operation table and run the recovery tests.
126
if { [is_record_based $method] == 1 } {
131
set data recd001_data
132
set newdata NEWrecd001_dataNEW
136
set partial_grow replacement_record_grow
137
set partial_shrink xxx
138
if { [is_fixed_length $method] == 1 } {
139
set len [string length $partial_grow]
140
set partial_shrink $partial_grow
142
set bigdata [replicate $key $fixed_len]
143
if { [is_record_based $method] == 1 } {
144
set bigkey $fixed_len
146
set bigkey [replicate $key $fixed_len]
149
foreach pair $rlist {
150
set cmd [subst [lindex $pair 0]]
151
set msg [lindex $pair 1]
152
if { $select != 0 } {
153
set tag [lindex $msg 0]
154
set tail [expr [string length $tag] - 2]
155
set tag [string range $tag $tail $tail]
156
if { [lsearch $select $tag] == -1 } {
161
if { [is_queue $method] != 1 } {
162
if { [string first append $cmd] != -1 } {
165
if { [string first consume $cmd] != -1 } {
170
# if { [is_fixed_length $method] == 1 } {
171
# if { [string first partial $cmd] != -1 } {
175
op_recover abort $testdir $env_cmd $testfile $cmd $msg
176
op_recover commit $testdir $env_cmd $testfile $cmd $msg
178
# Note that since prepare-discard ultimately aborts
179
# the txn, it must come before prepare-commit.
181
op_recover prepare-abort $testdir $env_cmd $testfile2 \
183
op_recover prepare-discard $testdir $env_cmd $testfile2 \
185
op_recover prepare-commit $testdir $env_cmd $testfile2 \
188
set fixed_len $orig_fixed_len
190
if { [is_fixed_length $method] == 1 } {
191
puts "Skipping remainder of test for fixed length methods"
196
# Check partial extensions. If we add a key/data to the database
197
# and then expand it using -partial, then recover, recovery was
198
# failing in #3944. Check that scenario here.
201
# We loop here because on each iteration, we need to clean up
202
# the old env (i.e. this test does not depend on earlier runs).
203
# If we run it without cleaning up the env in between, we do not
204
# test the scenario of #3944.
206
set len [string length $data]
208
set part_data [replicate "abcdefgh" 32]
211
{DB put -txn TXNID -partial {$len $len2} $key $part_data}]
212
set msg "Recd001.o: partial put prepopulated/expanding"
213
foreach op {abort commit prepare-abort prepare-discard prepare-commit} {
216
set dbenv [eval $env_cmd]
217
error_check_good dbenv [is_valid_env $dbenv] TRUE
219
error_check_good txn_begin [is_valid_txn $t $dbenv] TRUE
220
set oflags "-create $omethod -mode 0644 \
221
-env $dbenv -txn $t $opts $testfile"
222
set db [eval {berkdb_open} $oflags]
223
error_check_good db_open [is_valid_db $db] TRUE
224
set oflags "-create $omethod -mode 0644 \
225
-env $dbenv -txn $t $opts $testfile2"
226
set db2 [eval {berkdb_open} $oflags]
227
error_check_good db_open [is_valid_db $db2] TRUE
229
set ret [$db put -txn $t -partial $p $key $data]
230
error_check_good dbput $ret 0
232
set ret [$db2 put -txn $t -partial $p $key $data]
233
error_check_good dbput $ret 0
234
error_check_good txncommit [$t commit] 0
235
error_check_good dbclose [$db close] 0
236
error_check_good dbclose [$db2 close] 0
237
error_check_good dbenvclose [$dbenv close] 0
239
op_recover $op $testdir $env_cmd $testfile $cmd $msg