import sys
import time

from lib389 import DirSrv, Entry, tools
from lib389.tools import DirSrvTools
from lib389._constants import *
from lib389.properties import *

from constants import *
# Module-level logger for this test module.
logging.getLogger(__name__).setLevel(logging.DEBUG)
log = logging.getLogger(__name__)

# Deployment prefix; set by run_isolated() when running outside py.test.
installation_prefix = None

# DN of the entry used to verify that replication master -> consumer works.
TEST_REPL_DN = "cn=test_repl, %s" % SUFFIX
ENTRY_DN = "cn=test_entry, %s" % SUFFIX

# Base name of the dummy entries added by test_ticket47619_init.
OTHER_NAME = 'other_entry'
# NOTE(review): the MAX_OTHERS assignment was lost in extraction; it is
# referenced by test_ticket47619_init below. 10 matches the upstream test --
# confirm against VCS.
MAX_OTHERS = 10

# Attributes that get an equality index in the retro changelog backend.
ATTRIBUTES = ['street', 'countryName', 'description', 'postalAddress',
              'postalCode', 'title', 'l', 'roomNumber']
class TopologyMasterConsumer(object):
    """Container pairing the master and consumer DirSrv instances of the
    replicated topology returned by the ``topology`` fixture."""

    def __init__(self, master, consumer):
        # NOTE(review): lines lost in extraction may have opened the server
        # connections here (master.open()/consumer.open()) -- confirm vs VCS.
        self.master = master
        self.consumer = consumer

    def __repr__(self):
        # Bug fix: the original format string was missing the closing ']'
        # after the consumer ("Consumer[%s" -> "Consumer[%s]").
        return "Master[%s] -> Consumer[%s]" % (self.master, self.consumer)
@pytest.fixture(scope="module")
def topology(request):
    '''
    This fixture is used to create a replicated topology for the 'module'.
    The replicated topology is MASTER -> Consumer.

    At the beginning, It may exists a master instance and/or a consumer instance.
    It may also exists a backup for the master and/or the consumer.

    Principle:
        If master instance exists:
            restart it
        If consumer instance exists:
            restart it
        If backup of master AND backup of consumer exists:
            create or rebind to consumer
            create or rebind to master

            restore master from backup
            restore consumer from backup
        else:
            Cleanup everything
                remove instances
                remove backups
            Create instances
            Initialize replication
            Create backups
    '''
    # NOTE(review): several lines of this fixture were lost in extraction;
    # the branch bodies below (create/open/delete, sys.exit, the replication
    # wait loop) were restored from the standard lib389 ticket-test
    # boilerplate -- confirm against VCS.
    global installation_prefix

    if installation_prefix:
        args_instance[SER_DEPLOYED_DIR] = installation_prefix

    master = DirSrv(verbose=False)
    consumer = DirSrv(verbose=False)

    # Args for the master instance
    args_instance[SER_HOST] = HOST_MASTER
    args_instance[SER_PORT] = PORT_MASTER
    args_instance[SER_SERVERID_PROP] = SERVERID_MASTER
    args_master = args_instance.copy()
    master.allocate(args_master)

    # Args for the consumer instance
    args_instance[SER_HOST] = HOST_CONSUMER
    args_instance[SER_PORT] = PORT_CONSUMER
    args_instance[SER_SERVERID_PROP] = SERVERID_CONSUMER
    args_consumer = args_instance.copy()
    consumer.allocate(args_consumer)

    # Get the status of the backups
    backup_master = master.checkBackupFS()
    backup_consumer = consumer.checkBackupFS()

    # Get the status of the instance and restart it if it exists
    instance_master = master.exists()
    if instance_master:
        master.stop(timeout=10)
        master.start(timeout=10)

    instance_consumer = consumer.exists()
    if instance_consumer:
        consumer.stop(timeout=10)
        consumer.start(timeout=10)

    if backup_master and backup_consumer:
        # The backups exist, assuming they are correct
        # we just re-init the instances with them
        if not instance_master:
            master.create()
            # Used to retrieve configuration information (dbdir, confdir...)
            master.open()

        if not instance_consumer:
            consumer.create()
            # Used to retrieve configuration information (dbdir, confdir...)
            consumer.open()

        # restore master from backup
        master.stop(timeout=10)
        master.restoreFS(backup_master)
        master.start(timeout=10)

        # restore consumer from backup
        consumer.stop(timeout=10)
        consumer.restoreFS(backup_consumer)
        consumer.start(timeout=10)
    else:
        # We should be here only in two conditions
        #      - This is the first time a test involve master-consumer
        #        so we need to create everything
        #      - Something weird happened (instance/backup destroyed)
        #        so we discard everything and recreate all

        # Remove all the backups. So even if we have a specific backup file
        # (e.g backup_master) we clear all backups that an instance my have created
        if backup_master:
            master.clearBackupFS()
        if backup_consumer:
            consumer.clearBackupFS()

        # Remove all the instances
        if instance_master:
            master.delete()
        if instance_consumer:
            consumer.delete()

        # Create the instances
        master.create()
        master.open()
        consumer.create()
        consumer.open()

        # Now prepare the Master-Consumer topology
        #
        # First Enable replication
        master.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER)
        consumer.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_CONSUMER)

        # Initialize the supplier->consumer
        properties = {RA_NAME: r'meTo_$host:$port',
                      RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
                      RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
                      RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
                      RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
        repl_agreement = master.agreement.create(suffix=SUFFIX, host=consumer.host, port=consumer.port, properties=properties)

        if not repl_agreement:
            log.fatal("Fail to create a replica agreement")
            sys.exit(1)

        log.debug("%s created" % repl_agreement)
        master.agreement.init(SUFFIX, HOST_CONSUMER, PORT_CONSUMER)
        master.waitForReplInit(repl_agreement)

        # Check replication is working fine: add an entry on the master and
        # poll the consumer until it shows up (up to ~10s).
        master.add_s(Entry((TEST_REPL_DN, {
                            'objectclass': "top person".split(),
                            'sn': 'test_repl',
                            'cn': 'test_repl'})))
        loop = 0
        while loop <= 10:
            try:
                ent = consumer.getEntry(TEST_REPL_DN, ldap.SCOPE_BASE, "(objectclass=*)")
                break
            except ldap.NO_SUCH_OBJECT:
                time.sleep(1)
                loop += 1

        # Time to create the backups
        master.stop(timeout=10)
        master.backupfile = master.backupFS()
        master.start(timeout=10)

        consumer.stop(timeout=10)
        consumer.backupfile = consumer.backupFS()
        consumer.start(timeout=10)

    # Here we have two instances master and consumer
    # with replication working. Either coming from a backup recovery
    # or from a fresh (re)init
    # Time to return the topology
    return TopologyMasterConsumer(master, consumer)
def test_ticket47619_init(topology):
    '''
    Initialize the test environment: enable the retro changelog plugin,
    restart the master, open its error log for later inspection, and add
    MAX_OTHERS dummy person entries under SUFFIX.
    '''
    topology.master.plugins.enable(name=PLUGIN_RETRO_CHANGELOG)
    #topology.master.plugins.enable(name=PLUGIN_MEMBER_OF)
    #topology.master.plugins.enable(name=PLUGIN_REFER_INTEGRITY)
    topology.master.stop(timeout=10)
    topology.master.start(timeout=10)

    topology.master.log.info("test_ticket47619_init topology %r" % (topology))
    # the test case will check if a warning message is logged in the
    # error log of the supplier
    topology.master.errorlog_file = open(topology.master.errlog, "r")

    # add dummy entries
    for cpt in range(MAX_OTHERS):
        name = "%s%d" % (OTHER_NAME, cpt)
        # NOTE(review): the 'sn'/'cn' lines were lost in extraction and
        # restored here ('person' requires both) -- confirm against VCS.
        topology.master.add_s(Entry(("cn=%s,%s" % (name, SUFFIX), {
                                     'objectclass': "top person".split(),
                                     'sn': name,
                                     'cn': name})))

    topology.master.log.info("test_ticket47619_init: %d entries ADDed %s[0..%d]" % (MAX_OTHERS, OTHER_NAME, MAX_OTHERS-1))

    # Check the number of entries in the retro changelog
    time.sleep(1)  # restored: give the retro changelog time to record the ADDs
    ents = topology.master.search_s(RETROCL_SUFFIX, ldap.SCOPE_ONELEVEL, "(objectclass=*)")
    assert len(ents) == MAX_OTHERS
def test_ticket47619_create_index(topology):
    '''
    Create an equality index in the retro changelog backend for each
    attribute listed in ATTRIBUTES.
    '''
    args = {INDEX_TYPE: 'eq'}
    for attr in ATTRIBUTES:
        topology.master.index.create(suffix=RETROCL_SUFFIX, attr=attr, args=args)
def test_ticket47619_reindex(topology):
    '''
    Reindex all the attributes in ATTRIBUTES
    '''
    args = {TASK_WAIT: True}
    for attr in ATTRIBUTES:
        rc = topology.master.tasks.reindex(suffix=RETROCL_SUFFIX, attrname=attr, args=args)
        # Restored: rc was assigned but never checked in the garbled text;
        # the reindex task must succeed (0) for the indexed-search test to
        # be meaningful.
        assert rc == 0
def test_ticket47619_check_indexed_search(topology):
    '''
    Run an indexed (equality) search on each attribute in ATTRIBUTES;
    no entry in the retro changelog should match '(attr=hello)'.
    '''
    for attr in ATTRIBUTES:
        ents = topology.master.search_s(RETROCL_SUFFIX, ldap.SCOPE_SUBTREE, "(%s=hello)" % attr)
        assert len(ents) == 0
def test_ticket47619_final(topology):
    '''
    Stop both instances at the end of the module's test run.
    '''
    topology.master.stop(timeout=10)
    topology.consumer.stop(timeout=10)
def run_isolated():
    '''
        run_isolated is used to run these test cases independently of a test scheduler (xunit, py.test..)
        To run isolated without py.test, you need to
            - edit this file and comment '@pytest.fixture' line before 'topology' function.
            - set the installation prefix
            - run this program
    '''
    # NOTE(review): the 'def run_isolated():' header and docstring delimiters
    # were lost in extraction and restored here -- confirm against VCS.
    global installation_prefix
    installation_prefix = None

    topo = topology(True)
    test_ticket47619_init(topo)

    test_ticket47619_create_index(topo)

    # important restart that trigger the hang
    # at restart, finding the new 'changelog' backend, the backend is acquired in Read
    # preventing the reindex task to complete
    topo.master.restart(timeout=10)
    test_ticket47619_reindex(topo)
    test_ticket47619_check_indexed_search(topo)

    test_ticket47619_final(topo)
if __name__ == '__main__':