16
import logging
import sys
import time

import ldap
import pytest

from lib389 import DirSrv, Entry, tools
from lib389.tools import DirSrvTools
from lib389._constants import *
from lib389._constants import REPLICAROLE_MASTER
from lib389.properties import *
from constants import *
23
logging.getLogger(__name__).setLevel(logging.DEBUG)
24
log = logging.getLogger(__name__)
27
# important part. We can deploy Master1 and Master2 on different versions
29
installation1_prefix = None
30
installation2_prefix = None
32
SCHEMA_DN = "cn=schema"
33
TEST_REPL_DN = "cn=test_repl, %s" % SUFFIX
34
OC_NAME = 'OCticket47676'
36
MUST = "(postalAddress $ postalCode)"
37
MAY = "(member $ street)"
39
OC2_NAME = 'OC2ticket47676'
41
MUST_2 = "(postalAddress $ postalCode)"
42
MAY_2 = "(member $ street)"
44
REPL_SCHEMA_POLICY_CONSUMER = "cn=consumerUpdatePolicy,cn=replSchema,cn=config"
45
REPL_SCHEMA_POLICY_SUPPLIER = "cn=supplierUpdatePolicy,cn=replSchema,cn=config"
47
OTHER_NAME = 'other_entry'
50
BIND_NAME = 'bind_entry'
51
BIND_DN = 'cn=%s, %s' % (BIND_NAME, SUFFIX)
54
ENTRY_NAME = 'test_entry'
55
ENTRY_DN = 'cn=%s, %s' % (ENTRY_NAME, SUFFIX)
56
ENTRY_OC = "top person %s" % OC_NAME
58
BASE_OID = "1.2.3.4.5.6.7.8.9.10"
60
def _oc_definition(oid_ext, name, must=None, may=None):
61
oid = "%s.%d" % (BASE_OID, oid_ext)
62
desc = 'To test ticket 47490'
69
new_oc = "( %s NAME '%s' DESC '%s' SUP %s AUXILIARY MUST %s MAY %s )" % (oid, name, desc, sup, must, may)
71
class TopologyMaster1Master2(object):
72
def __init__(self, master1, master2):
74
self.master1 = master1
77
self.master2 = master2
80
@pytest.fixture(scope="module")
81
def topology(request):
83
This fixture is used to create a replicated topology for the 'module'.
84
The replicated topology is MASTER1 <-> Master2.
85
At the beginning, It may exists a master2 instance and/or a master2 instance.
86
It may also exists a backup for the master1 and/or the master2.
89
If master1 instance exists:
91
If master2 instance exists:
93
If backup of master1 AND backup of master2 exists:
94
create or rebind to master1
95
create or rebind to master2
97
restore master1 from backup
98
restore master2 from backup
104
Initialize replication
107
global installation1_prefix
108
global installation2_prefix
110
# allocate master1 on a given deployement
111
master1 = DirSrv(verbose=False)
112
if installation1_prefix:
113
args_instance[SER_DEPLOYED_DIR] = installation1_prefix
115
# Args for the master1 instance
116
args_instance[SER_HOST] = HOST_MASTER_1
117
args_instance[SER_PORT] = PORT_MASTER_1
118
args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1
119
args_master = args_instance.copy()
120
master1.allocate(args_master)
122
# allocate master1 on a given deployement
123
master2 = DirSrv(verbose=False)
124
if installation2_prefix:
125
args_instance[SER_DEPLOYED_DIR] = installation2_prefix
127
# Args for the consumer instance
128
args_instance[SER_HOST] = HOST_MASTER_2
129
args_instance[SER_PORT] = PORT_MASTER_2
130
args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_2
131
args_master = args_instance.copy()
132
master2.allocate(args_master)
135
# Get the status of the backups
136
backup_master1 = master1.checkBackupFS()
137
backup_master2 = master2.checkBackupFS()
139
# Get the status of the instance and restart it if it exists
140
instance_master1 = master1.exists()
142
master1.stop(timeout=10)
143
master1.start(timeout=10)
145
instance_master2 = master2.exists()
147
master2.stop(timeout=10)
148
master2.start(timeout=10)
150
if backup_master1 and backup_master2:
151
# The backups exist, assuming they are correct
152
# we just re-init the instances with them
153
if not instance_master1:
155
# Used to retrieve configuration information (dbdir, confdir...)
158
if not instance_master2:
160
# Used to retrieve configuration information (dbdir, confdir...)
163
# restore master1 from backup
164
master1.stop(timeout=10)
165
master1.restoreFS(backup_master1)
166
master1.start(timeout=10)
168
# restore master2 from backup
169
master2.stop(timeout=10)
170
master2.restoreFS(backup_master2)
171
master2.start(timeout=10)
173
# We should be here only in two conditions
174
# - This is the first time a test involve master-consumer
175
# so we need to create everything
176
# - Something weird happened (instance/backup destroyed)
177
# so we discard everything and recreate all
179
# Remove all the backups. So even if we have a specific backup file
180
# (e.g backup_master) we clear all backups that an instance my have created
182
master1.clearBackupFS()
184
master2.clearBackupFS()
186
# Remove all the instances
192
# Create the instances
199
# Now prepare the Master-Consumer topology
201
# First Enable replication
202
master1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_1)
203
master2.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_2)
205
# Initialize the supplier->consumer
207
properties = {RA_NAME: r'meTo_$host:$port',
208
RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
209
RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
210
RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
211
RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
212
repl_agreement = master1.agreement.create(suffix=SUFFIX, host=master2.host, port=master2.port, properties=properties)
214
if not repl_agreement:
215
log.fatal("Fail to create a replica agreement")
218
log.debug("%s created" % repl_agreement)
220
properties = {RA_NAME: r'meTo_$host:$port',
221
RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
222
RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
223
RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
224
RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
225
master2.agreement.create(suffix=SUFFIX, host=master1.host, port=master1.port, properties=properties)
227
master1.agreement.init(SUFFIX, HOST_MASTER_2, PORT_MASTER_2)
228
master1.waitForReplInit(repl_agreement)
230
# Check replication is working fine
231
master1.add_s(Entry((TEST_REPL_DN, {
232
'objectclass': "top person".split(),
234
'cn': 'test_repl'})))
238
ent = master2.getEntry(TEST_REPL_DN, ldap.SCOPE_BASE, "(objectclass=*)")
240
except ldap.NO_SUCH_OBJECT:
244
# Time to create the backups
245
master1.stop(timeout=10)
246
master1.backupfile = master1.backupFS()
247
master1.start(timeout=10)
249
master2.stop(timeout=10)
250
master2.backupfile = master2.backupFS()
251
master2.start(timeout=10)
254
# Here we have two instances master and consumer
255
# with replication working. Either coming from a backup recovery
256
# or from a fresh (re)init
257
# Time to return the topology
258
return TopologyMaster1Master2(master1, master2)
261
def test_ticket47676_init(topology):
264
- Objectclass with MAY 'member'
265
- an entry ('bind_entry') with which we bind to test the 'SELFDN' operation
266
It deletes the anonymous aci
271
topology.master1.log.info("Add %s that allows 'member' attribute" % OC_NAME)
272
new_oc = _oc_definition(OC_OID_EXT, OC_NAME, must = MUST, may = MAY)
273
topology.master1.addSchema('objectClasses', new_oc)
276
# entry used to bind with
277
topology.master1.log.info("Add %s" % BIND_DN)
278
topology.master1.add_s(Entry((BIND_DN, {
279
'objectclass': "top person".split(),
282
'userpassword': BIND_PW})))
284
# enable acl error logging
285
mod = [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', str(128+8192))] # ACL + REPL
286
topology.master1.modify_s(DN_CONFIG, mod)
287
topology.master2.modify_s(DN_CONFIG, mod)
290
for cpt in range(MAX_OTHERS):
291
name = "%s%d" % (OTHER_NAME, cpt)
292
topology.master1.add_s(Entry(("cn=%s,%s" % (name, SUFFIX), {
293
'objectclass': "top person".split(),
297
def test_ticket47676_skip_oc_at(topology):
299
This test ADD an entry on MASTER1 where 47676 is fixed. Then it checks that entry is replicated
300
on MASTER2 (even if on MASTER2 47676 is NOT fixed). Then update on MASTER2.
301
If the schema has successfully been pushed, updating Master2 should succeed
303
topology.master1.log.info("\n\n######################### ADD ######################\n")
305
# bind as 'cn=Directory manager'
306
topology.master1.log.info("Bind as %s and add the add the entry with specific oc" % DN_DM)
307
topology.master1.simple_bind_s(DN_DM, PASSWORD)
309
# Prepare the entry with multivalued members
310
entry = Entry(ENTRY_DN)
311
entry.setValues('objectclass', 'top', 'person', 'OCticket47676')
312
entry.setValues('sn', ENTRY_NAME)
313
entry.setValues('cn', ENTRY_NAME)
314
entry.setValues('postalAddress', 'here')
315
entry.setValues('postalCode', '1234')
317
for cpt in range(MAX_OTHERS):
318
name = "%s%d" % (OTHER_NAME, cpt)
319
members.append("cn=%s,%s" % (name, SUFFIX))
320
members.append(BIND_DN)
321
entry.setValues('member', members)
323
topology.master1.log.info("Try to add Add %s should be successful" % ENTRY_DN)
324
topology.master1.add_s(entry)
327
# Now check the entry as been replicated
329
topology.master2.simple_bind_s(DN_DM, PASSWORD)
330
topology.master1.log.info("Try to retrieve %s from Master2" % ENTRY_DN)
334
ent = topology.master2.getEntry(ENTRY_DN, ldap.SCOPE_BASE, "(objectclass=*)")
336
except ldap.NO_SUCH_OBJECT:
341
# Now update the entry on Master2 (as DM because 47676 is possibly not fixed on M2)
342
topology.master1.log.info("Update %s on M2" % ENTRY_DN)
343
mod = [(ldap.MOD_REPLACE, 'description', 'test_add')]
344
topology.master2.modify_s(ENTRY_DN, mod)
346
topology.master1.simple_bind_s(DN_DM, PASSWORD)
349
ent = topology.master1.getEntry(ENTRY_DN, ldap.SCOPE_BASE, "(objectclass=*)")
350
if ent.hasAttr('description') and (ent.getValue('description') == 'test_add'):
355
assert ent.getValue('description') == 'test_add'
357
def test_ticket47676_reject_action(topology):
359
topology.master1.log.info("\n\n######################### REJECT ACTION ######################\n")
361
topology.master1.simple_bind_s(DN_DM, PASSWORD)
362
topology.master2.simple_bind_s(DN_DM, PASSWORD)
364
# make master1 to refuse to push the schema if OC_NAME is present in consumer schema
365
mod = [(ldap.MOD_ADD, 'schemaUpdateObjectclassReject', '%s' % (OC_NAME) )] # ACL + REPL
366
topology.master1.modify_s(REPL_SCHEMA_POLICY_SUPPLIER, mod)
368
# Restart is required to take into account that policy
369
topology.master1.stop(timeout=10)
370
topology.master1.start(timeout=10)
372
# Add a new OC on M1 so that schema CSN will change and M1 will try to push the schema
373
topology.master1.log.info("Add %s on M1" % OC2_NAME)
374
new_oc = _oc_definition(OC2_OID_EXT, OC2_NAME, must = MUST, may = MAY)
375
topology.master1.addSchema('objectClasses', new_oc)
377
# Safety checking that the schema has been updated on M1
378
topology.master1.log.info("Check %s is in M1" % OC2_NAME)
379
ent = topology.master1.getEntry(SCHEMA_DN, ldap.SCOPE_BASE, "(objectclass=*)", ["objectclasses"])
380
assert ent.hasAttr('objectclasses')
382
for objectclass in ent.getValues('objectclasses'):
383
if str(objectclass).find(OC2_NAME) >= 0:
388
# Do an update of M1 so that M1 will try to push the schema
389
topology.master1.log.info("Update %s on M1" % ENTRY_DN)
390
mod = [(ldap.MOD_REPLACE, 'description', 'test_reject')]
391
topology.master1.modify_s(ENTRY_DN, mod)
393
# Check the replication occured and so also M1 attempted to push the schema
394
topology.master1.log.info("Check updated %s on M2" % ENTRY_DN)
397
ent = topology.master2.getEntry(ENTRY_DN, ldap.SCOPE_BASE, "(objectclass=*)", ['description'])
398
if ent.hasAttr('description') and ent.getValue('description') == 'test_reject':
399
# update was replicated
405
# Check that the schema has not been pushed
406
topology.master1.log.info("Check %s is not in M2" % OC2_NAME)
407
ent = topology.master2.getEntry(SCHEMA_DN, ldap.SCOPE_BASE, "(objectclass=*)", ["objectclasses"])
408
assert ent.hasAttr('objectclasses')
410
for objectclass in ent.getValues('objectclasses'):
411
if str(objectclass).find(OC2_NAME) >= 0:
416
topology.master1.log.info("\n\n######################### NO MORE REJECT ACTION ######################\n")
418
# make master1 to do no specific action on OC_NAME
419
mod = [(ldap.MOD_DELETE, 'schemaUpdateObjectclassReject', '%s' % (OC_NAME) )] # ACL + REPL
420
topology.master1.modify_s(REPL_SCHEMA_POLICY_SUPPLIER, mod)
422
# Restart is required to take into account that policy
423
topology.master1.stop(timeout=10)
424
topology.master1.start(timeout=10)
426
# Do an update of M1 so that M1 will try to push the schema
427
topology.master1.log.info("Update %s on M1" % ENTRY_DN)
428
mod = [(ldap.MOD_REPLACE, 'description', 'test_no_more_reject')]
429
topology.master1.modify_s(ENTRY_DN, mod)
431
# Check the replication occured and so also M1 attempted to push the schema
432
topology.master1.log.info("Check updated %s on M2" % ENTRY_DN)
435
ent = topology.master2.getEntry(ENTRY_DN, ldap.SCOPE_BASE, "(objectclass=*)", ['description'])
436
if ent.hasAttr('description') and ent.getValue('description') == 'test_no_more_reject':
437
# update was replicated
443
# Check that the schema has been pushed
444
topology.master1.log.info("Check %s is in M2" % OC2_NAME)
445
ent = topology.master2.getEntry(SCHEMA_DN, ldap.SCOPE_BASE, "(objectclass=*)", ["objectclasses"])
446
assert ent.hasAttr('objectclasses')
448
for objectclass in ent.getValues('objectclasses'):
449
if str(objectclass).find(OC2_NAME) >= 0:
454
def test_ticket47676_final(topology):
455
topology.master1.stop(timeout=10)
456
topology.master2.stop(timeout=10)
460
run_isolated is used to run these test cases independently of a test scheduler (xunit, py.test..)
461
To run isolated without py.test, you need to
462
- edit this file and comment '@pytest.fixture' line before 'topology' function.
463
- set the installation prefix
466
global installation1_prefix
467
global installation2_prefix
468
installation1_prefix = None
469
installation2_prefix = None
471
topo = topology(True)
472
topo.master1.log.info("\n\n######################### Ticket 47676 ######################\n")
473
test_ticket47676_init(topo)
475
test_ticket47676_skip_oc_at(topo)
476
test_ticket47676_reject_action(topo)
478
test_ticket47676_final(topo)
483
if __name__ == '__main__':