~mordred/ubuntu/maverick/drizzle/prerelease

Viewing changes to plugin/pbxt/src/lock_xt.cc

  • Committer: Monty Taylor
  • Date: 2010-09-26 16:09:02 UTC
  • mto: This revision was merged to the branch mainline in revision 1383.
  • Revision ID: mordred@inaugust.com-20100926160902-r30v5hegk16cjk22
Tags: upstream-2010.09.1794
Import upstream version 2010.09.1794

=== modified file 'plugin/pbxt/src/lock_xt.cc'
--- plugin/pbxt/src/lock_xt.cc
+++ plugin/pbxt/src/lock_xt.cc
@@ -1141,9 +1141,25 @@
         sxs->sxs_locker = thd_id;
 #endif
 
-        /* Wait for all the reader to wait! */
-        while (sxs->sxs_wait_count < sxs->sxs_rlock_count)
-                xt_yield();
+        /* Wait for all the readers to wait! */
+        while (sxs->sxs_wait_count < sxs->sxs_rlock_count) {
+                sxs->sxs_xwaiter = 1;
+                xt_yield(); //*
+                /* This should not be required, because there is only one thread
+                 * accessing this value. However, the lock fails if this
+                 * is not done with an atomic op.
+                 *
+                 * This is because threads on other processors have the
+                 * value in processor cache. So they do not
+                 * notice that the value has been set to zero.
+                 * They think it is still 1 and march through
+                 * the barrier (sxs->sxs_xwaiter < sxs->sxs_xlocked) below.
+                 *
+                 * In the meantime, this X locker has gone on thinking
+                 * all is OK.
+                 */
+                xt_atomic_tas2(&sxs->sxs_xwaiter, 0);
+        }
 
 #ifdef XT_THREAD_LOCK_INFO
         xt_thread_lock_info_add_owner(&sxs->sxs_lock_info);
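This first hunk turns the X locker's wait loop into one half of a handshake: before each yield it raises sxs_xwaiter so an arriving reader can see that the exclusive locker is already parked in its wait loop, and it clears the flag with xt_atomic_tas2 because, as the new comment explains, a plain store could sit in one processor's cache while readers on other CPUs keep seeing a stale 1. A minimal sketch of the same publish-then-clear pattern in standard C11 atomics (model_sxs_t, model_xlock_wait and the use of sched_yield are illustrative stand-ins, not PBXT's actual types or primitives):

#include <stdatomic.h>
#include <sched.h>

/* Illustrative model only: PBXT uses its own xt_atomic_tas2()/xt_yield()
 * primitives; this sketch substitutes C11 atomics and sched_yield(). */
typedef struct {
        atomic_uint rlock_count;  /* readers that have registered interest  */
        atomic_uint wait_count;   /* readers currently parked waiting       */
        atomic_uint xwaiter;      /* X locker has announced it is waiting   */
        atomic_uint xlocked;      /* X locker holds (or is taking) the lock */
} model_sxs_t;

/* X-locker side, mirroring the patched loop: announce, yield, then clear
 * the announcement with a full-barrier exchange so the store becomes
 * visible to readers running on other processors. */
void model_xlock_wait(model_sxs_t *sxs)
{
        while (atomic_load(&sxs->wait_count) < atomic_load(&sxs->rlock_count)) {
                atomic_store(&sxs->xwaiter, 1);
                sched_yield();
                /* atomic_exchange stands in for xt_atomic_tas2(&xwaiter, 0). */
                atomic_exchange(&sxs->xwaiter, 0);
        }
}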
@@ -1155,12 +1171,12 @@
 {
         xt_atomic_inc2(&sxs->sxs_rlock_count);
 
-        /* Check if there could be an X locker: */
-        if (sxs->sxs_xlocked) {
-                /* I am waiting... */
+        /* Wait as long as the locker is not waiting: */
+        while (sxs->sxs_xwaiter < sxs->sxs_xlocked) {
                 xt_atomic_inc2(&sxs->sxs_wait_count);
-                while (sxs->sxs_xlocked)
+                while (sxs->sxs_xwaiter < sxs->sxs_xlocked) {
                         xt_yield();
+                }
                 xt_atomic_dec2(&sxs->sxs_wait_count);
         }
 
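The second hunk is the reader's half of the same handshake. A reader first registers its interest by incrementing sxs_rlock_count, and instead of spinning on sxs_xlocked alone it now compares sxs_xwaiter against sxs_xlocked, parking itself in sxs_wait_count only while an exclusive locker is active and has not yet signalled that it is waiting. Continuing the illustrative C11 model from above:

/* Reader side of the model: register intent, then park in wait_count
 * while an X locker is active (xlocked == 1) and has not yet raised
 * its xwaiter flag. */
void model_slock_wait(model_sxs_t *sxs)
{
        atomic_fetch_add(&sxs->rlock_count, 1);
        while (atomic_load(&sxs->xwaiter) < atomic_load(&sxs->xlocked)) {
                atomic_fetch_add(&sxs->wait_count, 1);
                while (atomic_load(&sxs->xwaiter) < atomic_load(&sxs->xlocked))
                        sched_yield();
                atomic_fetch_sub(&sxs->wait_count, 1);
        }
}

With both halves in place, a reader that arrives while an X lock is held parks in sxs_wait_count, which is exactly the count the X locker's loop above is waiting to see.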
@@ -1361,6 +1377,124 @@
 
 /*
  * -----------------------------------------------------------------------
+ * RECURSIVE R/W LOCK (allows X lockers to lock again)
+ */
+
+#ifdef XT_THREAD_LOCK_INFO
+void xt_recursivemutex_init(XTThreadPtr self, XTRecursiveMutexPtr rm, const char *name)
+{
+        rm->rm_locker = NULL;
+        rm->rm_lock_count = 0;
+        xt_init_mutex(self, &rm->rm_mutex, name);
+}
+#else
+xtPublic void xt_recursivemutex_init(XTThreadPtr self, XTRecursiveMutexPtr rm)
+{
+        rm->rm_locker = NULL;
+        rm->rm_lock_count = 0;
+        xt_init_mutex(self, &rm->rm_mutex);
+}
+#endif
+
+xtPublic void xt_recursivemutex_free(XTRecursiveMutexPtr rm)
+{
+        xt_free_mutex(&rm->rm_mutex);
+#ifdef XT_THREAD_LOCK_INFO
+        xt_thread_lock_info_free(&rm->rm_lock_info);
+#endif
+}
+
+xtPublic void xt_recursivemutex_lock(XTThreadPtr self, XTRecursiveMutexPtr rm)
+{
+        if (self != rm->rm_locker) {
+                xt_lock_mutex(self, &rm->rm_mutex);
+                rm->rm_locker = self;
+        }
+        rm->rm_lock_count++;
+}
+
+xtPublic void xt_recursivemutex_unlock(XTThreadPtr self, XTRecursiveMutexPtr rm)
+{
+        ASSERT(self == rm->rm_locker);
+        ASSERT(rm->rm_lock_count > 0);
+        rm->rm_lock_count--;
+        if (!rm->rm_lock_count) {
+                rm->rm_locker = NULL;
+                xt_unlock_mutex(self, &rm->rm_mutex);
+        }
+}
+
+/*
+ * -----------------------------------------------------------------------
+ * RECURSIVE MUTEX (allows lockers to lock again)
+ */
+
+#ifdef XT_THREAD_LOCK_INFO
+void xt_recurrwlock_init(struct XTThread *self, XTRecurRWLockPtr rrw, const char *name)
+{
+        rrw->rrw_locker = NULL;
+        rrw->rrw_lock_count = 0;
+        xt_init_rwlock(self, &rrw->rrw_lock, name);
+}
+#else
+void xt_recurrwlock_init(struct XTThread *self, XTRecurRWLockPtr rrw)
+{
+        rrw->rrw_locker = NULL;
+        rrw->rrw_lock_count = 0;
+        xt_init_rwlock(self, &rrw->rrw_lock);
+}
+#endif
+
+void xt_recurrwlock_free(XTRecurRWLockPtr rrw)
+{
+        xt_free_rwlock(&rrw->rrw_lock);
+#ifdef XT_THREAD_LOCK_INFO
+        xt_thread_lock_info_free(&rrw->rrw_lock_info);
+#endif
+}
+
+void xt_recurrwlock_xlock(struct XTThread *self, XTRecurRWLockPtr rrw)
+{
+        if (self != rrw->rrw_locker) {
+                xt_xlock_rwlock(self, &rrw->rrw_lock);
+                rrw->rrw_locker = self;
+        }
+        rrw->rrw_lock_count++;
+}
+
+void xt_recurrwlock_slock(struct XTThread *self, XTRecurRWLockPtr rrw)
+{
+        xt_slock_rwlock(self, &rrw->rrw_lock);
+}
+
+void xt_recurrwlock_slock_ns(XTRecurRWLockPtr rrw)
+{
+        xt_slock_rwlock_ns(&rrw->rrw_lock);
+}
+
+void xt_recurrwlock_unxlock(struct XTThread *self, XTRecurRWLockPtr rrw)
+{
+        ASSERT(self == rrw->rrw_locker);
+        ASSERT(rrw->rrw_lock_count > 0);
+        rrw->rrw_lock_count--;
+        if (!rrw->rrw_lock_count) {
+                rrw->rrw_locker = NULL;
+                xt_unlock_rwlock(self, &rrw->rrw_lock);
+        }
+}
+
+void xt_recurrwlock_unslock(struct XTThread *self, XTRecurRWLockPtr rrw)
+{
+        xt_unlock_rwlock(self, &rrw->rrw_lock);
+}
+
+void xt_recurrwlock_unslock_ns(XTRecurRWLockPtr rrw)
+{
+        xt_unlock_rwlock_ns(&rrw->rrw_lock);
+}
+
+/*
+ * -----------------------------------------------------------------------
  * UNIT TESTS
  */
 
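The final hunk adds thin recursive wrappers (xt_recursivemutex_* and xt_recurrwlock_*) over PBXT's plain mutex and rwlock. Both use the same ownership trick: the underlying lock is taken only when the caller is not already the recorded locker, and a nesting counter defers the real unlock until the outermost release. (One review note: the two banner comments appear to be swapped, since "RECURSIVE R/W LOCK" introduces the mutex wrappers and "RECURSIVE MUTEX" introduces the rwlock wrappers.) A hedged usage sketch, assuming the declarations from this patch are in scope; recursive_step is a hypothetical caller, not part of the patch:

/* Hypothetical caller: re-entry is safe because xt_recursivemutex_lock()
 * only takes rm_mutex on the first, outermost acquisition; the nested
 * calls just bump rm_lock_count. */
void recursive_step(XTThreadPtr self, XTRecursiveMutexPtr rm, int depth)
{
        xt_recursivemutex_lock(self, rm);        /* outermost call locks rm_mutex */
        if (depth > 0)
                recursive_step(self, rm, depth - 1);  /* same thread, no deadlock */
        xt_recursivemutex_unlock(self, rm);      /* rm_mutex released at depth 0 */
}

Reading rm->rm_locker without holding the mutex works here because only the current owner could have stored this thread's own pointer; every other thread fails the comparison and falls through to a normal blocking lock.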