/* VerifyPN - TAPAAL Petri Net Engine
 * Copyright (C) 2016 Peter Gjøl Jensen <root@petergjoel.dk>
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

/*
 * File:   linked_bucket.h
 * Author: Peter G. Jensen
 *
 * Created on 07 June 2016, 21:51
 */

#ifndef LINKED_BUCKET_H
#define LINKED_BUCKET_H
#include <atomic>
#include <cassert>
#include <cstddef>
#include <cstring>
#include <iostream>
#include <vector>

/**
 * A grow-only container of elements of type T, stored in chunks
 * ("buckets") of C elements each. Element ids are dense, stable for the
 * lifetime of the container, and reachable in O(1) via a chained index.
 *
 * Concurrency model (as reconstructed from the original):
 *  - each thread reserves ids through next(thread) with its own thread id;
 *  - buckets are appended to a single global chain with a CAS loop;
 *  - operator[] is wait-free on the fast path (index lookup) and falls
 *    back to walking the bucket chain while a bucket is being published.
 */
template<typename T, size_t C>
class linked_bucket_t {
private:

    // One cache-line-aligned chunk of C elements, linked into the global chain.
    struct bucket_t {
        std::atomic<bucket_t*> _nbucket; // successor in the global bucket chain
        std::atomic<size_t> _offset;     // global id of _data[0]
        size_t _count;                   // slots handed out; written only by the owning thread
        T _data[C];                      // element storage
    } __attribute__ ((aligned (64)));

    // One segment of the bucket index; each segment maps C buckets (C*C ids).
    struct index_t {
        std::atomic<index_t*> _next; // next index segment
        bucket_t* _index[C];         // direct bucket pointers for O(1) lookup
    } __attribute__ ((aligned (64)));

    bucket_t* _begin;               // head of the bucket chain; ids [0, C)
    index_t* _index;                // head of the index chain
    std::vector<bucket_t*> _tnext;  // per-thread bucket currently being filled

public:

    /**
     * Constructs the container with one pre-allocated bucket (ids [0, C)).
     * @param threads number of threads that may call next()/pop_back();
     *        each caller must use a distinct thread id in [0, threads).
     */
    linked_bucket_t(size_t threads)
    : _begin(nullptr), _index(nullptr), _tnext(threads, nullptr) {
        _begin = new bucket_t;
        _begin->_count = 0;
        _begin->_offset = 0;
        _begin->_nbucket = nullptr;
        // NOTE(review): zero-filling T via memset assumes T is trivially
        // copyable -- confirm for all instantiations in the project.
        memset(&_begin->_data, 0, sizeof(T) * C);
        _tnext[0] = _begin; // thread 0 starts by filling the first bucket

        _index = new index_t;
        _index->_next = nullptr;
        memset(&_index->_index, 0, sizeof(bucket_t*) * C);
        _index->_index[0] = _begin;
    }

    // Frees every bucket and every index segment. Not thread-safe: all
    // worker threads must have finished before destruction.
    ~linked_bucket_t() {
        while (_begin != nullptr) {
            bucket_t* nxt = _begin->_nbucket.load();
            delete _begin;
            _begin = nxt;
        }
        while (_index != nullptr) {
            index_t* nxt = _index->_next.load();
            delete _index;
            _index = nxt;
        }
    }

    /**
     * Access element with global id i.
     * Fast path: O(1) via the index. If the owning bucket has not been
     * published in the index yet (racing insertToIndex), falls back to
     * walking the bucket chain from the beginning.
     */
    inline T& operator[](size_t i) {
        bucket_t* b = indexToBucket(i);
        if (b != nullptr) {
            return b->_data[i % C];
        }
        // Slow path: the k-th bucket in the chain covers ids [k*C, (k+1)*C).
        b = _begin;
        for (size_t skip = i / C; skip > 0; --skip) {
            b = b->_nbucket.load();
            if (b == nullptr) std::cerr << "FAILED FETCHING ID: " << i << std::endl;
            assert(b != nullptr);
        }
        return b->_data[i % C];
    }

    // Const access; same lookup strategy as the mutable overload.
    inline const T& operator[](size_t i) const {
        bucket_t* b = indexToBucket(i);
        if (b != nullptr) {
            return b->_data[i % C];
        }
        b = _begin;
        for (size_t skip = i / C; skip > 0; --skip) {
            b = b->_nbucket.load();
            assert(b != nullptr);
        }
        return b->_data[i % C];
    }

    // Sum of per-bucket counts over the whole chain. Only exact when no
    // other thread is concurrently reserving ids.
    inline size_t size() const {
        size_t cnt = 0;
        bucket_t* b = _begin;
        while (b != nullptr) {
            cnt += b->_count;
            b = b->_nbucket.load();
        }
        return cnt;
    }

    /**
     * Reserves the next free global id for the given thread, allocating
     * and linking a fresh bucket when the thread's current bucket is full
     * (or when the thread has none yet).
     * @param thread caller's thread id, < the count given at construction
     * @return the reserved id; stable for the container's lifetime
     */
    inline size_t next(size_t thread) {
        assert(thread < _tnext.size());
        if (_tnext[thread] == nullptr || _tnext[thread]->_count == C) {
            bucket_t* fresh = new bucket_t;
            fresh->_count = 0;
            fresh->_nbucket = nullptr;
            memset(&fresh->_data, 0, sizeof(T) * C);

            bucket_t* tail = _tnext[thread];
            if (tail == nullptr) tail = _begin; // first reservation by this thread
            fresh->_offset = tail->_offset.load() + C; // beginning of next

            // Append at the end of the global chain. A failed CAS means some
            // other thread extended the chain first: advance to the observed
            // successor, recompute the offset, and retry from there.
            bucket_t* expected = nullptr;
            while (!tail->_nbucket.compare_exchange_weak(expected, fresh)) {
                if (expected != nullptr) {
                    tail = expected;
                    fresh->_offset = tail->_offset.load() + C;
                    expected = nullptr;
                }
                // spurious failure (expected still nullptr): just retry
            }
            _tnext[thread] = fresh;
            insertToIndex(fresh, fresh->_offset);
        }

        bucket_t* c = _tnext[thread];
        // return old counter value, then increment
        return c->_offset + (c->_count++);
    }

    // Releases the most recently reserved id of this thread. Only valid
    // while the thread's current bucket still has reserved slots.
    inline void pop_back(size_t thread) {
        assert(_tnext[thread] != nullptr && _tnext[thread]->_count > 0);
        --_tnext[thread]->_count;
    }

private:

    // Publishes a bucket in the index so operator[] can find it in O(1).
    // Extends the index chain (lock-free) when id lies beyond the segments
    // allocated so far; the loser of the extension race frees its segment
    // and adopts the winner's (fixes a leak in the original CAS path).
    // NOTE(review): the _index[] slot itself is written without
    // synchronization; readers fall back to the chain walk until the write
    // becomes visible -- confirm this matches the intended memory model.
    inline void insertToIndex(bucket_t* bucket, size_t id) {
        index_t* seg = _index;
        while (id >= C * C) { // each segment indexes C buckets = C*C ids
            id -= C * C;
            index_t* old = seg;
            seg = old->_next.load();
            if (seg == nullptr) {
                // extend index if needed
                index_t* nindex = new index_t;
                nindex->_next = nullptr;
                memset(&nindex->_index, 0, sizeof(bucket_t*) * C);
                index_t* expected = nullptr;
                if (old->_next.compare_exchange_strong(expected, nindex)) {
                    seg = nindex;
                } else {
                    delete nindex;  // lost the race; use the winner's segment
                    seg = expected;
                }
            }
        }
        seg->_index[id / C] = bucket;
    }

    // Index lookup: returns the bucket owning id, or nullptr when the id's
    // segment or bucket has not been published yet.
    inline bucket_t* indexToBucket(size_t id) const {
        index_t* seg = _index;
        while (id >= C * C) {
            id -= C * C;
            seg = seg->_next.load();
            if (seg == nullptr) return nullptr;
        }
        return seg->_index[id / C];
    }
} __attribute__ ((aligned (64)));
#endif /* LINKED_BUCKET_H */