/* VerifyPN - TAPAAL Petri Net Engine
 * Copyright (C) 2016 Peter Gjøl Jensen <root@petergjoel.dk>
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

/*
 * File:   linked_bucket.h
 * Author: Peter G. Jensen
 *
 * Created on 07 June 2016, 21:51
 */
#ifndef LINKED_BUCKET_H
#define LINKED_BUCKET_H

#include <atomic>
#include <cstddef>
#include <cstring>
#include <iostream>
#include <vector>

/**
 * Grow-only container of elements of type T, allocated in buckets of C
 * elements. Ids handed out by next() are stable: elements never move, so
 * references returned by operator[] stay valid for the container's lifetime.
 *
 * Concurrency contract (as far as the visible code establishes it):
 *  - next(thread) may be called concurrently by distinct thread ids; a fresh
 *    bucket is CAS-appended to a single shared chain when a thread's current
 *    bucket is full.
 *  - operator[] may race with next(); if the index has not yet been extended
 *    for a freshly linked bucket, lookup falls back to walking the chain.
 */
template<typename T, size_t C>
class linked_bucket_t {
private:

    // One chunk of C elements; chunks are chained through _nbucket.
    struct bucket_t {
        std::atomic<bucket_t*> _nbucket; // next bucket in the shared chain
        std::atomic<size_t> _offset;     // global id of _data[0]
        size_t _count;                   // ids handed out from this bucket (owner-thread only)
        T _data[C];                      // payload; zeroed on creation
    } __attribute__ ((aligned (64)));    // cache-line aligned to limit false sharing

    // One level of the bucket index; each index_t chunk covers C*C consecutive ids.
    struct index_t {
        std::atomic<index_t*> _next;     // index chunk for ids >= C*C further on
        bucket_t* _index[C];             // _index[j] covers ids [j*C, (j+1)*C)
    } __attribute__ ((aligned (64)));

    bucket_t* _begin;               // first bucket; non-null after construction
    index_t* _index;                // first index chunk
    std::vector<bucket_t*> _tnext;  // per-thread current bucket (nullptr until first next())

public:

    /**
     * @param threads upper bound on distinct thread ids passed to next();
     *                a value of 0 is treated as 1.
     */
    linked_bucket_t(size_t threads)
    {
        _tnext.resize(threads == 0 ? 1 : threads, nullptr);
        _begin = new bucket_t;
        _begin->_nbucket = nullptr;
        _begin->_offset = 0;
        _begin->_count = 0;
        // NOTE: zeroing raw storage assumes T is trivially copyable — same
        // assumption the original code made with memset.
        memset(&_begin->_data, 0, sizeof(T) * C);
        _tnext[0] = _begin;

        _index = new index_t;
        _index->_next = nullptr;
        memset(&_index->_index, 0, sizeof(bucket_t*) * C);
        _index->_index[0] = _begin;
    }

    /** Frees every bucket and every index chunk. Not safe to run concurrently
     *  with any other member function. */
    virtual ~linked_bucket_t() {
        do {
            bucket_t* n = _begin->_nbucket.load();
            delete _begin;
            _begin = n;
        } while (_begin != nullptr);
        do {
            index_t* n = _index->_next.load();
            delete _index;
            _index = n;
        } while (_index != nullptr);
    }

    /** Access element with global id i (must have been returned by next()). */
    inline T& operator[](size_t i) {
        bucket_t* n = indexToBucket(i);
        if (n != nullptr)
            return n->_data[i % C];
        // Fallback: a bucket is linked into the chain before it is registered
        // in the index, so a concurrent reader may miss it there — walk the
        // chain instead.
        n = _begin;
        for (size_t j = i / C; j > 0; --j) {
            n = n->_nbucket.load();
            if (n == nullptr) std::cerr << "FAILED FETCHING ID: " << i << std::endl;
        }
        return n->_data[i % C];
    }

    /** Const access to element with global id i. */
    inline const T& operator[](size_t i) const {
        const bucket_t* n = indexToBucket(i);
        if (n != nullptr)
            return n->_data[i % C];
        n = _begin;
        for (size_t j = i / C; j > 0; --j) {
            n = n->_nbucket.load();
        }
        return n->_data[i % C];
    }

    /** Total number of ids handed out so far (linear in the bucket count). */
    inline size_t size() const {
        size_t cnt = 0;
        for (const bucket_t* n = _begin; n != nullptr; n = n->_nbucket.load())
            cnt += n->_count;
        return cnt;
    }

    /**
     * Reserve the next free global id on behalf of the given thread.
     * When the thread's bucket is full (or it has none yet) a fresh bucket is
     * CAS-appended to the shared chain and registered in the index.
     * @param thread caller's thread id, < threads given to the constructor.
     * @return the reserved global id.
     */
    inline size_t next(size_t thread) {
        if (_tnext[thread] == nullptr || _tnext[thread]->_count == C) {
            bucket_t* fresh = new bucket_t;
            fresh->_count = 0;
            fresh->_nbucket = nullptr;
            memset(&fresh->_data, 0, sizeof(T) * C);

            bucket_t* n = _tnext[thread];
            if (n == nullptr) n = _begin;
            // advance to the current tail of the chain
            for (bucket_t* nx = n->_nbucket.load(); nx != nullptr; nx = n->_nbucket.load())
                n = nx;
            fresh->_offset = n->_offset.load() + C; // beginning of next bucket
            bucket_t* expected = nullptr;
            // CAS-append; on failure another thread extended the tail first,
            // so chase the new tail and retry. A weak CAS may also fail
            // spuriously (expected stays nullptr) — do not follow a null tail.
            while (!n->_nbucket.compare_exchange_weak(expected, fresh)) {
                if (expected != nullptr) {
                    n = expected;
                    fresh->_offset = n->_offset.load() + C;
                }
                expected = nullptr;
            }
            _tnext[thread] = fresh;
            insertToIndex(fresh, fresh->_offset.load());
        }
        bucket_t* c = _tnext[thread];
        // return old counter value, then increment
        return c->_offset.load() + (c->_count++);
    }

private:

    /** Register bucket in the index slot covering global id. */
    inline void insertToIndex(bucket_t* bucket, size_t id)
    {
        index_t* tmp = _index;
        while (id >= C * C) { // each index chunk covers C*C consecutive ids
            index_t* old = tmp;
            tmp = old->_next.load();
            if (tmp == nullptr) {
                // extend index if needed
                index_t* nindex = new index_t;
                nindex->_next = nullptr;
                memset(&nindex->_index, 0, sizeof(bucket_t*) * C);
                if (old->_next.compare_exchange_strong(tmp, nindex)) {
                    tmp = nindex;
                } else {
                    // lost the race; tmp now holds the winner's chunk
                    delete nindex;
                }
            }
            id -= C * C;
        }
        tmp->_index[id / C] = bucket;
    }

    /** Resolve global id to its bucket via the index; nullptr when the id's
     *  bucket has not (yet) been registered. */
    inline bucket_t* indexToBucket(size_t id) const
    {
        const index_t* tmp = _index;
        while (id >= C * C) {
            tmp = tmp->_next.load();
            if (tmp == nullptr) return nullptr;
            id -= C * C;
        }
        return tmp->_index[id / C];
    }

} __attribute__ ((aligned (64)));

#endif /* LINKED_BUCKET_H */