Merge lp:~verifypn-cpn/verifypn/eq-push-fix into lp:~verifypn-cpn/verifypn/MCC19

Proposed by Peter Gjøl Jensen on 2019-04-17
Status: Needs review
Proposed branch: lp:~verifypn-cpn/verifypn/eq-push-fix
Merge into: lp:~verifypn-cpn/verifypn/MCC19
Diff against target: 1712 lines (+344/-296)
12 files modified
CTL/SearchStrategy/RDFSSearch.cpp (+6/-8)
CTL/SearchStrategy/SearchStrategy.h (+1/-0)
PetriEngine/Colored/Expressions.h (+1/-0)
PetriEngine/PQL/Expressions.cpp (+7/-9)
PetriEngine/Simplification/LinearPrograms.h (+1/-0)
PetriEngine/Structures/Queue.cpp (+6/-7)
PetriEngine/Structures/binarywrapper.cpp (+16/-22)
PetriEngine/Structures/binarywrapper.h (+94/-32)
PetriEngine/Structures/linked_bucket.h (+26/-20)
PetriEngine/Structures/ptrie.h (+170/-184)
PetriEngine/Structures/ptrie_stable.h (+14/-12)
makefile.osx64 (+2/-2)
To merge this branch: bzr merge lp:~verifypn-cpn/verifypn/eq-push-fix
Reviewer: Jiri Srba (review requested 2019-04-17, status: Pending)
Review via email: mp+366157@code.launchpad.net

Commit message

Fixes the rewrite of X=Y into <= or >= constraints when either X or Y is the constant 0 or 1.
The bug was triggered because the two cases were not handled symmetrically (an indexing error: the rewrite always targeted the second operand, even when that operand was the constant side).
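
For illustration, a self-contained toy model of the corrected logic (names are illustrative, not the engine's PQL API): when side i of X=Y is a constant 0, the resulting constraint must be placed on the opposite side (i + 1) % 2; the buggy version hard-coded side 1 in both branches.

    #include <array>
    #include <cassert>
    #include <string>

    // Toy model of the fix; names are illustrative, not the engine's API.
    std::string rewriteEqualZero(const std::array<bool, 2>& isConstZero,
                                 bool negated, bool noteq)
    {
        for (auto i : {0, 1}) {
            if (isConstZero[i]) {
                int other = (i + 1) % 2;  // the place-dependent side
                return (negated == noteq)
                    ? "side" + std::to_string(other) + " <= 0"   // X = 0
                    : "side" + std::to_string(other) + " >= 1";  // X != 0
            }
        }
        return (negated == noteq) ? "X = Y" : "X != Y";
    }

    int main()
    {
        // Constant on the left: the constraint targets side 1.
        assert(rewriteEqualZero({true, false}, false, false) == "side1 <= 0");
        // Constant on the right: it must target side 0; the old code
        // returned side 1 here, i.e. a constraint on the constant itself.
        assert(rewriteEqualZero({false, true}, false, false) == "side0 <= 0");
        return 0;
    }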

Unmerged revisions

242. By <email address hidden> on 2019-09-06

fixed warnings of missing virtual destructors

241. By Peter Gjøl Jensen on 2019-09-05

should compile and run on osx

240. By Peter Gjøl Jensen on 2019-07-12

updated ptries

239. By Peter Gjøl Jensen on 2019-04-17

fixed error in re-write of equality into simpler form

Preview Diff

1=== modified file 'CTL/SearchStrategy/RDFSSearch.cpp'
2--- CTL/SearchStrategy/RDFSSearch.cpp 2018-03-07 14:21:37 +0000
3+++ CTL/SearchStrategy/RDFSSearch.cpp 2019-09-06 10:48:45 +0000
4@@ -8,6 +8,9 @@
5 #include "RDFSSearch.h"
6 #include "CTL/DependencyGraph/Configuration.h"
7
8+#include <algorithm>
9+#include <random>
10+
11 namespace SearchStrategy {
12 size_t RDFSSearch::Wsize() const {
13 return W.size();
14@@ -25,16 +28,11 @@
15 W.push_back(edge);
16 }
17
18-
19-size_t genrand(size_t i)
20-{
21- return std::rand() % i;
22-}
23-
24+auto rng = std::default_random_engine {};
25 void RDFSSearch::flush() {
26 last_parent = std::min(last_parent, W.size());
27- std::random_shuffle(W.begin() + last_parent, W.end(), genrand);
28+ std::shuffle(W.begin() + last_parent, W.end(), rng);
29 last_parent = W.size();
30 }
31
32-}
33\ No newline at end of file
34+}
35
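
A note on the hunk above: std::random_shuffle was deprecated in C++14 and removed in C++17, which the makefiles now target, so the switch to std::shuffle is required rather than cosmetic; the same substitution appears in PetriEngine/Structures/Queue.cpp below. A minimal sketch of the idiom, assuming reproducibility across runs is wanted (a default-constructed engine has a fixed seed, matching the engine declared in the patch):

    #include <algorithm>
    #include <random>
    #include <vector>

    int main()
    {
        std::vector<int> v{1, 2, 3, 4, 5};
        // Default-constructed engine: deterministic seed, so successive
        // runs produce the same permutation sequence.
        std::default_random_engine rng{};
        std::shuffle(v.begin(), v.end(), rng);
        return 0;
    }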
36=== modified file 'CTL/SearchStrategy/SearchStrategy.h'
37--- CTL/SearchStrategy/SearchStrategy.h 2018-03-07 14:21:37 +0000
38+++ CTL/SearchStrategy/SearchStrategy.h 2019-09-06 10:48:45 +0000
39@@ -29,6 +29,7 @@
40 class SearchStrategy
41 {
42 public:
43+ virtual ~SearchStrategy() {};
44 bool empty() const;
45 void pushEdge(DependencyGraph::Edge *edge);
46 void pushDependency(DependencyGraph::Edge* edge);
47
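
The one-line destructor above (and the analogous additions to Colored/Expressions.h and Simplification/LinearPrograms.h below) addresses more than a warning: deleting a derived object through a base-class pointer whose destructor is not virtual is undefined behaviour, and the derived part is never destroyed. A minimal sketch with purely illustrative types:

    #include <memory>

    struct Base {
        virtual ~Base() = default;  // without this, the delete below is UB
    };

    struct Derived : Base {
        ~Derived() override { /* Derived-owned resources released here */ }
    };

    int main()
    {
        std::unique_ptr<Base> p = std::make_unique<Derived>();
        return 0;
    }   // ~Derived runs correctly because ~Base is virtual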
48=== modified file 'PetriEngine/Colored/Expressions.h'
49--- PetriEngine/Colored/Expressions.h 2018-06-25 09:42:12 +0000
50+++ PetriEngine/Colored/Expressions.h 2019-09-06 10:48:45 +0000
51@@ -455,6 +455,7 @@
52 ColorType* _sort;
53
54 public:
55+ virtual ~AllExpression() {};
56 std::vector<const Color*> eval(ExpressionContext& context) const {
57 std::vector<const Color*> colors;
58 assert(_sort != nullptr);
59
60=== modified file 'PetriEngine/PQL/Expressions.cpp'
61--- PetriEngine/PQL/Expressions.cpp 2019-02-11 21:18:10 +0000
62+++ PetriEngine/PQL/Expressions.cpp 2019-09-06 10:48:45 +0000
63@@ -3443,15 +3443,13 @@
64 Condition_ptr pushEqual(CompareCondition* org, bool negated, bool noteq, const EvaluationContext& context)
65 {
66 if(org->isTrivial()) return BooleanCondition::getShared(org->evaluate(context) xor negated);
67- if((*org)[0]->placeFree() && (*org)[0]->evaluate(context) == 0)
68- {
69- if(negated == noteq) return std::make_shared<LessThanOrEqualCondition>((*org)[1], std::make_shared<LiteralExpr>(0));
70- else return std::make_shared<GreaterThanOrEqualCondition>((*org)[1], std::make_shared<LiteralExpr>(1));
71- }
72- if((*org)[1]->placeFree() && (*org)[1]->evaluate(context) == 0)
73- {
74- if(negated == noteq) return std::make_shared<LessThanOrEqualCondition>((*org)[1], std::make_shared<LiteralExpr>(0));
75- else return std::make_shared<GreaterThanOrEqualCondition>((*org)[1], std::make_shared<LiteralExpr>(1));
76+ for(auto i : {0,1})
77+ {
78+ if((*org)[i]->placeFree() && (*org)[i]->evaluate(context) == 0)
79+ {
80+ if(negated == noteq) return std::make_shared<LessThanOrEqualCondition>((*org)[(i + 1) % 2], std::make_shared<LiteralExpr>(0));
81+ else return std::make_shared<GreaterThanOrEqualCondition>((*org)[(i + 1) % 2], std::make_shared<LiteralExpr>(1));
82+ }
83 }
84 if(negated == noteq) return std::make_shared<EqualCondition>((*org)[0], (*org)[1]);
85 else return std::make_shared<NotEqualCondition>((*org)[0], (*org)[1]);
86
87=== modified file 'PetriEngine/Simplification/LinearPrograms.h'
88--- PetriEngine/Simplification/LinearPrograms.h 2017-12-11 08:24:36 +0000
89+++ PetriEngine/Simplification/LinearPrograms.h 2019-09-06 10:48:45 +0000
90@@ -17,6 +17,7 @@
91 virtual void satisfiableImpl(const PQL::SimplificationContext& context, uint32_t solvetime) = 0;
92 bool has_empty = false;
93 public:
94+ virtual ~AbstractProgramCollection() {};
95 bool empty() { return has_empty; }
96
97 virtual bool satisfiable(const PQL::SimplificationContext& context, uint32_t solvetime = std::numeric_limits<uint32_t>::max())
98
99=== modified file 'PetriEngine/Structures/Queue.cpp'
100--- PetriEngine/Structures/Queue.cpp 2017-11-26 13:43:04 +0000
101+++ PetriEngine/Structures/Queue.cpp 2019-09-06 10:48:45 +0000
102@@ -18,6 +18,9 @@
103 #include "Queue.h"
104 #include "../PQL/Contexts.h"
105
106+#include <algorithm>
107+#include <random>
108+
109 namespace PetriEngine {
110 namespace Structures {
111 Queue::Queue(StateSetInterface* states) : _states(states) {}
112@@ -70,12 +73,8 @@
113
114 RDFSQueue::RDFSQueue(StateSetInterface* states) : Queue(states) {}
115 RDFSQueue::~RDFSQueue(){}
116-
117- size_t genrand(size_t i)
118- {
119- return std::rand() % i;
120- }
121-
122+
123+ auto rng = std::default_random_engine {};
124 bool RDFSQueue::pop(Structures::State& state)
125 {
126 if(_cache.empty())
127@@ -88,7 +87,7 @@
128 }
129 else
130 {
131- std::random_shuffle ( _cache.begin(), _cache.end(), genrand );
132+ std::shuffle ( _cache.begin(), _cache.end(), rng );
133 uint32_t n = _cache.back();
134 _states->decode(state, n);
135 for(size_t i = 0; i < (_cache.size() - 1); ++i)
136
137=== modified file 'PetriEngine/Structures/binarywrapper.cpp'
138--- PetriEngine/Structures/binarywrapper.cpp 2017-04-12 14:36:04 +0000
139+++ PetriEngine/Structures/binarywrapper.cpp 2019-09-06 10:48:45 +0000
140@@ -22,8 +22,6 @@
141 * Created on 10 June 2015, 19:20
142 */
143
144-#include <sstream>
145-
146 #include "binarywrapper.h"
147 namespace ptrie
148 {
149@@ -39,7 +37,7 @@
150 static_cast <uchar>(0x01)
151 };
152
153- size_t binarywrapper_t::overhead(size_t size)
154+ size_t binarywrapper_t::overhead(uint size)
155 {
156 size = size % 8;
157 if (size == 0)
158@@ -49,17 +47,15 @@
159 }
160
161
162- size_t binarywrapper_t::bytes(size_t size)
163+ size_t binarywrapper_t::bytes(uint size)
164 {
165 return (size + overhead(size))/8;
166 }
167
168
169- binarywrapper_t::binarywrapper_t(size_t size)
170+ binarywrapper_t::binarywrapper_t(uint size)
171 {
172- _nbytes = (size + overhead(size));
173- _nbytes /= 8;
174- assert(_nbytes >= (size/8));
175+ _nbytes = (size + overhead(size)) / 8;
176 _blob = zallocate(_nbytes);
177 }
178
179@@ -81,12 +77,12 @@
180 }
181
182 binarywrapper_t::binarywrapper_t
183- (uchar* raw, uint size, uint offset, uint encodingsize)
184+ (uchar* org, uint size, uint offset, uint encodingsize)
185 {
186 if(size == 0 || offset >= encodingsize)
187 {
188 _nbytes = 0;
189- _blob = NULL;
190+ _blob = nullptr;
191 return;
192 }
193
194@@ -98,31 +94,31 @@
195 _nbytes -= offset;
196 else {
197 _nbytes = 0;
198- _blob = NULL;
199+ _blob = nullptr;
200 return;
201 }
202
203- uchar* tmp = &(raw[offset]);
204+ uchar* tmp = &(org[offset]);
205 if(_nbytes <= __BW_BSIZE__)
206 {
207- memcpy(const_raw(), tmp, _nbytes);
208+ memcpy(raw(), tmp, _nbytes);
209 }
210 else
211 {
212 _blob = tmp;
213 }
214- assert(raw[offset] == const_raw()[0]);
215+ assert(org[offset] == raw()[0]);
216
217 }
218
219
220- binarywrapper_t::binarywrapper_t(uchar* raw, uint size)
221+ binarywrapper_t::binarywrapper_t(uchar* org, uint size)
222 {
223 _nbytes = size / 8 + (size % 8 ? 1 : 0);
224- _blob = raw;
225+ _blob = org;
226
227 if(_nbytes <= __BW_BSIZE__)
228- memcpy(const_raw(), raw, _nbytes);
229+ memcpy(raw(), org, _nbytes);
230
231 // assert(raw[0] == const_raw()[0]);
232 }
233@@ -155,13 +151,11 @@
234
235 void binarywrapper_t::print(std::ostream& stream, size_t length) const
236 {
237- std::stringstream ss;
238- ss << _nbytes << " bytes : ";
239+ stream << _nbytes << " bytes : ";
240 for (size_t i = 0; i < _nbytes * 8 && i < length; i++) {
241- if (i % 8 == 0 && i != 0) ss << "-";
242- ss << this->at(i);
243+ if (i % 8 == 0 && i != 0) stream << "-";
244+ stream << this->at(i);
245 }
246- stream << ss.str();
247 }
248 }
249
250
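
Background for the raw()/const_raw() changes above: binarywrapper_t uses a small-buffer optimization, storing payloads of at most __BW_BSIZE__ (pointer-sized) bytes inline in the _blob field itself rather than on the heap. A simplified, self-contained sketch of that layout; it omits the real class's offset() alignment detail, and the type and member names are illustrative:

    #include <cstdint>
    #include <cstring>

    struct small_buf {
        unsigned char* _blob = nullptr;   // pointer OR inline storage
        uint16_t _nbytes = 0;

        void assign(const unsigned char* src, uint16_t n) {
            _nbytes = n;
            if (n <= sizeof(_blob))  // fits inside the pointer field itself
                std::memcpy(reinterpret_cast<unsigned char*>(&_blob), src, n);
            else
                _blob = const_cast<unsigned char*>(src);  // external storage
        }

        const unsigned char* raw() const {
            return _nbytes <= sizeof(_blob)
                ? reinterpret_cast<const unsigned char*>(&_blob)
                : _blob;
        }
    };

    int main()
    {
        unsigned char data[4] = {1, 2, 3, 4};
        small_buf b;
        b.assign(data, 4);   // 4 <= sizeof(void*): copied inline, no heap
        return b.raw()[2] == 3 ? 0 : 1;
    }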
251=== modified file 'PetriEngine/Structures/binarywrapper.h'
252--- PetriEngine/Structures/binarywrapper.h 2017-04-12 14:36:04 +0000
253+++ PetriEngine/Structures/binarywrapper.h 2019-09-06 10:48:45 +0000
254@@ -27,17 +27,19 @@
255 #include <assert.h>
256 #include <limits>
257 #include <stdlib.h>
258+#ifndef __clang__
259+#include <malloc.h>
260+#endif
261 #include <stdint.h>
262
263 #ifndef BINARYWRAPPER_H
264 #define BINARYWRAPPER_H
265
266-#define __BW_BSIZE__ sizeof(size_t) // SIZE OF POINTER!
267 namespace ptrie
268 {
269 typedef unsigned int uint;
270 typedef unsigned char uchar;
271-
272+ constexpr auto __BW_BSIZE__ = sizeof(size_t); // SIZE OF POINTER!
273 /**
274 * Wrapper for binary data. This provides easy access to individual bits,
275 * heap allocation and comparison. Notice that one has to make sure to
276@@ -52,7 +54,7 @@
277 * Empty constructor, no data is allocated
278 */
279
280- binarywrapper_t() : _blob(NULL), _nbytes(0)
281+ binarywrapper_t() : _blob(nullptr), _nbytes(0)
282 {
283 }
284
285@@ -60,7 +62,7 @@
286 Allocates a room for at least size bits
287 */
288
289- binarywrapper_t(size_t size);
290+ binarywrapper_t(uint size);
291
292 /**
293 * Constructor for copying over data from latest the offset'th bit.
294@@ -94,11 +96,11 @@
295
296 /**
297 * Assign (not copy) raw data to pointer. Set number of bytes to size
298- * @param raw: some memory to point to
299+ * @param org: some memory to point to
300 * @param size: number of bytes.
301 */
302
303- binarywrapper_t(uchar* raw, uint size);
304+ binarywrapper_t(uchar* org, uint size);
305
306 /**
307 * Empty destructor. Does NOT deallocate data - do this with explicit
308@@ -130,11 +132,11 @@
309 /**
310 * Copy over size bytes form raw data. Assumes that current wrapper has
311 * enough room.
312- * @param raw: source data
313+ * @param org: source data
314 * @param size: number of bytes to copy
315 */
316
317- void copy(const uchar* raw, uint size);
318+ void copy(const uchar* org, uint size);
319
320 // accessors
321 /**
322@@ -169,7 +171,7 @@
323 * @return
324 */
325
326- inline uchar* const_raw() const
327+ inline const uchar* const_raw() const
328 {
329 if(_nbytes <= __BW_BSIZE__) return offset((uchar*)&_blob, _nbytes);
330 else
331@@ -183,7 +185,7 @@
332
333 inline uchar* raw()
334 {
335- return const_raw();
336+ return const_cast<uchar*>(const_raw());
337 }
338
339 /**
340@@ -199,10 +201,10 @@
341 * @return
342 */
343
344- static size_t overhead(size_t size);
345-
346-
347- static size_t bytes(size_t size);
348+ static size_t overhead(uint size);
349+
350+
351+ static size_t bytes(uint size);
352 // modifiers
353 /**
354 * Change value of place'th bit
355@@ -210,15 +212,15 @@
356 * @param value: desired value
357 */
358
359- inline void set(const uint place, const bool value) const
360+ inline void set(const uint place, const bool value)
361 {
362 assert(place < _nbytes*8);
363 uint offset = place % 8;
364 uint theplace = place / 8;
365 if (value) {
366- const_raw()[theplace] |= _masks[offset];
367+ raw()[theplace] |= _masks[offset];
368 } else {
369- const_raw()[theplace] &= ~_masks[offset];
370+ raw()[theplace] &= ~_masks[offset];
371 }
372 }
373
374@@ -226,11 +228,11 @@
375 * Sets all memory on heap to 0
376 */
377
378- inline void zero() const
379+ inline void zero()
380 {
381- if(_nbytes > 0 && _blob != NULL)
382+ if(_nbytes > 0 && _blob != nullptr)
383 {
384- memset(const_raw(), 0x0, _nbytes);
385+ memset(raw(), 0x0, _nbytes);
386 }
387 }
388
389@@ -242,7 +244,7 @@
390 {
391 if(_nbytes > __BW_BSIZE__)
392 dealloc(_blob);
393- _blob = NULL;
394+ _blob = nullptr;
395 _nbytes = 0;
396 }
397
398@@ -252,7 +254,7 @@
399 * @return
400 */
401
402- inline uchar operator[](size_t i) const
403+ inline uchar operator[](int i) const
404 {
405 if (i >= _nbytes) {
406 return 0x0;
407@@ -277,18 +279,78 @@
408 }
409
410 /**
411- * Compares wrappers bytes by bytes. If sizes do not match, they are not
412- * equal. If sizes match, compares byte by byte.
413+ * If sizes differ, the comparison is done here.
414+ * If sizes match, compares byte by byte.
415 * @param enc1
416 * @param enc2
417 * @return true if a match, false otherwise
418- */
419- friend bool operator==( const binarywrapper_t &enc1,
420- const binarywrapper_t &enc2) {
421+ */
422+ friend bool operator==( const binarywrapper_t &enc1,
423+ const binarywrapper_t &enc2) {
424 return enc1.cmp(enc2) == 0;
425 }
426-
427- const static uchar _masks[8];
428+
429+ /**
430+ * If sizes differ, the comparison is done here.
431+ * If sizes match, compares byte by byte.
432+ * @param enc1
433+ * @param enc2
434+ * @return true if enc1 is less than enc2, false otherwise
435+ */
436+ friend bool operator<(const binarywrapper_t &enc1,
437+ const binarywrapper_t &enc2) {
438+ return enc1.cmp(enc2) < 0;
439+ }
440+
441+ /**
442+ * If sizes differ, the comparison is done here.
443+ * If sizes match, compares byte by byte.
444+ * @param enc1
445+ * @param enc2
446+ * @return true if not a match, false otherwise
447+ */
448+ friend bool operator!=(const binarywrapper_t &enc1,
449+ const binarywrapper_t &enc2) {
450+ return !(enc1 == enc2);
451+ }
452+
453+ /**
454+ * If sizes differ, the comparison is done here.
455+ * If sizes match, compares byte by byte.
456+ * @param enc1
457+ * @param enc2
458+ * @return true if enc1 is greater than or equal to enc2, false otherwise
459+ */
460+ friend bool operator>=(const binarywrapper_t &enc1,
461+ const binarywrapper_t &enc2) {
462+ return !(enc1 < enc2);
463+ }
464+
465+ /**
466+ * If sizes differ, the comparison is done here.
467+ * If sizes match, compares byte by byte.
468+ * @param enc1
469+ * @param enc2
470+ * @return true if enc1 is greater than enc2, false otherwise
471+ */
472+ friend bool operator>(const binarywrapper_t &enc1,
473+ const binarywrapper_t &enc2) {
474+ return enc2 < enc1;
475+ }
476+
477+ /**
478+ * If sizes differ, the comparison is done here.
479+ * If sizes match, compares byte by byte.
480+ * @param enc1
481+ * @param enc2
482+ * @return true if enc1 is less than or equal to enc2, false otherwise
483+ */
484+ friend bool operator<=(const binarywrapper_t &enc1,
485+ const binarywrapper_t &enc2) {
486+ return !(enc2 < enc1);
487+ }
488+
489+ const static uchar _masks[8];
490 private:
491
492 static inline uchar* allocate(size_t n)
493@@ -324,7 +386,7 @@
494 free(data);
495 }
496
497- static inline uchar* offset(uchar* data, uint32_t size)
498+ static inline uchar* offset(uchar* data, uint16_t size)
499 {
500 // if((size % __BW_BSIZE__) == 0) return data;
501 // else return &data[(__BW_BSIZE__ - (size % __BW_BSIZE__))];
502@@ -332,10 +394,10 @@
503 }
504
505 // blob of heap-allocated data
506- uchar* _blob;
507+ uchar* _blob = nullptr;
508
509 // number of bytes allocated on heap
510- uint32_t _nbytes;
511+ uint16_t _nbytes = 0;
512
513 // masks for single-bit access
514 } __attribute__((packed));
515
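
All six comparison operators now reduce to the three-way cmp(): only == and < consult it directly, and the other four follow mechanically from those two. A compact sketch of the pattern, using an illustrative type rather than the real binarywrapper_t:

    #include <cstring>

    // Illustrative type; binarywrapper_t likewise compares sizes first,
    // then bytes.
    struct bytes_t {
        const unsigned char* data;
        unsigned len;

        int cmp(const bytes_t& o) const {
            if (len != o.len) return len < o.len ? -1 : 1;  // size decides first
            return std::memcmp(data, o.data, len);          // then byte by byte
        }
    };

    inline bool operator==(const bytes_t& a, const bytes_t& b) { return a.cmp(b) == 0; }
    inline bool operator< (const bytes_t& a, const bytes_t& b) { return a.cmp(b) < 0; }
    inline bool operator!=(const bytes_t& a, const bytes_t& b) { return !(a == b); }
    inline bool operator> (const bytes_t& a, const bytes_t& b) { return b < a; }
    inline bool operator>=(const bytes_t& a, const bytes_t& b) { return !(a < b); }
    inline bool operator<=(const bytes_t& a, const bytes_t& b) { return !(b < a); }

    int main()
    {
        unsigned char x[2] = {1, 2}, y[2] = {1, 3};
        bytes_t a{x, 2}, b{y, 2};
        return (a < b && a <= b && a != b && !(a > b)) ? 0 : 1;
    }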
516=== modified file 'PetriEngine/Structures/linked_bucket.h'
517--- PetriEngine/Structures/linked_bucket.h 2016-10-31 21:35:58 +0000
518+++ PetriEngine/Structures/linked_bucket.h 2019-09-06 10:48:45 +0000
519@@ -55,42 +55,42 @@
520 linked_bucket_t(size_t threads)
521 : _tnext(threads) {
522 for (size_t i = 0; i < threads; ++i) {
523- _tnext[i] = NULL;
524+ _tnext[i] = nullptr;
525 }
526 _begin = new bucket_t;
527 _begin->_count = 0;
528 _begin->_offset = 0;
529- _begin->_nbucket = NULL;
530+ _begin->_nbucket = nullptr;
531 _tnext[0] = _begin;
532
533 _index = new index_t;
534- _index->_next = NULL;
535+ _index->_next = nullptr;
536
537 memset(&_begin->_data, 0, sizeof(T)*C);
538 memset(&_index->_index, 0, sizeof(bucket_t*)*C);
539 _index->_index[0] = _begin;
540 }
541
542- virtual ~linked_bucket_t() {
543+ ~linked_bucket_t() {
544
545 do {
546 bucket_t* n = _begin->_nbucket.load();
547 delete _begin;
548 _begin = n;
549
550- } while (_begin != NULL);
551+ } while (_begin != nullptr);
552
553 do {
554 index_t* n = _index->_next.load();
555 delete _index;
556 _index = n;
557
558- } while (_index != NULL);
559+ } while (_index != nullptr);
560 }
561
562 inline T& operator[](size_t i) {
563 bucket_t* n = indexToBucket(i);
564- if(n != NULL)
565+ if(n != nullptr)
566 {
567 return n->_data[i % C];
568 }
569@@ -100,8 +100,8 @@
570 while (b <= i) {
571 b += C;
572 n = n->_nbucket.load();
573- if(n == NULL) std::cerr << "FAILED FETCHING ID: " << i << std::endl;
574- assert(n != NULL);
575+ if(n == nullptr) std::cerr << "FAILED FETCHING ID: " << i << std::endl;
576+ assert(n != nullptr);
577 }
578
579 return n->_data[i % C];
580@@ -110,7 +110,7 @@
581 inline const T& operator[](size_t i) const {
582
583 bucket_t* n = indexToBucket(i);
584- if(n != NULL)
585+ if(n != nullptr)
586 {
587 return n->_data[i % C];
588 }
589@@ -121,7 +121,7 @@
590 while (b <= i) {
591 b += C;
592 n = n->_nbucket.load();
593- assert(n != NULL);
594+ assert(n != nullptr);
595 }
596
597 return n->_data[i % C];
598@@ -130,7 +130,7 @@
599 size_t size() {
600 bucket_t* n = _begin;
601 size_t cnt = 0;
602- while (n != NULL) {
603+ while (n != nullptr) {
604 cnt += n->_count;
605 n = n->_nbucket.load();
606 }
607@@ -138,25 +138,25 @@
608 }
609
610 inline size_t next(size_t thread) {
611- if (_tnext[thread] == NULL || _tnext[thread]->_count == C) {
612+ if (_tnext[thread] == nullptr || _tnext[thread]->_count == C) {
613 bucket_t* next = new bucket_t;
614 next->_count = 0;
615- next->_nbucket = NULL;
616+ next->_nbucket = nullptr;
617 next->_offset = 0;
618 memset(&next->_data, 0, sizeof(T)*C);
619
620 bucket_t* n = _tnext[thread];
621- if (n == NULL) {
622+ if (n == nullptr) {
623 n = _begin;
624 }
625
626 next->_offset = n->_offset.load() + C; // beginning of next
627
628- bucket_t* tmp = NULL;
629+ bucket_t* tmp = nullptr;
630
631 while (!n->_nbucket.compare_exchange_weak(tmp, next)) {
632- if (tmp != NULL) {
633- assert(tmp != NULL);
634+ if (tmp != nullptr) {
635+ assert(tmp != nullptr);
636 n = tmp;
637 next->_offset += C;
638 }
639@@ -171,6 +171,12 @@
640 // return old counter value, then increment
641 return c->_offset + (c->_count++);
642 }
643+
644+ inline void pop_back(size_t thread)
645+ {
646+ assert(_tnext[thread] != nullptr && _tnext[thread]->_count > 0);
647+ --_tnext[thread]->_count;
648+ }
649
650 private:
651
652@@ -181,7 +187,7 @@
653 {
654 index_t* old = tmp;
655 tmp = old->_next;
656- if(tmp == NULL)
657+ if(tmp == nullptr)
658 {
659 // extend index if needed
660 index_t* nindex = new index_t;
661@@ -209,7 +215,7 @@
662 while(id >= C*C)
663 {
664 tmp = tmp->_next;
665- if(tmp == NULL) return NULL;
666+ if(tmp == nullptr) return nullptr;
667 id -= C*C;
668 }
669 return tmp->_index[id/C];
670
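
The compare_exchange_weak loop in the next() hunk above is a standard lock-free append: a thread prepares a new bucket and races to link it; on losing the race it adopts the bucket the winner linked and retries further down the chain. A stripped-down sketch of the idiom with an illustrative node type (the real code additionally maintains offsets and counts):

    #include <atomic>

    struct node {
        std::atomic<node*> next{nullptr};
    };

    // Append cand after head, walking forward past nodes other threads
    // linked in the meantime; mirrors linked_bucket_t::next()'s CAS loop.
    void append(node* head, node* cand)
    {
        node* n = head;
        node* expected = nullptr;
        while (!n->next.compare_exchange_weak(expected, cand)) {
            if (expected != nullptr) {
                n = expected;        // someone else linked a node: advance
                expected = nullptr;  // and try to link after it instead
            }
            // on a spurious failure expected stays nullptr; just retry
        }
    }

    int main()
    {
        node head, a, b;
        append(&head, &a);
        append(&head, &b);
        return (head.next.load() == &a && a.next.load() == &b) ? 0 : 1;
    }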
671=== modified file 'PetriEngine/Structures/ptrie.h'
672--- PetriEngine/Structures/ptrie.h 2017-08-03 08:56:41 +0000
673+++ PetriEngine/Structures/ptrie.h 2019-09-06 10:48:45 +0000
674@@ -31,6 +31,7 @@
675 #include <stack>
676 #include <string.h>
677 #include <functional>
678+#include <memory>
679
680 #include "binarywrapper.h"
681 #include "linked_bucket.h"
682@@ -65,13 +66,14 @@
683 } __attribute__((packed));
684
685
686-#define PTRIETPL uint16_t HEAPBOUND, uint16_t SPLITBOUND, size_t ALLOCSIZE, typename T, typename I
687+#define PTRIETPL uint16_t HEAPBOUND, uint16_t SPLITBOUND, size_t ALLOCSIZE, typename T, bool E, typename I
688
689 template<
690 uint16_t HEAPBOUND = 17,
691 uint16_t SPLITBOUND = 129,
692 size_t ALLOCSIZE = (1024 * 64),
693 typename T = void,
694+ bool E = false,
695 typename I = size_t
696 >
697 class set {
698@@ -91,6 +93,9 @@
699
700 static_assert(HEAPBOUND > sizeof(size_t),
701 "HEAPBOUND MUST BE LARGER THAN sizeof(size_t)");
702+
703+ static_assert(std::is_same<void,T>::value || E,
704+ "IF T IS NOT VOID, ENTRIES MUST BE ENABLED");
705 protected:
706
707 typedef ptrie_el_t<T, I> entry_t;
708@@ -101,20 +106,20 @@
709 }
710 uchar _data;
711
712- static inline size_t overhead(size_t count, bool hasent) {
713- if (hasent)
714+ static inline size_t overhead(size_t count) {
715+ if (E)
716 return count * (sizeof (uint16_t) + sizeof (I));
717 else
718 return count * (sizeof (uint16_t));
719 }
720
721- inline I* entries(uint16_t count, bool hasent) {
722- if (hasent) return (I*) (data(0, hasent) + (count * (sizeof (uint16_t))));
723- else return NULL;
724+ inline I* entries(uint16_t count) {
725+ if (E) return (I*) (data(0) + (count * (sizeof (uint16_t))));
726+ else return nullptr;
727 }
728
729- inline uchar* data(uint16_t count, bool hasent) {
730- return ((uchar*) (&_data)) + overhead(count, hasent);
731+ inline uchar* data(uint16_t count) {
732+ return ((uchar*) (&_data)) + overhead(count);
733 }
734
735 inline uint16_t& first(uint16_t count = 0, uint16_t index = 0) {
736@@ -128,21 +133,17 @@
737 struct node_t : public base_t {
738 uint16_t _totsize = 0;
739 uint16_t _count = 0; // bucket-counts
740- bucket_t* _data = NULL; // back-pointers to data-array up to date
741+ bucket_t* _data = nullptr; // back-pointers to data-array up to date
742 } __attribute__((packed));
743
744 struct fwdnode_t : public base_t {
745 base_t* _children[256];
746 fwdnode_t* _parent;
747-
748- base_t*& operator[](size_t i) {
749- return _children[i];
750- }
751 } __attribute__((packed));
752
753- linked_bucket_t<entry_t, ALLOCSIZE>* _entries = NULL;
754+ std::shared_ptr<linked_bucket_t<entry_t, ALLOCSIZE>> _entries = nullptr;
755
756- fwdnode_t* _root;
757+ std::shared_ptr<fwdnode_t> _root;
758 protected:
759 node_t* new_node();
760 fwdnode_t* new_fwd();
761@@ -167,7 +168,6 @@
762 bool merge_down(fwdnode_t* parent, node_t* node, int byte);
763 void inject_byte(node_t* node, uchar topush, size_t totsize, std::function<uint16_t(size_t)> sizes);
764
765-
766 public:
767 set();
768 ~set();
769@@ -183,9 +183,9 @@
770 };
771
772 template<PTRIETPL>
773- set<HEAPBOUND, SPLITBOUND, ALLOCSIZE, T, I>::~set() {
774+ set<HEAPBOUND, SPLITBOUND, ALLOCSIZE, T, E, I>::~set() {
775 std::stack<fwdnode_t*> stack;
776- stack.push(_root);
777+ stack.push(_root.get());
778 while(!stack.empty())
779 {
780 fwdnode_t* next = stack.top();
781@@ -209,50 +209,52 @@
782 }
783 }
784 }
785- delete next;
786+ if(_root.get() != next)
787+ delete next;
788 }
789- delete _entries;
790+ _entries = nullptr;
791+ _root = nullptr;
792 }
793
794 template<PTRIETPL>
795- void set<HEAPBOUND, SPLITBOUND, ALLOCSIZE, T, I>::init()
796+ void set<HEAPBOUND, SPLITBOUND, ALLOCSIZE, T, E, I>::init()
797 {
798- delete _entries;
799- if(_entries != NULL)
800+ _entries = nullptr;
801+ if(E)
802 {
803- _entries = new linked_bucket_t<entry_t, ALLOCSIZE>(1);
804+ _entries = std::make_unique<linked_bucket_t<entry_t, ALLOCSIZE>>(1);
805 }
806
807- _root = new_fwd();
808- _root->_parent = NULL;
809+ _root = std::unique_ptr<fwdnode_t>(new_fwd());
810+ _root->_parent = nullptr;
811 _root->_type = 255;
812 _root->_path = 0;
813
814 size_t i = 0;
815- for (; i < 256; ++i) (*_root)[i] = _root;
816+ for (; i < 256; ++i) _root->_children[i] = _root.get();
817 }
818
819 template<PTRIETPL>
820- set<HEAPBOUND, SPLITBOUND, ALLOCSIZE, T, I>::set()
821+ set<HEAPBOUND, SPLITBOUND, ALLOCSIZE, T, E, I>::set()
822 {
823 init();
824 }
825
826 template<PTRIETPL>
827- typename set<HEAPBOUND, SPLITBOUND, ALLOCSIZE, T, I>::node_t*
828- set<HEAPBOUND, SPLITBOUND, ALLOCSIZE, T, I>::new_node() {
829+ typename set<HEAPBOUND, SPLITBOUND, ALLOCSIZE, T, E, I>::node_t*
830+ set<HEAPBOUND, SPLITBOUND, ALLOCSIZE, T, E, I>::new_node() {
831 return new node_t;
832 }
833
834 template<PTRIETPL>
835- typename set<HEAPBOUND, SPLITBOUND, ALLOCSIZE, T, I>::fwdnode_t*
836- set<HEAPBOUND, SPLITBOUND, ALLOCSIZE, T, I>::new_fwd() {
837+ typename set<HEAPBOUND, SPLITBOUND, ALLOCSIZE, T, E, I>::fwdnode_t*
838+ set<HEAPBOUND, SPLITBOUND, ALLOCSIZE, T, E, I>::new_fwd() {
839 return new fwdnode_t;
840 }
841
842 template<PTRIETPL>
843 base_t*
844- set<HEAPBOUND, SPLITBOUND, ALLOCSIZE, T, I>::fast_forward(const binarywrapper_t& encoding, fwdnode_t** tree_pos, uint& byte) {
845+ set<HEAPBOUND, SPLITBOUND, ALLOCSIZE, T, E, I>::fast_forward(const binarywrapper_t& encoding, fwdnode_t** tree_pos, uint& byte) {
846 fwdnode_t* t_pos = *tree_pos;
847
848 uint16_t s = encoding.size(); // TODO remove minus to
849@@ -262,10 +264,10 @@
850 *tree_pos = t_pos;
851
852 base_t* next;
853- if (byte >= 2) next = (*t_pos)[encoding[byte - 2]];
854- else next = (*t_pos)[sc[1 - byte]];
855+ if (byte >= 2) next = t_pos->_children[encoding[byte - 2]];
856+ else next = t_pos->_children[sc[1 - byte]];
857
858- assert(next != NULL);
859+ assert(next != nullptr);
860 if(next == t_pos)
861 {
862 return t_pos;
863@@ -281,12 +283,11 @@
864 }
865
866 template<PTRIETPL>
867- bool set<HEAPBOUND, SPLITBOUND, ALLOCSIZE, T, I>::bucket_search(const binarywrapper_t& encoding, node_t* node, uint& b_index, uint byte) {
868+ bool set<HEAPBOUND, SPLITBOUND, ALLOCSIZE, T, E, I>::bucket_search(const binarywrapper_t& encoding, node_t* node, uint& b_index, uint byte) {
869 // run through the stored data in the bucket, looking for matches
870 // start by creating an encoding that "points" to the "unmatched"
871 // part of the encoding. Notice, this is a shallow copy, no actual
872 // heap-allocation happens!
873- const bool hasent = _entries != NULL;
874 bool found = false;
875
876 uint16_t encsize;
877@@ -341,7 +342,7 @@
878 if (b_index >= node->_count ||
879 bucket->first(node->_count, b_index) > first) return false;
880
881- uchar* data = bucket->data(node->_count, hasent);
882+ uchar* data = bucket->data(node->_count);
883 for (; b_index < node->_count; ++b_index) {
884
885 size_t b = 0;
886@@ -387,7 +388,7 @@
887 }
888
889 template<PTRIETPL>
890- bool set<HEAPBOUND, SPLITBOUND, ALLOCSIZE, T, I>::best_match(const binarywrapper_t& encoding, fwdnode_t** tree_pos, base_t** node,
891+ bool set<HEAPBOUND, SPLITBOUND, ALLOCSIZE, T, E, I>::best_match(const binarywrapper_t& encoding, fwdnode_t** tree_pos, base_t** node,
892 uint& byte, uint& b_index) {
893 // run through tree as long as there are branches covering some of
894 // the encoding
895@@ -402,11 +403,9 @@
896
897
898 template<PTRIETPL>
899- void set<HEAPBOUND, SPLITBOUND, ALLOCSIZE, T, I>::split_fwd(node_t* node, fwdnode_t* jumppar, node_t* locked, size_t bsize, size_t byte) {
900-
901+ void set<HEAPBOUND, SPLITBOUND, ALLOCSIZE, T, E, I>::split_fwd(node_t* node, fwdnode_t* jumppar, node_t* locked, size_t bsize, size_t byte) {
902 const uint16_t bucketsize = node->_count;
903 if(bucketsize < (sizeof(fwdnode_t) / sizeof(size_t))) return;
904- const bool hasent = _entries != NULL;
905 node_t lown;
906 fwdnode_t* fwd_n = new_fwd();
907
908@@ -422,11 +421,11 @@
909 node->_type = 1;
910
911
912- for (size_t i = 0; i < 256; ++i) (*fwd_n)[i] = (locked == NULL ? node : locked);
913-
914- (*jumppar)[fwd_n->_path] = fwd_n;
915-
916- lown._data = NULL;
917+ for (size_t i = 0; i < 256; ++i) fwd_n->_children[i] = (locked == nullptr ? node : locked);
918+
919+ jumppar->_children[fwd_n->_path] = fwd_n;
920+
921+ lown._data = nullptr;
922
923 int lcnt = 0;
924 int hcnt = 0;
925@@ -479,15 +478,13 @@
926 // allocate new buckets
927 node->_totsize = hsize > 0 ? hsize : 0;
928 node->_count = hcnt;
929- if (hcnt == 0) node->_data = NULL;
930- else node->_data = (bucket_t*) malloc(node->_totsize +
931- bucket_t::overhead(node->_count, hasent));
932+ if (hcnt == 0) node->_data = nullptr;
933+ else node->_data = (bucket_t*) malloc(node->_totsize + bucket_t::overhead(node->_count));
934
935 lown._totsize = lsize > 0 ? lsize : 0;
936 lown._count = lcnt;
937- if (lcnt == 0) lown._data = NULL;
938- else lown._data = (bucket_t*) malloc(lown._totsize +
939- bucket_t::overhead(lown._count, hasent));
940+ if (lcnt == 0) lown._data = nullptr;
941+ else lown._data = (bucket_t*) malloc(lown._totsize + bucket_t::overhead(lown._count));
942
943 // copy values
944 int lbcnt = 0;
945@@ -499,7 +496,7 @@
946 lown._data->first(lown._count, i) = (bucket->first(bucketsize, i) << 8);
947 if (lengths[i] > 0) {
948 // lown._data->length(i) = LLENGTH(i);
949- uchar* dest = &(lown._data->data(lown._count, hasent)[lbcnt]);
950+ uchar* dest = &(lown._data->data(lown._count)[lbcnt]);
951 if (LLENGTH(i) >= HEAPBOUND) {
952 uchar* data = (uchar*) malloc(LLENGTH(i));
953 memcpy(dest, &data, sizeof (uchar*));
954@@ -509,9 +506,9 @@
955
956 uchar* src;
957 if (lengths[i] >= HEAPBOUND) {
958- src = *((uchar**)&(bucket->data(bucketsize, hasent)[bcnt]));
959+ src = *((uchar**)&(bucket->data(bucketsize)[bcnt]));
960 } else {
961- src = &(bucket->data(bucketsize, hasent)[bcnt]);
962+ src = &(bucket->data(bucketsize)[bcnt]);
963 }
964
965 uchar* f = (uchar*)&(lown._data->first(lown._count, i));
966@@ -524,7 +521,7 @@
967 if (lengths[i] >= HEAPBOUND) {
968 #ifndef NDEBUG
969 if (LLENGTH(i) >= HEAPBOUND) {
970- uchar* tmp = *((uchar**)&(lown._data->data(lown._count, hasent)[lbcnt]));
971+ uchar* tmp = *((uchar**)&(lown._data->data(lown._count)[lbcnt]));
972 assert(tmp == dest);
973 assert(memcmp(tmp, &(src[1]), LLENGTH(i)) == 0);
974 }
975@@ -545,7 +542,7 @@
976 node->_data->first(node->_count, j) = (bucket->first(bucketsize, i) << 8);
977 // node->_data->length(j) = lengths[i] - 1;
978 if (lengths[i] > 0) {
979- uchar* dest = &(node->_data->data(node->_count, hasent)[hbcnt]);
980+ uchar* dest = &(node->_data->data(node->_count)[hbcnt]);
981 if (LLENGTH(i) >= HEAPBOUND) {
982 uchar* data = (uchar*) malloc(LLENGTH(i));
983 memcpy(dest, &data, sizeof (uchar*));
984@@ -555,9 +552,9 @@
985
986 uchar* src;
987 if (lengths[i] < HEAPBOUND) {
988- src = &(bucket->data(bucketsize, hasent)[bcnt]);
989+ src = &(bucket->data(bucketsize)[bcnt]);
990 } else {
991- src = *((uchar**)&(bucket->data(bucketsize, hasent)[bcnt]));
992+ src = *((uchar**)&(bucket->data(bucketsize)[bcnt]));
993 }
994
995 uchar* f = (uchar*) & node->_data->first(node->_count, j);
996@@ -571,7 +568,7 @@
997 if (lengths[i] >= HEAPBOUND) {
998 #ifndef NDEBUG
999 if (LLENGTH(i) >= HEAPBOUND) {
1000- uchar* tmp = *((uchar**)&(node->_data->data(node->_count, hasent)[hbcnt]));
1001+ uchar* tmp = *((uchar**)&(node->_data->data(node->_count)[hbcnt]));
1002 assert(tmp == dest);
1003 assert(memcmp(tmp, &(src[1]), LLENGTH(i)) == 0);
1004 }
1005@@ -591,8 +588,8 @@
1006
1007 }
1008
1009- if (hasent) {
1010- I* ents = bucket->entries(bucketsize, true);
1011+ if (E) {
1012+ I* ents = bucket->entries(bucketsize);
1013 for (size_t i = 0; i < bucketsize; ++i) {
1014 entry_t& ent = _entries->operator[](ents[i]);
1015 ent.node = (size_t)fwd_n;
1016@@ -601,18 +598,18 @@
1017 }
1018
1019 if (lcnt == 0) {
1020- if (hasent)
1021- memcpy(node->_data->entries(bucketsize, true), bucket->entries(bucketsize, true), bucketsize * sizeof (I));
1022+ if (E)
1023+ memcpy(node->_data->entries(bucketsize), bucket->entries(bucketsize), bucketsize * sizeof (I));
1024 free(bucket);
1025 // std::cout << "SPLIT ALL HIGH" << std::endl;
1026- lown._data = NULL;
1027- for (size_t i = 0; i < 128; ++i) (*fwd_n)[i] = fwd_n;
1028+ lown._data = nullptr;
1029+ for (size_t i = 0; i < 128; ++i) fwd_n->_children[i] = fwd_n;
1030 split_node(node, fwd_n, locked, bsize > 0 ? bsize - 1 : 0, byte + 1);
1031 }
1032 else if (hcnt == 0) {
1033- if (hasent)
1034- memcpy(lown._data->entries(bucketsize, true), bucket->entries(bucketsize, true), bucketsize * sizeof (I));
1035- for (size_t i = 128; i < 256; ++i) (*fwd_n)[i] = fwd_n;
1036+ if (E)
1037+ memcpy(lown._data->entries(bucketsize), bucket->entries(bucketsize), bucketsize * sizeof (I));
1038+ for (size_t i = 128; i < 256; ++i) fwd_n->_children[i] = fwd_n;
1039 free(bucket);
1040 // std::cout << "SPLIT ALL LOW" << std::endl;
1041 node->_data = lown._data;
1042@@ -628,20 +625,20 @@
1043 low_n->_count = lown._count;
1044 low_n->_path = lown._path;
1045 low_n->_type = lown._type;
1046- for (size_t i = 0; i < 128; ++i) (*fwd_n)[i] = low_n;
1047- if (hasent) {
1048+ for (size_t i = 0; i < 128; ++i) fwd_n->_children[i] = low_n;
1049+ if (E) {
1050 // We are stopping splitting here, so correct entries if needed
1051- I* ents = bucket->entries(bucketsize, true);
1052+ I* ents = bucket->entries(bucketsize);
1053
1054 for (size_t i = 0; i < bucketsize; ++i) {
1055- if (i < lown._count) lown._data->entries(lown._count, true)[i] = ents[i];
1056- else node->_data->entries(node->_count, true)[i - lown._count] = ents[i];
1057+ if (i < lown._count) lown._data->entries(lown._count)[i] = ents[i];
1058+ else node->_data->entries(node->_count)[i - lown._count] = ents[i];
1059 }
1060 }
1061 free(bucket);
1062 if(lown._count >= SPLITBOUND && lown._count >= node->_count)
1063 {
1064- split_node(low_n, fwd_n, NULL, bsize > 0 ? bsize - 1 : 0, byte + 1);
1065+ split_node(low_n, fwd_n, nullptr, bsize > 0 ? bsize - 1 : 0, byte + 1);
1066 }
1067 if(node->_count >= SPLITBOUND)
1068 {
1069@@ -654,8 +651,7 @@
1070 }
1071
1072 template<PTRIETPL>
1073- void set<HEAPBOUND, SPLITBOUND, ALLOCSIZE, T, I>::split_node(node_t* node, fwdnode_t* jumppar, node_t* locked, size_t bsize, size_t byte) {
1074-
1075+ void set<HEAPBOUND, SPLITBOUND, ALLOCSIZE, T, E, I>::split_node(node_t* node, fwdnode_t* jumppar, node_t* locked, size_t bsize, size_t byte) {
1076 assert(bsize != std::numeric_limits<size_t>::max());
1077 if (node->_type == 8) // new fwd node!
1078 {
1079@@ -743,8 +739,6 @@
1080
1081 node->_type += 1;
1082
1083- const bool hasent = _entries != NULL;
1084-
1085 if (node->_count == 0) // only high node has data
1086 {
1087 // std::cout << "ALL HIGH" << std::endl;
1088@@ -788,27 +782,27 @@
1089 jumppar->_children[i] = h_node;
1090 }
1091
1092- // std::cout << "Moving LOW " << node->_count << " TO " << node << std::endl;
1093- // std::cout << "Moving HIGH " << h_node->_count << " TO " << h_node << std::endl;
1094- h_node->_data = (bucket_t*) malloc(h_node->_totsize +
1095- bucket_t::overhead(h_node->_count, hasent));
1096- node->_data = (bucket_t*) malloc(node->_totsize +
1097- bucket_t::overhead(node->_count, hasent));
1098+ h_node->_data = (bucket_t*) malloc(h_node->_totsize + bucket_t::overhead(h_node->_count));
1099+ node->_data = (bucket_t*) malloc(node->_totsize + bucket_t::overhead(node->_count));
1100
1101 // copy firsts
1102- memcpy(&node->_data->first(node->_count), &(old->first(bucketsize)), sizeof (uint16_t) * node->_count);
1103- memcpy(&h_node->_data->first(h_node->_count), &(old->first(bucketsize, node->_count)), sizeof (uint16_t) * h_node->_count);
1104+ memcpy(&node->_data->first(node->_count),
1105+ &(old->first(bucketsize)), sizeof (uint16_t) * node->_count);
1106+ memcpy(&h_node->_data->first(h_node->_count),
1107+ &(old->first(bucketsize, node->_count)), sizeof (uint16_t) * h_node->_count);
1108
1109 // copy data
1110- memcpy(node->_data->data(node->_count, hasent), old->data(bucketsize, hasent), node->_totsize);
1111- memcpy(h_node->_data->data(h_node->_count, hasent), &(old->data(bucketsize, hasent)[node->_totsize]), h_node->_totsize);
1112+ memcpy(node->_data->data(node->_count),
1113+ old->data(bucketsize), node->_totsize);
1114+ memcpy(h_node->_data->data(h_node->_count),
1115+ &(old->data(bucketsize)[node->_totsize]), h_node->_totsize);
1116
1117- if (hasent) {
1118- I* ents = old->entries(bucketsize, true);
1119+ if (E) {
1120+ I* ents = old->entries(bucketsize);
1121
1122 for (size_t i = 0; i < bucketsize; ++i) {
1123- if (i < node->_count) node->_data->entries(node->_count, true)[i] = ents[i];
1124- else h_node->_data->entries(h_node->_count, true)[i - node->_count] = ents[i];
1125+ if (i < node->_count) node->_data->entries(node->_count)[i] = ents[i];
1126+ else h_node->_data->entries(h_node->_count)[i - node->_count] = ents[i];
1127 }
1128 }
1129
1130@@ -819,14 +813,14 @@
1131 }
1132 if(h_node->_count >= SPLITBOUND)
1133 {
1134- split_node(h_node, jumppar, NULL, bsize, byte);
1135+ split_node(h_node, jumppar, nullptr, bsize, byte);
1136 }
1137 }
1138 }
1139
1140 template<PTRIETPL>
1141 std::pair<bool, size_t>
1142- set<HEAPBOUND, SPLITBOUND, ALLOCSIZE, T, I>::exists(const uchar* data, size_t length) {
1143+ set<HEAPBOUND, SPLITBOUND, ALLOCSIZE, T, E, I>::exists(const uchar* data, size_t length) {
1144 assert(length <= 65536);
1145 binarywrapper_t encoding((uchar*) data, length * 8);
1146 // memcpy(encoding.raw()+2, data, length);
1147@@ -835,8 +829,8 @@
1148
1149 uint b_index = 0;
1150
1151- fwdnode_t* fwd = _root;
1152- base_t* base = NULL;
1153+ fwdnode_t* fwd = _root.get();
1154+ base_t* base = nullptr;
1155 uint byte = 0;
1156
1157 b_index = 0;
1158@@ -844,8 +838,8 @@
1159 returntype_t ret = returntype_t(res, std::numeric_limits<size_t>::max());
1160 if((size_t)fwd != (size_t)base) {
1161 node_t* node = (node_t*)base;
1162- if (_entries != NULL && res) {
1163- ret.second = node->_data->entries(node->_count, true)[b_index];
1164+ if (E && res) {
1165+ ret.second = node->_data->entries(node->_count)[b_index];
1166 }
1167 }
1168 return ret;
1169@@ -853,10 +847,9 @@
1170
1171 template<PTRIETPL>
1172 returntype_t
1173- set<HEAPBOUND, SPLITBOUND, ALLOCSIZE, T, I>::insert(const uchar* data, size_t length) {
1174+ set<HEAPBOUND, SPLITBOUND, ALLOCSIZE, T, E, I>::insert(const uchar* data, size_t length) {
1175 assert(length <= 65536);
1176 binarywrapper_t e2((uchar*) data, length * 8);
1177- const bool hasent = _entries != NULL;
1178 // binarywrapper_t encoding(length*8+16);
1179 // memcpy(encoding.raw()+2, data, length);
1180 // length += 2;
1181@@ -864,17 +857,16 @@
1182
1183 uint b_index = 0;
1184
1185- fwdnode_t* fwd = _root;
1186- node_t* node = NULL;
1187- base_t* base = NULL;
1188+ fwdnode_t* fwd = _root.get();
1189+ node_t* node = nullptr;
1190+ base_t* base = nullptr;
1191 uint byte = 0;
1192-
1193 bool res = best_match(e2, &fwd, &base, byte, b_index);
1194 if (res) { // We are not inserting duplicates, semantics of PTrie is a set.
1195 returntype_t ret(false, 0);
1196- if (hasent) {
1197+ if (E) {
1198 node = (node_t*)base;
1199- ret = returntype_t(false, node->_data->entries(node->_count, true)[b_index]);
1200+ ret = returntype_t(false, node->_data->entries(node->_count)[b_index]);
1201 }
1202 return ret;
1203 }
1204@@ -883,7 +875,7 @@
1205 {
1206 node = new_node();
1207 node->_count = 0;
1208- node->_data = NULL;
1209+ node->_data = nullptr;
1210 node->_type = 0;
1211 node->_path = 0;
1212 assert(node);
1213@@ -913,25 +905,22 @@
1214 }
1215 } while(bit > 0 && !stop);
1216
1217- for (size_t i = min; i <= max; ++i) (*fwd)[i] = node;
1218+ for (size_t i = min; i <= max; ++i) fwd->_children[i] = node;
1219 node->_path = min;
1220 node->_type = bit;
1221- } else
1222+ }
1223+ else
1224 {
1225 node = (node_t*)base;
1226 }
1227
1228
1229 // shallow copy
1230- binarywrapper_t nenc(e2.const_raw(),
1231+ binarywrapper_t nenc(e2.raw(),
1232 (e2.size() - byte)*8,
1233 byte * 8,
1234 e2.size() * 8);
1235
1236-// std::cout << "COULD NOT FIND of size " << e2.size() << std::endl;
1237-// e2.print(std::cout);
1238-// std::cout << "Inserted into " << node << std::endl;
1239-
1240 // make a new bucket, add new entry, copy over old data
1241
1242 uint nbucketcount = node->_count + 1;
1243@@ -945,7 +934,7 @@
1244 uint nbucketsize = node->_totsize + nitemsize;
1245
1246 bucket_t* nbucket = (bucket_t*) malloc(nbucketsize +
1247- bucket_t::overhead(nbucketcount, hasent));
1248+ bucket_t::overhead(nbucketcount));
1249
1250 // copy over old "firsts"
1251 memcpy(&nbucket->first(nbucketcount), &(node->_data->first(node->_count)), b_index * sizeof (uint16_t));
1252@@ -965,13 +954,15 @@
1253 }
1254
1255 size_t entry = 0;
1256- if (hasent) {
1257+ if (E) {
1258 // copy over entries
1259- memcpy(nbucket->entries(nbucketcount, true), node->_data->entries(node->_count, true), b_index * sizeof (I));
1260- memcpy(&(nbucket->entries(nbucketcount, true)[b_index + 1]), &(node->_data->entries(node->_count, true)[b_index]),
1261+ memcpy(nbucket->entries(nbucketcount),
1262+ node->_data->entries(node->_count), b_index * sizeof (I));
1263+ memcpy(&(nbucket->entries(nbucketcount)[b_index + 1]),
1264+ &(node->_data->entries(node->_count)[b_index]),
1265 (node->_count - b_index) * sizeof (I));
1266
1267- entry = nbucket->entries(nbucketcount, true)[b_index] = _entries->next(0);
1268+ entry = nbucket->entries(nbucketcount)[b_index] = _entries->next(0);
1269 entry_t& ent = _entries->operator[](entry);
1270 ent.node = (size_t)fwd;
1271 ent.path = (nbucket->first(nbucketcount, b_index) >> 8);
1272@@ -998,16 +989,16 @@
1273 }
1274 }
1275 // copy over old data
1276- memcpy(nbucket->data(nbucketcount, hasent),
1277- node->_data->data(node->_count, hasent), tmpsize);
1278+ memcpy(nbucket->data(nbucketcount),
1279+ node->_data->data(node->_count), tmpsize);
1280
1281- memcpy(&(nbucket->data(nbucketcount, hasent)[tmpsize + nitemsize]),
1282- &(node->_data->data(node->_count, hasent)[tmpsize]), (node->_totsize - tmpsize));
1283+ memcpy(&(nbucket->data(nbucketcount)[tmpsize + nitemsize]),
1284+ &(node->_data->data(node->_count)[tmpsize]), (node->_totsize - tmpsize));
1285
1286 // copy over new data
1287 if (copyval) {
1288- memcpy(&(nbucket->data(nbucketcount, hasent)[tmpsize]),
1289- nenc.const_raw(), nenc.size());
1290+ memcpy(&(nbucket->data(nbucketcount)[tmpsize]),
1291+ nenc.raw(), nenc.size());
1292 } else {
1293 // alloc space
1294 uchar* data = (uchar*) malloc(nenc.size());
1295@@ -1016,7 +1007,7 @@
1296 memcpy(data, nenc.const_raw(), nenc.size());
1297
1298 // copy pointer in
1299- memcpy(&(nbucket->data(nbucketcount, hasent)[tmpsize]),
1300+ memcpy(&(nbucket->data(nbucketcount)[tmpsize]),
1301 &data, sizeof (uchar*));
1302 }
1303
1304@@ -1038,9 +1029,9 @@
1305
1306 #ifndef NDEBUG
1307 for (int i = byte - 1; i >= 2; --i) {
1308- assert(fwd != NULL);
1309+ assert(fwd != nullptr);
1310 assert(e2[-2 + i] == fwd->_path);
1311- assert(fwd->_parent == NULL || fwd->_parent->_children[fwd->_path] == fwd);
1312+ assert(fwd->_parent == nullptr || fwd->_parent->_children[fwd->_path] == fwd);
1313 fwd = fwd->_parent;
1314
1315 }
1316@@ -1056,27 +1047,25 @@
1317
1318 template<PTRIETPL>
1319 returntype_t
1320- set<HEAPBOUND, SPLITBOUND, ALLOCSIZE, T, I>::insert(binarywrapper_t wrapper) {
1321+ set<HEAPBOUND, SPLITBOUND, ALLOCSIZE, T, E, I>::insert(binarywrapper_t wrapper) {
1322 return insert(wrapper.raw(), wrapper.size());
1323 }
1324
1325 template<PTRIETPL>
1326 returntype_t
1327- set<HEAPBOUND, SPLITBOUND, ALLOCSIZE, T, I>::exists(binarywrapper_t wrapper) {
1328+ set<HEAPBOUND, SPLITBOUND, ALLOCSIZE, T, E, I>::exists(binarywrapper_t wrapper) {
1329 return exists(wrapper.raw(), wrapper.size());
1330 }
1331
1332
1333 template<PTRIETPL>
1334 void
1335- set<HEAPBOUND, SPLITBOUND, ALLOCSIZE, T, I>::inject_byte(node_t* node, uchar topush, size_t totsize, std::function<uint16_t(size_t)> _sizes)
1336+ set<HEAPBOUND, SPLITBOUND, ALLOCSIZE, T, E, I>::inject_byte(node_t* node, uchar topush, size_t totsize, std::function<uint16_t(size_t)> _sizes)
1337 {
1338- const bool hasent = _entries != NULL;
1339 bucket_t *nbucket = node->_data;
1340 if(totsize > 0) {
1341 nbucket = (bucket_t *) malloc(totsize +
1342- bucket_t::overhead(node->_count,
1343- hasent));
1344+ bucket_t::overhead(node->_count));
1345 }
1346
1347 size_t dcnt = 0;
1348@@ -1094,27 +1083,27 @@
1349 {
1350 if(size < HEAPBOUND)
1351 {
1352- nbucket->data(node->_count, hasent)[dcnt] = push;
1353+ nbucket->data(node->_count)[dcnt] = push;
1354 dcnt += 1;
1355 }
1356 if(size < HEAPBOUND && size > 1)
1357 {
1358- memcpy(&(nbucket->data(node->_count, hasent)[dcnt]),
1359- &(node->_data->data(node->_count, hasent)[ocnt]),
1360+ memcpy(&(nbucket->data(node->_count)[dcnt]),
1361+ &(node->_data->data(node->_count)[ocnt]),
1362 size - 1);
1363 ocnt += size - 1;
1364 dcnt += size - 1;
1365 }
1366 else if(size >= HEAPBOUND)
1367 {
1368- uchar* src = NULL;
1369+ uchar* src = nullptr;
1370 uchar* dest = (uchar*)malloc(size);
1371- memcpy(&(nbucket->data(node->_count, hasent)[dcnt]), &dest, sizeof(size_t));
1372+ memcpy(&(nbucket->data(node->_count)[dcnt]), &dest, sizeof(size_t));
1373 ++dest;
1374 dcnt += sizeof(size_t);
1375 if(size == HEAPBOUND)
1376 {
1377- src = &(node->_data->data(node->_count, hasent)[ocnt]);
1378+ src = &(node->_data->data(node->_count)[ocnt]);
1379 memcpy(dest, src, size - 1);
1380 ocnt += size - 1;
1381 }
1382@@ -1122,7 +1111,7 @@
1383 {
1384 assert(size > HEAPBOUND);
1385 // allready on heap, but we need to expand it
1386- src = *(uchar**)&(node->_data->data(node->_count, hasent)[ocnt]);
1387+ src = *(uchar**)&(node->_data->data(node->_count)[ocnt]);
1388 memcpy(dest, src, size - 1);
1389 ocnt += sizeof(size_t);
1390 }
1391@@ -1144,9 +1133,8 @@
1392
1393 template<PTRIETPL>
1394 bool
1395- set<HEAPBOUND, SPLITBOUND, ALLOCSIZE, T, I>::merge_down(fwdnode_t* parent, node_t* node, int on_heap)
1396+ set<HEAPBOUND, SPLITBOUND, ALLOCSIZE, T, E, I>::merge_down(fwdnode_t* parent, node_t* node, int on_heap)
1397 {
1398- const bool hasent = _entries != NULL;
1399 if(node->_type == 0)
1400 {
1401 if(node->_count < SPLITBOUND/3) return true;
1402@@ -1155,7 +1143,7 @@
1403 for(size_t i = 0; i < 256; ++i) parent->_children[i] = parent;
1404 delete node;
1405 do {
1406- if (parent != this->_root) {
1407+ if (parent != this->_root.get()) {
1408 // we can remove fwd and go back one level
1409 parent->_parent->_children[parent->_path] = parent->_parent;
1410 ++on_heap;
1411@@ -1169,7 +1157,7 @@
1412 {
1413 if(other != parent)
1414 {
1415- other = NULL;
1416+ other = nullptr;
1417 break;
1418 }
1419 else
1420@@ -1179,7 +1167,7 @@
1421 }
1422 }
1423
1424- if(other == NULL)
1425+ if(other == nullptr)
1426 {
1427 return true;
1428 }
1429@@ -1199,10 +1187,10 @@
1430 }
1431 } while(true);
1432 }
1433- else if(parent != this->_root)
1434+ else if(parent != this->_root.get())
1435 {
1436 // we need to re-add path to items here.
1437- if(parent->_parent == this->_root) {
1438+ if(parent->_parent == this->_root.get()) {
1439 // something
1440 uint16_t sizes[256];
1441 size_t totsize = 0;
1442@@ -1240,7 +1228,7 @@
1443 uchar* l = (uchar*)&length;
1444 fwdnode_t* tmp = parent;
1445
1446- while(tmp != this->_root)
1447+ while(tmp != this->_root.get())
1448 {
1449 l[0] = l[1];
1450 l[1] = tmp->_path;
1451@@ -1295,7 +1283,7 @@
1452 return merge_down(next, node, on_heap + 1);
1453 }
1454 }
1455- if(parent != this->_root)
1456+ if(parent != this->_root.get())
1457 {
1458 assert(node->_count > 0);
1459 return merge_down(parent->_parent, node, on_heap);
1460@@ -1338,7 +1326,7 @@
1461 return false;
1462
1463 bucket_t *nbucket = (bucket_t *) malloc(nbucketsize +
1464- bucket_t::overhead(nbucketcount, hasent));
1465+ bucket_t::overhead(nbucketcount));
1466 node_t *first = node;
1467 node_t *second = other;
1468 if (path & binarywrapper_t::_masks[node->_type - 1]) {
1469@@ -1353,24 +1341,24 @@
1470 &(second->_data->first(second->_count)),
1471 second->_count * sizeof(uint16_t));
1472
1473- if (hasent) {
1474+ if (E) {
1475 // copy over entries
1476- memcpy(nbucket->entries(nbucketcount, true),
1477- first->_data->entries(first->_count, true),
1478+ memcpy(nbucket->entries(nbucketcount),
1479+ first->_data->entries(first->_count),
1480 first->_count * sizeof(I));
1481- memcpy(&(nbucket->entries(nbucketcount, true)[first->_count]),
1482- second->_data->entries(second->_count, true),
1483+ memcpy(&(nbucket->entries(nbucketcount)[first->_count]),
1484+ second->_data->entries(second->_count),
1485 second->_count * sizeof(I));
1486
1487 }
1488
1489 // copy over old data
1490 if (nbucketsize > 0) {
1491- memcpy(nbucket->data(nbucketcount, hasent),
1492- first->_data->data(first->_count, hasent), first->_totsize);
1493+ memcpy(nbucket->data(nbucketcount),
1494+ first->_data->data(first->_count), first->_totsize);
1495
1496- memcpy(&(nbucket->data(nbucketcount, hasent)[first->_totsize]),
1497- second->_data->data(second->_count, hasent), second->_totsize);
1498+ memcpy(&(nbucket->data(nbucketcount)[first->_totsize]),
1499+ second->_data->data(second->_count), second->_totsize);
1500
1501 }
1502 free(node->_data);
1503@@ -1412,14 +1400,12 @@
1504
1505 template<PTRIETPL>
1506 void
1507- set<HEAPBOUND, SPLITBOUND, ALLOCSIZE, T, I>::erase(fwdnode_t* parent, node_t* node, size_t bindex, int on_heap)
1508+ set<HEAPBOUND, SPLITBOUND, ALLOCSIZE, T, E, I>::erase(fwdnode_t* parent, node_t* node, size_t bindex, int on_heap)
1509 {
1510- const bool hasent = _entries != NULL;
1511-
1512 // first find size and amount before
1513 uint16_t size = 0;
1514 uint16_t before = 0;
1515- if (parent == this->_root)
1516+ if (parent == this->_root.get())
1517 {
1518 for(size_t i = 0; i < bindex; ++i)
1519 {
1520@@ -1427,7 +1413,7 @@
1521 }
1522 size = node->_data->first(node->_count, bindex);
1523 }
1524- else if(parent->_parent == this->_root) {
1525+ else if(parent->_parent == this->_root.get()) {
1526 for(size_t i = 0; i <= bindex; ++i)
1527 {
1528 uint16_t t = 0;
1529@@ -1449,7 +1435,7 @@
1530 if(size >= HEAPBOUND)
1531 {
1532 before = sizeof(size_t)*bindex;
1533- uchar* src = *((uchar**)&(node->_data->data(node->_count, hasent)[before]));
1534+ uchar* src = *((uchar**)&(node->_data->data(node->_count)[before]));
1535 free(src);
1536 size = sizeof(size_t);
1537 }
1538@@ -1459,7 +1445,7 @@
1539 uint nbucketsize = node->_totsize - size;
1540
1541 bucket_t *nbucket = (bucket_t *) malloc(nbucketsize +
1542- bucket_t::overhead(nbucketcount, hasent));
1543+ bucket_t::overhead(nbucketcount));
1544
1545 // copy over old "firsts", [0,bindex) to [0,bindex) then (bindex,node->_count) to [bindex, nbucketcount)
1546 memcpy(&nbucket->first(nbucketcount),
1547@@ -1470,13 +1456,13 @@
1548 &(node->_data->first(node->_count, bindex + 1)),
1549 (nbucketcount - bindex) * sizeof(uint16_t));
1550
1551- if (hasent) {
1552+ if (E) {
1553 // copy over entries
1554- memcpy(nbucket->entries(nbucketcount, true),
1555- node->_data->entries(node->_count, true),
1556+ memcpy(nbucket->entries(nbucketcount),
1557+ node->_data->entries(node->_count),
1558 bindex * sizeof(I));
1559- memcpy(&(nbucket->entries(nbucketcount, true)[bindex]),
1560- &(node->_data->entries(node->_count, true)[bindex + 1]),
1561+ memcpy(&(nbucket->entries(nbucketcount)[bindex]),
1562+ &(node->_data->entries(node->_count)[bindex + 1]),
1563 (nbucketcount - bindex) * sizeof(I));
1564
1565 // copy back entries here in _entries!
1566@@ -1485,11 +1471,11 @@
1567
1568 // copy over old data
1569 if (nbucketsize > 0) {
1570- memcpy(nbucket->data(nbucketcount, hasent),
1571- node->_data->data(node->_count, hasent), before);
1572+ memcpy(nbucket->data(nbucketcount),
1573+ node->_data->data(node->_count), before);
1574 assert(nbucketsize >= before);
1575- memcpy(&(nbucket->data(nbucketcount, hasent)[before]),
1576- &(node->_data->data(node->_count, hasent)[before + size]),
1577+ memcpy(&(nbucket->data(nbucketcount)[before]),
1578+ &(node->_data->data(node->_count)[before + size]),
1579 (nbucketsize - before));
1580
1581 }
1582@@ -1502,7 +1488,7 @@
1583 else
1584 {
1585 free(node->_data);
1586- node->_data = NULL;
1587+ node->_data = nullptr;
1588 node->_count = 0;
1589 node->_totsize = 0;
1590 }
1591@@ -1512,13 +1498,13 @@
1592
1593 template<PTRIETPL>
1594 bool
1595- set<HEAPBOUND, SPLITBOUND, ALLOCSIZE, T, I>::erase(binarywrapper_t encoding)
1596+ set<HEAPBOUND, SPLITBOUND, ALLOCSIZE, T, E, I>::erase(binarywrapper_t encoding)
1597 {
1598 assert(encoding.size() <= 65536);
1599 uint b_index = 0;
1600
1601- fwdnode_t* fwd = this->_root;
1602- base_t* base = NULL;
1603+ fwdnode_t* fwd = this->_root.get();
1604+ base_t* base = nullptr;
1605 uint byte = 0;
1606
1607 b_index = 0;
1608@@ -1540,7 +1526,7 @@
1609
1610 template<PTRIETPL>
1611 bool
1612- set<HEAPBOUND, SPLITBOUND, ALLOCSIZE, T, I>::erase(const uchar *data, size_t length)
1613+ set<HEAPBOUND, SPLITBOUND, ALLOCSIZE, T, E, I>::erase(const uchar *data, size_t length)
1614 {
1615 binarywrapper_t b((uchar*)data, length*8);
1616 return erase(b);
1617
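
The pervasive change in this file replaces the runtime flag hasent = (_entries != NULL) with the new compile-time template parameter E, so every branch on whether entries are stored can be folded away by the compiler. A minimal sketch of the technique; the struct below is illustrative, not the real bucket_t:

    #include <cstddef>
    #include <cstdint>

    template <bool E>
    struct bucket {
        // E is known at compile time, so this branch is resolved during
        // compilation; the disabled path costs nothing at runtime.
        static constexpr std::size_t overhead(std::size_t count) {
            if (E)
                return count * (sizeof(std::uint16_t) + sizeof(std::size_t)); // firsts + entry ids
            else
                return count * sizeof(std::uint16_t);                         // firsts only
        }
    };

    int main()
    {
        static_assert(bucket<false>::overhead(4) == 4 * sizeof(std::uint16_t),
                      "no per-entry overhead when E is false");
        return bucket<true>::overhead(1) == sizeof(std::uint16_t) + sizeof(std::size_t) ? 0 : 1;
    }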
1618=== modified file 'PetriEngine/Structures/ptrie_stable.h'
1619--- PetriEngine/Structures/ptrie_stable.h 2017-08-03 14:12:33 +0000
1620+++ PetriEngine/Structures/ptrie_stable.h 2019-09-06 10:48:45 +0000
1621@@ -37,16 +37,18 @@
1622 typename T = void,
1623 typename I = size_t
1624 >
1625- class set_stable : public set<HEAPBOUND, SPLITBOUND, ALLOCSIZE, T, I> {
1626+ class set_stable : public set<HEAPBOUND, SPLITBOUND, ALLOCSIZE, T, true, I> {
1627 #ifdef __APPLE__
1628-#define pt set<HEAPBOUND, SPLITBOUND, ALLOCSIZE, T, I>
1629+#define pt set<HEAPBOUND, SPLITBOUND, ALLOCSIZE, T, true, I>
1630 #else
1631- using pt = set<HEAPBOUND, SPLITBOUND, ALLOCSIZE, T, I>;
1632+ using pt = set<HEAPBOUND, SPLITBOUND, ALLOCSIZE, T, true, I>;
1633 #endif
1634+ protected:
1635+ virtual bool uses_entries() const { return true; }
1636 public:
1637 set_stable() : pt()
1638 {
1639- this->_entries = new linked_bucket_t<typename pt::entry_t, ALLOCSIZE>(1);
1640+ this->_entries = std::make_unique<linked_bucket_t<typename pt::entry_t, ALLOCSIZE>>(1);
1641 }
1642
1643 size_t size() const {
1644@@ -56,11 +58,11 @@
1645 size_t unpack(I index, uchar* destination);
1646 };
1647
1648- template<PTRIETPL>
1649+ template<uint16_t HEAPBOUND, uint16_t SPLITBOUND, size_t ALLOCSIZE, typename T, typename I>
1650 size_t
1651 set_stable<HEAPBOUND, SPLITBOUND, ALLOCSIZE, T, I>::unpack(I index, uchar* destination) {
1652- typename pt::node_t* node = NULL;
1653- typename pt::fwdnode_t* par = NULL;
1654+ typename pt::node_t* node = nullptr;
1655+ typename pt::fwdnode_t* par = nullptr;
1656 // we can find size without bothering anyone (to much)
1657 std::stack<uchar> path;
1658 size_t bindex = 0;
1659@@ -72,7 +74,7 @@
1660 par = (typename pt::fwdnode_t*)ent.node;
1661 node = (typename pt::node_t*)par->_children[ent.path];
1662 typename pt::bucket_t* bckt = node->_data;
1663- I* ents = bckt->entries(node->_count, true);
1664+ I* ents = bckt->entries(node->_count);
1665 for (size_t i = 0; i < node->_count; ++i) {
1666 if (ents[i] == index) {
1667 bindex = i;
1668@@ -87,7 +89,7 @@
1669
1670
1671
1672- while (par != this->_root) {
1673+ while (par != this->_root.get()) {
1674 path.push(par->_path);
1675 par = par->_parent;
1676 }
1677@@ -131,9 +133,9 @@
1678 if (size > ps) {
1679 uchar* src;
1680 if ((size - ps) >= HEAPBOUND) {
1681- src = *((uchar**)&(node->_data->data(node->_count, true)[offset]));
1682+ src = *((uchar**)&(node->_data->data(node->_count)[offset]));
1683 } else {
1684- src = &(node->_data->data(node->_count, true)[offset]);
1685+ src = &(node->_data->data(node->_count)[offset]);
1686 }
1687
1688 memcpy(&(destination[ps]), src, (size - ps));
1689@@ -164,4 +166,4 @@
1690 }
1691
1692 #undef pt
1693-#endif /* PTRIE_MAP_H */
1694\ No newline at end of file
1695+#endif /* PTRIE_MAP_H */
1696
1697=== modified file 'lpsolve/liblpsolve55-osx64.a'
1698Binary files lpsolve/liblpsolve55-osx64.a 2013-10-19 18:20:00 +0000 and lpsolve/liblpsolve55-osx64.a 2019-09-06 10:48:45 +0000 differ
1699=== modified file 'makefile.osx64'
1700--- makefile.osx64 2019-01-09 20:51:51 +0000
1701+++ makefile.osx64 2019-09-06 10:48:45 +0000
1702@@ -8,8 +8,8 @@
1703 CUT = cut
1704
1705 # Compiler and linker flags
1706-CFLAGS = -mmacosx-version-min=10.7 -std=c++17 -m64 -I. -stdlib=libc++
1707-LDFLAGS = -dynamic -mmacosx-version-min=10.7 -std=c++17 -m64 lpsolve/liblpsolve55-osx64.a -stdlib=libc++ -lc++
1708+CFLAGS = -mmacosx-version-min=10.14 -std=c++17 -m64 -I. -stdlib=libc++
1709+LDFLAGS = -dynamic -mmacosx-version-min=10.14 -std=c++17 -m64 lpsolve/liblpsolve55-osx64.a -stdlib=libc++ -lc++
1710
1711 # Input files
1712 FLEX_SOURCES = $(shell find * -name "*.l")
