/**
	Utility functions for array processing

	Copyright: © 2012 Sönke Ludwig
	License: Subject to the terms of the MIT license, as written in the included LICENSE.txt file.
	Authors: Sönke Ludwig
*/
module vibe.internal.array;

import vibe.internal.allocator;

import std.algorithm;
import std.range : isInputRange, isOutputRange;
import std.traits;
static import std.utf;


/// Removes the first element of `array` that is identical (`is`) to `item`.
void removeFromArray(T)(ref T[] array, T item)
{
	foreach( i; 0 .. array.length )
		if( array[i] is item ){
			removeFromArrayIdx(array, i);
			return;
		}
}

/// Removes the element at index `idx`, shifting all following elements one slot to the front.
void removeFromArrayIdx(T)(ref T[] array, size_t idx)
{
	foreach( j; idx+1 .. array.length)
		array[j-1] = array[j];
	array.length = array.length-1;
}
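
// Minimal usage sketch for the two removal helpers (illustrative, not part of
// the original test suite). For value types, the `is` comparison used by
// `removeFromArray` behaves like equality.
unittest {
	int[] arr = [1, 2, 3, 4];
	removeFromArrayIdx(arr, 1);
	assert(arr == [1, 3, 4]);
	removeFromArray(arr, 3);
	assert(arr == [1, 4]);
}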

enum AppenderResetMode {
	keepData,  /// Keep the buffer alive, so that slices previously returned by `data` remain valid
	freeData,  /// Deallocate the buffer, invalidating all slices returned by `data`
	reuseData  /// Keep the buffer and overwrite its contents with subsequently appended data
}

/** Appender that obtains its internal buffer from an `IAllocator` instead of the GC.
*/
struct AllocAppender(ArrayType : E[], E) {
	alias ElemType = Unqual!E;

	static assert(!hasIndirections!E && !hasElaborateDestructor!E);

	private {
		ElemType[] m_data;
		ElemType[] m_remaining;
		IAllocator m_alloc;
		bool m_allocatedBuffer = false;
	}

	this(IAllocator alloc, ElemType[] initial_buffer = null)
	@safe {
		m_alloc = alloc;
		m_data = initial_buffer;
		m_remaining = initial_buffer;
	}

	@disable this(this);

	@property ArrayType data() { return cast(ArrayType)m_data[0 .. m_data.length - m_remaining.length]; }

	void reset(AppenderResetMode reset_mode = AppenderResetMode.keepData)
	{
		if (reset_mode == AppenderResetMode.keepData) m_data = null;
		else if (reset_mode == AppenderResetMode.freeData) { if (m_allocatedBuffer) m_alloc.deallocate(m_data); m_data = null; }
		m_remaining = m_data;
	}

	/** Grows the internal buffer so that at least a minimum number of elements
		can be appended without triggering a reallocation.

		Params:
			amount = The minimum number of elements that shall be appendable
				without triggering a reallocation.
	*/
	void reserve(size_t amount)
	@safe {
		size_t nelems = m_data.length - m_remaining.length;
		if (!m_data.length) {
			m_data = () @trusted { return cast(ElemType[])m_alloc.allocate(amount*E.sizeof); } ();
			m_remaining = m_data;
			m_allocatedBuffer = true;
		}
		if (m_remaining.length < amount) {
			debug {
				import std.digest.crc;
				auto checksum = crc32Of(m_data[0 .. nelems]);
			}
			if (m_allocatedBuffer) () @trusted {
				auto vdata = cast(void[])m_data;
				m_alloc.reallocate(vdata, (nelems+amount)*E.sizeof);
				m_data = cast(ElemType[])vdata;
			} (); else {
				auto newdata = () @trusted { return cast(ElemType[])m_alloc.allocate((nelems+amount)*E.sizeof); } ();
				newdata[0 .. nelems] = m_data[0 .. nelems];
				m_data = newdata;
				m_allocatedBuffer = true;
			}
			debug assert(crc32Of(m_data[0 .. nelems]) == checksum);
		}
		m_remaining = m_data[nelems .. m_data.length];
	}

	void put(E el)
	@safe {
		if( m_remaining.length == 0 ) grow(1);
		m_remaining[0] = el;
		m_remaining = m_remaining[1 .. $];
	}

	void put(ArrayType arr)
	@safe {
		if (m_remaining.length < arr.length) grow(arr.length);
		m_remaining[0 .. arr.length] = arr[];
		m_remaining = m_remaining[arr.length .. $];
	}

	static if( !hasAliasing!E ){
		void put(in ElemType[] arr) @trusted {
			put(cast(ArrayType)arr);
		}
	}

	static if( is(ElemType == char) ){
		void put(dchar el)
		@trusted {
			if( el < 128 ) put(cast(char)el);
			else {
				char[4] buf;
				auto len = std.utf.encode(buf, el);
				put(cast(ArrayType)buf[0 .. len]);
			}
		}
	}

	static if( is(ElemType == wchar) ){
		void put(dchar el)
		@trusted {
			if( el < 128 ) put(cast(wchar)el);
			else {
				wchar[3] buf;
				auto len = std.utf.encode(buf, el);
				put(cast(ArrayType)buf[0 .. len]);
			}
		}
	}

	static if (!is(E == immutable) || !hasAliasing!E) {
		/** Appends a number of elements in-place.

			The delegate receives a slice of the memory that follows the
			already written data. Use `reserve` to ensure that this slice is
			large enough. The delegate may overwrite as much of the slice as
			desired and must return the number of elements that were actually
			written (counting from the start of the slice).
		*/
		void append(scope size_t delegate(scope ElemType[] dst) del)
		{
			auto n = del(m_remaining);
			assert(n <= m_remaining.length);
			m_remaining = m_remaining[n .. $];
		}
	}

	void grow(size_t min_free)
	@safe {
		if( !m_data.length && min_free < 16 ) min_free = 16;

		auto min_size = m_data.length + min_free - m_remaining.length;
		auto new_size = max(m_data.length, 16);
		while( new_size < min_size )
			new_size = (new_size * 3) / 2;
		reserve(new_size - m_data.length + m_remaining.length);
	}
}

unittest {
	auto a = AllocAppender!string(theAllocator());
	a.put("Hello");
	a.put(' ');
	a.put("World");
	assert(a.data == "Hello World");
	a.reset();
	assert(a.data == "");
}

unittest {
	char[4] buf;
	auto a = AllocAppender!string(theAllocator(), buf);
	a.put("He");
	assert(a.data == "He");
	assert(a.data.ptr == buf.ptr);
	a.put("ll");
	assert(a.data == "Hell");
	assert(a.data.ptr == buf.ptr);
	a.put('o');
	assert(a.data == "Hello");
	assert(a.data.ptr != buf.ptr);
}

unittest {
	char[4] buf;
	auto a = AllocAppender!string(theAllocator(), buf);
	a.put("Hello");
	assert(a.data == "Hello");
	assert(a.data.ptr != buf.ptr);
}

unittest {
	auto app = AllocAppender!(int[])(theAllocator);
	app.reserve(2);
	app.append((scope mem) {
		assert(mem.length >= 2);
		mem[0] = 1;
		mem[1] = 2;
		return 2;
	});
	assert(app.data == [1, 2]);
}

unittest {
	auto app = AllocAppender!string(theAllocator);
	app.reserve(3);
	app.append((scope mem) {
		assert(mem.length >= 3);
		mem[0] = 'f';
		mem[1] = 'o';
		mem[2] = 'o';
		return 3;
	});
	assert(app.data == "foo");
}


/// Appender that writes into a fixed-size buffer embedded in the struct itself.
struct FixedAppender(ArrayType : E[], size_t NELEM, E) {
	alias ElemType = Unqual!E;
	private {
		ElemType[NELEM] m_data;
		size_t m_fill;
	}

	void clear()
	{
		m_fill = 0;
	}

	void put(E el)
	{
		m_data[m_fill++] = el;
	}

	static if( is(ElemType == char) ){
		void put(dchar el)
		{
			if( el < 128 ) put(cast(char)el);
			else {
				char[4] buf;
				auto len = std.utf.encode(buf, el);
				put(cast(ArrayType)buf[0 .. len]);
			}
		}
	}

	static if( is(ElemType == wchar) ){
		void put(dchar el)
		{
			if( el < 128 ) put(cast(wchar)el);
			else {
				wchar[3] buf;
				auto len = std.utf.encode(buf, el);
				put(cast(ArrayType)buf[0 .. len]);
			}
		}
	}

	void put(ArrayType arr)
	{
		m_data[m_fill .. m_fill+arr.length] = (cast(ElemType[])arr)[];
		m_fill += arr.length;
	}

	@property ArrayType data() { return cast(ArrayType)m_data[0 .. m_fill]; }

	static if (!is(E == immutable)) {
		void reset() { m_fill = 0; }
	}
}
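
// Minimal usage sketch for FixedAppender (illustrative; assumes the static
// buffer is large enough for everything that gets appended).
unittest {
	FixedAppender!(string, 16) app;
	app.put("Hello");
	app.put(' ');
	app.put("World");
	assert(app.data == "Hello World");
	app.clear();
	assert(app.data == "");
}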


/** Ring buffer with a fixed capacity, either static (`N > 0`) or allocated at run time (`N == 0`).

	TODO: clear ring buffer fields upon removal (to run struct destructors, if T is a struct)
*/
struct FixedRingBuffer(T, size_t N = 0, bool INITIALIZE = true) {
	private {
		static if( N > 0 ) {
			static if (INITIALIZE) T[N] m_buffer;
			else T[N] m_buffer = void;
		} else T[] m_buffer;
		size_t m_start = 0;
		size_t m_fill = 0;
	}

	static if( N == 0 ){
		bool m_freeOnDestruct;
		this(size_t capacity) { m_buffer = new T[capacity]; }
		~this() { if (m_freeOnDestruct && m_buffer.length > 0) deleteCompat(m_buffer); }
	}

	@property bool empty() const { return m_fill == 0; }

	@property bool full() const { return m_fill == m_buffer.length; }

	@property size_t length() const { return m_fill; }

	@property size_t freeSpace() const { return m_buffer.length - m_fill; }

	@property size_t capacity() const { return m_buffer.length; }

	static if( N == 0 ){
		deprecated @property void freeOnDestruct(bool b) { m_freeOnDestruct = b; }

		/// Resets the capacity to zero and explicitly frees the memory for the buffer.
		void dispose()
		{
			deleteCompat(m_buffer);
			m_buffer = null;
			m_start = m_fill = 0;
		}

		@property void capacity(size_t new_size)
		{
			if( m_buffer.length ){
				auto newbuffer = new T[new_size];
				auto dst = newbuffer;
				auto newfill = min(m_fill, new_size);
				read(dst[0 .. newfill]);
				if (m_freeOnDestruct && m_buffer.length > 0) () @trusted {
					deleteCompat(m_buffer);
				} ();
				m_buffer = newbuffer;
				m_start = 0;
				m_fill = newfill;
			} else {
				if (m_freeOnDestruct && m_buffer.length > 0) () @trusted {
					deleteCompat(m_buffer);
				} ();
				m_buffer = new T[new_size];
			}
		}
	}

	@property ref inout(T) front() inout { assert(!empty); return m_buffer[m_start]; }

	@property ref inout(T) back() inout { assert(!empty); return m_buffer[mod(m_start+m_fill-1)]; }

	void clear()
	{
		popFrontN(length);
		assert(m_fill == 0);
		m_start = 0;
	}

	void put()(T itm) { assert(m_fill < m_buffer.length); move(itm, m_buffer[mod(m_start + m_fill++)]); }
	void put(TC : T)(scope TC[] itms)
	{
		if( !itms.length ) return;
		assert(m_fill+itms.length <= m_buffer.length);
		if( mod(m_start+m_fill) >= mod(m_start+m_fill+itms.length) ){
			size_t chunk1 = m_buffer.length - (m_start+m_fill);
			size_t chunk2 = itms.length - chunk1;
			m_buffer[m_start+m_fill .. m_buffer.length] = itms[0 .. chunk1];
			m_buffer[0 .. chunk2] = itms[chunk1 .. $];
		} else {
			m_buffer[mod(m_start+m_fill) .. mod(m_start+m_fill)+itms.length] = itms[];
		}
		m_fill += itms.length;
	}
	void putN(size_t n) { assert(m_fill+n <= m_buffer.length); m_fill += n; }

	void popFront() { assert(!empty); m_start = mod(m_start+1); m_fill--; }
	void popFrontN(size_t n) { assert(length >= n); m_start = mod(m_start + n); m_fill -= n; }

	void popBack() { assert(!empty); m_fill--; }
	void popBackN(size_t n) { assert(length >= n); m_fill -= n; }

	void removeAt(Range r)
	{
		assert(r.m_buffer is m_buffer);
		if( m_start + m_fill > m_buffer.length ){
			assert(r.m_start >= m_start && r.m_start < m_buffer.length || r.m_start < mod(m_start+m_fill));
			if( r.m_start > m_start ){
				foreach(i; r.m_start .. m_buffer.length-1)
					move(m_buffer[i+1], m_buffer[i]);
				move(m_buffer[0], m_buffer[$-1]);
				foreach(i; 0 .. mod(m_start + m_fill - 1))
					move(m_buffer[i+1], m_buffer[i]);
			} else {
				foreach(i; r.m_start .. mod(m_start + m_fill - 1))
					move(m_buffer[i+1], m_buffer[i]);
			}
		} else {
			assert(r.m_start >= m_start && r.m_start < m_start+m_fill);
			foreach(i; r.m_start .. m_start+m_fill-1)
				move(m_buffer[i+1], m_buffer[i]);
		}
		m_fill--;
		destroy(m_buffer[mod(m_start+m_fill)]); // TODO: only call destroy for non-POD T
	}

	inout(T)[] peek() inout { return m_buffer[m_start .. min(m_start+m_fill, m_buffer.length)]; }
	T[] peekDst() {
		if (!m_buffer.length) return null;
		if( m_start + m_fill < m_buffer.length ) return m_buffer[m_start+m_fill .. $];
		else return m_buffer[mod(m_start+m_fill) .. m_start];
	}

	void read(scope T[] dst)
	{
		assert(dst.length <= length);
		if( !dst.length ) return;
		if( mod(m_start) >= mod(m_start+dst.length) ){
			size_t chunk1 = m_buffer.length - m_start;
			size_t chunk2 = dst.length - chunk1;
			static if (isCopyable!T) {
				dst[0 .. chunk1] = m_buffer[m_start .. $];
				dst[chunk1 .. $] = m_buffer[0 .. chunk2];
			} else {
				foreach (i; 0 .. chunk1) move(m_buffer[m_start+i], dst[i]);
				foreach (i; chunk1 .. dst.length) move(m_buffer[i-chunk1], dst[i]);
			}
		} else {
			static if (isCopyable!T) {
				dst[] = m_buffer[m_start .. m_start+dst.length];
			} else {
				foreach (i; 0 .. dst.length)
					move(m_buffer[m_start + i], dst[i]);
			}
		}
		popFrontN(dst.length);
	}

	int opApply(scope int delegate(ref T itm) del)
	{
		if( m_start+m_fill > m_buffer.length ){
			foreach(i; m_start .. m_buffer.length)
				if( auto ret = del(m_buffer[i]) )
					return ret;
			foreach(i; 0 .. mod(m_start+m_fill))
				if( auto ret = del(m_buffer[i]) )
					return ret;
		} else {
			foreach(i; m_start .. m_start+m_fill)
				if( auto ret = del(m_buffer[i]) )
					return ret;
		}
		return 0;
	}

	/// iterate through elements with index
	int opApply(scope int delegate(size_t i, ref T itm) del)
	{
		if( m_start+m_fill > m_buffer.length ){
			foreach(i; m_start .. m_buffer.length)
				if( auto ret = del(i - m_start, m_buffer[i]) )
					return ret;
			foreach(i; 0 .. mod(m_start+m_fill))
				if( auto ret = del(i + m_buffer.length - m_start, m_buffer[i]) )
					return ret;
		} else {
			foreach(i; m_start .. m_start+m_fill)
				if( auto ret = del(i - m_start, m_buffer[i]) )
					return ret;
		}
		return 0;
	}

	ref inout(T) opIndex(size_t idx) inout { assert(idx < length); return m_buffer[mod(m_start+idx)]; }

	Range opSlice() { return Range(m_buffer, m_start, m_fill); }

	Range opSlice(size_t from, size_t to)
	{
		assert(from <= to);
		assert(to <= m_fill);
		return Range(m_buffer, mod(m_start+from), to-from);
	}

	size_t opDollar(size_t dim)() const if(dim == 0) { return length; }

	private size_t mod(size_t n)
	const {
		static if( N == 0 ){
			/*static if(PotOnly){
				return x & (m_buffer.length-1);
			} else {*/
				return n % m_buffer.length;
			//}
		} else static if( ((N - 1) & N) == 0 ){
			return n & (N - 1);
		} else return n % N;
	}

	static struct Range {
		private {
			T[] m_buffer;
			size_t m_start;
			size_t m_length;
		}

		private this(T[] buffer, size_t start, size_t length)
		{
			m_buffer = buffer;
			m_start = start;
			m_length = length;
		}

		@property bool empty() const { return m_length == 0; }

		@property ref inout(T) front() inout { assert(!empty); return m_buffer[m_start]; }

		void popFront()
		{
			assert(!empty);
			m_start++;
			m_length--;
			if( m_start >= m_buffer.length )
				m_start = 0;
		}
	}
}

unittest {
	static assert(isInputRange!(FixedRingBuffer!int) && isOutputRange!(FixedRingBuffer!int, int));

	FixedRingBuffer!(int, 5) buf;
	assert(buf.length == 0 && buf.freeSpace == 5); buf.put(1); // |1 . . . .
	assert(buf.length == 1 && buf.freeSpace == 4); buf.put(2); // |1 2 . . .
	assert(buf.length == 2 && buf.freeSpace == 3); buf.put(3); // |1 2 3 . .
	assert(buf.length == 3 && buf.freeSpace == 2); buf.put(4); // |1 2 3 4 .
	assert(buf.length == 4 && buf.freeSpace == 1); buf.put(5); // |1 2 3 4 5
	assert(buf.length == 5 && buf.freeSpace == 0);
	assert(buf.front == 1);
	buf.popFront(); // .|2 3 4 5
	assert(buf.front == 2);
	buf.popFrontN(2); // . . .|4 5
	assert(buf.front == 4);
	assert(buf.length == 2 && buf.freeSpace == 3);
	buf.put([6, 7, 8]); // 6 7 8|4 5
	assert(buf.length == 5 && buf.freeSpace == 0);
	int[5] dst;
	buf.read(dst); // . . .|. .
	assert(dst == [4, 5, 6, 7, 8]);
	assert(buf.length == 0 && buf.freeSpace == 5);
	buf.put([1, 2]); // . . .|1 2
	assert(buf.length == 2 && buf.freeSpace == 3);
	buf.read(dst[0 .. 2]); //|. . . . .
	assert(dst[0 .. 2] == [1, 2]);

	buf.put([0, 0, 0, 1, 2]); //|0 0 0 1 2
	buf.popFrontN(2); //. .|0 1 2
	buf.put([3, 4]); // 3 4|0 1 2
	foreach(i, item; buf)
	{
		assert(i == item);
	}
}
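
// Additional sketch for removeAt (illustrative): removing an element through
// a range shifts the elements behind it towards the removed slot.
unittest {
	FixedRingBuffer!(int, 4) buf;
	buf.put([1, 2, 3]);
	auto r = buf[];
	r.popFront(); // r now starts at the element 2
	buf.removeAt(r);
	assert(buf.length == 2);
	assert(buf[0] == 1 && buf[1] == 3);
}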


/// Buffer that is filled with a single batch of elements and then drained by
/// one or more reads before the next batch is written.
struct BatchBuffer(T, size_t N = 0) {
	private {
		size_t m_fill;
		size_t m_first;
		static if (N == 0) T[] m_buffer;
		else T[N] m_buffer;
	}

	static if (N == 0) {
		@property void capacity(size_t n) { assert(n >= m_fill); m_buffer.length = n; }
	}

	@property bool empty() const { assert(m_first < m_fill || (m_fill == 0 && m_first == 0)); return m_first >= m_fill; }
	@property size_t capacity() const { return m_buffer.length; }
	@property size_t length() const { return m_fill - m_first; }
	@property ref inout(T) front() inout { assert(!empty); return m_buffer[m_first]; }
	void popFront() { popFrontN(1); }
	void popFrontN(size_t n) {
		assert(n <= length);
		m_first += n;
		if (m_first == m_fill)
			m_first = m_fill = 0;
	}
	inout(T)[] peek() inout { return m_buffer[m_first .. m_fill]; }
	T[] peekDst() { assert(empty); return m_buffer; }
	void putN(size_t n) { assert(empty && n <= m_buffer.length); m_fill = n; }
	void putN(T[] elems) { assert(empty && elems.length <= m_buffer.length); m_buffer[0 .. elems.length] = elems[]; m_fill = elems.length; }
	void read(T[] dst) {
		assert(length() >= dst.length);
		dst[] = m_buffer[m_first .. m_first + dst.length];
		popFrontN(dst.length);
	}
}
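
// Minimal usage sketch for BatchBuffer (illustrative): one batch is written
// via putN and then drained via front/popFront or read.
unittest {
	BatchBuffer!int buf;
	buf.capacity = 4;
	buf.putN([1, 2, 3]);
	assert(buf.length == 3 && buf.front == 1);
	int[2] dst;
	buf.read(dst[]);
	assert(dst == [1, 2]);
	buf.popFront();
	assert(buf.empty);
}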


/// Simple set that stores up to four keys in-place and falls back to a
/// dynamic array for the rest. `Key.init` marks free slots and can therefore
/// not be stored itself.
struct ArraySet(Key)
{
	private {
		Key[4] m_staticEntries;
		Key[] m_entries;
	}

	@property ArraySet dup()
	{
		return ArraySet(m_staticEntries, m_entries.dup);
	}

	bool opBinaryRight(string op)(Key key) if (op == "in") { return contains(key); }

	int opApply(int delegate(ref Key) del)
	{
		foreach (ref k; m_staticEntries)
			if (k != Key.init)
				if (auto ret = del(k))
					return ret;
		foreach (ref k; m_entries)
			if (k != Key.init)
				if (auto ret = del(k))
					return ret;
		return 0;
	}

	bool contains(Key key)
	const {
		foreach (ref k; m_staticEntries) if (k == key) return true;
		foreach (ref k; m_entries) if (k == key) return true;
		return false;
	}

	void insert(Key key)
	{
		if (contains(key)) return;
		foreach (ref k; m_staticEntries)
			if (k == Key.init) {
				k = key;
				return;
			}
		foreach (ref k; m_entries)
			if (k == Key.init) {
				k = key;
				return;
			}
		m_entries ~= key;
	}

	void remove(Key key)
	{
		foreach (ref k; m_staticEntries) if (k == key) { k = Key.init; return; }
		foreach (ref k; m_entries) if (k == key) { k = Key.init; return; }
	}
}
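
// Minimal usage sketch for ArraySet (illustrative). Note that Key.init
// (0 for int) cannot be inserted, since it marks free slots.
unittest {
	ArraySet!int set;
	foreach (i; 1 .. 7) set.insert(i); // first four go in-place, rest spills
	assert(5 in set);
	set.remove(5);
	assert(!set.contains(5));
	set.insert(3); // duplicate inserts are ignored
	size_t cnt = 0;
	foreach (k; set) cnt++;
	assert(cnt == 5);
}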

// Compatibility shim around the deprecated `delete` statement: newer
// compilers use `core.memory.__delete` instead.
private void deleteCompat(T)(ref T v)
{
	static if (__VERSION__ >= 2079) {
		import core.memory : __delete;
		__delete(v);
	} else mixin("delete v;");
}