Line data Source code
1 : /** Copyright 2025 Alexander G. Lopez
2 :
3 : Licensed under the Apache License, Version 2.0 (the "License");
4 : you may not use this file except in compliance with the License.
5 : You may obtain a copy of the License at
6 :
7 : http://www.apache.org/licenses/LICENSE-2.0
8 :
9 : Unless required by applicable law or agreed to in writing, software
10 : distributed under the License is distributed on an "AS IS" BASIS,
11 : WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 : See the License for the specific language governing permissions and
13 : limitations under the License.
14 :
15 : This file implements a splay tree that does not support duplicates.
16 : The code to support a splay tree that does not allow duplicates is much simpler
17 : than the code to support a multimap implementation. This implementation is
18 : based on the following source.
19 :
20 : 1. Daniel Sleator, Carnegie Mellon University. Sleator's implementation of a
21 : topdown splay tree was instrumental in starting things off, but required
22 : extensive modification. I had to update parent and child tracking, and
23 : unite the left and right cases for fun. See the code for a generalizable
24 : strategy to eliminate symmetric left and right cases for any binary tree
25 : code. https://www.link.cs.cmu.edu/splay/
26 :
27 : Because this is a self-optimizing data structure it may benefit from many
28 : constant time queries for frequently accessed elements. It is also in a Struct
29 : of Arrays layout to improve memory alignment and reduce wasted space. While
30 : it is recommended that the user reserve space for the needed nodes ahead of
31 : time, the amortized O(log(N)) run times of a Splay Tree remain the same in
32 : the dynamic resizing case. */
/** C23 provided headers. */
#include <assert.h>
#include <stdalign.h>
#include <stddef.h>
#include <string.h>

/** CCC provided headers. */
#include "ccc/array_adaptive_map.h"
#include "ccc/configuration.h"
#include "ccc/private/private_array_adaptive_map.h"
#include "ccc/types.h"
42 :
/*======================== Data Alignment Test ==========================*/

/** @internal A macro version of the runtime alignment operations we perform
for calculating bytes. This way we can use in static assert. The user data type
may not be the same alignment as the nodes and therefore the nodes array must
start at next aligned byte. Only valid when alignment is a power of two, which
holds for all fundamental alignments produced by alignof. */
#define roundup(bytes_to_round, alignment)                                     \
    (((bytes_to_round) + (alignment) - 1) & ~((alignment) - 1))

enum : size_t {
    /* @internal Test capacity. Small and odd-sized on purpose so padding
    between the data and nodes arrays is actually exercised. */
    TCAP = 3,
};
/** @internal This is a static fixed size map exclusive to this translation unit
used to ensure assumptions about data layout are correct. The following static
asserts must be true in order to support the Struct of Array style layout we
use for the data and nodes. It is important that in our user code when we set
the positions of the node pointer relative to the data pointer the positions are
correct regardless of backing storage as a fixed map or heap allocation.

Use an int because that will force the nodes array to be wary of
where to start. The nodes are 8 byte aligned but an int is 4. This means the
nodes need to start after a 4 byte Buffer of padding at end of data array. */
static __auto_type const static_data_nodes_layout_test
    = CCC_array_adaptive_map_storage_for((int const[TCAP]){});
/** Some assumptions in the code assume that nodes array is last so ensure that
is the case here. Also good to assume user data comes first. */
static_assert(
    (char const *)static_data_nodes_layout_test.data
        < (char const *)static_data_nodes_layout_test.nodes,
    "The order of the arrays in a Struct of Arrays map is data, then "
    "nodes."
);
/** We don't care about the alignment or padding after the nodes array because
we never need to set or move any pointers to that position. The alignment is
important for the nodes pointer to be set to the correct aligned position and
so that we allocate enough bytes for our single allocation if the map is dynamic
and not a fixed type. */
static_assert(
    (char const *)&static_data_nodes_layout_test.nodes[TCAP]
            - (char const *)&static_data_nodes_layout_test.data[0]
        == roundup(
               (sizeof(*static_data_nodes_layout_test.data) * TCAP),
               alignof(*static_data_nodes_layout_test.nodes)
           ) + (sizeof(*static_data_nodes_layout_test.nodes) * TCAP),
    "The pointer difference in bytes between end of the nodes array and start "
    "of user data array must be the same as the total bytes we assume to be "
    "stored in that range. Alignment of user data must be considered."
);
static_assert(
    (char const *)&static_data_nodes_layout_test.data
            + roundup(
                (sizeof(*static_data_nodes_layout_test.data) * TCAP),
                alignof(*static_data_nodes_layout_test.nodes)
            )
        == (char const *)&static_data_nodes_layout_test.nodes,
    "The start of the nodes array must begin at the next aligned "
    "byte given alignment of a node."
);
102 :
/*========================== Type Declarations ===========================*/

/** @internal Number of child branches per node: left and right. */
enum {
    LR = 2,
};

/** @internal Index of each child branch within a node's branch array. The
symmetric left/right tree cases are united by indexing with this enum and its
negation rather than duplicating mirrored code paths. */
enum Branch {
    L = 0,
    R,
};

/* Traversal directions for iteration expressed in terms of branches. */
#define INORDER R
#define INORDER_REVERSE L

enum {
    /** 0th slot is sentinel. Count will be 2 when inserting new root. */
    INSERT_ROOT_NODE_COUNT = 2,
};

/* Buffer allocates before insert. "Empty" has nil 0th slot and one more. */
125 :
/*============================== Prototypes ==============================*/

/** @internal Forward declarations for every static helper in this translation
unit so the public interface functions may be defined first. */
static size_t splay(struct CCC_Array_adaptive_map *, size_t, void const *);
static struct CCC_Array_adaptive_map_node *
node_at(struct CCC_Array_adaptive_map const *, size_t);
static void *data_at(struct CCC_Array_adaptive_map const *, size_t);
static struct CCC_Array_adaptive_map_handle
handle(struct CCC_Array_adaptive_map *, void const *);
static size_t erase(struct CCC_Array_adaptive_map *, void const *);
static size_t maybe_allocate_insert(
    struct CCC_Array_adaptive_map *, void const *, CCC_Allocator const *
);
static CCC_Result
resize(struct CCC_Array_adaptive_map *, size_t, CCC_Allocator const *);
static void copy_soa(struct CCC_Array_adaptive_map const *, void *, size_t);
static size_t data_bytes(size_t, size_t);
static size_t nodes_bytes(size_t);
static struct CCC_Array_adaptive_map_node *
nodes_base_address(size_t, void const *, size_t);
static size_t find(struct CCC_Array_adaptive_map *, void const *);
static void
connect_new_root(struct CCC_Array_adaptive_map *, size_t, CCC_Order);
static void insert(struct CCC_Array_adaptive_map *, size_t n);
static void *key_in_slot(struct CCC_Array_adaptive_map const *, void const *);
static size_t
allocate_slot(struct CCC_Array_adaptive_map *, CCC_Allocator const *);
static size_t total_bytes(size_t, size_t);
static CCC_Handle_range equal_range(
    struct CCC_Array_adaptive_map *, void const *, void const *, enum Branch
);
static void *key_at(struct CCC_Array_adaptive_map const *, size_t);
static CCC_Order
order_nodes(struct CCC_Array_adaptive_map const *, void const *, size_t);
static size_t remove_from_tree(struct CCC_Array_adaptive_map *, size_t);
static size_t
min_max_from(struct CCC_Array_adaptive_map const *, size_t, enum Branch);
static size_t next(struct CCC_Array_adaptive_map const *, size_t, enum Branch);
static size_t
branch_index(struct CCC_Array_adaptive_map const *, size_t, enum Branch);
static size_t parent_index(struct CCC_Array_adaptive_map const *, size_t);
static size_t *
branch_pointer(struct CCC_Array_adaptive_map const *, size_t, enum Branch);
static size_t *parent_pointer(struct CCC_Array_adaptive_map const *, size_t);
static CCC_Tribool validate(struct CCC_Array_adaptive_map const *);
static void init_node(struct CCC_Array_adaptive_map const *, size_t);
static void swap(void *, void *, void *, size_t);
static void link(struct CCC_Array_adaptive_map *, size_t, enum Branch, size_t);
static size_t max(size_t, size_t);
static void
delete_nodes(struct CCC_Array_adaptive_map *, CCC_Destructor const *);
176 :
177 : /*============================== Interface ==============================*/
178 :
179 : void *
180 16781 : CCC_array_adaptive_map_at(
181 : CCC_Array_adaptive_map const *const map, CCC_Handle_index const index
182 : ) {
183 16781 : if (!map || !index) {
184 13 : return NULL;
185 : }
186 16768 : return data_at(map, index);
187 16781 : }
188 :
189 : CCC_Tribool
190 66 : CCC_array_adaptive_map_contains(
191 : CCC_Array_adaptive_map *const map, void const *const key
192 : ) {
193 66 : if (!map || !key) {
194 2 : return CCC_TRIBOOL_ERROR;
195 : }
196 64 : map->root = splay(map, map->root, key);
197 64 : return order_nodes(map, key, map->root) == CCC_ORDER_EQUAL;
198 66 : }
199 :
200 : CCC_Handle_index
201 2017 : CCC_array_adaptive_map_get_key_value(
202 : CCC_Array_adaptive_map *const map, void const *const key
203 : ) {
204 2017 : if (!map || !key) {
205 2 : return 0;
206 : }
207 2015 : return find(map, key);
208 2017 : }
209 :
210 : CCC_Array_adaptive_map_handle
211 13044 : CCC_array_adaptive_map_handle(
212 : CCC_Array_adaptive_map *const map, void const *const key
213 : ) {
214 13044 : if (!map || !key) {
215 2 : return (CCC_Array_adaptive_map_handle){
216 : .status = CCC_ENTRY_ARGUMENT_ERROR,
217 : };
218 : }
219 13042 : return handle(map, key);
220 13044 : }
221 :
222 : CCC_Handle_index
223 8381 : CCC_array_adaptive_map_insert_handle(
224 : CCC_Array_adaptive_map_handle const *const handle,
225 : void const *const key_val_type,
226 : CCC_Allocator const *const allocator
227 : ) {
228 8381 : if (!handle || !key_val_type || !allocator) {
229 3 : return 0;
230 : }
231 8378 : if (handle->status == CCC_ENTRY_OCCUPIED) {
232 3105 : void *const ret = data_at(handle->map, handle->index);
233 3105 : if (key_val_type != ret) {
234 3105 : (void)memcpy(ret, key_val_type, handle->map->sizeof_type);
235 3105 : }
236 3105 : return handle->index;
237 3105 : }
238 5273 : return maybe_allocate_insert(handle->map, key_val_type, allocator);
239 8381 : }
240 :
241 : CCC_Array_adaptive_map_handle *
242 112 : CCC_array_adaptive_map_and_modify(
243 : CCC_Array_adaptive_map_handle *const handle,
244 : CCC_Modifier const *const modifier
245 : ) {
246 112 : if (!handle || !modifier) {
247 2 : return NULL;
248 : }
249 110 : if (modifier->modify && handle->status & CCC_ENTRY_OCCUPIED) {
250 168 : modifier->modify((CCC_Arguments){
251 56 : .type = data_at(handle->map, handle->index),
252 56 : .context = modifier->context,
253 : });
254 56 : }
255 110 : return handle;
256 112 : }
257 :
258 : CCC_Handle_index
259 262 : CCC_array_adaptive_map_or_insert(
260 : CCC_Array_adaptive_map_handle const *const handle,
261 : void const *const key_val_type,
262 : CCC_Allocator const *const allocator
263 : ) {
264 262 : if (!handle || !key_val_type || !allocator) {
265 3 : return 0;
266 : }
267 259 : if (handle->status & CCC_ENTRY_OCCUPIED) {
268 153 : return handle->index;
269 : }
270 106 : return maybe_allocate_insert(handle->map, key_val_type, allocator);
271 262 : }
272 :
273 : CCC_Handle
274 1565 : CCC_array_adaptive_map_swap_handle(
275 : CCC_Array_adaptive_map *const map,
276 : void *const type_output,
277 : CCC_Allocator const *const allocator
278 : ) {
279 1565 : if (!map || !type_output || !allocator) {
280 3 : return (CCC_Handle){.status = CCC_ENTRY_ARGUMENT_ERROR};
281 : }
282 1562 : size_t const found = find(map, key_in_slot(map, type_output));
283 1562 : if (found) {
284 107 : assert(map->root);
285 107 : void *const ret = data_at(map, map->root);
286 107 : void *const temp = data_at(map, 0);
287 107 : swap(temp, type_output, ret, map->sizeof_type);
288 214 : return (CCC_Handle){
289 107 : .index = found,
290 : .status = CCC_ENTRY_OCCUPIED,
291 : };
292 107 : }
293 1455 : size_t const inserted = maybe_allocate_insert(map, type_output, allocator);
294 1455 : if (!inserted) {
295 1 : return (CCC_Handle){
296 : .index = 0,
297 : .status = CCC_ENTRY_INSERT_ERROR,
298 : };
299 : }
300 2908 : return (CCC_Handle){
301 1454 : .index = inserted,
302 : .status = CCC_ENTRY_VACANT,
303 : };
304 1565 : }
305 :
306 : CCC_Handle
307 1224 : CCC_array_adaptive_map_try_insert(
308 : CCC_Array_adaptive_map *const map,
309 : void const *const key_val_type,
310 : CCC_Allocator const *const allocator
311 : ) {
312 1224 : if (!map || !key_val_type || !allocator) {
313 3 : return (CCC_Handle){.status = CCC_ENTRY_ARGUMENT_ERROR};
314 : }
315 1221 : size_t const found = find(map, key_in_slot(map, key_val_type));
316 1221 : if (found) {
317 412 : assert(map->root);
318 824 : return (CCC_Handle){
319 412 : .index = found,
320 : .status = CCC_ENTRY_OCCUPIED,
321 : };
322 : }
323 809 : size_t const inserted = maybe_allocate_insert(map, key_val_type, allocator);
324 809 : if (!inserted) {
325 1 : return (CCC_Handle){
326 : .index = 0,
327 : .status = CCC_ENTRY_INSERT_ERROR,
328 : };
329 : }
330 1616 : return (CCC_Handle){
331 808 : .index = inserted,
332 : .status = CCC_ENTRY_VACANT,
333 : };
334 1224 : }
335 :
336 : CCC_Handle
337 3037 : CCC_array_adaptive_map_insert_or_assign(
338 : CCC_Array_adaptive_map *const map,
339 : void const *const key_val_type,
340 : CCC_Allocator const *const allocator
341 : ) {
342 3037 : if (!map || !key_val_type || !allocator) {
343 3 : return (CCC_Handle){.status = CCC_ENTRY_ARGUMENT_ERROR};
344 : }
345 3034 : size_t const found = find(map, key_in_slot(map, key_val_type));
346 3034 : if (found) {
347 363 : assert(map->root);
348 363 : void *const f_base = data_at(map, found);
349 363 : if (key_val_type != f_base) {
350 363 : memcpy(f_base, key_val_type, map->sizeof_type);
351 363 : }
352 726 : return (CCC_Handle){
353 363 : .index = found,
354 : .status = CCC_ENTRY_OCCUPIED,
355 : };
356 363 : }
357 2671 : size_t const inserted = maybe_allocate_insert(map, key_val_type, allocator);
358 2671 : if (!inserted) {
359 3 : return (CCC_Handle){
360 : .index = 0,
361 : .status = CCC_ENTRY_INSERT_ERROR,
362 : };
363 : }
364 5336 : return (CCC_Handle){
365 2668 : .index = inserted,
366 : .status = CCC_ENTRY_VACANT,
367 : };
368 3037 : }
369 :
370 : CCC_Handle
371 2301 : CCC_array_adaptive_map_remove_key_value(
372 : CCC_Array_adaptive_map *const map, void *const type_output
373 : ) {
374 2301 : if (!map || !type_output) {
375 2 : return (CCC_Handle){.status = CCC_ENTRY_ARGUMENT_ERROR};
376 : }
377 2299 : size_t const removed = erase(map, key_in_slot(map, type_output));
378 2299 : if (!removed) {
379 3 : return (CCC_Handle){
380 : .index = 0,
381 : .status = CCC_ENTRY_VACANT,
382 : };
383 : }
384 2296 : assert(removed);
385 2296 : void const *const r = data_at(map, removed);
386 2296 : if (type_output != r) {
387 2296 : (void)memcpy(type_output, r, map->sizeof_type);
388 2296 : }
389 2296 : return (CCC_Handle){
390 : .index = 0,
391 : .status = CCC_ENTRY_OCCUPIED,
392 : };
393 2301 : }
394 :
395 : CCC_Handle
396 55 : CCC_array_adaptive_map_remove_handle(
397 : CCC_Array_adaptive_map_handle *const handle
398 : ) {
399 55 : if (!handle) {
400 1 : return (CCC_Handle){.status = CCC_ENTRY_ARGUMENT_ERROR};
401 : }
402 54 : if (handle->status == CCC_ENTRY_OCCUPIED) {
403 88 : size_t const erased
404 44 : = erase(handle->map, key_at(handle->map, handle->index));
405 44 : assert(erased);
406 88 : return (CCC_Handle){
407 44 : .index = erased,
408 : .status = CCC_ENTRY_OCCUPIED,
409 : };
410 44 : }
411 10 : return (CCC_Handle){
412 : .index = 0,
413 : .status = CCC_ENTRY_VACANT,
414 : };
415 55 : }
416 :
417 : CCC_Handle_index
418 16 : CCC_array_adaptive_map_unwrap(
419 : CCC_Array_adaptive_map_handle const *const handle
420 : ) {
421 16 : if (!handle) {
422 1 : return 0;
423 : }
424 15 : return handle->status == CCC_ENTRY_OCCUPIED ? handle->index : 0;
425 16 : }
426 :
427 : CCC_Tribool
428 3 : CCC_array_adaptive_map_insert_error(
429 : CCC_Array_adaptive_map_handle const *const handle
430 : ) {
431 3 : if (!handle) {
432 2 : return CCC_TRIBOOL_ERROR;
433 : }
434 1 : return (handle->status & CCC_ENTRY_INSERT_ERROR) != 0;
435 3 : }
436 :
437 : CCC_Tribool
438 84 : CCC_array_adaptive_map_occupied(
439 : CCC_Array_adaptive_map_handle const *const handle
440 : ) {
441 84 : if (!handle) {
442 1 : return CCC_TRIBOOL_ERROR;
443 : }
444 83 : return (handle->status & CCC_ENTRY_OCCUPIED) != 0;
445 84 : }
446 :
447 : CCC_Handle_status
448 2 : CCC_array_adaptive_map_handle_status(
449 : CCC_Array_adaptive_map_handle const *const handle
450 : ) {
451 2 : return handle ? handle->status : CCC_ENTRY_ARGUMENT_ERROR;
452 : }
453 :
454 : CCC_Tribool
455 2372 : CCC_array_adaptive_map_is_empty(CCC_Array_adaptive_map const *const map) {
456 2372 : if (!map) {
457 1 : return CCC_TRIBOOL_ERROR;
458 : }
459 2371 : return !CCC_array_adaptive_map_count(map).count;
460 2372 : }
461 :
462 : CCC_Count
463 2526 : CCC_array_adaptive_map_count(CCC_Array_adaptive_map const *const map) {
464 2526 : if (!map) {
465 1 : return (CCC_Count){.error = CCC_RESULT_ARGUMENT_ERROR};
466 : }
467 5050 : return (CCC_Count){
468 2525 : .count = map->count ? map->count - 1 : 0,
469 : };
470 2526 : }
471 :
472 : CCC_Count
473 10 : CCC_array_adaptive_map_capacity(CCC_Array_adaptive_map const *const map) {
474 10 : if (!map) {
475 1 : return (CCC_Count){.error = CCC_RESULT_ARGUMENT_ERROR};
476 : }
477 9 : return (CCC_Count){.count = map->capacity};
478 10 : }
479 :
480 : CCC_Handle_index
481 16 : CCC_array_adaptive_map_begin(CCC_Array_adaptive_map const *const map) {
482 16 : if (!map || !map->capacity) {
483 3 : return 0;
484 : }
485 13 : size_t const n = min_max_from(map, map->root, L);
486 13 : return n;
487 16 : }
488 :
489 : CCC_Handle_index
490 3 : CCC_array_adaptive_map_reverse_begin(CCC_Array_adaptive_map const *const map) {
491 3 : if (!map || !map->capacity) {
492 1 : return 0;
493 : }
494 2 : size_t const n = min_max_from(map, map->root, R);
495 2 : return n;
496 3 : }
497 :
/** Advances the in-order iterator to the next greater element, returning 0 at
the end of iteration or for a NULL/uninitialized map.
NOTE(review): unlike reverse_next below, iterator == 0 is not rejected here;
presumably next() tolerates the 0 sentinel index — confirm this asymmetry is
intended. */
CCC_Handle_index
CCC_array_adaptive_map_next(
    CCC_Array_adaptive_map const *const map, CCC_Handle_index const iterator
) {
    if (!map || !map->capacity) {
        return 0;
    }
    size_t const n = next(map, iterator, INORDER);
    return n;
}
508 :
509 : CCC_Handle_index
510 1296 : CCC_array_adaptive_map_reverse_next(
511 : CCC_Array_adaptive_map const *const map, CCC_Handle_index const iterator
512 : ) {
513 1296 : if (!map || !iterator || !map->capacity) {
514 1 : return 0;
515 : }
516 1295 : size_t const n = next(map, iterator, INORDER_REVERSE);
517 1295 : return n;
518 1296 : }
519 :
/** The end of iteration is always the 0 sentinel index, regardless of map. */
CCC_Handle_index
CCC_array_adaptive_map_end(CCC_Array_adaptive_map const *const) {
    return 0;
}
524 :
/** The end of reverse iteration is also the 0 sentinel index. */
CCC_Handle_index
CCC_array_adaptive_map_reverse_end(CCC_Array_adaptive_map const *const) {
    return 0;
}
529 :
530 : CCC_Handle_range
531 8 : CCC_array_adaptive_map_equal_range(
532 : CCC_Array_adaptive_map *const map,
533 : void const *const begin_key,
534 : void const *const end_key
535 : ) {
536 8 : if (!map || !begin_key || !end_key) {
537 3 : return (CCC_Handle_range){};
538 : }
539 5 : return equal_range(map, begin_key, end_key, INORDER);
540 8 : }
541 :
542 : CCC_Handle_range_reverse
543 8 : CCC_array_adaptive_map_equal_range_reverse(
544 : CCC_Array_adaptive_map *const map,
545 : void const *const reverse_begin_key,
546 : void const *const reverse_end_key
547 : )
548 :
549 : {
550 8 : if (!map || !reverse_begin_key || !reverse_end_key) {
551 3 : return (CCC_Handle_range_reverse){};
552 : }
553 5 : CCC_Handle_range const range
554 5 : = equal_range(map, reverse_begin_key, reverse_end_key, INORDER_REVERSE);
555 15 : return (CCC_Handle_range_reverse){
556 5 : .reverse_begin = range.begin,
557 5 : .reverse_end = range.end,
558 : };
559 8 : }
560 :
/** Ensures capacity for to_add additional elements, resizing via the allocator
when needed and threading the newly created slots onto the free list. */
CCC_Result
CCC_array_adaptive_map_reserve(
    CCC_Array_adaptive_map *const map,
    size_t const to_add,
    CCC_Allocator const *const allocator
) {
    if (!map || !to_add || !allocator || !allocator->allocate) {
        return CCC_RESULT_ARGUMENT_ERROR;
    }
    /* Once initialized the Buffer always has a size of one for root node. */
    size_t const needed = map->count + to_add + (map->count == 0);
    if (needed <= map->capacity) {
        return CCC_RESULT_OK;
    }
    size_t const old_count = map->count;
    size_t old_cap = map->capacity;
    CCC_Result const r = resize(map, needed, allocator);
    if (r != CCC_RESULT_OK) {
        return r;
    }
    if (!old_cap) {
        /* First allocation: claim slot 0 as the sentinel. */
        map->count = 1;
    }
    old_cap = old_count ? old_cap : 0;
    size_t const new_cap = map->capacity;
    size_t prev = 0;
    size_t i = new_cap;
    /* Link each new slot to the one after it, terminating in 0.
    NOTE(review): the break at i <= old_cap excludes slot old_cap itself,
    while allocate_slot's analogous loop uses i >= old_cap and includes it —
    confirm this boundary difference is intended and does not waste a slot. */
    while (i--) {
        if (i <= old_cap) {
            break;
        }
        node_at(map, i)->next_free = prev;
        prev = i;
    }
    /* NOTE(review): when free_list is already non-zero the freshly linked
    chain in prev is never attached to the existing list — presumably the new
    slots would then be unreachable; verify this path (untested here). */
    if (!map->free_list) {
        map->free_list = prev;
    }
    return CCC_RESULT_OK;
}
600 :
/** Copies source into destination, growing destination via the allocator when
its capacity is smaller than source's. Self-copy and shrinking-copy without an
allocation function are argument errors. */
CCC_Result
CCC_array_adaptive_map_copy(
    CCC_Array_adaptive_map *const destination,
    CCC_Array_adaptive_map const *const source,
    CCC_Allocator const *const allocator
) {
    if (!destination || !source || !allocator || source == destination
        || (destination->capacity < source->capacity && !allocator->allocate)) {
        return CCC_RESULT_ARGUMENT_ERROR;
    }
    if (!source->capacity) {
        return CCC_RESULT_OK;
    }
    if (destination->capacity < source->capacity) {
        CCC_Result const r = resize(destination, source->capacity, allocator);
        if (r != CCC_RESULT_OK) {
            return r;
        }
    } else {
        /* Might not be necessary but not worth finding out. Do every time. */
        destination->nodes = nodes_base_address(
            destination->sizeof_type, destination->data, destination->capacity
        );
    }
    if (!destination->data || !source->data) {
        return CCC_RESULT_ARGUMENT_ERROR;
    }
    /* Copy both SoA arrays (data then nodes) into destination's storage. */
    copy_soa(source, destination->data, destination->capacity);
    destination->free_list = source->free_list;
    destination->root = source->root;
    destination->count = source->count;
    destination->comparator = source->comparator;
    destination->sizeof_type = source->sizeof_type;
    destination->key_offset = source->key_offset;
    return CCC_RESULT_OK;
}
637 :
638 : CCC_Result
639 2 : CCC_array_adaptive_map_clear(
640 : CCC_Array_adaptive_map *const map, CCC_Destructor const *const destructor
641 : ) {
642 2 : if (!map || !destructor) {
643 1 : return CCC_RESULT_ARGUMENT_ERROR;
644 : }
645 1 : if (destructor->destroy) {
646 1 : delete_nodes(map, destructor);
647 1 : }
648 1 : map->count = 1;
649 1 : map->root = 0;
650 1 : return CCC_RESULT_OK;
651 2 : }
652 :
/** Destroys all user elements (when a destroy callback is provided), frees
the single backing allocation via the allocator, and zeroes the map state.
NOTE(review): free_list is not reset here — presumably harmless because
capacity becomes 0, but confirm against re-initialization paths. */
CCC_Result
CCC_array_adaptive_map_clear_and_free(
    CCC_Array_adaptive_map *const map,
    CCC_Destructor const *const destructor,
    CCC_Allocator const *const allocator
) {
    if (!map || !destructor || !allocator || !allocator->allocate) {
        return CCC_RESULT_ARGUMENT_ERROR;
    }
    if (destructor->destroy) {
        delete_nodes(map, destructor);
    }
    map->root = 0;
    map->count = 0;
    map->capacity = 0;
    /* A request of 0 bytes asks the allocator to free the block. */
    (void)allocator->allocate((CCC_Allocator_arguments){
        .input = map->data,
        .bytes = 0,
        .context = allocator->context,
    });
    map->data = NULL;
    map->nodes = NULL;
    return CCC_RESULT_OK;
}
677 :
678 : CCC_Tribool
679 9885 : CCC_array_adaptive_map_validate(CCC_Array_adaptive_map const *const map) {
680 9885 : if (!map) {
681 1 : return CCC_TRIBOOL_ERROR;
682 : }
683 9884 : return validate(map);
684 9885 : }
685 :
686 : /*=========================== Private Interface ===========================*/
687 :
/** @internal Private-interface shim exposing the static insert helper to the
library's macro layer. The slot at elem_i must already hold the user data. */
void
CCC_private_array_adaptive_map_insert(
    struct CCC_Array_adaptive_map *const map, size_t const elem_i
) {
    insert(map, elem_i);
}
694 :
/** @internal Private-interface shim exposing the static handle helper. */
struct CCC_Array_adaptive_map_handle
CCC_private_array_adaptive_map_handle(
    struct CCC_Array_adaptive_map *const map, void const *const key
) {
    return handle(map, key);
}
701 :
/** @internal Private-interface shim returning the key within a given slot. */
void *
CCC_private_array_adaptive_map_key_at(
    struct CCC_Array_adaptive_map const *const map, size_t const slot
) {
    return key_at(map, slot);
}
708 :
/** @internal Private-interface shim returning the user data at a slot. */
void *
CCC_private_array_adaptive_map_data_at(
    struct CCC_Array_adaptive_map const *const map, size_t const slot
) {
    return data_at(map, slot);
}
715 :
/** @internal Private-interface shim exposing free-list slot allocation. */
size_t
CCC_private_array_adaptive_map_allocate_slot(
    struct CCC_Array_adaptive_map *const map,
    CCC_Allocator const *const allocator
) {
    return allocate_slot(map, allocator);
}
723 :
724 : /*=========================== Static Helpers ===========================*/
725 :
/** @internal Computes the [inclusive, exclusive) range between two keys in
the requested traversal direction by splaying each endpoint to the root and
adjusting for strict range boundaries. */
static CCC_Handle_range
equal_range(
    struct CCC_Array_adaptive_map *const t,
    void const *const begin_key,
    void const *const end_key,
    enum Branch const traversal
) {
    if (CCC_array_adaptive_map_is_empty(t)) {
        return (CCC_Handle_range){};
    }
    /* As with most BST code the cases are perfectly symmetrical. If we
    are seeking an increasing or decreasing range we need to make sure
    we follow the [inclusive, exclusive) range rule. This means double
    checking we don't need to progress to the next greatest or next
    lesser element depending on the direction we are traversing. */
    CCC_Order const les_or_grt[2] = {CCC_ORDER_LESSER, CCC_ORDER_GREATER};
    size_t b = splay(t, t->root, begin_key);
    if (order_nodes(t, begin_key, b) == les_or_grt[traversal]) {
        b = next(t, b, traversal);
    }
    size_t e = splay(t, t->root, end_key);
    if (order_nodes(t, end_key, e) != les_or_grt[!traversal]) {
        e = next(t, e, traversal);
    }
    return (CCC_Handle_range){
        .begin = b,
        .end = e,
    };
}
755 :
756 : static struct CCC_Array_adaptive_map_handle
757 13090 : handle(struct CCC_Array_adaptive_map *const map, void const *const key) {
758 13090 : size_t const found = find(map, key);
759 13090 : if (found) {
760 22542 : return (struct CCC_Array_adaptive_map_handle){
761 7514 : .map = map,
762 7514 : .index = found,
763 : .status = CCC_ENTRY_OCCUPIED,
764 : };
765 : }
766 11152 : return (struct CCC_Array_adaptive_map_handle){
767 5576 : .map = map,
768 : .index = 0,
769 : .status = CCC_ENTRY_VACANT,
770 : };
771 13090 : }
772 :
773 : static size_t
774 10314 : maybe_allocate_insert(
775 : struct CCC_Array_adaptive_map *const map,
776 : void const *const user_type,
777 : CCC_Allocator const *const allocator
778 : ) {
779 : /* The end sentinel node will always be at 0. This also means once
780 : initialized the internal size for implementer is always at least 1. */
781 10314 : size_t const node = allocate_slot(map, allocator);
782 10314 : if (!node) {
783 8 : return 0;
784 : }
785 10306 : (void)memcpy(data_at(map, node), user_type, map->sizeof_type);
786 10306 : insert(map, node);
787 10306 : return node;
788 10314 : }
789 :
/** @internal Pops a slot off the free list, lazily growing the backing
storage (or fixing up the nodes pointer for fixed storage on first use) when
no free slot exists. Returns the slot index or 0 on failure. */
static size_t
allocate_slot(
    struct CCC_Array_adaptive_map *const map,
    CCC_Allocator const *const allocator
) {
    /* The end sentinel node will always be at 0. This also means once
    initialized the internal size for implementer is always at least 1. */
    size_t const old_count = map->count;
    size_t old_cap = map->capacity;
    if (!old_count || old_count == old_cap) {
        /* Either uninitialized or full: the free list must be exhausted. */
        assert(!map->free_list);
        if (old_count == old_cap) {
            /* Double capacity with a floor of 8 slots. */
            if (resize(map, max(old_cap * 2, 8), allocator) != CCC_RESULT_OK) {
                return 0;
            }
        } else {
            /* Fixed storage first use: derive nodes pointer from data. */
            map->nodes = nodes_base_address(
                map->sizeof_type, map->data, map->capacity
            );
        }
        /* Slot 0 is the sentinel and must never join the free list. */
        old_cap = old_count ? old_cap : 1;
        size_t const new_cap = map->capacity;
        size_t prev = 0;
        /* Thread every new slot onto a list terminating in 0. */
        for (size_t i = new_cap - 1; i >= old_cap; prev = i, --i) {
            node_at(map, i)->next_free = prev;
        }
        map->free_list = prev;
        map->count = max(old_count, 1);
    }
    assert(map->free_list);
    ++map->count;
    size_t const slot = map->free_list;
    map->free_list = node_at(map, slot)->next_free;
    return slot;
}
825 :
/** @internal Moves the map to a fresh single allocation of new_capacity
slots, copying both SoA arrays and freeing the old block. The nodes pointer
is recomputed because user-data alignment shifts its offset. */
static CCC_Result
resize(
    struct CCC_Array_adaptive_map *const map,
    size_t const new_capacity,
    CCC_Allocator const *const allocator
) {
    if (!allocator->allocate) {
        return CCC_RESULT_NO_ALLOCATION_FUNCTION;
    }
    /* NULL input with a non-zero byte count requests a fresh allocation. */
    void *const new_data = allocator->allocate((CCC_Allocator_arguments){
        .input = NULL,
        .bytes = total_bytes(map->sizeof_type, new_capacity),
        .context = allocator->context,
    });
    if (!new_data) {
        return CCC_RESULT_ALLOCATOR_ERROR;
    }
    copy_soa(map, new_data, new_capacity);
    map->nodes = nodes_base_address(map->sizeof_type, new_data, new_capacity);
    /* 0 bytes requests a free of the old block; return value is ignored. */
    allocator->allocate((CCC_Allocator_arguments){
        .input = map->data,
        .bytes = 0,
        .context = allocator->context,
    });
    map->data = new_data;
    map->capacity = new_capacity;
    return CCC_RESULT_OK;
}
854 :
/** @internal Links slot n into the tree. The first real element becomes the
root directly; otherwise the closest existing element is splayed to the root
and n is connected above it. Duplicate keys are silently ignored. */
static void
insert(struct CCC_Array_adaptive_map *const map, size_t const n) {
    init_node(map, n);
    if (map->count == INSERT_ROOT_NODE_COUNT) {
        /* Count 2 means only the sentinel preceded us: n is the first root. */
        map->root = n;
        return;
    }
    void const *const key = key_at(map, n);
    map->root = splay(map, map->root, key);
    CCC_Order const root_order = order_nodes(map, key, map->root);
    if (CCC_ORDER_EQUAL == root_order) {
        /* No duplicates supported; leave the tree untouched. */
        return;
    }
    connect_new_root(map, n, root_order);
}
870 :
/** @internal Installs new_root above the current splayed root. The old root
becomes the child on the side opposite the ordering result, and it donates its
subtree on the ordering side to the new root. */
static void
connect_new_root(
    struct CCC_Array_adaptive_map *const map,
    size_t const new_root,
    CCC_Order const order_result
) {
    /* dir is R when the new key is greater than the old root, else L. */
    enum Branch const dir = CCC_ORDER_GREATER == order_result;
    link(map, new_root, dir, branch_index(map, map->root, dir));
    link(map, new_root, !dir, map->root);
    /* The old root gave away its dir subtree; clear that branch. */
    *branch_pointer(map, map->root, dir) = 0;
    map->root = new_root;
    *parent_pointer(map, map->root) = 0;
}
884 :
885 : static size_t
886 2343 : erase(struct CCC_Array_adaptive_map *const map, void const *const key) {
887 2343 : if (CCC_array_adaptive_map_is_empty(map)) {
888 1 : return 0;
889 : }
890 2342 : size_t const ret = splay(map, map->root, key);
891 2342 : CCC_Order const found = order_nodes(map, key, ret);
892 2342 : if (found != CCC_ORDER_EQUAL) {
893 2 : return 0;
894 : }
895 2340 : return remove_from_tree(map, ret);
896 2343 : }
897 :
/** @internal Unlinks ret, which must have been splayed to the top, from the
tree, pushes its slot onto the free list, and returns ret. With no left
subtree the right child becomes root; otherwise the predecessor is splayed to
the top of the left subtree (so its right branch is empty) and adopts ret's
right subtree. */
static size_t
remove_from_tree(struct CCC_Array_adaptive_map *const map, size_t const ret) {
    if (!branch_index(map, ret, L)) {
        map->root = branch_index(map, ret, R);
        *parent_pointer(map, map->root) = 0;
    } else {
        map->root = splay(map, branch_index(map, ret, L), key_at(map, ret));
        link(map, map->root, R, branch_index(map, ret, R));
    }
    /* Recycle the slot via the free list and shrink the internal count. */
    node_at(map, ret)->next_free = map->free_list;
    map->free_list = ret;
    --map->count;
    return ret;
}
912 :
913 : static size_t
914 20922 : find(struct CCC_Array_adaptive_map *const map, void const *const key) {
915 20922 : if (!map->root) {
916 76 : return 0;
917 : }
918 20846 : map->root = splay(map, map->root, key);
919 20846 : return order_nodes(map, key, map->root) == CCC_ORDER_EQUAL ? map->root : 0;
920 20922 : }
921 :
/** Adopts D. Sleator's technique for top-down splaying. Notable to this method
is the general improvement to the tree that occurs because we always splay the
key to the root, OR the next closest value to the key to the root. This has
interesting performance implications for real data sets.

This implementation has been modified to unite the left and right symmetries
and manage the parent pointers. Parent pointers are not usual for splay trees
but are necessary for a clean iteration API.

Returns the index of the new root: the node matching key, or its closest
neighbor when key is absent. Also records the new root in the map. */
static size_t
splay(
    struct CCC_Array_adaptive_map *const map, size_t root, void const *const key
) {
    assert(root);
    /* Splaying brings the key element up to the root. The zigzag fixes of
       splaying repair the tree and we remember the roots of these changes in
       this helper tree. At the end, make the root pick up these modified left
       and right helpers. The nil node (index 0) is zeroed to start. */
    struct CCC_Array_adaptive_map_node *const nil = node_at(map, 0);
    nil->branch[L] = nil->branch[R] = nil->parent = 0;
    size_t left_right_subtrees[LR] = {0, 0};
    for (;;) {
        CCC_Order const root_order = order_nodes(map, key, root);
        /* One comparison decides the branch, uniting both symmetric cases. */
        enum Branch const order_link = CCC_ORDER_GREATER == root_order;
        size_t const child = branch_index(map, root, order_link);
        /* Stop on an exact match or when descent runs off the tree. */
        if (CCC_ORDER_EQUAL == root_order || !child) {
            break;
        }
        CCC_Order const child_order
            = order_nodes(map, key, branch_index(map, root, order_link));
        enum Branch const child_order_link = CCC_ORDER_GREATER == child_order;
        /* A straight line has formed from root->child->grandchild. An
           opportunity to splay and heal the tree arises. */
        if (CCC_ORDER_EQUAL != child_order && order_link == child_order_link) {
            /* Rotate child above root, covering both symmetric straight-line
               cases with the same two link calls. */
            link(map, root, order_link, branch_index(map, child, !order_link));
            link(map, child, !order_link, root);
            root = child;
            if (!branch_index(map, root, order_link)) {
                break;
            }
        }
        /* Stash the current root on the opposite helper tree, then descend
           one level toward the key. */
        link(map, left_right_subtrees[!order_link], order_link, root);
        left_right_subtrees[!order_link] = root;
        root = branch_index(map, root, order_link);
    }
    /* Reassemble: hand the final root's subtrees to the helper trees, then
       pick the helper trees (recorded through the nil node) back up. */
    link(map, left_right_subtrees[L], R, branch_index(map, root, L));
    link(map, left_right_subtrees[R], L, branch_index(map, root, R));
    link(map, root, L, nil->branch[R]);
    link(map, root, R, nil->branch[L]);
    map->root = root;
    *parent_pointer(map, map->root) = 0;
    return root;
}
974 :
975 : /** Links the parent node to node starting at subtree root via direction dir.
976 : updates the parent of the child being picked up by the new parent as well. */
977 : static inline void
978 451990 : link(
979 : struct CCC_Array_adaptive_map *const map,
980 : size_t const parent,
981 : enum Branch const dir,
982 : size_t const subtree
983 : ) {
984 451990 : *branch_pointer(map, parent, dir) = subtree;
985 451990 : *parent_pointer(map, subtree) = parent;
986 451990 : }
987 :
988 : static size_t
989 15 : min_max_from(
990 : struct CCC_Array_adaptive_map const *const map,
991 : size_t start,
992 : enum Branch const dir
993 : ) {
994 15 : if (!start) {
995 1 : return 0;
996 : }
997 118 : for (; branch_index(map, start, dir);
998 104 : start = branch_index(map, start, dir)) {}
999 14 : return start;
1000 15 : }
1001 :
/** Returns the node following n in an in-order walk of the tree, where
traversal chooses the walk direction (R for ascending, L for descending —
TODO confirm against enum Branch declaration). Returns 0 when iteration is
exhausted. Parent pointers allow constant-space iteration with no stack. */
static size_t
next(
    struct CCC_Array_adaptive_map const *const map,
    size_t n,
    enum Branch const traversal
) {
    if (!n) {
        return 0;
    }
    /* The root must have the nil parent or the upward climb below breaks. */
    assert(!parent_index(map, map->root));
    /* The node is an internal one that has a sub-tree to explore first. */
    if (branch_index(map, n, traversal)) {
        /* The goal is to get far left/right ASAP in any traversal. */
        for (n = branch_index(map, n, traversal);
             branch_index(map, n, !traversal);
             n = branch_index(map, n, !traversal)) {}
        return n;
    }
    /* This is how to return internal nodes on the way back up from a leaf:
       climb while we are the !traversal child; the first ancestor holding us
       on its traversal side is next. Hitting the nil parent (0) ends it. */
    size_t p = parent_index(map, n);
    for (; p && branch_index(map, p, !traversal) != n;
         n = p, p = parent_index(map, p)) {}
    return p;
}
1026 :
/** Deletes all nodes in the tree by calling the destructor function on them in
linear time and constant space. This function modifies nodes as it deletes the
tree elements: left children are rotated above their parents until each node
has no left child, at which point it can be destroyed and the walk moves right.

This function does not update any count or capacity fields of the map, it
simply calls the destructor on each node and removes the nodes' references to
other tree elements. Assumes the destructor function is non-null. */
static void
delete_nodes(
    struct CCC_Array_adaptive_map *const map,
    CCC_Destructor const *const destructor
) {
    size_t node = map->root;
    while (node) {
        struct CCC_Array_adaptive_map_node *const e = node_at(map, node);
        if (e->branch[L]) {
            /* Rotate the left child above node: node adopts the left child's
               right subtree and becomes that child's right child. Repeats
               until node has no left subtree, so no stack is needed. */
            size_t const left = e->branch[L];
            e->branch[L] = node_at(map, left)->branch[R];
            node_at(map, left)->branch[R] = node;
            node = left;
            continue;
        }
        /* No left subtree remains: destroy node and continue rightward. */
        size_t const next = e->branch[R];
        e->branch[L] = e->branch[R] = 0;
        e->parent = 0;
        destructor->destroy((CCC_Arguments){
            .type = data_at(map, node),
            .context = destructor->context,
        });
        node = next;
    }
}
1059 :
1060 : static inline CCC_Order
1061 6797086 : order_nodes(
1062 : struct CCC_Array_adaptive_map const *const map,
1063 : void const *const key,
1064 : size_t const node
1065 : ) {
1066 27188344 : return map->comparator.compare((CCC_Key_comparator_arguments){
1067 6797086 : .key_left = key,
1068 6797086 : .type_right = data_at(map, node),
1069 6797086 : .context = map->comparator.context,
1070 : });
1071 : }
1072 :
1073 : static inline void
1074 10450 : init_node(struct CCC_Array_adaptive_map const *const map, size_t const node) {
1075 10450 : struct CCC_Array_adaptive_map_node *const e = node_at(map, node);
1076 10450 : e->branch[L] = e->branch[R] = e->parent = 0;
1077 10450 : }
1078 :
1079 : /** Calculates the number of bytes needed for user data INCLUDING any bytes we
1080 : need to add to the end of the array such that the following nodes array starts
1081 : on an aligned byte boundary given the alignment requirements of a node. This
1082 : means the value returned from this function may or may not be slightly larger
1083 : then the raw size of just user elements if rounding up must occur. */
1084 : static inline size_t
1085 258 : data_bytes(size_t const sizeof_type, size_t const capacity) {
1086 516 : return ((sizeof_type * capacity)
1087 258 : + alignof(*(struct CCC_Array_adaptive_map){}.nodes) - 1)
1088 258 : & ~(alignof(*(struct CCC_Array_adaptive_map){}.nodes) - 1);
1089 : }
1090 :
1091 : /** Calculates the number of bytes needed for the nodes array without any
1092 : consideration for end padding as no arrays follow. */
1093 : static inline size_t
1094 90 : nodes_bytes(size_t const capacity) {
1095 90 : return sizeof(*(struct CCC_Array_adaptive_map){}.nodes) * capacity;
1096 : }
1097 :
/** Calculates the number of bytes needed for all arrays in the Struct of
Arrays map design INCLUDING any padding bytes inserted between arrays. Padding
is needed when the alignment of the type in a following array differs from the
preceding array's type; each preceding array pads its own end so the next
array starts aligned. The result may therefore exceed the plain sum of
(sizeof(type) * capacity) over the conceptual struct's arrays. */
static inline size_t
total_bytes(size_t sizeof_type, size_t const capacity) {
    size_t const user_bytes = data_bytes(sizeof_type, capacity);
    size_t const tree_bytes = nodes_bytes(capacity);
    return user_bytes + tree_bytes;
}
1111 :
/** Returns the base of the node array relative to the data base pointer. This
position is guaranteed to be the first byte after the data array that is
aligned for the node type: the data array appends any padding necessary to
make that so. */
static inline struct CCC_Array_adaptive_map_node *
nodes_base_address(
    size_t const sizeof_type, void const *const data, size_t const capacity
) {
    size_t const offset = data_bytes(sizeof_type, capacity);
    char *const base = (char *)data;
    return (struct CCC_Array_adaptive_map_node *)(base + offset);
}
1125 :
1126 : /** Copies over the Struct of Arrays contained within the one contiguous
1127 : allocation of the map to the new memory provided. Assumes the new_data pointer
1128 : points to the base of an allocation that has been allocated with sufficient
1129 : bytes to support the user data, nodes, and parity arrays for the provided new
1130 : capacity. */
1131 : static inline void
1132 53 : copy_soa(
1133 : struct CCC_Array_adaptive_map const *const source,
1134 : void *const destination_data_base,
1135 : size_t const destination_capacity
1136 : ) {
1137 53 : if (!source->data) {
1138 17 : return;
1139 : }
1140 36 : assert(destination_capacity >= source->capacity);
1141 36 : size_t const sizeof_type = source->sizeof_type;
1142 : /* Each section of the allocation "grows" when we re-size so one copy would
1143 : not work. Instead each component is copied over allowing each to grow. */
1144 36 : (void)memcpy(
1145 36 : destination_data_base,
1146 36 : source->data,
1147 36 : data_bytes(sizeof_type, source->capacity)
1148 : );
1149 36 : (void)memcpy(
1150 36 : nodes_base_address(
1151 36 : sizeof_type, destination_data_base, destination_capacity
1152 : ),
1153 36 : nodes_base_address(sizeof_type, source->data, source->capacity),
1154 36 : nodes_bytes(source->capacity)
1155 : );
1156 89 : }
1157 :
/** Swaps the sizeof_type bytes at a and b through the caller provided temp
buffer, which must hold at least sizeof_type bytes. Equal pointers are a
no-op. */
static inline void
swap(void *const temp, void *const a, void *const b, size_t const sizeof_type) {
    if (a != b) {
        (void)memcpy(temp, a, sizeof_type);
        (void)memcpy(a, b, sizeof_type);
        (void)memcpy(b, temp, sizeof_type);
    }
}
1167 :
1168 : static inline struct CCC_Array_adaptive_map_node *
1169 30657758 : node_at(struct CCC_Array_adaptive_map const *const map, size_t const i) {
1170 30657758 : return &map->nodes[i];
1171 : }
1172 :
1173 : static inline void *
1174 13321788 : data_at(struct CCC_Array_adaptive_map const *const map, size_t const i) {
1175 13321788 : return (char *)map->data + (i * map->sizeof_type);
1176 : }
1177 :
1178 : static inline size_t
1179 21693037 : branch_index(
1180 : struct CCC_Array_adaptive_map const *const map,
1181 : size_t const parent,
1182 : enum Branch const dir
1183 : ) {
1184 21693037 : return node_at(map, parent)->branch[dir];
1185 : }
1186 :
1187 : static inline size_t
1188 3510571 : parent_index(
1189 : struct CCC_Array_adaptive_map const *const map, size_t const child
1190 : ) {
1191 3510571 : return node_at(map, child)->parent;
1192 : }
1193 :
1194 : static inline size_t *
1195 462381 : branch_pointer(
1196 : struct CCC_Array_adaptive_map const *const map,
1197 : size_t const node,
1198 : enum Branch const branch
1199 : ) {
1200 462381 : return &node_at(map, node)->branch[branch];
1201 : }
1202 :
1203 : static inline size_t *
1204 498380 : parent_pointer(
1205 : struct CCC_Array_adaptive_map const *const map, size_t const node
1206 : ) {
1207 498380 : return &node_at(map, node)->parent;
1208 : }
1209 :
1210 : static inline void *
1211 6489372 : key_at(struct CCC_Array_adaptive_map const *const map, size_t const i) {
1212 6489372 : return (char *)data_at(map, i) + map->key_offset;
1213 : }
1214 :
1215 : static void *
1216 8116 : key_in_slot(
1217 : struct CCC_Array_adaptive_map const *map, void const *const user_struct
1218 : ) {
1219 8116 : return (char *)user_struct + map->key_offset;
1220 : }
1221 :
/** Returns the larger of the two size arguments. */
static inline size_t
max(size_t const a, size_t const b) {
    if (a < b) {
        return b;
    }
    return a;
}
1226 :
1227 : /*=========================== Validation ===============================*/
1228 :
1229 : /* NOLINTBEGIN(*misc-no-recursion) */
1230 :
/** @internal Bounds used while recursively validating the binary search tree
ordering invariant. An index of 0 means that bound is open (no constraint). */
struct Tree_range {
    size_t low;  /* Node whose key every node under root must order after. */
    size_t root; /* Subtree root currently being validated. */
    size_t high; /* Node whose key every node under root must order before. */
};
1237 :
1238 : static size_t
1239 7014564 : recursive_count(
1240 : struct CCC_Array_adaptive_map const *const map, size_t const r
1241 : ) {
1242 7014564 : if (!r) {
1243 3512220 : return 0;
1244 : }
1245 7004688 : return 1 + recursive_count(map, branch_index(map, r, R))
1246 3502344 : + recursive_count(map, branch_index(map, r, L));
1247 7014564 : }
1248 :
/** Recursively validates the binary search tree ordering invariant: every
node's key must order strictly between the keys of the bounding nodes in r.
A bound index of 0 means that side is unconstrained. Returns CCC_TRUE when
the subtree rooted at r.root is ordered correctly. */
static CCC_Tribool
are_subtrees_valid(
    struct CCC_Array_adaptive_map const *map, struct Tree_range const r
) {
    if (!r.root) {
        return CCC_TRUE;
    }
    /* The low bound's key must order strictly before the root's key. */
    if (r.low
        && order_nodes(map, key_at(map, r.low), r.root) != CCC_ORDER_LESSER) {
        return CCC_FALSE;
    }
    /* The high bound's key must order strictly after the root's key. */
    if (r.high
        && order_nodes(map, key_at(map, r.high), r.root) != CCC_ORDER_GREATER) {
        return CCC_FALSE;
    }
    /* Recurse left with root as the new high bound, and right with root as
       the new low bound. */
    return are_subtrees_valid(
               map,
               (struct Tree_range){
                   .low = r.low,
                   .root = branch_index(map, r.root, L),
                   .high = r.root,
               }
           )
        && are_subtrees_valid(
               map,
               (struct Tree_range){
                   .low = r.root,
                   .root = branch_index(map, r.root, R),
                   .high = r.high,
               }
           );
}
1281 :
1282 : static CCC_Tribool
1283 7014564 : is_storing_parent(
1284 : struct CCC_Array_adaptive_map const *const map,
1285 : size_t const p,
1286 : size_t const root
1287 : ) {
1288 7014564 : if (!root) {
1289 3512220 : return CCC_TRUE;
1290 : }
1291 3502344 : if (parent_index(map, root) != p) {
1292 0 : return CCC_FALSE;
1293 : }
1294 7004688 : return is_storing_parent(map, root, branch_index(map, root, L))
1295 3502344 : && is_storing_parent(map, root, branch_index(map, root, R));
1296 7014564 : }
1297 :
1298 : static CCC_Tribool
1299 9876 : is_free_list_valid(struct CCC_Array_adaptive_map const *const map) {
1300 9876 : if (!map->count) {
1301 0 : return CCC_TRUE;
1302 : }
1303 9876 : size_t cur_free_index = map->free_list;
1304 9876 : size_t list_count = 0;
1305 4426088 : while (cur_free_index && list_count < map->capacity) {
1306 4416212 : cur_free_index = node_at(map, cur_free_index)->next_free;
1307 4416212 : ++list_count;
1308 : }
1309 9876 : if (cur_free_index) {
1310 0 : return CCC_FALSE;
1311 : }
1312 9876 : if (list_count + map->count != map->capacity) {
1313 0 : return CCC_FALSE;
1314 : }
1315 9876 : return CCC_TRUE;
1316 9876 : }
1317 :
1318 : static CCC_Tribool
1319 9884 : validate(struct CCC_Array_adaptive_map const *const map) {
1320 9884 : if (!map->count) {
1321 8 : return CCC_TRUE;
1322 : }
1323 9876 : if (!are_subtrees_valid(map, (struct Tree_range){.root = map->root})) {
1324 0 : return CCC_FALSE;
1325 : }
1326 9876 : size_t const size = recursive_count(map, map->root);
1327 9876 : if (size && size != map->count - 1) {
1328 0 : return CCC_FALSE;
1329 : }
1330 9876 : if (!is_storing_parent(map, 0, map->root)) {
1331 0 : return CCC_FALSE;
1332 : }
1333 9876 : if (!is_free_list_valid(map)) {
1334 0 : return CCC_FALSE;
1335 : }
1336 9876 : return CCC_TRUE;
1337 9884 : }
1338 :
1339 : /* NOLINTEND(*misc-no-recursion) */
|