ONE - On-device Neural Engine
Loading...
Searching...
No Matches
flexbuffers.h
Go to the documentation of this file.
1/*
2 * Copyright (c) 2023 Samsung Electronics Co., Ltd. All Rights Reserved
3 * Copyright 2017 Google Inc. All rights reserved.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 */
17
18#ifndef FLATBUFFERS_FLEXBUFFERS_H_
19#define FLATBUFFERS_FLEXBUFFERS_H_
20
21#include <map>
22// Used to select STL variant.
23#include "flatbuffers/base.h"
24// We use the basic binary writing functions from the regular FlatBuffers.
25#include "flatbuffers/util.h"
26
27#ifdef _MSC_VER
28#include <intrin.h>
29#endif
30
31#if defined(_MSC_VER)
32#pragma warning(push)
33#pragma warning(disable : 4127) // C4127: conditional expression is constant
34#endif
35
36namespace flexbuffers
37{
38
39class Reference;
40class Map;
41
42// These are used in the lower 2 bits of a type field to determine the size of
43// the elements (and or size field) of the item pointed to (e.g. vector).
51
52// These are used as the upper 6 bits of a type field to indicate the actual
53// type.
54enum Type
55{
60 // Types above stored inline, types below store an offset.
67 FBT_VECTOR = 10, // Untyped.
68 FBT_VECTOR_INT = 11, // Typed any size (stores no type table).
72 // DEPRECATED, use FBT_VECTOR or FBT_VECTOR_KEY instead.
73 // Read test.cpp/FlexBuffersDeprecatedTest() for details on why.
75 FBT_VECTOR_INT2 = 16, // Typed tuple (no type table, no size field).
78 FBT_VECTOR_INT3 = 19, // Typed triple (no type table, no size field).
81 FBT_VECTOR_INT4 = 22, // Typed quad (no type table, no size field).
86 FBT_VECTOR_BOOL = 36, // To Allow the same type of conversion of type to vector type
87};
88
89inline bool IsInline(Type t) { return t <= FBT_FLOAT || t == FBT_BOOL; }
90
92{
93 return (t >= FBT_INT && t <= FBT_STRING) || t == FBT_BOOL;
94}
95
96inline bool IsTypedVector(Type t)
97{
99}
100
101inline bool IsFixedTypedVector(Type t) { return t >= FBT_VECTOR_INT2 && t <= FBT_VECTOR_FLOAT4; }
102
// Converts an element type to the corresponding typed-vector type.
// fixed_len 0 selects the variable-length typed vector; 2/3/4 select the
// fixed-size tuple/triple/quad variants. Returns FBT_NULL for any other
// fixed_len.
// NOTE(review): upstream flexbuffers asserts IsTypedVectorElementType(t)
// here; that line is not visible in this listing — confirm against upstream.
inline Type ToTypedVector(Type t, size_t fixed_len = 0)
{
  switch (fixed_len)
  {
    case 0:
      // e.g. FBT_INT -> FBT_VECTOR_INT; relies on the enum layouts matching.
      return static_cast<Type>(t - FBT_INT + FBT_VECTOR_INT);
    case 2:
      return static_cast<Type>(t - FBT_INT + FBT_VECTOR_INT2);
    case 3:
      return static_cast<Type>(t - FBT_INT + FBT_VECTOR_INT3);
    case 4:
      return static_cast<Type>(t - FBT_INT + FBT_VECTOR_INT4);
    default:
      // Unsupported fixed length.
      return FBT_NULL;
  }
}
121
123{
125 return static_cast<Type>(t - FBT_VECTOR_INT + FBT_INT);
126}
127
129{
131 auto fixed_type = t - FBT_VECTOR_INT2;
132 *len = static_cast<uint8_t>(fixed_type / 3 + 2); // 3 types each, starting from length 2.
133 return static_cast<Type>(fixed_type % 3 + FBT_INT);
134}
135
136// TODO: implement proper support for 8/16bit floats, or decide not to
137// support them.
138typedef int16_t half;
139typedef int8_t quarter;
140
141// TODO: can we do this without conditionals using intrinsics or inline asm
142// on some platforms? Given branch prediction the method below should be
143// decently quick, but it is the most frequently executed function.
144// We could do an (unaligned) 64-bit read if we ifdef out the platforms for
145// which that doesn't work (or where we'd read into un-owned memory).
146template <typename R, typename T1, typename T2, typename T4, typename T8>
147R ReadSizedScalar(const uint8_t *data, uint8_t byte_width)
148{
149 return byte_width < 4 ? (byte_width < 2 ? static_cast<R>(flatbuffers::ReadScalar<T1>(data))
150 : static_cast<R>(flatbuffers::ReadScalar<T2>(data)))
151 : (byte_width < 8 ? static_cast<R>(flatbuffers::ReadScalar<T4>(data))
152 : static_cast<R>(flatbuffers::ReadScalar<T8>(data)));
153}
154
// Reads a signed scalar of `byte_width` bytes, sign-extended to int64_t.
inline int64_t ReadInt64(const uint8_t *data, uint8_t byte_width)
{
  return ReadSizedScalar<int64_t, int8_t, int16_t, int32_t, int64_t>(data, byte_width);
}
159
// Reads an unsigned scalar of `byte_width` bytes, zero-extended to uint64_t.
inline uint64_t ReadUInt64(const uint8_t *data, uint8_t byte_width)
{
#if defined(_MSC_VER) && ((defined(_M_X64) && !defined(_M_ARM64EC)) || defined _M_IX86)
  // MSVC x86/x64 fast path: copy `byte_width` bytes into a zeroed uint64_t
  // with the movsb intrinsic, then fix endianness — avoids the branchy
  // width dispatch of ReadSizedScalar.
  uint64_t u = 0;
  __movsb(reinterpret_cast<uint8_t *>(&u), reinterpret_cast<const uint8_t *>(data), byte_width);
  return flatbuffers::EndianScalar(u);
#else
  return ReadSizedScalar<uint64_t, uint8_t, uint16_t, uint32_t, uint64_t>(data, byte_width);
#endif
  // clang-format on
}
171
// Reads a float of `byte_width` bytes widened to double. The 1- and 2-byte
// cases use the placeholder integer typedefs `quarter`/`half` above, so real
// 8/16-bit float support is still the TODO noted there.
inline double ReadDouble(const uint8_t *data, uint8_t byte_width)
{
  return ReadSizedScalar<double, quarter, half, float, double>(data, byte_width);
}
176
// Follows an offset stored at `offset` in `byte_width` bytes. Flexbuffer
// offsets point backwards in the buffer, hence the subtraction.
inline const uint8_t *Indirect(const uint8_t *offset, uint8_t byte_width)
{
  return offset - ReadUInt64(offset, byte_width);
}
181
// Same as Indirect() above, but with the offset width fixed at compile time
// by the storage type T.
template <typename T> const uint8_t *Indirect(const uint8_t *offset)
{
  return offset - flatbuffers::ReadScalar<T>(offset);
}
186
187inline BitWidth WidthU(uint64_t u)
188{
189#define FLATBUFFERS_GET_FIELD_BIT_WIDTH(value, width) \
190 { \
191 if (!((u) & ~((1ULL << (width)) - 1ULL))) \
192 return BIT_WIDTH_##width; \
193 }
197#undef FLATBUFFERS_GET_FIELD_BIT_WIDTH
198 return BIT_WIDTH_64;
199}
200
// Returns the smallest bit width that can represent the signed value `i`.
inline BitWidth WidthI(int64_t i)
{
  // Fold the sign into the low bit (zigzag-style) so WidthU can be reused:
  // negative values are complemented so small magnitudes stay small.
  auto u = static_cast<uint64_t>(i) << 1;
  return WidthU(i >= 0 ? u : ~u);
}
206
// A double needs 64 bits unless it round-trips through float losslessly.
inline BitWidth WidthF(double f)
{
  return static_cast<double>(static_cast<float>(f)) == f ? BIT_WIDTH_32 : BIT_WIDTH_64;
}
211
212// Base class of all types below.
213// Points into the data buffer and allows access to one type.
215{
216public:
217 Object(const uint8_t *data, uint8_t byte_width) : data_(data), byte_width_(byte_width) {}
218
219protected:
220 const uint8_t *data_;
221 uint8_t byte_width_;
222};
223
// Object that has a size, obtained either from size prefix, or elsewhere.
class Sized : public Object
{
public:
  // Size prefix.
  Sized(const uint8_t *data, uint8_t byte_width) : Object(data, byte_width), size_(read_size()) {}
  // Manual size.
  Sized(const uint8_t *data, uint8_t byte_width, size_t sz) : Object(data, byte_width), size_(sz) {}
  size_t size() const { return size_; }
  // Access size stored in `byte_width_` bytes before data_ pointer.
  size_t read_size() const
  {
    return static_cast<size_t>(ReadUInt64(data_ - byte_width_, byte_width_));
  }

protected:
  size_t size_; // Element count (or byte count, depending on derived type).
};
242
243class String : public Sized
244{
245public:
246 // Size prefix.
247 String(const uint8_t *data, uint8_t byte_width) : Sized(data, byte_width) {}
248 // Manual size.
249 String(const uint8_t *data, uint8_t byte_width, size_t sz) : Sized(data, byte_width, sz) {}
250
251 size_t length() const { return size(); }
252 const char *c_str() const { return reinterpret_cast<const char *>(data_); }
253 std::string str() const { return std::string(c_str(), size()); }
254
256 {
257 static const char *empty_string = "";
258 return String(reinterpret_cast<const uint8_t *>(empty_string), 1, 0);
259 }
260 bool IsTheEmptyString() const { return data_ == EmptyString().data_; }
261};
262
263class Blob : public Sized
264{
265public:
266 Blob(const uint8_t *data_buf, uint8_t byte_width) : Sized(data_buf, byte_width) {}
267
269 {
270 static const uint8_t empty_blob[] = {0 /*len*/};
271 return Blob(empty_blob + 1, 1);
272 }
273 bool IsTheEmptyBlob() const { return data_ == EmptyBlob().data_; }
274 const uint8_t *data() const { return data_; }
275};
276
277class Vector : public Sized
278{
279public:
280 Vector(const uint8_t *data, uint8_t byte_width) : Sized(data, byte_width) {}
281
282 Reference operator[](size_t i) const;
283
285 {
286 static const uint8_t empty_vector[] = {0 /*len*/};
287 return Vector(empty_vector + 1, 1);
288 }
289 bool IsTheEmptyVector() const { return data_ == EmptyVector().data_; }
290};
291
292class TypedVector : public Sized
293{
294public:
295 TypedVector(const uint8_t *data, uint8_t byte_width, Type element_type)
296 : Sized(data, byte_width), type_(element_type)
297 {
298 }
299
300 Reference operator[](size_t i) const;
301
303 {
304 static const uint8_t empty_typed_vector[] = {0 /*len*/};
305 return TypedVector(empty_typed_vector + 1, 1, FBT_INT);
306 }
308
309 Type ElementType() { return type_; }
310
311 friend Reference;
312
313private:
314 Type type_;
315
316 friend Map;
317};
318
320{
321public:
322 FixedTypedVector(const uint8_t *data, uint8_t byte_width, Type element_type, uint8_t len)
323 : Object(data, byte_width), type_(element_type), len_(len)
324 {
325 }
326
327 Reference operator[](size_t i) const;
328
330 {
331 static const uint8_t fixed_empty_vector[] = {0 /* unused */};
332 return FixedTypedVector(fixed_empty_vector, 1, FBT_INT, 0);
333 }
338
339 Type ElementType() { return type_; }
340 uint8_t size() { return len_; }
341
342private:
343 Type type_;
344 uint8_t len_;
345};
346
347class Map : public Vector
348{
349public:
350 Map(const uint8_t *data, uint8_t byte_width) : Vector(data, byte_width) {}
351
352 Reference operator[](const char *key) const;
353 Reference operator[](const std::string &key) const;
354
355 Vector Values() const { return Vector(data_, byte_width_); }
356
358 {
359 const size_t num_prefixed_fields = 3;
360 auto keys_offset = data_ - byte_width_ * num_prefixed_fields;
361 return TypedVector(Indirect(keys_offset, byte_width_),
362 static_cast<uint8_t>(ReadUInt64(keys_offset + byte_width_, byte_width_)),
363 FBT_KEY);
364 }
365
366 static Map EmptyMap()
367 {
368 static const uint8_t empty_map[] = {
369 0 /*keys_len*/, 0 /*keys_offset*/, 1 /*keys_width*/, 0 /*len*/
370 };
371 return Map(empty_map + 4, 1);
372 }
373
374 bool IsTheEmptyMap() const { return data_ == EmptyMap().data_; }
375};
376
// Appends a "[ a, b, c ]"-style rendering of any indexable container `v`
// (whose elements provide ToString(bool, bool, std::string&)) to `s`.
// `keys_quoted` is forwarded to each element's ToString.
template <typename T> void AppendToString(std::string &s, T &&v, bool keys_quoted)
{
  s += "[ ";
  const size_t count = v.size();
  for (size_t idx = 0; idx < count; ++idx)
  {
    if (idx != 0)
      s += ", ";
    v[idx].ToString(true, keys_quoted, s);
  }
  s += " ]";
}
388
390{
391public:
  // Default: a null reference pointing at no data.
  Reference() : data_(nullptr), parent_width_(0), byte_width_(BIT_WIDTH_8), type_(FBT_NULL) {}

  Reference(const uint8_t *data, uint8_t parent_width, uint8_t byte_width, Type type)
    : data_(data), parent_width_(parent_width), byte_width_(byte_width), type_(type)
  {
  }

  // `packed_type`: lower 2 bits are the byte width as a log2 (BitWidth),
  // upper 6 bits are the Type.
  Reference(const uint8_t *data, uint8_t parent_width, uint8_t packed_type)
    : data_(data), parent_width_(parent_width)
  {
    byte_width_ = 1U << static_cast<BitWidth>(packed_type & 3);
    type_ = static_cast<Type>(packed_type >> 2);
  }

  Type GetType() const { return type_; }

  // Type predicates; the Indirect* variants count as their base type.
  bool IsNull() const { return type_ == FBT_NULL; }
  bool IsBool() const { return type_ == FBT_BOOL; }
  bool IsInt() const { return type_ == FBT_INT || type_ == FBT_INDIRECT_INT; }
  bool IsUInt() const { return type_ == FBT_UINT || type_ == FBT_INDIRECT_UINT; }
  bool IsIntOrUint() const { return IsInt() || IsUInt(); }
  bool IsFloat() const { return type_ == FBT_FLOAT || type_ == FBT_INDIRECT_FLOAT; }
  bool IsNumeric() const { return IsIntOrUint() || IsFloat(); }
  bool IsString() const { return type_ == FBT_STRING; }
  bool IsKey() const { return type_ == FBT_KEY; }
  bool IsVector() const { return type_ == FBT_VECTOR || type_ == FBT_MAP; }
  bool IsUntypedVector() const { return type_ == FBT_VECTOR; }
  bool IsTypedVector() const { return flexbuffers::IsTypedVector(type_); }
  // NOTE(review): the IsFixedTypedVector() declaration is not visible in
  // this listing but is used below — confirm it exists upstream.
  bool IsAnyVector() const { return (IsTypedVector() || IsFixedTypedVector() || IsVector()); }
  bool IsMap() const { return type_ == FBT_MAP; }
  bool IsBlob() const { return type_ == FBT_BLOB; }
  // Bools are read directly; any other type goes through the uint conversion.
  bool AsBool() const
  {
    return (type_ == FBT_BOOL ? ReadUInt64(data_, parent_width_) : AsUInt64()) != 0;
  }
428
429 // Reads any type as a int64_t. Never fails, does most sensible conversion.
430 // Truncates floats, strings are attempted to be parsed for a number,
431 // vectors/maps return their size. Returns 0 if all else fails.
432 int64_t AsInt64() const
433 {
434 if (type_ == FBT_INT)
435 {
436 // A fast path for the common case.
437 return ReadInt64(data_, parent_width_);
438 }
439 else
440 switch (type_)
441 {
442 case FBT_INDIRECT_INT:
443 return ReadInt64(Indirect(), byte_width_);
444 case FBT_UINT:
445 return ReadUInt64(data_, parent_width_);
447 return ReadUInt64(Indirect(), byte_width_);
448 case FBT_FLOAT:
449 return static_cast<int64_t>(ReadDouble(data_, parent_width_));
451 return static_cast<int64_t>(ReadDouble(Indirect(), byte_width_));
452 case FBT_NULL:
453 return 0;
454 case FBT_STRING:
455 return flatbuffers::StringToInt(AsString().c_str());
456 case FBT_VECTOR:
457 return static_cast<int64_t>(AsVector().size());
458 case FBT_BOOL:
459 return ReadInt64(data_, parent_width_);
460 default:
461 // Convert other things to int.
462 return 0;
463 }
464 }
465
  // TODO: could specialize these to not use AsInt64() if that saves
  // extension ops in generated code, and use a faster op than ReadInt64.
  // Narrowing signed accessors; all funnel through AsInt64() and truncate.
  int32_t AsInt32() const { return static_cast<int32_t>(AsInt64()); }
  int16_t AsInt16() const { return static_cast<int16_t>(AsInt64()); }
  int8_t AsInt8() const { return static_cast<int8_t>(AsInt64()); }
471
472 uint64_t AsUInt64() const
473 {
474 if (type_ == FBT_UINT)
475 {
476 // A fast path for the common case.
477 return ReadUInt64(data_, parent_width_);
478 }
479 else
480 switch (type_)
481 {
483 return ReadUInt64(Indirect(), byte_width_);
484 case FBT_INT:
485 return ReadInt64(data_, parent_width_);
486 case FBT_INDIRECT_INT:
487 return ReadInt64(Indirect(), byte_width_);
488 case FBT_FLOAT:
489 return static_cast<uint64_t>(ReadDouble(data_, parent_width_));
491 return static_cast<uint64_t>(ReadDouble(Indirect(), byte_width_));
492 case FBT_NULL:
493 return 0;
494 case FBT_STRING:
495 return flatbuffers::StringToUInt(AsString().c_str());
496 case FBT_VECTOR:
497 return static_cast<uint64_t>(AsVector().size());
498 case FBT_BOOL:
499 return ReadUInt64(data_, parent_width_);
500 default:
501 // Convert other things to uint.
502 return 0;
503 }
504 }
505
  // Narrowing unsigned accessors; all funnel through AsUInt64() and truncate.
  uint32_t AsUInt32() const { return static_cast<uint32_t>(AsUInt64()); }
  uint16_t AsUInt16() const { return static_cast<uint16_t>(AsUInt64()); }
  uint8_t AsUInt8() const { return static_cast<uint8_t>(AsUInt64()); }
509
510 double AsDouble() const
511 {
512 if (type_ == FBT_FLOAT)
513 {
514 // A fast path for the common case.
515 return ReadDouble(data_, parent_width_);
516 }
517 else
518 switch (type_)
519 {
521 return ReadDouble(Indirect(), byte_width_);
522 case FBT_INT:
523 return static_cast<double>(ReadInt64(data_, parent_width_));
524 case FBT_UINT:
525 return static_cast<double>(ReadUInt64(data_, parent_width_));
526 case FBT_INDIRECT_INT:
527 return static_cast<double>(ReadInt64(Indirect(), byte_width_));
529 return static_cast<double>(ReadUInt64(Indirect(), byte_width_));
530 case FBT_NULL:
531 return 0.0;
532 case FBT_STRING:
533 {
534 double d;
536 return d;
537 }
538 case FBT_VECTOR:
539 return static_cast<double>(AsVector().size());
540 case FBT_BOOL:
541 return static_cast<double>(ReadUInt64(data_, parent_width_));
542 default:
543 // Convert strings and other things to float.
544 return 0;
545 }
546 }
547
  // Narrowing float accessor; funnels through AsDouble().
  float AsFloat() const { return static_cast<float>(AsDouble()); }

  // Returns the key's C string, or "" if this is not a key or string.
  const char *AsKey() const
  {
    if (type_ == FBT_KEY || type_ == FBT_STRING)
    {
      return reinterpret_cast<const char *>(Indirect());
    }
    else
    {
      return "";
    }
  }
561
562 // This function returns the empty string if you try to read something that
563 // is not a string or key.
565 {
566 if (type_ == FBT_STRING)
567 {
568 return String(Indirect(), byte_width_);
569 }
570 else if (type_ == FBT_KEY)
571 {
572 auto key = Indirect();
573 return String(key, byte_width_, strlen(reinterpret_cast<const char *>(key)));
574 }
575 else
576 {
577 return String::EmptyString();
578 }
579 }
580
  // Unlike AsString(), this will convert any type to a std::string.
  // Delegates to the three-argument overload with unquoted top-level output.
  std::string ToString() const
  {
    std::string s;
    ToString(false, false, s);
    return s;
  }
588
589 // Convert any type to a JSON-like string. strings_quoted determines if
590 // string values at the top level receive "" quotes (inside other values
591 // they always do). keys_quoted determines if keys are quoted, at any level.
592 // TODO(wvo): add further options to have indentation/newlines.
593 void ToString(bool strings_quoted, bool keys_quoted, std::string &s) const
594 {
595 if (type_ == FBT_STRING)
596 {
597 String str(Indirect(), byte_width_);
598 if (strings_quoted)
599 {
600 flatbuffers::EscapeString(str.c_str(), str.length(), &s, true, false);
601 }
602 else
603 {
604 s.append(str.c_str(), str.length());
605 }
606 }
607 else if (IsKey())
608 {
609 auto str = AsKey();
610 if (keys_quoted)
611 {
612 flatbuffers::EscapeString(str, strlen(str), &s, true, false);
613 }
614 else
615 {
616 s += str;
617 }
618 }
619 else if (IsInt())
620 {
622 }
623 else if (IsUInt())
624 {
626 }
627 else if (IsFloat())
628 {
630 }
631 else if (IsNull())
632 {
633 s += "null";
634 }
635 else if (IsBool())
636 {
637 s += AsBool() ? "true" : "false";
638 }
639 else if (IsMap())
640 {
641 s += "{ ";
642 auto m = AsMap();
643 auto keys = m.Keys();
644 auto vals = m.Values();
645 for (size_t i = 0; i < keys.size(); i++)
646 {
647 keys[i].ToString(true, keys_quoted, s);
648 s += ": ";
649 vals[i].ToString(true, keys_quoted, s);
650 if (i < keys.size() - 1)
651 s += ", ";
652 }
653 s += " }";
654 }
655 else if (IsVector())
656 {
657 AppendToString<Vector>(s, AsVector(), keys_quoted);
658 }
659 else if (IsTypedVector())
660 {
661 AppendToString<TypedVector>(s, AsTypedVector(), keys_quoted);
662 }
663 else if (IsFixedTypedVector())
664 {
665 AppendToString<FixedTypedVector>(s, AsFixedTypedVector(), keys_quoted);
666 }
667 else if (IsBlob())
668 {
669 auto blob = AsBlob();
670 flatbuffers::EscapeString(reinterpret_cast<const char *>(blob.data()), blob.size(), &s, true,
671 false);
672 }
673 else
674 {
675 s += "(?)";
676 }
677 }
678
  // This function returns the empty blob if you try to read a not-blob.
  // Strings can be viewed as blobs too.
  Blob AsBlob() const
  {
    if (type_ == FBT_BLOB || type_ == FBT_STRING)
    {
      return Blob(Indirect(), byte_width_);
    }
    else
    {
      // Not a blob: empty sentinel rather than an error.
      return Blob::EmptyBlob();
    }
  }
692
693 // This function returns the empty vector if you try to read a not-vector.
694 // Maps can be viewed as vectors too.
696 {
697 if (type_ == FBT_VECTOR || type_ == FBT_MAP)
698 {
699 return Vector(Indirect(), byte_width_);
700 }
701 else
702 {
703 return Vector::EmptyVector();
704 }
705 }
706
708 {
709 if (IsTypedVector())
710 {
711 auto tv = TypedVector(Indirect(), byte_width_, ToTypedVectorElementType(type_));
712 if (tv.type_ == FBT_STRING)
713 {
714 // These can't be accessed as strings, since we don't know the bit-width
715 // of the size field, see the declaration of
716 // FBT_VECTOR_STRING_DEPRECATED above for details.
717 // We change the type here to be keys, which are a subtype of strings,
718 // and will ignore the size field. This will truncate strings with
719 // embedded nulls.
720 tv.type_ = FBT_KEY;
721 }
722 return tv;
723 }
724 else
725 {
727 }
728 }
729
731 {
732 if (IsFixedTypedVector())
733 {
734 uint8_t len = 0;
735 auto vtype = ToFixedTypedVectorElementType(type_, &len);
736 return FixedTypedVector(Indirect(), byte_width_, vtype, len);
737 }
738 else
739 {
741 }
742 }
743
  // Returns the empty map if this reference is not a map.
  Map AsMap() const
  {
    if (type_ == FBT_MAP)
    {
      return Map(Indirect(), byte_width_);
    }
    else
    {
      return Map::EmptyMap();
    }
  }
755
756 template <typename T> T As() const;
757
  // Experimental: Mutation functions.
  // These allow scalars in an already created buffer to be updated in-place.
  // Since by default scalars are stored in the smallest possible space,
  // the new value may not fit, in which case these functions return false.
  // To avoid this, you can construct the values you intend to mutate using
  // Builder::ForceMinimumBitWidth.
  bool MutateInt(int64_t i)
  {
    if (type_ == FBT_INT)
    {
      // Inline value: lives at data_, sized by the parent's element width.
      return Mutate(data_, i, parent_width_, WidthI(i));
    }
    else if (type_ == FBT_INDIRECT_INT)
    {
      // Indirect value: lives at the offset target, sized by byte_width_.
      return Mutate(Indirect(), i, byte_width_, WidthI(i));
    }
    else if (type_ == FBT_UINT)
    {
      // Stored as unsigned: reinterpret and use the unsigned width rule.
      auto u = static_cast<uint64_t>(i);
      return Mutate(data_, u, parent_width_, WidthU(u));
    }
    else if (type_ == FBT_INDIRECT_UINT)
    {
      auto u = static_cast<uint64_t>(i);
      return Mutate(Indirect(), u, byte_width_, WidthU(u));
    }
    else
    {
      // Not an integer slot.
      return false;
    }
  }
789
  // Bools always fit: they are written with 8-bit value width.
  bool MutateBool(bool b)
  {
    return type_ == FBT_BOOL && Mutate(data_, b, parent_width_, BIT_WIDTH_8);
  }
794
  // Same structure as MutateInt, with the signed/unsigned roles swapped.
  bool MutateUInt(uint64_t u)
  {
    if (type_ == FBT_UINT)
    {
      return Mutate(data_, u, parent_width_, WidthU(u));
    }
    else if (type_ == FBT_INDIRECT_UINT)
    {
      return Mutate(Indirect(), u, byte_width_, WidthU(u));
    }
    else if (type_ == FBT_INT)
    {
      // Stored as signed: reinterpret and use the signed width rule.
      auto i = static_cast<int64_t>(u);
      return Mutate(data_, i, parent_width_, WidthI(i));
    }
    else if (type_ == FBT_INDIRECT_INT)
    {
      auto i = static_cast<int64_t>(u);
      return Mutate(Indirect(), i, byte_width_, WidthI(i));
    }
    else
    {
      return false;
    }
  }
820
  // Mutates a float value; a float always has 32-bit value width.
  bool MutateFloat(float f)
  {
    if (type_ == FBT_FLOAT)
    {
      return MutateF(data_, f, parent_width_, BIT_WIDTH_32);
    }
    else if (type_ == FBT_INDIRECT_FLOAT)
    {
      return MutateF(Indirect(), f, byte_width_, BIT_WIDTH_32);
    }
    else
    {
      return false;
    }
  }

  // Mutates a double; required width depends on whether it round-trips
  // through float (see WidthF).
  bool MutateFloat(double d)
  {
    if (type_ == FBT_FLOAT)
    {
      return MutateF(data_, d, parent_width_, WidthF(d));
    }
    else if (type_ == FBT_INDIRECT_FLOAT)
    {
      return MutateF(Indirect(), d, byte_width_, WidthF(d));
    }
    else
    {
      return false;
    }
  }
852
  // Overwrites an existing string in place. Only succeeds when this is a
  // string and the new length exactly matches the old one.
  bool MutateString(const char *str, size_t len)
  {
    auto s = AsString();
    if (s.IsTheEmptyString())
      return false;
    // This is very strict, could allow shorter strings, but that creates
    // garbage.
    if (s.length() != len)
      return false;
    memcpy(const_cast<char *>(s.c_str()), str, len);
    return true;
  }
  bool MutateString(const char *str) { return MutateString(str, strlen(str)); }
  bool MutateString(const std::string &str) { return MutateString(str.data(), str.length()); }
867
private:
  // Follows this reference's own backward offset using the parent's width.
  const uint8_t *Indirect() const { return flexbuffers::Indirect(data_, parent_width_); }

  // Writes `t` in place if its required width (2^value_width bytes) fits in
  // the existing `byte_width` slot; returns whether the write happened.
  template <typename T>
  bool Mutate(const uint8_t *dest, T t, size_t byte_width, BitWidth value_width)
  {
    auto fits = static_cast<size_t>(static_cast<size_t>(1U) << value_width) <= byte_width;
    if (fits)
    {
      t = flatbuffers::EndianScalar(t);
      memcpy(const_cast<uint8_t *>(dest), &t, byte_width);
    }
    return fits;
  }

  // Float variant of Mutate: dispatches on the stored slot size so the
  // bytes written match the stored representation (float vs double).
  template <typename T>
  bool MutateF(const uint8_t *dest, T t, size_t byte_width, BitWidth value_width)
  {
    if (byte_width == sizeof(double))
      return Mutate(dest, static_cast<double>(t), byte_width, value_width);
    if (byte_width == sizeof(float))
      return Mutate(dest, static_cast<float>(t), byte_width, value_width);
    FLATBUFFERS_ASSERT(false); // Unsupported float storage width.
    return false;
  }

  const uint8_t *data_;  // Points at the value (or its offset) in the buffer.
  uint8_t parent_width_; // Byte width of the parent container's elements.
  uint8_t byte_width_;   // Byte width of the value this reference points at.
  Type type_;
};
899
// Template specialization for As().
// Each specialization forwards to the matching named accessor above.
template <> inline bool Reference::As<bool>() const { return AsBool(); }

template <> inline int8_t Reference::As<int8_t>() const { return AsInt8(); }
template <> inline int16_t Reference::As<int16_t>() const { return AsInt16(); }
template <> inline int32_t Reference::As<int32_t>() const { return AsInt32(); }
template <> inline int64_t Reference::As<int64_t>() const { return AsInt64(); }

template <> inline uint8_t Reference::As<uint8_t>() const { return AsUInt8(); }
template <> inline uint16_t Reference::As<uint16_t>() const { return AsUInt16(); }
template <> inline uint32_t Reference::As<uint32_t>() const { return AsUInt32(); }
template <> inline uint64_t Reference::As<uint64_t>() const { return AsUInt64(); }

template <> inline double Reference::As<double>() const { return AsDouble(); }
template <> inline float Reference::As<float>() const { return AsFloat(); }

template <> inline String Reference::As<String>() const { return AsString(); }
template <> inline std::string Reference::As<std::string>() const { return AsString().str(); }

template <> inline Blob Reference::As<Blob>() const { return AsBlob(); }
template <> inline Vector Reference::As<Vector>() const { return AsVector(); }
template <> inline TypedVector Reference::As<TypedVector>() const { return AsTypedVector(); }
template <> inline FixedTypedVector Reference::As<FixedTypedVector>() const
{
  return AsFixedTypedVector();
}
template <> inline Map Reference::As<Map>() const { return AsMap(); }
927
// Packs a (width, type) pair into one byte: width (as log2) in the low
// 2 bits, type in the upper 6. Inverse of the Reference(packed_type) ctor.
inline uint8_t PackedType(BitWidth bit_width, Type type)
{
  return static_cast<uint8_t>(bit_width | (type << 2));
}

inline uint8_t NullPackedType() { return PackedType(BIT_WIDTH_8, FBT_NULL); }
934
// Vector accessors.
// Note: if you try to access outside of bounds, you get a Null value back
// instead. Normally this would be an assert, but since this is "dynamically
// typed" data, you may not want that (someone sends you a 2d vector and you
// wanted 3d).
// The Null converts seamlessly into a default value for any other type.
// TODO(wvo): Could introduce an #ifdef that makes this into an assert?
inline Reference Vector::operator[](size_t i) const
{
  auto len = size();
  if (i >= len)
    return Reference(nullptr, 1, NullPackedType());
  // Per-element packed type bytes are stored contiguously after the
  // element data (one byte per element).
  auto packed_type = (data_ + len * byte_width_)[i];
  auto elem = data_ + i * byte_width_;
  return Reference(elem, byte_width_, packed_type);
}
951
inline Reference TypedVector::operator[](size_t i) const
{
  auto len = size();
  if (i >= len)
    return Reference(nullptr, 1, NullPackedType());
  // All elements share type_, so there is no per-element packed type byte.
  auto elem = data_ + i * byte_width_;
  return Reference(elem, byte_width_, 1, type_);
}
960
962{
963 if (i >= len_)
964 return Reference(nullptr, 1, NullPackedType());
965 auto elem = data_ + i * byte_width_;
966 return Reference(elem, byte_width_, 1, type_);
967}
968
// bsearch comparator for Map lookup: `elem` points at a key offset of
// width sizeof(T) inside the keys vector; follow it and compare strings.
template <typename T> int KeyCompare(const void *key, const void *elem)
{
  auto str_elem =
    reinterpret_cast<const char *>(Indirect<T>(reinterpret_cast<const uint8_t *>(elem)));
  auto skey = reinterpret_cast<const char *>(key);
  return strcmp(skey, str_elem);
}
976
// Looks up `key` by binary search over the (sorted) keys vector; returns a
// Null reference when the key is absent.
inline Reference Map::operator[](const char *key) const
{
  auto keys = Keys();
  // We can't pass keys.byte_width_ to the comparison function, so we have
  // to pick the right one ahead of time.
  int (*comp)(const void *, const void *) = nullptr;
  switch (keys.byte_width_)
  {
    case 1:
      comp = KeyCompare<uint8_t>;
      break;
    case 2:
      comp = KeyCompare<uint16_t>;
      break;
    case 4:
      comp = KeyCompare<uint32_t>;
      break;
    case 8:
      comp = KeyCompare<uint64_t>;
      break;
  }
  auto res = std::bsearch(key, keys.data_, keys.size(), keys.byte_width_, comp);
  if (!res)
    return Reference(nullptr, 1, NullPackedType());
  // Index of the matching key equals the index of its value.
  auto i = (reinterpret_cast<uint8_t *>(res) - keys.data_) / keys.byte_width_;
  return (*static_cast<const Vector *>(this))[i];
}
1004
// Convenience overload forwarding to the C-string lookup above.
inline Reference Map::operator[](const std::string &key) const { return (*this)[key.c_str()]; }
1006
// Reconstructs the root reference from the end of the buffer: the last byte
// is the root's byte width, the byte before it the packed type, and the root
// value sits `byte_width` bytes before those.
inline Reference GetRoot(const uint8_t *buffer, size_t size)
{
  // See Finish() below for the serialization counterpart of this.
  // The root starts at the end of the buffer, so we parse backwards from there.
  auto end = buffer + size;
  auto byte_width = *--end;
  auto packed_type = *--end;
  end -= byte_width; // The root data item.
  return Reference(end, byte_width, packed_type);
}
1017
// Convenience overload for a std::vector buffer.
inline Reference GetRoot(const std::vector<uint8_t> &buffer)
{
  return GetRoot(flatbuffers::vector_data(buffer), buffer.size());
}
1022
1023// Flags that configure how the Builder behaves.
1024// The "Share" flags determine if the Builder automatically tries to pool
1025// this type. Pooling can reduce the size of serialized data if there are
1026// multiple maps of the same kind, at the expense of slightly slower
1027// serialization (the cost of lookups) and more memory use (std::set).
1028// By default this is on for keys, but off for strings.
1029// Turn keys off if you have e.g. only one map.
1030// Turn strings on if you expect many non-unique string values.
1031// Additionally, sharing key vectors can save space if you have maps with
1032// identical field populations.
1042
1044{
1045public:
  // `initial_size` pre-sizes the internal buffer; `flags` select the
  // optional key/string pooling used by Key()/String() (keys shared by
  // default).
  Builder(size_t initial_size = 256, BuilderFlag flags = BUILDER_FLAG_SHARE_KEYS)
    : buf_(initial_size), finished_(false), has_duplicate_keys_(false), flags_(flags),
      force_min_bit_width_(BIT_WIDTH_8), key_pool(KeyOffsetCompare(buf_)),
      string_pool(StringOffsetCompare(buf_))
  {
    // The sized ctor allocated capacity; clear() drops the size back to 0
    // while keeping that capacity.
    buf_.clear();
  }
1053
1054#ifdef FLATBUFFERS_DEFAULT_DECLARATION
1055 Builder(Builder &&) = default;
1056 Builder &operator=(Builder &&) = default;
1057#endif
1058
  // Access to the serialized buffer. Calls Finished() first (its
  // implementation is not visible in this listing — presumably it checks
  // that the root value has been finished; confirm upstream).
  const std::vector<uint8_t> &GetBuffer() const
  {
    Finished();
    return buf_;
  }

  // Size of the buffer. Does not include unfinished values.
  size_t GetSize() const { return buf_.size(); }
1069
  // Reset all state so we can re-use the buffer.
  // NOTE(review): has_duplicate_keys_ is not reset here, unlike in the
  // constructor — confirm that is intentional.
  void Clear()
  {
    buf_.clear();
    stack_.clear();
    finished_ = false;
    // flags_ remains as-is;
    force_min_bit_width_ = BIT_WIDTH_8;
    key_pool.clear();
    string_pool.clear();
  }
1081
  // All value constructing functions below have two versions: one that
  // takes a key (for placement inside a map) and one that doesn't (for inside
  // vectors and elsewhere). The keyed variants push the key first, then the
  // value.

  void Null() { stack_.push_back(Value()); }
  void Null(const char *key)
  {
    Key(key);
    Null();
  }

  // Scalars record the minimum bit width needed for their value.
  void Int(int64_t i) { stack_.push_back(Value(i, FBT_INT, WidthI(i))); }
  void Int(const char *key, int64_t i)
  {
    Key(key);
    Int(i);
  }

  void UInt(uint64_t u) { stack_.push_back(Value(u, FBT_UINT, WidthU(u))); }
  void UInt(const char *key, uint64_t u)
  {
    Key(key);
    UInt(u);
  }

  void Float(float f) { stack_.push_back(Value(f)); }
  void Float(const char *key, float f)
  {
    Key(key);
    Float(f);
  }

  void Double(double f) { stack_.push_back(Value(f)); }
  void Double(const char *key, double d)
  {
    Key(key);
    Double(d);
  }

  void Bool(bool b) { stack_.push_back(Value(b)); }
  void Bool(const char *key, bool b)
  {
    Key(key);
    Bool(b);
  }
1127
  // Indirect variants: the scalar is written out-of-line via PushIndirect
  // and referenced by offset instead of being stored inline.
  void IndirectInt(int64_t i) { PushIndirect(i, FBT_INDIRECT_INT, WidthI(i)); }
  void IndirectInt(const char *key, int64_t i)
  {
    Key(key);
    IndirectInt(i);
  }

  void IndirectUInt(uint64_t u) { PushIndirect(u, FBT_INDIRECT_UINT, WidthU(u)); }
  void IndirectUInt(const char *key, uint64_t u)
  {
    Key(key);
    IndirectUInt(u);
  }

  // A float is always stored with 32-bit width.
  void IndirectFloat(float f) { PushIndirect(f, FBT_INDIRECT_FLOAT, BIT_WIDTH_32); }
  void IndirectFloat(const char *key, float f)
  {
    Key(key);
    IndirectFloat(f);
  }

  // A double may narrow to 32 bits when it round-trips via float (WidthF).
  void IndirectDouble(double f) { PushIndirect(f, FBT_INDIRECT_FLOAT, WidthF(f)); }
  void IndirectDouble(const char *key, double d)
  {
    Key(key);
    IndirectDouble(d);
  }
1155
  // Writes a map key (NUL-terminated string) into the buffer and pushes a
  // key value on the stack. With BUILDER_FLAG_SHARE_KEYS, a key identical to
  // one already serialized is removed again and the existing offset reused.
  // Returns the buffer offset of the key bytes.
  size_t Key(const char *str, size_t len)
  {
    auto sloc = buf_.size();
    WriteBytes(str, len + 1); // +1: include the terminating NUL.
    if (flags_ & BUILDER_FLAG_SHARE_KEYS)
    {
      // key_pool compares by the string content at each offset, so this
      // lookup finds any previously serialized key equal to the one just
      // written at sloc.
      auto it = key_pool.find(sloc);
      if (it != key_pool.end())
      {
        // Already in the buffer. Remove key we just serialized, and use
        // existing offset instead.
        buf_.resize(sloc);
        sloc = *it;
      }
      else
      {
        key_pool.insert(sloc);
      }
    }
    stack_.push_back(Value(static_cast<uint64_t>(sloc), FBT_KEY, BIT_WIDTH_8));
    return sloc;
  }
1178
1179 size_t Key(const char *str) { return Key(str, strlen(str)); }
1180 size_t Key(const std::string &str) { return Key(str.c_str(), str.size()); }
1181
  // Writes a string (length prefix + bytes + NUL) and pushes it. With
  // BUILDER_FLAG_SHARE_STRINGS, identical strings are de-duplicated.
  // Returns the buffer offset of the string data.
  size_t String(const char *str, size_t len)
  {
    auto reset_to = buf_.size();
    auto sloc = CreateBlob(str, len, 1, FBT_STRING); // trailing 1 = NUL byte.
    if (flags_ & BUILDER_FLAG_SHARE_STRINGS)
    {
      StringOffset so(sloc, len);
      auto it = string_pool.find(so);
      if (it != string_pool.end())
      {
        // Already in the buffer. Remove string we just serialized, and use
        // existing offset instead.
        buf_.resize(reset_to);
        sloc = it->first;
        // Re-point the value CreateBlob pushed at the shared copy.
        stack_.back().u_ = sloc;
      }
      else
      {
        string_pool.insert(so);
      }
    }
    return sloc;
  }
1205 size_t String(const char *str) { return String(str, strlen(str)); }
1206 size_t String(const std::string &str) { return String(str.c_str(), str.size()); }
1207 void String(const flexbuffers::String &str) { String(str.c_str(), str.length()); }
1208
  // Keyed variants for use inside a map: write the key, then the string.
  void String(const char *key, const char *str)
  {
    Key(key);
    String(str);
  }
  void String(const char *key, const std::string &str)
  {
    Key(key);
    String(str);
  }
  void String(const char *key, const flexbuffers::String &str)
  {
    Key(key);
    String(str);
  }
1224
  // Writes a raw byte blob (length-prefixed, no trailing NUL) and pushes it.
  // Returns the buffer offset of the blob data.
  size_t Blob(const void *data, size_t len) { return CreateBlob(data, len, 0, FBT_BLOB); }
  size_t Blob(const std::vector<uint8_t> &v)
  {
    return CreateBlob(flatbuffers::vector_data(v), v.size(), 0, FBT_BLOB);
  }
1230
1231 // TODO(wvo): support all the FlexBuffer types (like flexbuffers::String),
1232 // e.g. Vector etc. Also in overloaded versions.
1233 // Also some FlatBuffers types?
1234
  // Marks the start of a vector/map: returns the current stack depth, to be
  // passed to the matching EndVector/EndMap. The keyed forms also write the
  // key first (for nesting inside a map).
  size_t StartVector() { return stack_.size(); }
  size_t StartVector(const char *key)
  {
    Key(key);
    return stack_.size();
  }
  size_t StartMap() { return stack_.size(); }
  size_t StartMap(const char *key)
  {
    Key(key);
    return stack_.size();
  }
1247
  // TODO(wvo): allow this to specify an alignment greater than the natural
  // alignment.
  // Closes a vector opened with StartVector: serializes all values pushed
  // since `start`, pops them, and pushes the single resulting vector value.
  // Returns the buffer offset of the vector.
  size_t EndVector(size_t start, bool typed, bool fixed)
  {
    auto vec = CreateVector(start, stack_.size() - start, 1, typed, fixed);
    // Remove temp elements and return vector.
    stack_.resize(start);
    stack_.push_back(vec);
    return static_cast<size_t>(vec.u_);
  }
1258
  // Closes a map opened with StartMap: expects interleaved key/value pairs
  // on the stack since `start`, sorts them by key for binary-search lookup,
  // serializes a keys vector plus a values vector, and pushes the resulting
  // map value. Returns the buffer offset of the map.
  size_t EndMap(size_t start)
  {
    // We should have interleaved keys and values on the stack.
    // Make sure it is an even number:
    auto len = stack_.size() - start;
    FLATBUFFERS_ASSERT(!(len & 1));
    len /= 2;
    // Make sure keys are all strings:
    for (auto key = start; key < stack_.size(); key += 2)
    {
      FLATBUFFERS_ASSERT(stack_[key].type_ == FBT_KEY);
    }
    // Now sort values, so later we can do a binary search lookup.
    // We want to sort 2 array elements at a time.
    struct TwoValue
    {
      Value key;
      Value val;
    };
    // TODO(wvo): strict aliasing?
    // TODO(wvo): allow the caller to indicate the data is already sorted
    // for maximum efficiency? With an assert to check sortedness to make sure
    // we're not breaking binary search.
    // Or, we can track if the map is sorted as keys are added which would
    // be quite cheap (cheaper than checking it here), so we can skip this
    // step automatically when applicable, and encourage people to write in
    // sorted fashion.
    // std::sort is typically already a lot faster on sorted data though.
    auto dict = reinterpret_cast<TwoValue *>(flatbuffers::vector_data(stack_) + start);
    std::sort(dict, dict + len, [&](const TwoValue &a, const TwoValue &b) -> bool {
      // Keys are offsets into buf_; compare the strings they point at.
      auto as = reinterpret_cast<const char *>(flatbuffers::vector_data(buf_) + a.key.u_);
      auto bs = reinterpret_cast<const char *>(flatbuffers::vector_data(buf_) + b.key.u_);
      auto comp = strcmp(as, bs);
      // We want to disallow duplicate keys, since this results in a
      // map where values cannot be found.
      // But we can't assert here (since we don't want to fail on
      // random JSON input) or have an error mechanism.
      // Instead, we set has_duplicate_keys_ in the builder to
      // signal this.
      // TODO: Have to check for pointer equality, as some sort
      // implementation apparently call this function with the same
      // element?? Why?
      if (!comp && &a != &b)
        has_duplicate_keys_ = true;
      return comp < 0;
    });
    // First create a vector out of all keys.
    // TODO(wvo): if kBuilderFlagShareKeyVectors is true, see if we can share
    // the first vector.
    auto keys = CreateVector(start, len, 2, true, false);
    auto vec = CreateVector(start + 1, len, 2, false, false, &keys);
    // Remove temp elements and return map.
    stack_.resize(start);
    stack_.push_back(vec);
    return static_cast<size_t>(vec.u_);
  }
1315
  // Call this after EndMap to see if the map had any duplicate keys.
  // Any map with such keys won't be able to retrieve all values.
  bool HasDuplicateKeys() const { return has_duplicate_keys_; }
1319
  // Builds an untyped vector from whatever the functor f pushes; the
  // key/state variants support map nesting and stateful callables.
  // Returns the buffer offset of the vector.
  template <typename F> size_t Vector(F f)
  {
    auto start = StartVector();
    f();
    return EndVector(start, false, false);
  }
  template <typename F, typename T> size_t Vector(F f, T &state)
  {
    auto start = StartVector();
    f(state);
    return EndVector(start, false, false);
  }
  template <typename F> size_t Vector(const char *key, F f)
  {
    auto start = StartVector(key);
    f();
    return EndVector(start, false, false);
  }
  template <typename F, typename T> size_t Vector(const char *key, F f, T &state)
  {
    auto start = StartVector(key);
    f(state);
    return EndVector(start, false, false);
  }
1344
1345 template <typename T> void Vector(const T *elems, size_t len)
1346 {
1348 {
1349 // This path should be a lot quicker and use less space.
1350 ScalarVector(elems, len, false);
1351 }
1352 else
1353 {
1354 auto start = StartVector();
1355 for (size_t i = 0; i < len; i++)
1356 Add(elems[i]);
1357 EndVector(start, false, false);
1358 }
1359 }
  // Keyed and std::vector convenience wrappers for the C-array overload.
  template <typename T> void Vector(const char *key, const T *elems, size_t len)
  {
    Key(key);
    Vector(elems, len);
  }
  template <typename T> void Vector(const std::vector<T> &vec)
  {
    Vector(flatbuffers::vector_data(vec), vec.size());
  }
1369
  // Like Vector(F), but produces a typed vector: all elements pushed by f
  // must be the same type (asserted in CreateVector), and no per-element
  // type bytes are stored.
  template <typename F> size_t TypedVector(F f)
  {
    auto start = StartVector();
    f();
    return EndVector(start, true, false);
  }
  template <typename F, typename T> size_t TypedVector(F f, T &state)
  {
    auto start = StartVector();
    f(state);
    return EndVector(start, true, false);
  }
  template <typename F> size_t TypedVector(const char *key, F f)
  {
    auto start = StartVector(key);
    f();
    return EndVector(start, true, false);
  }
  template <typename F, typename T> size_t TypedVector(const char *key, F f, T &state)
  {
    auto start = StartVector(key);
    f(state);
    return EndVector(start, true, false);
  }
1394
  // Fixed-size typed vector: length (2..4) is encoded in the type, so no
  // size field is written. Scalar element types only.
  template <typename T> size_t FixedTypedVector(const T *elems, size_t len)
  {
    // We only support a few fixed vector lengths. Anything bigger use a
    // regular typed vector.
    FLATBUFFERS_ASSERT(len >= 2 && len <= 4);
    // And only scalar values.
    static_assert(flatbuffers::is_scalar<T>::value, "Unrelated types");
    return ScalarVector(elems, len, true);
  }

  template <typename T> size_t FixedTypedVector(const char *key, const T *elems, size_t len)
  {
    Key(key);
    return FixedTypedVector(elems, len);
  }
1410
  // Builds a map from the key/value pairs the functor f pushes (or from a
  // std::map). Returns the buffer offset of the map.
  template <typename F> size_t Map(F f)
  {
    auto start = StartMap();
    f();
    return EndMap(start);
  }
  template <typename F, typename T> size_t Map(F f, T &state)
  {
    auto start = StartMap();
    f(state);
    return EndMap(start);
  }
  template <typename F> size_t Map(const char *key, F f)
  {
    auto start = StartMap(key);
    f();
    return EndMap(start);
  }
  template <typename F, typename T> size_t Map(const char *key, F f, T &state)
  {
    auto start = StartMap(key);
    f(state);
    return EndMap(start);
  }
  template <typename T> void Map(const std::map<std::string, T> &map)
  {
    auto start = StartMap();
    for (auto it = map.begin(); it != map.end(); ++it)
      Add(it->first.c_str(), it->second);
    EndMap(start);
  }
1442
  // If you wish to share a value explicitly (a value not shared automatically
  // through one of the BUILDER_FLAG_SHARE_* flags) you can do so with these
  // functions. Or if you wish to turn those flags off for performance reasons
  // and still do some explicit sharing. For example:
  // builder.IndirectDouble(M_PI);
  // auto id = builder.LastValue(); // Remember where we stored it.
  // .. more code goes here ..
  // builder.ReuseValue(id); // Refers to same double by offset.
  // LastValue works regardless of whether the value has a key or not.
  // Works on any data type.
  struct Value;
  Value LastValue() { return stack_.back(); }
  void ReuseValue(Value v) { stack_.push_back(v); }
  void ReuseValue(const char *key, Value v)
  {
    Key(key);
    ReuseValue(v);
  }
1461
  // Overloaded Add that tries to call the correct function above.
  // Narrow integer types widen to the 64-bit Int/UInt entry points.
  void Add(int8_t i) { Int(i); }
  void Add(int16_t i) { Int(i); }
  void Add(int32_t i) { Int(i); }
  void Add(int64_t i) { Int(i); }
  void Add(uint8_t u) { UInt(u); }
  void Add(uint16_t u) { UInt(u); }
  void Add(uint32_t u) { UInt(u); }
  void Add(uint64_t u) { UInt(u); }
  void Add(float f) { Float(f); }
  void Add(double d) { Double(d); }
  void Add(bool b) { Bool(b); }
  void Add(const char *str) { String(str); }
  void Add(const std::string &str) { String(str); }
  void Add(const flexbuffers::String &str) { String(str); }

  template <typename T> void Add(const std::vector<T> &vec) { Vector(vec); }

  // Keyed generic Add: writes the key, then dispatches on T.
  template <typename T> void Add(const char *key, const T &t)
  {
    Key(key);
    Add(t);
  }

  template <typename T> void Add(const std::map<std::string, T> &map) { Map(map); }

  // Streaming-style alias for Add().
  template <typename T> void operator+=(const T &t) { Add(t); }
1489
  // This function is useful in combination with the Mutate* functions above.
  // It forces elements of vectors and maps to have a minimum size, such that
  // they can later be updated without failing.
  // Call with no arguments to reset.
  void ForceMinimumBitWidth(BitWidth bw = BIT_WIDTH_8) { force_min_bit_width_ = bw; }
1495
  // Finalizes the buffer: serializes the single root value, then appends
  // the root's packed type byte and the root byte width. Must be called
  // exactly once, after which the buffer may be read.
  void Finish()
  {
    // If you hit this assert, you likely have objects that were never included
    // in a parent. You need to have exactly one root to finish a buffer.
    // Check your Start/End calls are matched, and all objects are inside
    // some other object.
    FLATBUFFERS_ASSERT(stack_.size() == 1);

    // Write root value.
    auto byte_width = Align(stack_[0].ElemWidth(buf_.size(), 0));
    WriteAny(stack_[0], byte_width);
    // Write root type.
    Write(stack_[0].StoredPackedType(), 1);
    // Write root size. Normally determined by parent, but root has no parent :)
    Write(byte_width, 1);

    finished_ = true;
  }
1514
1515private:
  // Asserts that Finish() has been called; guards buffer accessors.
  void Finished() const
  {
    // If you get this assert, you're attempting to get access a buffer
    // which hasn't been finished yet. Be sure to call
    // Builder::Finish with your root object.
    FLATBUFFERS_ASSERT(finished_);
  }
1523
1524 // Align to prepare for writing a scalar with a certain size.
1525 uint8_t Align(BitWidth alignment)
1526 {
1527 auto byte_width = 1U << alignment;
1528 buf_.insert(buf_.end(), flatbuffers::PaddingBytes(buf_.size(), byte_width), 0);
1529 return static_cast<uint8_t>(byte_width);
1530 }
1531
1532 void WriteBytes(const void *val, size_t size)
1533 {
1534 buf_.insert(buf_.end(), reinterpret_cast<const uint8_t *>(val),
1535 reinterpret_cast<const uint8_t *>(val) + size);
1536 }
1537
  // Writes the low byte_width bytes of val (endian-normalized first).
  // byte_width must not exceed sizeof(T).
  template <typename T> void Write(T val, size_t byte_width)
  {
    FLATBUFFERS_ASSERT(sizeof(T) >= byte_width);
    val = flatbuffers::EndianScalar(val);
    WriteBytes(&val, byte_width);
  }
1544
1545 void WriteDouble(double f, uint8_t byte_width)
1546 {
1547 switch (byte_width)
1548 {
1549 case 8:
1550 Write(f, byte_width);
1551 break;
1552 case 4:
1553 Write(static_cast<float>(f), byte_width);
1554 break;
1555 // case 2: Write(static_cast<half>(f), byte_width); break;
1556 // case 1: Write(static_cast<quarter>(f), byte_width); break;
1557 default:
1559 }
1560 }
1561
  // Converts the absolute buffer offset o to an offset relative to the
  // current write position and writes it; asserts it fits in byte_width.
  void WriteOffset(uint64_t o, uint8_t byte_width)
  {
    auto reloff = buf_.size() - o;
    FLATBUFFERS_ASSERT(byte_width == 8 || reloff < 1ULL << (byte_width * 8));
    Write(reloff, byte_width);
  }
1568
  // Writes val out-of-line at the given bit width and pushes a value that
  // records its absolute offset (converted to relative when serialized).
  template <typename T> void PushIndirect(T val, Type type, BitWidth bit_width)
  {
    auto byte_width = Align(bit_width);
    auto iloc = buf_.size();
    Write(val, byte_width);
    stack_.push_back(Value(static_cast<uint64_t>(iloc), type, bit_width));
  }
1576
1577 static BitWidth WidthB(size_t byte_width)
1578 {
1579 switch (byte_width)
1580 {
1581 case 1:
1582 return BIT_WIDTH_8;
1583 case 2:
1584 return BIT_WIDTH_16;
1585 case 4:
1586 return BIT_WIDTH_32;
1587 case 8:
1588 return BIT_WIDTH_64;
1589 default:
1590 FLATBUFFERS_ASSERT(false);
1591 return BIT_WIDTH_64;
1592 }
1593 }
1594
1595 template <typename T> static Type GetScalarType()
1596 {
1597 static_assert(flatbuffers::is_scalar<T>::value, "Unrelated types");
1599 ? FBT_FLOAT
1601 ? FBT_BOOL
1603 }
1604
1605public:
1606 // This was really intended to be private, except for LastValue/ReuseValue.
1607 struct Value
1608 {
1609 union {
1610 int64_t i_;
1611 uint64_t u_;
1612 double f_;
1613 };
1614
1616
1617 // For scalars: of itself, for vector: of its elements, for string: length.
1619
1620 Value() : i_(0), type_(FBT_NULL), min_bit_width_(BIT_WIDTH_8) {}
1621
1622 Value(bool b) : u_(static_cast<uint64_t>(b)), type_(FBT_BOOL), min_bit_width_(BIT_WIDTH_8) {}
1623
1624 Value(int64_t i, Type t, BitWidth bw) : i_(i), type_(t), min_bit_width_(bw) {}
1625 Value(uint64_t u, Type t, BitWidth bw) : u_(u), type_(t), min_bit_width_(bw) {}
1626
1627 Value(float f) : f_(static_cast<double>(f)), type_(FBT_FLOAT), min_bit_width_(BIT_WIDTH_32) {}
1628 Value(double f) : f_(f), type_(FBT_FLOAT), min_bit_width_(WidthF(f)) {}
1629
1630 uint8_t StoredPackedType(BitWidth parent_bit_width_ = BIT_WIDTH_8) const
1631 {
1632 return PackedType(StoredWidth(parent_bit_width_), type_);
1633 }
1634
1635 BitWidth ElemWidth(size_t buf_size, size_t elem_index) const
1636 {
1637 if (IsInline(type_))
1638 {
1639 return min_bit_width_;
1640 }
1641 else
1642 {
1643 // We have an absolute offset, but want to store a relative offset
1644 // elem_index elements beyond the current buffer end. Since whether
1645 // the relative offset fits in a certain byte_width depends on
1646 // the size of the elements before it (and their alignment), we have
1647 // to test for each size in turn.
1648 for (size_t byte_width = 1; byte_width <= sizeof(flatbuffers::largest_scalar_t);
1649 byte_width *= 2)
1650 {
1651 // Where are we going to write this offset?
1652 auto offset_loc =
1653 buf_size + flatbuffers::PaddingBytes(buf_size, byte_width) + elem_index * byte_width;
1654 // Compute relative offset.
1655 auto offset = offset_loc - u_;
1656 // Does it fit?
1657 auto bit_width = WidthU(offset);
1658 if (static_cast<size_t>(static_cast<size_t>(1U) << bit_width) == byte_width)
1659 return bit_width;
1660 }
1661 FLATBUFFERS_ASSERT(false); // Must match one of the sizes above.
1662 return BIT_WIDTH_64;
1663 }
1664 }
1665
1666 BitWidth StoredWidth(BitWidth parent_bit_width_ = BIT_WIDTH_8) const
1667 {
1668 if (IsInline(type_))
1669 {
1670 return (std::max)(min_bit_width_, parent_bit_width_);
1671 }
1672 else
1673 {
1674 return min_bit_width_;
1675 }
1676 }
1677 };
1678
1679private:
  // Serializes a single value at the given width: inline scalars are
  // written directly, everything else as a relative offset.
  void WriteAny(const Value &val, uint8_t byte_width)
  {
    switch (val.type_)
    {
      case FBT_NULL:
      case FBT_INT:
        Write(val.i_, byte_width);
        break;
      case FBT_BOOL:
      case FBT_UINT:
        Write(val.u_, byte_width);
        break;
      case FBT_FLOAT:
        WriteDouble(val.f_, byte_width);
        break;
      default:
        WriteOffset(val.u_, byte_width);
        break;
    }
  }
1700
  // Writes a length-prefixed byte run (plus `trailing` extra bytes, e.g. a
  // NUL for strings), pushes a value of the given type pointing at the data,
  // and returns its buffer offset.
  size_t CreateBlob(const void *data, size_t len, size_t trailing, Type type)
  {
    auto bit_width = WidthU(len);
    auto byte_width = Align(bit_width);
    Write<uint64_t>(len, byte_width);
    auto sloc = buf_.size(); // Data starts after the length prefix.
    WriteBytes(data, len + trailing);
    stack_.push_back(Value(static_cast<uint64_t>(sloc), type, bit_width));
    return sloc;
  }
1711
  // Fast path for vectors of a single scalar type: writes elements directly
  // at sizeof(T) width, with a size prefix unless `fixed`. Pushes the
  // resulting typed-vector value and returns its buffer offset.
  template <typename T> size_t ScalarVector(const T *elems, size_t len, bool fixed)
  {
    auto vector_type = GetScalarType<T>();
    auto byte_width = sizeof(T);
    auto bit_width = WidthB(byte_width);
    // If you get this assert, you're trying to write a vector with a size
    // field that is bigger than the scalars you're trying to write (e.g. a
    // byte vector > 255 elements). For such types, write a "blob" instead.
    // TODO: instead of asserting, could write vector with larger elements
    // instead, though that would be wasteful.
    FLATBUFFERS_ASSERT(WidthU(len) <= bit_width);
    Align(bit_width);
    if (!fixed)
      Write<uint64_t>(len, byte_width);
    auto vloc = buf_.size();
    for (size_t i = 0; i < len; i++)
      Write(elems[i], byte_width);
    stack_.push_back(
      Value(static_cast<uint64_t>(vloc), ToTypedVector(vector_type, fixed ? len : 0), bit_width));
    return vloc;
  }
1733
  // Core vector serializer used by EndVector/EndMap. Serializes every
  // `step`-th stack element from `start` at a common byte width, preceded
  // (for maps) by an offset to and the byte width of the keys vector, and
  // (unless fixed) a length field; untyped vectors get trailing per-element
  // type bytes. Returns a Value describing the new vector/map.
  Value CreateVector(size_t start, size_t vec_len, size_t step, bool typed, bool fixed,
                     const Value *keys = nullptr)
  {
    FLATBUFFERS_ASSERT(!fixed || typed); // typed=false, fixed=true combination is not supported.
    // Figure out smallest bit width we can store this vector with.
    auto bit_width = (std::max)(force_min_bit_width_, WidthU(vec_len));
    auto prefix_elems = 1;
    if (keys)
    {
      // If this vector is part of a map, we will pre-fix an offset to the keys
      // to this vector.
      bit_width = (std::max)(bit_width, keys->ElemWidth(buf_.size(), 0));
      prefix_elems += 2;
    }
    Type vector_type = FBT_KEY;
    // Check bit widths and types for all elements.
    for (size_t i = start; i < stack_.size(); i += step)
    {
      auto elem_width = stack_[i].ElemWidth(buf_.size(), i - start + prefix_elems);
      bit_width = (std::max)(bit_width, elem_width);
      if (typed)
      {
        if (i == start)
        {
          vector_type = stack_[i].type_;
        }
        else
        {
          // If you get this assert, you are writing a typed vector with
          // elements that are not all the same type.
          FLATBUFFERS_ASSERT(vector_type == stack_[i].type_);
        }
      }
    }
    // If you get this assert, your fixed types are not one of:
    // Int / UInt / Float / Key.
    FLATBUFFERS_ASSERT(!fixed || IsTypedVectorElementType(vector_type));
    auto byte_width = Align(bit_width);
    // Write vector. First the keys width/offset if available, and size.
    if (keys)
    {
      WriteOffset(keys->u_, byte_width);
      Write<uint64_t>(1ULL << keys->min_bit_width_, byte_width);
    }
    if (!fixed)
      Write<uint64_t>(vec_len, byte_width);
    // Then the actual data.
    auto vloc = buf_.size();
    for (size_t i = start; i < stack_.size(); i += step)
    {
      WriteAny(stack_[i], byte_width);
    }
    // Then the types.
    if (!typed)
    {
      for (size_t i = start; i < stack_.size(); i += step)
      {
        buf_.push_back(stack_[i].StoredPackedType(bit_width));
      }
    }
    return Value(static_cast<uint64_t>(vloc),
                 keys ? FBT_MAP
                      : (typed ? ToTypedVector(vector_type, fixed ? vec_len : 0) : FBT_VECTOR),
                 bit_width);
  }
1799
  // You shouldn't really be copying instances of this class.
  Builder(const Builder &);
  Builder &operator=(const Builder &);

  std::vector<uint8_t> buf_;   // Serialized output being built.
  std::vector<Value> stack_;   // Values not yet consumed by a vector/map/root.

  bool finished_;              // Set by Finish(); checked by Finished().
  bool has_duplicate_keys_;    // Set by EndMap() when two keys compare equal.

  BuilderFlag flags_;          // BUILDER_FLAG_SHARE_* behavior flags.

  BitWidth force_min_bit_width_; // See ForceMinimumBitWidth().
1813
1814 struct KeyOffsetCompare
1815 {
1816 explicit KeyOffsetCompare(const std::vector<uint8_t> &buf) : buf_(&buf) {}
1817 bool operator()(size_t a, size_t b) const
1818 {
1819 auto stra = reinterpret_cast<const char *>(flatbuffers::vector_data(*buf_) + a);
1820 auto strb = reinterpret_cast<const char *>(flatbuffers::vector_data(*buf_) + b);
1821 return strcmp(stra, strb) < 0;
1822 }
1823 const std::vector<uint8_t> *buf_;
1824 };
1825
  typedef std::pair<size_t, size_t> StringOffset; // (buffer offset, length).
  // Orders pooled strings by content so the string pool de-duplicates.
  struct StringOffsetCompare
  {
    explicit StringOffsetCompare(const std::vector<uint8_t> &buf) : buf_(&buf) {}
    bool operator()(const StringOffset &a, const StringOffset &b) const
    {
      auto stra = reinterpret_cast<const char *>(flatbuffers::vector_data(*buf_) + a.first);
      auto strb = reinterpret_cast<const char *>(flatbuffers::vector_data(*buf_) + b.first);
      // Compare one byte past the shorter length (the NUL) so strings of
      // different lengths order correctly.
      return strncmp(stra, strb, (std::min)(a.second, b.second) + 1) < 0;
    }
    const std::vector<uint8_t> *buf_;
  };

  typedef std::set<size_t, KeyOffsetCompare> KeyOffsetMap;
  typedef std::set<StringOffset, StringOffsetCompare> StringOffsetMap;

  // De-duplication pools used when the SHARE_KEYS / SHARE_STRINGS flags are set.
  KeyOffsetMap key_pool;
  StringOffsetMap string_pool;
1844};
1845
1846} // namespace flexbuffers
1847
1848#if defined(_MSC_VER)
1849#pragma warning(pop)
1850#endif
1851
1852#endif // FLATBUFFERS_FLEXBUFFERS_H_
void Add(const float *input1_data, const Dims< 4 > &input1_dims, const float *input2_data, const Dims< 4 > &input2_dims, float *output_data, const Dims< 4 > &output_dims)
Definition Add.float.cpp:28
#define FLATBUFFERS_ASSERT
Definition base.h:37
Blob(const uint8_t *data_buf, uint8_t byte_width)
static Blob EmptyBlob()
const uint8_t * data() const
bool IsTheEmptyBlob() const
size_t StartMap(const char *key)
void IndirectFloat(const char *key, float f)
void ReuseValue(const char *key, Value v)
size_t TypedVector(const char *key, F f)
size_t Map(const char *key, F f)
size_t Vector(const char *key, F f)
void Vector(const std::vector< T > &vec)
void Float(const char *key, float f)
void String(const flexbuffers::String &str)
size_t Key(const std::string &str)
void IndirectInt(const char *key, int64_t i)
size_t Map(const char *key, F f, T &state)
void ForceMinimumBitWidth(BitWidth bw=BIT_WIDTH_8)
void String(const char *key, const flexbuffers::String &str)
void Int(const char *key, int64_t i)
void Bool(const char *key, bool b)
void Add(const flexbuffers::String &str)
size_t StartVector(const char *key)
void Add(const std::vector< T > &vec)
void Vector(const T *elems, size_t len)
void Map(const std::map< std::string, T > &map)
size_t String(const char *str, size_t len)
size_t Key(const char *str, size_t len)
size_t TypedVector(const char *key, F f, T &state)
void UInt(const char *key, uint64_t u)
void IndirectUInt(const char *key, uint64_t u)
void Double(const char *key, double d)
void String(const char *key, const std::string &str)
void Add(const std::string &str)
size_t EndVector(size_t start, bool typed, bool fixed)
size_t String(const std::string &str)
void Add(const char *key, const T &t)
void IndirectDouble(const char *key, double d)
void Vector(const char *key, const T *elems, size_t len)
size_t Blob(const std::vector< uint8_t > &v)
Builder(size_t initial_size=256, BuilderFlag flags=BUILDER_FLAG_SHARE_KEYS)
size_t Blob(const void *data, size_t len)
size_t Vector(const char *key, F f, T &state)
void Add(const std::map< std::string, T > &map)
size_t FixedTypedVector(const T *elems, size_t len)
const std::vector< uint8_t > & GetBuffer() const
Get the serialized buffer (after you call Finish()).
size_t FixedTypedVector(const char *key, const T *elems, size_t len)
void String(const char *key, const char *str)
Reference operator[](size_t i) const
static FixedTypedVector EmptyFixedTypedVector()
FixedTypedVector(const uint8_t *data, uint8_t byte_width, Type element_type, uint8_t len)
bool IsTheEmptyFixedTypedVector() const
Map(const uint8_t *data, uint8_t byte_width)
TypedVector Keys() const
static Map EmptyMap()
bool IsTheEmptyMap() const
Reference operator[](const char *key) const
Vector Values() const
const uint8_t * data_
Object(const uint8_t *data, uint8_t byte_width)
int64_t AsInt64() const
float AsFloat() const
bool MutateString(const char *str, size_t len)
String AsString() const
bool MutateString(const std::string &str)
uint8_t AsUInt8() const
std::string ToString() const
double AsDouble() const
bool MutateFloat(float f)
bool MutateFloat(double d)
int8_t AsInt8() const
Reference(const uint8_t *data, uint8_t parent_width, uint8_t byte_width, Type type)
TypedVector AsTypedVector() const
Vector AsVector() const
void ToString(bool strings_quoted, bool keys_quoted, std::string &s) const
uint32_t AsUInt32() const
bool MutateUInt(uint64_t u)
const char * AsKey() const
bool IsFixedTypedVector() const
uint16_t AsUInt16() const
int32_t AsInt32() const
FixedTypedVector AsFixedTypedVector() const
bool IsAnyVector() const
uint64_t AsUInt64() const
bool IsIntOrUint() const
bool MutateInt(int64_t i)
bool MutateBool(bool b)
int16_t AsInt16() const
bool IsTypedVector() const
bool MutateString(const char *str)
Reference(const uint8_t *data, uint8_t parent_width, uint8_t packed_type)
bool IsUntypedVector() const
Sized(const uint8_t *data, uint8_t byte_width)
Sized(const uint8_t *data, uint8_t byte_width, size_t sz)
size_t read_size() const
size_t size() const
String(const uint8_t *data, uint8_t byte_width, size_t sz)
bool IsTheEmptyString() const
size_t length() const
std::string str() const
String(const uint8_t *data, uint8_t byte_width)
static String EmptyString()
const char * c_str() const
Reference operator[](size_t i) const
static TypedVector EmptyTypedVector()
bool IsTheEmptyVector() const
TypedVector(const uint8_t *data, uint8_t byte_width, Type element_type)
static Vector EmptyVector()
Vector(const uint8_t *data, uint8_t byte_width)
Reference operator[](size_t i) const
bool IsTheEmptyVector() const
#define FLATBUFFERS_GET_FIELD_BIT_WIDTH(value, width)
__global uchar * offset(const Image *img, int x, int y)
Definition helpers.h:540
std::string NumToString(T t)
Definition util.h:139
int64_t StringToInt(const char *s, int base=10)
Definition util.h:435
bool EscapeString(const char *s, size_t length, std::string *_text, bool allow_non_utf8, bool natural_utf8)
Definition util.h:652
bool StringToNumber(const char *s, T *val)
Definition util.h:363
T * vector_data(std::vector< T > &vector)
uint64_t StringToUInt(const char *s, int base=10)
Definition util.h:441
uint8_t NullPackedType()
BitWidth WidthI(int64_t i)
@ BUILDER_FLAG_SHARE_KEY_VECTORS
@ BUILDER_FLAG_SHARE_KEYS_AND_STRINGS
@ BUILDER_FLAG_SHARE_KEYS
@ BUILDER_FLAG_SHARE_STRINGS
Reference GetRoot(const uint8_t *buffer, size_t size)
int8_t quarter
double ReadDouble(const uint8_t *data, uint8_t byte_width)
int16_t half
int KeyCompare(const void *key, const void *elem)
BitWidth WidthF(double f)
Type ToTypedVector(Type t, size_t fixed_len=0)
bool IsTypedVectorElementType(Type t)
Definition flexbuffers.h:91
Type ToFixedTypedVectorElementType(Type t, uint8_t *len)
Type ToTypedVectorElementType(Type t)
uint64_t ReadUInt64(const uint8_t *data, uint8_t byte_width)
bool IsTypedVector(Type t)
Definition flexbuffers.h:96
@ FBT_VECTOR_STRING_DEPRECATED
Definition flexbuffers.h:74
int64_t ReadInt64(const uint8_t *data, uint8_t byte_width)
bool IsInline(Type t)
Definition flexbuffers.h:89
bool IsFixedTypedVector(Type t)
R ReadSizedScalar(const uint8_t *data, uint8_t byte_width)
void AppendToString(std::string &s, T &&v, bool keys_quoted)
uint8_t PackedType(BitWidth bit_width, Type type)
const uint8_t * Indirect(const uint8_t *offset, uint8_t byte_width)
BitWidth WidthU(uint64_t u)
int32_t size[5]
Definition Slice.cpp:35
BitWidth ElemWidth(size_t buf_size, size_t elem_index) const
BitWidth StoredWidth(BitWidth parent_bit_width_=BIT_WIDTH_8) const
Value(uint64_t u, Type t, BitWidth bw)
uint8_t StoredPackedType(BitWidth parent_bit_width_=BIT_WIDTH_8) const
Value(int64_t i, Type t, BitWidth bw)