// flatbuffers reflection.cpp (viewer pagination artifacts removed)
/*
 * Copyright 2015 Google Inc. All rights reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include "flatbuffers/reflection.h"

#include <cassert>
#include <cstdlib>
#include <cstring>

#include "flatbuffers/util.h"
  18. // Helper functionality for reflection.
  19. namespace flatbuffers {
  20. int64_t GetAnyValueI(reflection::BaseType type, const uint8_t *data) {
  21. # define FLATBUFFERS_GET(T) static_cast<int64_t>(ReadScalar<T>(data))
  22. switch (type) {
  23. case reflection::UType:
  24. case reflection::Bool:
  25. case reflection::UByte: return FLATBUFFERS_GET(uint8_t);
  26. case reflection::Byte: return FLATBUFFERS_GET(int8_t);
  27. case reflection::Short: return FLATBUFFERS_GET(int16_t);
  28. case reflection::UShort: return FLATBUFFERS_GET(uint16_t);
  29. case reflection::Int: return FLATBUFFERS_GET(int32_t);
  30. case reflection::UInt: return FLATBUFFERS_GET(uint32_t);
  31. case reflection::Long: return FLATBUFFERS_GET(int64_t);
  32. case reflection::ULong: return FLATBUFFERS_GET(uint64_t);
  33. case reflection::Float: return FLATBUFFERS_GET(float);
  34. case reflection::Double: return FLATBUFFERS_GET(double);
  35. case reflection::String: {
  36. auto s = reinterpret_cast<const String *>(ReadScalar<uoffset_t>(data) +
  37. data);
  38. return s ? StringToInt(s->c_str()) : 0;
  39. }
  40. default: return 0; // Tables & vectors do not make sense.
  41. }
  42. # undef FLATBUFFERS_GET
  43. }
  44. double GetAnyValueF(reflection::BaseType type, const uint8_t *data) {
  45. switch (type) {
  46. case reflection::Float: return static_cast<double>(ReadScalar<float>(data));
  47. case reflection::Double: return ReadScalar<double>(data);
  48. case reflection::String: {
  49. auto s = reinterpret_cast<const String *>(ReadScalar<uoffset_t>(data) +
  50. data);
  51. return s ? strtod(s->c_str(), nullptr) : 0.0;
  52. }
  53. default: return static_cast<double>(GetAnyValueI(type, data));
  54. }
  55. }
// Converts the value at `data`, interpreted as `type`, to a string.
// `schema` and `type_index` are only consulted for reflection::Obj values:
// with a schema, tables are pretty-printed (debugging aid, NOT JSON
// compliant); without one, a "(table)" placeholder is returned.
std::string GetAnyValueS(reflection::BaseType type, const uint8_t *data,
                         const reflection::Schema *schema, int type_index) {
  switch (type) {
    case reflection::Float:
    case reflection::Double: return NumToString(GetAnyValueF(type, data));
    case reflection::String: {
      // The string is stored as a uoffset relative to `data`.
      auto s = reinterpret_cast<const String *>(ReadScalar<uoffset_t>(data) +
                                                data);
      return s ? s->c_str() : "";
    }
    case reflection::Obj:
      if (schema) {
        // Convert the table to a string. This is mostly for debugging purposes,
        // and does NOT promise to be JSON compliant.
        // Also prefixes the type.
        auto &objectdef = *schema->objects()->Get(type_index);
        auto s = objectdef.name()->str();
        if (objectdef.is_struct()) {
          s += "(struct)";  // TODO: implement this as well.
        } else {
          // Tables are stored as a relative offset; follow it.
          auto table_field = reinterpret_cast<const Table *>(
              ReadScalar<uoffset_t>(data) + data);
          s += " { ";
          auto fielddefs = objectdef.fields();
          for (auto it = fielddefs->begin(); it != fielddefs->end(); ++it) {
            auto &fielddef = **it;
            // Skip fields not present in this particular table instance.
            if (!table_field->CheckField(fielddef.offset())) continue;
            auto val = GetAnyFieldS(*table_field, fielddef, schema);
            if (fielddef.type()->base_type() == reflection::String)
              val = "\"" + val + "\"";  // Doesn't deal with escape codes etc.
            s += fielddef.name()->str();
            s += ": ";
            s += val;
            s += ", ";
          }
          s += "}";
        }
        return s;
      } else {
        return "(table)";
      }
    case reflection::Vector:
      return "[(elements)]";  // TODO: implement this as well.
    case reflection::Union:
      return "(union)";  // TODO: implement this as well.
    default: return NumToString(GetAnyValueI(type, data));
  }
}
  104. void SetAnyValueI(reflection::BaseType type, uint8_t *data, int64_t val) {
  105. # define FLATBUFFERS_SET(T) WriteScalar(data, static_cast<T>(val))
  106. switch (type) {
  107. case reflection::UType:
  108. case reflection::Bool:
  109. case reflection::UByte: FLATBUFFERS_SET(uint8_t ); break;
  110. case reflection::Byte: FLATBUFFERS_SET(int8_t ); break;
  111. case reflection::Short: FLATBUFFERS_SET(int16_t ); break;
  112. case reflection::UShort: FLATBUFFERS_SET(uint16_t); break;
  113. case reflection::Int: FLATBUFFERS_SET(int32_t ); break;
  114. case reflection::UInt: FLATBUFFERS_SET(uint32_t); break;
  115. case reflection::Long: FLATBUFFERS_SET(int64_t ); break;
  116. case reflection::ULong: FLATBUFFERS_SET(uint64_t); break;
  117. case reflection::Float: FLATBUFFERS_SET(float ); break;
  118. case reflection::Double: FLATBUFFERS_SET(double ); break;
  119. // TODO: support strings
  120. default: break;
  121. }
  122. # undef FLATBUFFERS_SET
  123. }
  124. void SetAnyValueF(reflection::BaseType type, uint8_t *data, double val) {
  125. switch (type) {
  126. case reflection::Float: WriteScalar(data, static_cast<float>(val)); break;
  127. case reflection::Double: WriteScalar(data, val); break;
  128. // TODO: support strings.
  129. default: SetAnyValueI(type, data, static_cast<int64_t>(val)); break;
  130. }
  131. }
  132. void SetAnyValueS(reflection::BaseType type, uint8_t *data, const char *val) {
  133. switch (type) {
  134. case reflection::Float:
  135. case reflection::Double:
  136. SetAnyValueF(type, data, strtod(val, nullptr));
  137. break;
  138. // TODO: support strings.
  139. default: SetAnyValueI(type, data, StringToInt(val)); break;
  140. }
  141. }
  142. // Resize a FlatBuffer in-place by iterating through all offsets in the buffer
  143. // and adjusting them by "delta" if they straddle the start offset.
  144. // Once that is done, bytes can now be inserted/deleted safely.
  145. // "delta" may be negative (shrinking).
  146. // Unless "delta" is a multiple of the largest alignment, you'll create a small
  147. // amount of garbage space in the buffer (usually 0..7 bytes).
  148. // If your FlatBuffer's root table is not the schema's root table, you should
  149. // pass in your root_table type as well.
class ResizeContext {
 public:
  // Performs the whole resize in the constructor: rounds `delta` up to
  // largest-scalar alignment, patches every offset in the buffer that
  // straddles `start`, then inserts/erases the bytes.
  ResizeContext(const reflection::Schema &schema, uoffset_t start, int delta,
                std::vector<uint8_t> *flatbuf,
                const reflection::Object *root_table = nullptr)
      : schema_(schema), startptr_(flatbuf->data() + start),
        delta_(delta), buf_(*flatbuf),
        dag_check_(flatbuf->size() / sizeof(uoffset_t), false) {
    // Round delta_ up to a multiple of the largest scalar size so alignment
    // of everything after the insertion point is preserved.
    auto mask = static_cast<int>(sizeof(largest_scalar_t) - 1);
    delta_ = (delta_ + mask) & ~mask;
    if (!delta_) return;  // We can't shrink by less than largest_scalar_t.
    // Now change all the offsets by delta_.
    auto root = GetAnyRoot(buf_.data());
    Straddle<uoffset_t, 1>(buf_.data(), root, buf_.data());
    ResizeTable(root_table ? *root_table : *schema.root_table(), root);
    // We can now add or remove bytes at start.
    if (delta_ > 0) buf_.insert(buf_.begin() + start, delta_, 0);
    else buf_.erase(buf_.begin() + start, buf_.begin() + start - delta_);
  }

  // Check if the range between first (lower address) and second straddles
  // the insertion point. If it does, change the offset at offsetloc (of
  // type T, with direction D).
  template<typename T, int D> void Straddle(const void *first,
                                            const void *second,
                                            void *offsetloc) {
    if (first <= startptr_ && second >= startptr_) {
      WriteScalar<T>(offsetloc, ReadScalar<T>(offsetloc) + delta_ * D);
      DagCheck(offsetloc) = true;
    }
  }

  // This returns a boolean that records if the corresponding offset location
  // has been modified already. If so, we can't even read the corresponding
  // offset, since it is pointing to a location that is illegal until the
  // resize actually happens.
  // This must be checked for every offset, since we can't know which offsets
  // will straddle and which won't.
  uint8_t &DagCheck(const void *offsetloc) {
    // One flag per uoffset_t-sized slot in the buffer.
    auto dag_idx = reinterpret_cast<const uoffset_t *>(offsetloc) -
                   reinterpret_cast<const uoffset_t *>(buf_.data());
    return dag_check_[dag_idx];
  }

  // Recursively visits `table` (whose type is `objectdef`) and fixes up any
  // offsets inside it that straddle the insertion point.
  void ResizeTable(const reflection::Object &objectdef, Table *table) {
    if (DagCheck(table))
      return;  // Table already visited.
    auto vtable = table->GetVTable();
    // Early out: since all fields inside the table must point forwards in
    // memory, if the insertion point is before the table we can stop here.
    auto tableloc = reinterpret_cast<uint8_t *>(table);
    if (startptr_ <= tableloc) {
      // Check if insertion point is between the table and a vtable that
      // precedes it. This can't happen in current construction code, but check
      // just in case we ever change the way flatbuffers are built.
      Straddle<soffset_t, -1>(vtable, table, table);
    } else {
      // Check each field.
      auto fielddefs = objectdef.fields();
      for (auto it = fielddefs->begin(); it != fielddefs->end(); ++it) {
        auto &fielddef = **it;
        auto base_type = fielddef.type()->base_type();
        // Ignore scalars.
        if (base_type <= reflection::Double) continue;
        // Ignore fields that are not stored.
        auto offset = table->GetOptionalFieldOffset(fielddef.offset());
        if (!offset) continue;
        // Ignore structs (stored inline, so they contain no offsets).
        auto subobjectdef = base_type == reflection::Obj ?
            schema_.objects()->Get(fielddef.type()->index()) : nullptr;
        if (subobjectdef && subobjectdef->is_struct()) continue;
        // Get this fields' offset, and read it if safe.
        auto offsetloc = tableloc + offset;
        if (DagCheck(offsetloc))
          continue;  // This offset already visited.
        auto ref = offsetloc + ReadScalar<uoffset_t>(offsetloc);
        Straddle<uoffset_t, 1>(offsetloc, ref, offsetloc);
        // Recurse.
        switch (base_type) {
          case reflection::Obj: {
            ResizeTable(*subobjectdef, reinterpret_cast<Table *>(ref));
            break;
          }
          case reflection::Vector: {
            auto elem_type = fielddef.type()->element();
            // Only vectors of tables or strings contain offsets to fix up.
            if (elem_type != reflection::Obj && elem_type != reflection::String)
              break;
            auto vec = reinterpret_cast<Vector<uoffset_t> *>(ref);
            auto elemobjectdef = elem_type == reflection::Obj
                ? schema_.objects()->Get(fielddef.type()->index())
                : nullptr;
            if (elemobjectdef && elemobjectdef->is_struct()) break;
            for (uoffset_t i = 0; i < vec->size(); i++) {
              auto loc = vec->Data() + i * sizeof(uoffset_t);
              if (DagCheck(loc))
                continue;  // This offset already visited.
              auto dest = loc + vec->Get(i);
              Straddle<uoffset_t, 1>(loc, dest, loc);
              if (elemobjectdef)
                ResizeTable(*elemobjectdef, reinterpret_cast<Table *>(dest));
            }
            break;
          }
          case reflection::Union: {
            ResizeTable(GetUnionType(schema_, objectdef, fielddef, *table),
                        reinterpret_cast<Table *>(ref));
            break;
          }
          case reflection::String:
            break;
          default:
            assert(false);
        }
      }
      // Check if the vtable offset points beyond the insertion point.
      // Must do this last, since GetOptionalFieldOffset above still reads
      // this value.
      Straddle<soffset_t, -1>(table, vtable, table);
    }
  }

  // Declared but never defined: disallows assignment (pre-C++11 idiom;
  // this class holds reference members, so assignment makes no sense).
  void operator=(const ResizeContext &rc);

 private:
  const reflection::Schema &schema_;
  uint8_t *startptr_;               // Insertion point inside buf_.
  int delta_;                       // Bytes to insert (+) or remove (-).
  std::vector<uint8_t> &buf_;
  std::vector<uint8_t> dag_check_;  // Visited flag per offset slot.
};
// Replaces the contents of the string `str` inside `flatbuf` with `val`,
// resizing the whole buffer in-place when the lengths differ.
// `root_table` must be passed if the buffer's root table type is not the
// schema's root table.
void SetString(const reflection::Schema &schema, const std::string &val,
               const String *str, std::vector<uint8_t> *flatbuf,
               const reflection::Object *root_table) {
  auto delta = static_cast<int>(val.size()) - static_cast<int>(str->Length());
  auto str_start = static_cast<uoffset_t>(
      reinterpret_cast<const uint8_t *>(str) - flatbuf->data());
  // The character data starts just past the string's length field.
  auto start = str_start + static_cast<uoffset_t>(sizeof(uoffset_t));
  if (delta) {
    // Clear the old string, since we don't want parts of it remaining.
    memset(flatbuf->data() + start, 0, str->Length());
    // Different size, we must expand (or contract).
    ResizeContext(schema, start, delta, flatbuf, root_table);
    // Set the new length.
    WriteScalar(flatbuf->data() + str_start,
                static_cast<uoffset_t>(val.size()));
  }
  // Copy new data. Safe because we created the right amount of space.
  memcpy(flatbuf->data() + start, val.c_str(), val.size() + 1);
}
// Resizes the vector `vec` inside `flatbuf` from `num_elems` to `newsize`
// elements of `elem_size` bytes each, growing or shrinking the buffer
// in-place. Grown elements are zero-initialized (callers may overwrite
// them). Returns a pointer into the resized buffer just past the original
// `num_elems` elements.
uint8_t *ResizeAnyVector(const reflection::Schema &schema, uoffset_t newsize,
                         const VectorOfAny *vec, uoffset_t num_elems,
                         uoffset_t elem_size, std::vector<uint8_t> *flatbuf,
                         const reflection::Object *root_table) {
  auto delta_elem = static_cast<int>(newsize) - static_cast<int>(num_elems);
  auto delta_bytes = delta_elem * static_cast<int>(elem_size);
  auto vec_start = reinterpret_cast<const uint8_t *>(vec) - flatbuf->data();
  // Insertion/removal point: just past the last current element.
  auto start = static_cast<uoffset_t>(vec_start + sizeof(uoffset_t) +
                                      elem_size * num_elems);
  if (delta_bytes) {
    if (delta_elem < 0) {
      // Clear elements we're throwing away, since some might remain in the
      // buffer.
      auto size_clear = -delta_elem * elem_size;
      memset(flatbuf->data() + start - size_clear, 0, size_clear);
    }
    ResizeContext(schema, start, delta_bytes, flatbuf, root_table);
    WriteScalar(flatbuf->data() + vec_start, newsize);  // Length field.
    // Set new elements to 0.. this can be overwritten by the caller.
    if (delta_elem > 0) {
      memset(flatbuf->data() + start, 0, delta_elem * elem_size);
    }
  }
  return flatbuf->data() + start;
}
// Appends the FlatBuffer `newbuf` (of `newlen` bytes), minus its root
// offset, to the end of `flatbuf`. Returns a pointer to the root table of
// the copied buffer inside `flatbuf`; the pointer is invalidated by any
// subsequent reallocation of `flatbuf`.
const uint8_t *AddFlatBuffer(std::vector<uint8_t> &flatbuf,
                             const uint8_t *newbuf, size_t newlen) {
  // Align to sizeof(uoffset_t) past sizeof(largest_scalar_t) since we're
  // going to chop off the root offset.
  // (i.e. pad until size % 4 == 0 but size % 8 != 0, so the data is
  // largest-scalar aligned once the uoffset_t root offset is dropped.)
  while ((flatbuf.size() & (sizeof(uoffset_t) - 1)) ||
         !(flatbuf.size() & (sizeof(largest_scalar_t) - 1))) {
    flatbuf.push_back(0);
  }
  auto insertion_point = static_cast<uoffset_t>(flatbuf.size());
  // Insert the entire FlatBuffer minus the root pointer.
  flatbuf.insert(flatbuf.end(), newbuf + sizeof(uoffset_t), newbuf + newlen);
  // Recompute where the root table lives relative to the insertion point.
  auto root_offset = ReadScalar<uoffset_t>(newbuf) - sizeof(uoffset_t);
  return flatbuf.data() + insertion_point + root_offset;
}
// Copies `size` bytes of inline data (a scalar or struct field `fielddef`
// of `table`) into the builder at the given alignment, and records it as a
// field of the table currently being built.
void CopyInline(FlatBufferBuilder &fbb, const reflection::Field &fielddef,
                const Table &table, size_t align, size_t size) {
  fbb.Align(align);
  fbb.PushBytes(table.GetStruct<const uint8_t *>(fielddef.offset()), size);
  fbb.TrackField(fielddef.offset(), fbb.GetSize());
}
  339. Offset<const Table *> CopyTable(FlatBufferBuilder &fbb,
  340. const reflection::Schema &schema,
  341. const reflection::Object &objectdef,
  342. const Table &table,
  343. bool use_string_pooling) {
  344. // Before we can construct the table, we have to first generate any
  345. // subobjects, and collect their offsets.
  346. std::vector<uoffset_t> offsets;
  347. auto fielddefs = objectdef.fields();
  348. for (auto it = fielddefs->begin(); it != fielddefs->end(); ++it) {
  349. auto &fielddef = **it;
  350. // Skip if field is not present in the source.
  351. if (!table.CheckField(fielddef.offset())) continue;
  352. uoffset_t offset = 0;
  353. switch (fielddef.type()->base_type()) {
  354. case reflection::String: {
  355. offset = use_string_pooling
  356. ? fbb.CreateSharedString(GetFieldS(table, fielddef)).o
  357. : fbb.CreateString(GetFieldS(table, fielddef)).o;
  358. break;
  359. }
  360. case reflection::Obj: {
  361. auto &subobjectdef = *schema.objects()->Get(fielddef.type()->index());
  362. if (!subobjectdef.is_struct()) {
  363. offset = CopyTable(fbb, schema, subobjectdef,
  364. *GetFieldT(table, fielddef)).o;
  365. }
  366. break;
  367. }
  368. case reflection::Union: {
  369. auto &subobjectdef = GetUnionType(schema, objectdef, fielddef, table);
  370. offset = CopyTable(fbb, schema, subobjectdef,
  371. *GetFieldT(table, fielddef)).o;
  372. break;
  373. }
  374. case reflection::Vector: {
  375. auto vec = table.GetPointer<const Vector<Offset<Table>> *>(
  376. fielddef.offset());
  377. auto element_base_type = fielddef.type()->element();
  378. auto elemobjectdef = element_base_type == reflection::Obj
  379. ? schema.objects()->Get(fielddef.type()->index())
  380. : nullptr;
  381. switch (element_base_type) {
  382. case reflection::String: {
  383. std::vector<Offset<const String *>> elements(vec->size());
  384. auto vec_s = reinterpret_cast<const Vector<Offset<String>> *>(vec);
  385. for (uoffset_t i = 0; i < vec_s->size(); i++) {
  386. elements[i] = use_string_pooling
  387. ? fbb.CreateSharedString(vec_s->Get(i)).o
  388. : fbb.CreateString(vec_s->Get(i)).o;
  389. }
  390. offset = fbb.CreateVector(elements).o;
  391. break;
  392. }
  393. case reflection::Obj: {
  394. if (!elemobjectdef->is_struct()) {
  395. std::vector<Offset<const Table *>> elements(vec->size());
  396. for (uoffset_t i = 0; i < vec->size(); i++) {
  397. elements[i] =
  398. CopyTable(fbb, schema, *elemobjectdef, *vec->Get(i));
  399. }
  400. offset = fbb.CreateVector(elements).o;
  401. break;
  402. }
  403. // FALL-THRU:
  404. }
  405. default: { // Scalars and structs.
  406. auto element_size = GetTypeSize(element_base_type);
  407. if (elemobjectdef && elemobjectdef->is_struct())
  408. element_size = elemobjectdef->bytesize();
  409. fbb.StartVector(element_size, vec->size());
  410. fbb.PushBytes(vec->Data(), element_size * vec->size());
  411. offset = fbb.EndVector(vec->size());
  412. break;
  413. }
  414. }
  415. break;
  416. }
  417. default: // Scalars.
  418. break;
  419. }
  420. if (offset) {
  421. offsets.push_back(offset);
  422. }
  423. }
  424. // Now we can build the actual table from either offsets or scalar data.
  425. auto start = objectdef.is_struct()
  426. ? fbb.StartStruct(objectdef.minalign())
  427. : fbb.StartTable();
  428. size_t offset_idx = 0;
  429. for (auto it = fielddefs->begin(); it != fielddefs->end(); ++it) {
  430. auto &fielddef = **it;
  431. if (!table.CheckField(fielddef.offset())) continue;
  432. auto base_type = fielddef.type()->base_type();
  433. switch (base_type) {
  434. case reflection::Obj: {
  435. auto &subobjectdef = *schema.objects()->Get(fielddef.type()->index());
  436. if (subobjectdef.is_struct()) {
  437. CopyInline(fbb, fielddef, table, subobjectdef.minalign(),
  438. subobjectdef.bytesize());
  439. break;
  440. }
  441. // else: FALL-THRU:
  442. }
  443. case reflection::Union:
  444. case reflection::String:
  445. case reflection::Vector:
  446. fbb.AddOffset(fielddef.offset(), Offset<void>(offsets[offset_idx++]));
  447. break;
  448. default: { // Scalars.
  449. auto size = GetTypeSize(base_type);
  450. CopyInline(fbb, fielddef, table, size, size);
  451. break;
  452. }
  453. }
  454. }
  455. assert(offset_idx == offsets.size());
  456. if (objectdef.is_struct()) {
  457. fbb.ClearOffsets();
  458. return fbb.EndStruct();
  459. } else {
  460. return fbb.EndTable(start, static_cast<voffset_t>(fielddefs->size()));
  461. }
  462. }
  463. bool VerifyStruct(flatbuffers::Verifier &v,
  464. const flatbuffers::Table &parent_table,
  465. voffset_t field_offset,
  466. const reflection::Object &obj,
  467. bool required) {
  468. auto offset = parent_table.GetOptionalFieldOffset(field_offset);
  469. if (required && !offset) {
  470. return false;
  471. }
  472. return !offset || v.Verify(reinterpret_cast<const uint8_t*>(&parent_table)
  473. + offset, obj.bytesize());
  474. }
  475. bool VerifyVectorOfStructs(flatbuffers::Verifier &v,
  476. const flatbuffers::Table &parent_table,
  477. voffset_t field_offset,
  478. const reflection::Object &obj,
  479. bool required) {
  480. auto p = parent_table.GetPointer<const uint8_t*>(field_offset);
  481. const uint8_t* end;
  482. if (required && !p) {
  483. return false;
  484. }
  485. return !p || v.VerifyVector(p, obj.bytesize(), &end);
  486. }
// forward declare to resolve cyclic deps between VerifyObject and VerifyVector
bool VerifyObject(flatbuffers::Verifier &v,
                  const reflection::Schema &schema,
                  const reflection::Object &obj,
                  const flatbuffers::Table *table,
                  bool isRequired);

// Verifies the vector field `vec_field` of `table`, dispatching on the
// element type recorded in the reflection data. Returns false on any
// verification failure.
bool VerifyVector(flatbuffers::Verifier &v,
                  const reflection::Schema &schema,
                  const flatbuffers::Table &table,
                  const reflection::Field &vec_field) {
  assert(vec_field.type()->base_type() == reflection::Vector);
  // The field itself holds an offset to the vector; verify that first.
  if (!table.VerifyField<uoffset_t>(v, vec_field.offset()))
    return false;
  switch (vec_field.type()->element()) {
    case reflection::None:
      assert(false);
      break;
    case reflection::UType:
      return v.Verify(flatbuffers::GetFieldV<uint8_t>(table, vec_field));
    case reflection::Bool:
    case reflection::Byte:
    case reflection::UByte:
      // Same width: one check covers all 1-byte element types.
      return v.Verify(flatbuffers::GetFieldV<int8_t>(table, vec_field));
    case reflection::Short:
    case reflection::UShort:
      return v.Verify(flatbuffers::GetFieldV<int16_t>(table, vec_field));
    case reflection::Int:
    case reflection::UInt:
      return v.Verify(flatbuffers::GetFieldV<int32_t>(table, vec_field));
    case reflection::Long:
    case reflection::ULong:
      return v.Verify(flatbuffers::GetFieldV<int64_t>(table, vec_field));
    case reflection::Float:
      return v.Verify(flatbuffers::GetFieldV<float>(table, vec_field));
    case reflection::Double:
      return v.Verify(flatbuffers::GetFieldV<double>(table, vec_field));
    case reflection::String: {
      auto vecString =
          flatbuffers::GetFieldV<flatbuffers::
                                 Offset<flatbuffers::String>>(table,
                                                              vec_field);
      // Verify the offset vector itself, then every string it points to.
      if (v.Verify(vecString) && v.VerifyVectorOfStrings(vecString)) {
        return true;
      } else {
        return false;
      }
    }
    case reflection::Vector:
      // Nested vectors cannot be expressed in FlatBuffers schemas.
      assert(false);
      break;
    case reflection::Obj: {
      auto obj = schema.objects()->Get(vec_field.type()->index());
      if (obj->is_struct()) {
        if (!VerifyVectorOfStructs(v, table, vec_field.offset(), *obj,
                                   vec_field.required())) {
          return false;
        }
      } else {
        // Vector of tables: verify the offset vector, then each table.
        auto vec =
            flatbuffers::GetFieldV<flatbuffers::
                                   Offset<flatbuffers::Table>>(table,
                                                               vec_field);
        if (!v.Verify(vec))
          return false;
        if (vec) {
          for (uoffset_t j = 0; j < vec->size(); j++) {
            if (!VerifyObject(v, schema, *obj, vec->Get(j), true)) {
              return false;
            }
          }
        }
      }
      return true;
    }
    case reflection::Union:
      // Vectors of unions are not supported here.
      assert(false);
      break;
    default:
      assert(false);
      break;
  }
  return false;
}
// Verifies `table` against `v`, checking every field declared by its
// reflection type `obj` (scalars, strings, vectors, nested tables/structs,
// unions). An absent table is only an error if `required`.
bool VerifyObject(flatbuffers::Verifier &v,
                  const reflection::Schema &schema,
                  const reflection::Object &obj,
                  const flatbuffers::Table *table,
                  bool required) {
  if (!table) {
    if (!required)
      return true;
    else
      return false;
  }
  // Verify the vtable before touching any field.
  if (!table->VerifyTableStart(v))
    return false;
  for (uoffset_t i = 0; i < obj.fields()->size(); i++) {
    auto field_def = obj.fields()->Get(i);
    switch (field_def->type()->base_type()) {
      case reflection::None:
        assert(false);
        break;
      case reflection::UType:
        if (!table->VerifyField<uint8_t>(v, field_def->offset()))
          return false;
        break;
      case reflection::Bool:
      case reflection::Byte:
      case reflection::UByte:
        // Same width: one check covers all 1-byte field types.
        if (!table->VerifyField<int8_t>(v, field_def->offset()))
          return false;
        break;
      case reflection::Short:
      case reflection::UShort:
        if (!table->VerifyField<int16_t>(v, field_def->offset()))
          return false;
        break;
      case reflection::Int:
      case reflection::UInt:
        if (!table->VerifyField<int32_t>(v, field_def->offset()))
          return false;
        break;
      case reflection::Long:
      case reflection::ULong:
        if (!table->VerifyField<int64_t>(v, field_def->offset()))
          return false;
        break;
      case reflection::Float:
        if (!table->VerifyField<float>(v, field_def->offset()))
          return false;
        break;
      case reflection::Double:
        if (!table->VerifyField<double>(v, field_def->offset()))
          return false;
        break;
      case reflection::String:
        // Verify the offset field, then the string it points to.
        if (!table->VerifyField<uoffset_t>(v, field_def->offset()) ||
            !v.Verify(flatbuffers::GetFieldS(*table, *field_def))) {
          return false;
        }
        break;
      case reflection::Vector:
        if (!VerifyVector(v, schema, *table, *field_def))
          return false;
        break;
      case reflection::Obj: {
        auto child_obj = schema.objects()->Get(field_def->type()->index());
        if (child_obj->is_struct()) {
          if (!VerifyStruct(v, *table, field_def->offset(), *child_obj,
                            field_def->required())) {
            return false;
          }
        } else {
          if (!VerifyObject(v, schema, *child_obj,
                            flatbuffers::GetFieldT(*table, *field_def),
                            field_def->required())) {
            return false;
          }
        }
        break;
      }
      case reflection::Union: {
        // get union type from the prev field
        voffset_t utype_offset = field_def->offset() - sizeof(voffset_t);
        auto utype = table->GetField<uint8_t>(utype_offset, 0);
        if (utype != 0) {
          // Means we have this union field present
          auto fb_enum = schema.enums()->Get(field_def->type()->index());
          // NOTE(review): Get(utype) indexes enum values by position;
          // assumes union enum values are dense starting at 0 — confirm for
          // schemas with explicit enum values.
          auto child_obj = fb_enum->values()->Get(utype)->object();
          if (!VerifyObject(v, schema, *child_obj,
                            flatbuffers::GetFieldT(*table, *field_def),
                            field_def->required())) {
            return false;
          }
        }
        break;
      }
      default:
        assert(false);
        break;
    }
  }
  return true;
}
  669. bool Verify(const reflection::Schema &schema,
  670. const reflection::Object &root,
  671. const uint8_t *buf,
  672. size_t length) {
  673. Verifier v(buf, length);
  674. return VerifyObject(v, schema, root, flatbuffers::GetAnyRoot(buf), true);
  675. }
  676. } // namespace flatbuffers