HDK
 All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Macros Groups Pages
GA_PageArrayImpl.h
Go to the documentation of this file.
1 /*
2  * PROPRIETARY INFORMATION. This software is proprietary to
3  * Side Effects Software Inc., and is not to be reproduced,
4  * transmitted, or disclosed in any way without written permission.
5  *
6  * NAME: GA_PageArrayImpl.h (GA Library, C++)
7  *
8  * COMMENTS: An array class with special handling of constant pages and
9  * shared page data, specialized for GA_Offset.
10  */
11 
12 #pragma once
13 
14 #ifndef __GA_PageArrayImpl__
15 #define __GA_PageArrayImpl__
16 
17 #include "GA_PageArray.h"
18 
19 #include "GA_API.h"
20 #include "GA_Defaults.h"
21 #include "GA_Defragment.h"
22 #include "GA_Iterator.h"
23 #include "GA_LoadMap.h"
24 #include "GA_MergeMap.h"
25 #include "GA_Range.h"
26 #include "GA_SaveOptions.h"
27 #include "GA_Types.h"
28 
29 #include <UT/UT_Array.h>
30 #include <UT/UT_Assert.h>
31 #include <UT/UT_BitArray.h>
32 #include <UT/UT_FixedVector.h>
33 #include <UT/UT_JSONDefines.h>
34 #include <UT/UT_JSONParser.h>
35 #include <UT/UT_JSONWriter.h>
36 #include <UT/UT_StackBuffer.h>
37 #include <UT/UT_Storage.h>
38 #include <UT/UT_UniquePtr.h>
39 #include <UT/UT_VectorTypes.h>
40 #include <UT/UT_WorkBuffer.h>
41 #include <SYS/SYS_Inline.h>
42 #include <SYS/SYS_Math.h>
43 #include <SYS/SYS_Types.h>
44 #include <SYS/SYS_TypeTraits.h>
45 
46 #include <string.h>
47 
48 
49 // Separate namespace for these, because they shouldn't be duplicated per
50 // template instantiation.
// Separate namespace for the JSON token helpers so they are not duplicated
// per template instantiation of GA_PageArray (see the comment above).
// NOTE(review): the "enum JDTupleToken" declaration line and all of its
// enumerator lines (original lines 54, 56-63) were lost when this listing
// was extracted; the orphaned brace pair below is the remnant of that enum
// body. Restore the enum from the original header before compiling.
51 namespace GA_PageArrayIO
52 {
53  // JSON tokens
55  {
64  };
// Returns the JSON token string for a given JDTupleToken ID.
65  GA_API const char *getJSONToken(JDTupleToken tokenID);
// Inverse of getJSONToken: maps a token string back to its JDTupleToken ID.
66  GA_API JDTupleToken getJSONTokenID(const char *token);
67 }
68 
// Replays the swap/move operations recorded in a GA_Defragment object
// against this page array, after hardening the table so the ranges can be
// mutated in place.
// NOTE(review): the line carrying the qualified function name (original
// line 71) was lost in extraction; presumably
// GA_PageArray<...>::defragment(const GA_Defragment &defrag) -- confirm
// against the original header.
69 template<typename DATA_T,exint TSIZE,bool TABLEHARDENED,bool PAGESHARDENED>
70 void
72 {
// Harden first so swapRange/moveRange below can write to unshared pages.
73  auto &hard = hardenTable();
74  for (GA_Defragment::const_iterator it=defrag.begin(); !it.atEnd(); ++it)
75  {
76  GA_Offset a = it.getA();
77  GA_Offset b = it.getB();
78  GA_Size n = it.getN();
79  switch (it.getOp())
80  {
// NOTE(review): the two case labels (original lines 81 and 84) were lost in
// extraction; from the calls below they presumably select between the swap
// and move opcodes of GA_Defragment -- confirm against the original header.
82  hard.swapRange(a, b, GA_Offset(n));
83  break;
85  hard.moveRange(a, b, GA_Offset(n));
86  break;
87  }
88  }
89 }
90 
// Grows this array to the destination capacity described by the merge map
// and copies the source array's data into the destination range, filling any
// newly created elements with the given defaults first.
// NOTE(review): the line carrying the qualified function name (original
// line 94) and the line declaring the "src" parameter (original line 97, a
// GA_PageArray of the SRC_* template parameters, judging by the inner
// template header and the src.size() call below) were lost in extraction --
// confirm against the original header.
91 template<typename DATA_T,exint TSIZE,bool TABLEHARDENED,bool PAGESHARDENED>
92 template<typename SRC_DATA_T,exint SRC_TSIZE,bool SRC_TABLEHARDENED,bool SRC_PAGESHARDENED>
93 void
95  const GA_MergeMap &map,
96  GA_AttributeOwner owner,
98  const GA_Defaults &defaults)
99 {
// When DATA_T is void the element storage is only known at runtime, so
// dispatch to the statically-typed castType<T>() instantiation and recurse.
100  if constexpr (SYSisSame<DATA_T,void>())
101  {
102  // Hard case, where the storage type is not known at compile time.
103  UT_Storage storage = Base::getStorage();
104  switch (storage)
105  {
106  case UT_Storage::INT8:
107  castType<int8>().mergeGrowArrayAndCopy(map, owner, src, defaults); break;
108  case UT_Storage::UINT8:
109  castType<uint8>().mergeGrowArrayAndCopy(map, owner, src, defaults); break;
110  case UT_Storage::INT16:
111  castType<int16>().mergeGrowArrayAndCopy(map, owner, src, defaults); break;
112  case UT_Storage::INT32:
113  castType<int32>().mergeGrowArrayAndCopy(map, owner, src, defaults); break;
114  case UT_Storage::INT64:
115  castType<int64>().mergeGrowArrayAndCopy(map, owner, src, defaults); break;
116  case UT_Storage::REAL16:
117  castType<fpreal16>().mergeGrowArrayAndCopy(map, owner, src, defaults); break;
118  case UT_Storage::REAL32:
119  castType<fpreal32>().mergeGrowArrayAndCopy(map, owner, src, defaults); break;
120  case UT_Storage::REAL64:
121  castType<fpreal64>().mergeGrowArrayAndCopy(map, owner, src, defaults); break;
122  case UT_Storage::INVALID:
123  UT_ASSERT_MSG(0, "Can't have a GA_PageArray with invalid storage!");
124  break;
125  }
126 
127  return;
128  }
129 
// osize is only needed for the sanity asserts below, hence UT_IF_ASSERT.
130  UT_IF_ASSERT( GA_Offset osize = map.getDestInitCapacity(owner); )
131  GA_Offset nsize = map.getDestCapacity(owner);
132 
133  // Ideally we could assert that capacity() == ocapacity, but this method is
134  // sometimes called by implementations of GA_AIFMerge::copyArray(),
135  // after GA_AIFMerge::growArray() has already been called.
136  UT_ASSERT(osize <= size());
137  UT_ASSERT(osize <= nsize || (osize == GA_Offset(0) && nsize <= GA_Offset(0)));
138 
139  if (nsize <= GA_Offset(0))
140  return;
141 
142  GA_Offset dststart = map.getDestStart(owner);
143  GA_Offset dstend = map.getDestEnd(owner)+1;
144 
145  UT_ASSERT(dstend - dststart <= src.size());
146  UT_ASSERT(GAisValid(dststart) && dststart < nsize);
147  UT_ASSERT(GAisValid(dstend) && dstend <= nsize);
148  UT_ASSERT(dststart < dstend);
149 
150  UT_ASSERT_MSG(GAgetPageOff(dststart) == 0, "mergeGrowArrayAndCopy should only be used when dststart is at a page boundary");
// Grow (default-filling new elements) before copying the source range in.
151  if (nsize > size())
152  {
153  setSize(nsize, defaults);
154  }
155 
156  // As odd as it may seem, apparently mergeGrowArrayAndCopy has only ever
157  // supported copying from source offset 0 onward, regardless of
158  // map.getSourceRange(owner). For example, GA_DataArray::
159  // mergeGrowArrayAndCopy and GA_DataBitArray::mergeGrowArrayAndCopy
160  // both assume this too.
161  moveRange(src, GA_Offset(0), dststart, dstend - dststart);
162 }
163 
// Saves the elements in the given range to JSON: tuple size and storage
// name first, then either paged raw data (binary writers, with optional
// constant-page compression) or a flat/array-of-structs representation.
// The optional "map" remaps integer values (out-of-range values become
// "defvalue"); it is ignored for non-integer storage.
// NOTE(review): the line carrying the qualified function name (original
// line 166) was lost in extraction, as were several lines that wrote JSON
// key tokens via GA_PageArrayIO::getJSONToken (original lines 235, 238,
// 247, 267-268, 289, 320-321, 329, 340 -- they carried hyperlinks in the
// Doxygen listing). Restore them from the original header.
164 template<typename DATA_T,exint TSIZE,bool TABLEHARDENED,bool PAGESHARDENED>
165 bool
167  UT_JSONWriter &w, const GA_Range &range,
168  const GA_SaveOptions *options,
169  const UT_IntArray *map, int defvalue) const
170 {
// Runtime-storage dispatch, same pattern as mergeGrowArrayAndCopy above.
171  if constexpr (SYSisSame<DATA_T,void>())
172  {
173  bool success = false;
174  // Hard case, where the storage type is not known at compile time.
175  UT_Storage storage = this->Base::getStorage();
176  switch (storage)
177  {
178  case UT_Storage::INT8:
179  success = castType<int8>().jsonSave(w, range, options, map, defvalue); break;
180  case UT_Storage::UINT8:
181  success = castType<uint8>().jsonSave(w, range, options, map, defvalue); break;
182  case UT_Storage::INT16:
183  success = castType<int16>().jsonSave(w, range, options, map, defvalue); break;
184  case UT_Storage::INT32:
185  success = castType<int32>().jsonSave(w, range, options, map, defvalue); break;
186  case UT_Storage::INT64:
187  success = castType<int64>().jsonSave(w, range, options, map, defvalue); break;
188  case UT_Storage::REAL16:
189  success = castType<fpreal16>().jsonSave(w, range, options, map, defvalue); break;
190  case UT_Storage::REAL32:
191  success = castType<fpreal32>().jsonSave(w, range, options, map, defvalue); break;
192  case UT_Storage::REAL64:
193  success = castType<fpreal64>().jsonSave(w, range, options, map, defvalue); break;
194  case UT_Storage::INVALID:
195  UT_ASSERT_MSG(0, "Can't have a GA_PageArray with invalid storage!");
196  success = false;
197  break;
198  }
199 
200  return success;
201  }
202 
203  int tuplesize = getTupleSize();
204 
205  // Cast to optimize for small tuple sizes
206  if constexpr (TSIZE == -1)
207  {
208  if (tuplesize <= 3 && tuplesize >= 1)
209  {
210  bool success;
211  if (tuplesize == 3)
212  success = this->castTupleSize<3>().jsonSave(w, range, options, map, defvalue);
213  else if (tuplesize == 1)
214  success = this->castTupleSize<1>().jsonSave(w, range, options, map, defvalue);
215  else
216  {
217  UT_ASSERT_P(tuplesize == 2);
218  success = this->castTupleSize<2>().jsonSave(w, range, options, map, defvalue);
219  }
220 
221  return success;
222  }
223  }
224 
225  GA_Storage ga_storage = getStorage();
// The integer remapping table only makes sense for integer storage.
226  if (map && !GAisIntStorage(ga_storage))
227  map = nullptr;
228 
229  UT_JID jid = GAStorageToJID(ga_storage);
230 
231  bool ok = true;
232 
233  ok = ok && w.jsonBeginArray();
234 
236  ok = ok && w.jsonInt(tuplesize);
237 
239  ok = ok && w.jsonStringToken(GAstorage(ga_storage));
240 
// Paged layout is the default for binary writers; options may override it.
241  bool savepaged = w.getBinary();
242  if (options)
243  options->importSavePaged(savepaged);
244 
245  if (savepaged)
246  {
248  UT_ASSERT_COMPILETIME(thePageSize == GA_PAGE_SIZE);
249  ok = ok && w.jsonInt(thePageSize);
250 
251 #if 0
252  // For max compatibility with GA_DataArrayTuple, we try to match the old
253  // packing behaviour: 1; 2 -> 1,1; 3; 4 -> 3,1; 5 -> 3,1,1; 6 -> 3,1,1,1
254  // though only for fpreal32 and fpreal64 types. Every other type
255  // had each component stored separately.
256  //
257  // TODO: Check if older versions will load data that is saved
258  // with everything as array-of-structs, avoiding the
259  // need for this.
260  bool hasfirst3packed = (tuplesize >= 3) &&
261  (ga_storage == GA_STORE_REAL32 || ga_storage == GA_STORE_REAL64);
262 
263  // The GA_JDTUPLE_PACKING field is optional and only needed if we
264  // need a data layout other than the default array-of-structs.
265  int n_packing_entries = tuplesize - (hasfirst3packed ? 2 : 0);
266  if (n_packing_entries > 1)
267  {
269  ok = ok && w.beginUniformArray(n_packing_entries, UT_JID_UINT8);
270 
271  // First is 3 or 1; every other one is 1.
272  ok = ok && w.uniformWrite(uint8(hasfirst3packed ? 3 : 1));
273  for (int i = 1; i < n_packing_entries; i++)
274  {
275  ok = ok && w.uniformWrite(uint8(1));
276  }
277 
278  ok = ok && w.endUniformArray();
279  }
280 #else
281  // I think GA_DataArrayTuple::jsonLoad supports loading
282  // array-of-structs, regardless of the tuplesize, so let's try it
283  // for now, and we can always fall back later.
284 
285  // I don't think the packing entry array is needed if there's only one entry.
286 #if 0
287  int n_packing_entries = 1;
288 
290  ok = ok && w.beginUniformArray(n_packing_entries, UT_JID_INT32);
291  ok = ok && w.uniformWrite(int32(tuplesize));
292  ok = ok && w.endUniformArray();
293 #endif
294 #endif
295 
296  // constpagecheck:
297  // 0 - none
298  // 1 - use page state
299  // 2 - full data scan
300  exint const_page_check = 2;
301  if (options)
302  const_page_check = options->constPageCheck();
303 
304  UT_UniquePtr<UT_BitArray> const_page_flags(nullptr);
305  if (tuplesize > 0)
306  {
// NOTE(review): the template arguments of the two
// jsonSaveConstantOutputPageFlags calls below (original lines 310 and 316)
// were lost in extraction; presumably the sub-page-block map class for the
// full scan and the page-number map class for the page-state check --
// confirm against the original header.
307  if (const_page_check >= 2)
308  {
309  ok = ok && jsonSaveConstantOutputPageFlags<
311  w, range, const_page_flags);
312  }
313  else if (const_page_check == 1)
314  {
315  ok = ok && jsonSaveConstantOutputPageFlags<
317  w, range, const_page_flags);
318  }
319  }
320 
322 
323  ok = ok && jsonSaveRawPageData(w, range,
324  const_page_flags.get(), jid, map, defvalue);
325  }
326  else if (tuplesize <= 1)
327  {
328  // No reason to save an array of tuples if it's a scalar
330  ok = ok && w.jsonBeginArray();
331 
332  if (tuplesize != 0)
333  ok = ok && jsonSaveAsArray<false>(w, range, jid, map, defvalue);
334 
335  ok = ok && w.jsonEndArray();
336  }
337  else
338  {
339  // Store as an array of structs
341  ok = ok && w.jsonBeginArray();
342 
343  ok = ok && jsonSaveAsArray<true>(w, range, jid, map, defvalue);
344 
345  ok = ok && w.jsonEndArray();
346  }
347  return ok && w.jsonEndArray();
348 }
349 
// Determines which output pages of the given range are constant-valued and,
// if any are, writes the bit array of constant-page flags to the JSON
// stream and hands the flags back to the caller via output_page_flags.
// MAP_ARRAY_CLASS selects how precisely internal pages are tracked (see the
// two buildOutputToInternalPageMap overloads below).
// NOTE(review): the line carrying the qualified function name (original
// line 353) and a token-writing line before the jsonBeginArray call
// (original line 369) were lost in extraction -- confirm against the
// original header.
350 template<typename DATA_T,exint TSIZE,bool TABLEHARDENED,bool PAGESHARDENED>
351 template<typename MAP_ARRAY_CLASS>
352 bool
354  UT_JSONWriter &w, const GA_Range &range, UT_UniquePtr<UT_BitArray> &output_page_flags) const
355 {
356  bool ok = true;
357 
358  MAP_ARRAY_CLASS output_to_internal_page_map;
359  buildOutputToInternalPageMap(range, output_to_internal_page_map);
360 
// Number of pages needed to hold the range, rounding the last page up.
361  int64 n_output_pages = ((range.getEntries() + thePageSize-1) / thePageSize);
362  UT_BitArray constant_flags(n_output_pages);
363 
364  GA_Size n_constant_pages = marshallConstantFlagsForOutputPages(
365  output_to_internal_page_map, constant_flags);
// Nothing constant: skip writing the flags entirely (output_page_flags
// stays null).
366  if (n_constant_pages == 0)
367  return ok;
368 
370  ok = ok && w.jsonBeginArray();
371 
372  ok = ok && w.jsonUniformArray(constant_flags.size(), constant_flags);
// Transfer the computed flags to the caller without copying the bits.
373  output_page_flags.reset(new UT_BitArray);
374  constant_flags.swap(*output_page_flags);
375 
376  ok = ok && w.jsonEndArray();
377 
378  return ok;
379 }
380 
// Writes one span of tuples to the writer's current uniform array,
// handling the four combinations of constant input page / constant output
// page, and optionally remapping integer values through "map" (values
// outside the map fall back to defvalue). "buffer" must hold at least
// tuplesize elements; it is used as scratch space for remapped tuples.
// NOTE(review): the line carrying the qualified function name (original
// line 383) was lost in extraction; from the call sites in
// jsonSaveRawPageData it is presumably a static helper named
// jsonWriteDataSpan -- confirm against the original header.
381 template<typename DATA_T,exint TSIZE,bool TABLEHARDENED,bool PAGESHARDENED>
382 bool
384  UT_JSONWriter &w,
385  const NotVoidType *page_data,
386  exint length, exint tuplesize,
387  bool const_output, bool const_input,
388  const UT_IntArray *map, int defvalue,
389  NotVoidType *buffer)
390 {
391  // NOTE: nullptr page_data should be dealt with by caller, using buffer.
392  UT_ASSERT_P(page_data);
393 
// Varying input to varying output: write all length*tuplesize values.
394  if (!const_output && !const_input)
395  {
396  if (!map)
397  {
398  // Simple case
399  return w.uniformBlockWrite(page_data, length * tuplesize);
400  }
401  else
402  {
// Remap one tuple at a time through "map" into buffer, then write it.
403  for (exint i = 0; i < length; ++i)
404  {
405  for (exint component = 0; component < tuplesize; ++component, ++page_data)
406  {
407  NotVoidType val = *page_data;
408  buffer[component] = (val < 0 || val >= map->size())
409  ? defvalue
410  : (*map)(val);
411  }
412  if (!w.uniformBlockWrite(buffer, tuplesize))
413  return false;
414  }
415  return true;
416  }
417  }
418 
419  // Every case left has a single input value to read
420  const NotVoidType *data = page_data;
421  if (map)
422  {
423  for (exint component = 0; component < tuplesize; ++component, ++page_data)
424  {
425  NotVoidType val = *page_data;
426  buffer[component] = (val < 0 || val >= map->size())
427  ? defvalue
428  : (*map)(val);
429  }
430  data = buffer;
431  }
432 
433  if (const_output)
434  {
// Constant output page: a single representative tuple suffices.
435  return w.uniformBlockWrite(data, tuplesize);
436  }
437  else
438  {
439  // const_input and !const_output, so repeat same tuple, length times
440  for (exint i = 0; i < length; ++i)
441  {
442  if (!w.uniformBlockWrite(data, tuplesize))
443  return false;
444  }
445  return true;
446  }
447 }
448 
// Writes the range's data as one flat uniform array. Pages flagged constant
// in const_page_flags are collapsed to a single tuple each; the last output
// page may be shorter than thePageSize, which the collapsed-size
// computation below accounts for.
// NOTE(review): the line carrying the qualified function name (original
// line 451) and the declaration of "buffer" (original line 498, presumably
// a UT_StackBuffer<NotVoidType> of tuplesize elements, judging by the
// (NotVoidType*)buffer casts below) were lost in extraction -- confirm
// against the original header.
449 template<typename DATA_T,exint TSIZE,bool TABLEHARDENED,bool PAGESHARDENED>
450 bool
452  UT_JSONWriter &w, const GA_Range &range,
453  const UT_BitArray *const_page_flags,
454  UT_JID jid_storage,
455  const UT_IntArray *map, int defvalue) const
456 {
457  UT_ASSERT_P(!(SYSisSame<DATA_T,void>()));
458 
459  exint ntotal = range.getEntries();
460 
// Compute how many values the uniform array will actually contain once
// constant pages are collapsed to one tuple each.
461  exint collapsedsize = ntotal;
462  if (const_page_flags && ntotal > 0)
463  {
464  exint n_const_pages = const_page_flags->numBitsSet();
465 
466  // Special handling for last page, since it's not always the same size
467  if (const_page_flags->getBitFast(const_page_flags->size()-1))
468  {
469  collapsedsize = (const_page_flags->size()-n_const_pages)*thePageSize
470  + n_const_pages;
471  }
472  else
473  {
474  // NOTE: ((ntotal-1) & thePageMask) + 1 ensures that we get
475  // thePageSize if ntotal is a multiple of thePageSize.
476  collapsedsize = (const_page_flags->size()-n_const_pages-1)*thePageSize
477  + n_const_pages
478  + ((ntotal-1) & thePageMask) + 1;
479  }
480  }
481  const exint tuplesize = getTupleSize();
482  collapsedsize *= tuplesize;
483 
484  bool ok = true;
485  ok = ok && w.beginUniformArray(collapsedsize, jid_storage);
486 
487  // Don't even try to go through the pages if tuplesize is 0.
488  // Only bugs will ensue. Might as well check this implicitly
489  // by checking collapsedsize, since it's multiplied by tuplesize.
490  if (collapsedsize == 0)
491  {
492  ok = ok && w.endUniformArray();
493  return ok;
494  }
495 
// Cached state for the page currently being read; last_page_num tracks
// which internal page page_data/const_page_data refer to.
496  bool const_page_data;
497  const NotVoidType *page_data;
499  const GA_Size n_output_pages = (ntotal+thePageSize-1) / thePageSize;
500 
501  GA_Iterator it(range);
502  GA_PageNum last_page_num(-1);
503  GA_Offset block_start = GA_INVALID_OFFSET;
504  GA_PageOff block_start_pageoff;
505  GA_Offset block_end = GA_INVALID_OFFSET;
506  for (GA_Size output_page_num = 0; ok && output_page_num < n_output_pages; ++output_page_num)
507  {
508  const bool output_page_const = const_page_flags && const_page_flags->getBitFast(output_page_num);
509 
// Walk contiguous blocks from the range iterator until this output page
// is filled (or the range is exhausted on the final, short page).
510  GA_Size output_page_offset = 0;
511  do
512  {
513  if (block_start == block_end)
514  {
515  bool more_data = it.blockAdvance(block_start, block_end);
516  if (!more_data)
517  {
518  UT_ASSERT_P(output_page_num == n_output_pages-1);
519  UT_ASSERT_P(GA_Size(GAgetPageOff(GA_Offset(ntotal))) == output_page_offset);
520  break;
521  }
522 
523  GA_PageNum page_num = GAgetPageNum(block_start);
524  block_start_pageoff = GAgetPageOff(block_start);
525 
526  // Fetch the page data if we don't already have it.
527  if (page_num != last_page_num)
528  {
529  const_page_data = isPageConstant(page_num);
530  page_data = getPageData(page_num);
531 
532  // Deal with nullptr here, to avoid having to deal with it in
533  // multiple codepaths.
534  if (!page_data)
535  {
536  UT_ASSERT_P(const_page_data);
537  memset((NotVoidType*)buffer,0,sizeof(NotVoidType)*tuplesize);
538  page_data = (NotVoidType*)buffer;
539  }
540  last_page_num = page_num;
541  }
542  }
543 
544  const GA_Size copy_size = SYSmin(GA_Size(block_end-block_start), thePageSize-GA_Size(output_page_offset));
545 
// Non-constant output pages are streamed out span by span; constant
// output pages are written once, after the loop, below.
546  if (!output_page_const)
547  {
548  const NotVoidType *copy_data = page_data;
549  if (!const_page_data)
550  copy_data += GA_Size(block_start_pageoff)*tuplesize;
551  ok = ok && jsonWriteDataSpan(
552  w, copy_data, copy_size, tuplesize,
553  false, const_page_data, map, defvalue, (NotVoidType*)buffer);
554  }
555 
556  output_page_offset += copy_size;
557  block_start += copy_size;
558  block_start_pageoff += copy_size;
559  } while (ok && output_page_offset != thePageSize);
560 
561  if (output_page_const)
562  {
563  const NotVoidType *copy_data = page_data;
564  if (!const_page_data)
565  {
566  // The -1 is because we added copy_size, which is at least 1, to block_start_pageoff,
567  // and block_start_pageoff may now be at a page offset that is not the same value,
568  // or may even be at the page offset of block_end.
569  copy_data += GA_Size(block_start_pageoff-1)*tuplesize;
570  }
571  ok = ok && jsonWriteDataSpan(
572  w, copy_data, 1, tuplesize,
573  true, const_page_data, map, defvalue, (NotVoidType*)buffer);
574  }
575  }
576 
577  ok = ok && w.endUniformArray();
578  return ok;
579 }
580 
// Small record describing a contiguous sub-range of one internal page, used
// by the sub-page-block overloads of buildOutputToInternalPageMap and
// marshallConstantFlagsForOutputPages below.
// NOTE(review): the constructor declaration line(s) (original lines
// 585-586) and the member declaration lines (original lines 589-591) were
// lost in extraction. From the initializer list here and the accesses
// later in this file (myPage, myStartOffset, myEndOffset), the members are
// presumably a GA_PageNum and two GA_PageOff fields -- confirm against the
// original header.
581 template<typename DATA_T,exint TSIZE,bool TABLEHARDENED,bool PAGESHARDENED>
582 class GA_PageArray<DATA_T, TSIZE, TABLEHARDENED, PAGESHARDENED>::ga_SubPageBlock
583 {
584 public:
587  : myPage(page), myStartOffset(start), myEndOffset(end) {}
588 
592 };
593 
594 // --------------------------------------------------------------------------
595 // Compute a mapping to keep track of which internal pages affect which output
596 // pages. We store this mapping as an ordered list of the input pages as they
597 // are traversed in building the output pages, with the start of each output
598 // page indicated by a negative value (-(input_page + 1)).
599 //
600 // NB: We don't keep track of the page offsets in the mapping so this is
601 // really only useful for internal pages that are flagged as constant.
// Builds the output-page-to-internal-page mapping described by the comment
// block above: internal page numbers in traversal order, with the first
// entry of each output page encoded as -(input_page + 1).
// NOTE(review): the line carrying the qualified function name (original
// line 604) and the line declaring the map parameter (original line 606,
// presumably a UT_Array<GA_PageNum> &, judging by the append calls below
// and the matching marshallConstantFlagsForOutputPages overload) were lost
// in extraction -- confirm against the original header.
602 template<typename DATA_T,exint TSIZE,bool TABLEHARDENED,bool PAGESHARDENED>
603 void
605  const GA_Range &range,
607 {
608  GA_Iterator it(range);
609  GA_Size output_page_offset = 0;
610  GA_Size block_size = 0;
611  GA_PageNum page_num;
612  GA_PageNum last_page_num(-1);
613 
614  while (true)
615  {
// A full output page was just emitted; start a new one.
616  if (output_page_offset == thePageSize)
617  {
618  output_page_offset = 0;
619  }
620 
621  if (block_size == 0) // need new block
622  {
623  GA_Offset block_start, block_end;
624  if (!it.blockAdvance(block_start, block_end))
625  break;
626 
627  page_num = GAgetPageNum(block_start);
628  block_size = block_end - block_start;
629  }
630 
// How much of the current block fits in the current output page.
631  GA_Size output_size = SYSmin(block_size,
632  thePageSize-output_page_offset);
633 
634  if (output_page_offset == 0)
635  {
// Start of a new output page: record the page negatively encoded.
636  map.append(-(page_num+1));
637  last_page_num = page_num;
638  }
639  else if (page_num != last_page_num)
640  {
// Only record each internal page once per output page.
641  map.append(page_num);
642  last_page_num = page_num;
643  }
644 
645  block_size -= output_size;
646  output_page_offset += output_size;
647  }
648 }
649 
650 // Compute a mapping to keep track of which internal page data blocks affect
651 // which output pages. We store this mapping as an ordered list of the sub
652 // page blocks as they are traversed in building the output pages, with the
653 // start of each output page indicated by a negative page number
654 // (-(input_page + 1)).
655 //
656 // TODO: We could keep track of block start/end, recomputing the internal
657 // page number at need?
// Sub-page-block variant of the mapping builder above: records the exact
// page offset range of each contributing block, so constant-ness can later
// be verified by scanning the actual data (see the comment block above).
// NOTE(review): the line carrying the qualified function name (original
// line 660) and the line declaring the map parameter (original line 662,
// presumably a UT_Array<ga_SubPageBlock> &, judging by the append calls
// below) were lost in extraction -- confirm against the original header.
658 template<typename DATA_T,exint TSIZE,bool TABLEHARDENED,bool PAGESHARDENED>
659 void
661  const GA_Range &range,
663 {
664  GA_Iterator it(range);
665  GA_Size output_page_offset = 0;
666  GA_Size block_size = 0;
667  GA_PageNum page_num;
668  GA_PageOff page_offset;
669 
670  while (true)
671  {
// A full output page was just emitted; start a new one.
672  if (output_page_offset == thePageSize)
673  {
674  output_page_offset = 0;
675  }
676 
677  if (block_size == 0) // need new block
678  {
679  GA_Offset block_start, block_end;
680  if (!it.blockAdvance(block_start, block_end))
681  break;
682 
683  page_num = GAgetPageNum(block_start);
684  page_offset = GAgetPageOff(block_start);
685  block_size = block_end - block_start;
686  }
687 
// How much of the current block fits in the current output page.
688  GA_Size output_size = SYSmin(block_size,
689  thePageSize-output_page_offset);
690 
691  if (output_page_offset == 0)
692  {
// Start of a new output page: page number is negatively encoded.
693  map.append(ga_SubPageBlock(
694  GA_PageNum(-(page_num+1)), page_offset,
695  page_offset + output_size));
696  }
697  else
698  {
699  map.append(ga_SubPageBlock(
700  page_num, page_offset,
701  page_offset + output_size));
702  }
703 
704  page_offset += output_size;
705  block_size -= output_size;
706  output_page_offset += output_size;
707  }
708 }
709 
// Walks the page-number mapping produced by buildOutputToInternalPageMap
// and sets a bit in constant_flags for each output page whose contributing
// internal pages are all constant and share the same value. Returns the
// number of constant output pages found. This variant only trusts the
// page-level constant flags, so it can miss pages that happen to hold
// uniform data without being flagged (cf. the sub-page variant below).
// NOTE(review): the line carrying the qualified function name (original
// line 712) was lost in extraction -- confirm against the original header.
710 template<typename DATA_T,exint TSIZE,bool TABLEHARDENED,bool PAGESHARDENED>
711 GA_Size
713  const UT_Array<GA_PageNum> &internal_page_map,
714  UT_BitArray &constant_flags) const
715 {
716  UT_ASSERT_P(!(SYSisSame<DATA_T,void>()));
717 
718  GA_Size count = 0;
719  GA_Size output_page = -1;
// Whether the output page currently being examined is still constant.
720  bool output_page_flag = false;
// Representative value of the current output page (valid only while
// output_page_flag is true).
721  const NotVoidType *constant_value;
722  const exint tuplesize = getTupleSize();
723 
724  constant_flags.setAllBits(false);
725  for (GA_Size i = 0; i < internal_page_map.size(); i++)
726  {
727  GA_PageNum internal_page = internal_page_map(i);
728  // A negative internal page is used to mark the start of a new
729  // output page.
730  if (internal_page < 0)
731  {
// Commit the verdict for the page we just finished.
732  if (output_page >= 0 && output_page_flag)
733  {
734  constant_flags.setBit(output_page, output_page_flag);
735  ++count;
736  }
737 
738  ++output_page;
739  UT_ASSERT_P(output_page <= constant_flags.size());
740  internal_page = -(internal_page + 1);
741  output_page_flag = isPageConstant(internal_page);
742  if (output_page_flag)
743  {
744  constant_value = getPageData(internal_page);
745  }
746  }
747  else if (output_page_flag)
748  {
749  if (!isPageConstant(internal_page))
750  output_page_flag = false;
751  else
752  {
// Both pages are constant: they match if both are null, or if their
// (single-tuple) values compare equal.
753  const NotVoidType *new_constant_value = getPageData(internal_page);
754  if ((new_constant_value==nullptr) != (constant_value==nullptr))
755  output_page_flag = false;
756  else if (constant_value != new_constant_value)
757  output_page_flag = isEqual(constant_value, new_constant_value, tuplesize);
758  }
759  }
760  }
// Commit the verdict for the final output page.
761  if (output_page >= 0 && output_page_flag)
762  {
763  constant_flags.setBit(output_page, output_page_flag);
764  ++count;
765  }
766  return count;
767 }
768 
// Sub-page-block variant of the marshaller above: in addition to the
// page-level constant flags, it scans the actual data of non-constant
// pages (via isSubPageConstant) so output pages whose data happens to be
// uniform are also detected. Returns the number of constant output pages.
// NOTE(review): the line carrying the qualified function name (original
// line 771) was lost in extraction -- confirm against the original header.
769 template<typename DATA_T,exint TSIZE,bool TABLEHARDENED,bool PAGESHARDENED>
770 GA_Size
772  const UT_Array<ga_SubPageBlock> &internal_page_map,
773  UT_BitArray &constant_flags) const
774 {
775  UT_ASSERT_P(!(SYSisSame<DATA_T,void>()));
776 
777  GA_Size count = 0;
778  GA_Size output_page = -1;
// Whether the output page currently being examined is still constant.
779  bool output_page_flag = false;
// Representative value of the current output page; may point into page
// data or be nullptr (implicit zero) for constant pages with no storage.
780  const NotVoidType *constant_value;
781  const exint tuplesize = getTupleSize();
782 
783  constant_flags.setAllBits(false);
784  for (GA_Size i = 0; i < internal_page_map.entries(); i++)
785  {
786  GA_PageNum internal_page = internal_page_map(i).myPage;
787  // A negative internal page is used to mark the start of a new
788  // output page.
789  if (internal_page < 0)
790  {
// Commit the verdict for the page we just finished.
791  if (output_page >= 0 && output_page_flag)
792  {
793  constant_flags.setBit(output_page, output_page_flag);
794  ++count;
795  }
796 
797  ++output_page;
798  UT_ASSERT_P(output_page <= constant_flags.size());
799  internal_page = -(internal_page + 1);
800  output_page_flag = isPageConstant(internal_page);
801  constant_value = getPageData(internal_page);
802  if (!output_page_flag)
803  {
// Page not flagged constant: take its first tuple as the candidate
// value and scan the rest of the sub-page range against it.
804  GA_PageOff start = internal_page_map(i).myStartOffset;
805  GA_PageOff end = internal_page_map(i).myEndOffset;
806  const NotVoidType *page = constant_value;
807  constant_value += start;
808  output_page_flag = isSubPageConstant(page, start+1, end,
809  tuplesize, constant_value);
810  }
811  }
812  else if (output_page_flag)
813  {
814  const bool page_constant = isPageConstant(internal_page);
815  const NotVoidType *page = getPageData(internal_page);
816  if (page_constant)
817  {
// Constant page: matches if both values are null, or compare equal.
818  if ((page==nullptr) != (constant_value==nullptr))
819  output_page_flag = false;
820  else if (constant_value != page)
821  output_page_flag = isEqual(constant_value, page, tuplesize);
822  }
823  else
824  {
// Varying page: every tuple in the sub-page range must equal the
// candidate value.
825  if (!isSubPageConstant(page,
826  internal_page_map(i).myStartOffset,
827  internal_page_map(i).myEndOffset,
828  tuplesize,
829  constant_value))
830  output_page_flag = false;
831  }
832  }
833  }
// Commit the verdict for the final output page.
834  if (output_page >= 0 && output_page_flag)
835  {
836  constant_flags.setBit(output_page, output_page_flag);
837  ++count;
838  }
839  return count;
840 }
841 
// Returns true if every tuple in the given sub-range of "page" equals
// "value" (or is all zero when value is nullptr, i.e. an implicit-zero
// constant page).
// NOTE(review): the line carrying the qualified function name (original
// line 844) and the line declaring the range parameters (original line
// 846, presumably GA_PageOff start / GA_PageOff end, judging by the loop
// below and the call sites above) were lost in extraction -- confirm
// against the original header.
842 template<typename DATA_T,exint TSIZE,bool TABLEHARDENED,bool PAGESHARDENED>
843 bool
845  const NotVoidType *page,
847  const exint tuplesize,
848  const NotVoidType *value)
849 {
850  if (value == nullptr)
851  {
852  for (GA_PageOff cur = start; cur < end; cur++)
853  {
854  if (!isZero(page+cur*tuplesize, tuplesize))
855  return false;
856  }
857  }
858  else
859  {
860  for (GA_PageOff cur = start; cur < end; cur++)
861  {
862  if (!isEqual(page+cur*tuplesize, value, tuplesize))
863  return false;
864  }
865  }
866  return true;
867 }
868 
// Writes the range either as one flat uniform array (ARRAY_OF_ARRAYS ==
// false) or as one small uniform array per tuple (true), optionally
// remapping integer values through "map" with defvalue as the out-of-range
// fallback.
// NOTE(review): the line carrying the qualified function name (original
// line 872) and the declaration of "start" (original line 888, presumably
// GA_Offset start; paired with "end" below) were lost in extraction --
// confirm against the original header.
869 template<typename DATA_T,exint TSIZE,bool TABLEHARDENED,bool PAGESHARDENED>
870 template<bool ARRAY_OF_ARRAYS>
871 bool
873  UT_JSONWriter &w, const GA_Range &range, UT_JID jid_storage,
874  const UT_IntArray *map, int defvalue) const
875 {
876  UT_ASSERT_P(!(SYSisSame<DATA_T,void>()));
877 
878  int tuplesize = getTupleSize();
879 
// Flat mode wraps everything in a single uniform array up front.
880  if (!ARRAY_OF_ARRAYS)
881  {
882  if (!w.beginUniformArray(tuplesize*range.getEntries(), jid_storage))
883  return false;
884  }
885 
// Scratch tuple only needed when emitting per-tuple arrays.
886  UT_StackBuffer<NotVoidType> buffer(ARRAY_OF_ARRAYS ? tuplesize : 0);
888 
889  GA_Offset end;
890  for (GA_Iterator it(range); it.blockAdvance(start, end); )
891  {
892  if (map)
893  {
894  for (GA_Offset ai = start; ai < end; ++ai)
895  {
896  if (ARRAY_OF_ARRAYS)
897  {
898  for (int component = 0; component < tuplesize; ++component)
899  {
900  NotVoidType v = this->template get<NotVoidType>(ai, component);
// Remap through "map"; out-of-range values become defvalue.
901  v = NotVoidType((v < 0 || v >= map->size()) ? defvalue : (*map)(v));
902  buffer[component] = v;
903  }
904  if (!w.jsonUniformArray(tuplesize, buffer))
905  return false;
906  }
907  else
908  {
909  for (int component = 0; component < tuplesize; ++component)
910  {
911  NotVoidType v = this->template get<NotVoidType>(ai, component);
912  v = NotVoidType((v < 0 || v >= map->size()) ? defvalue : (*map)(v));
913  if (!w.uniformWrite(v))
914  return false;
915  }
916  }
917  }
918  }
919  else
920  {
921  // No map
922  for (GA_Offset ai = start; ai < end; ++ai)
923  {
924  if (ARRAY_OF_ARRAYS)
925  {
926  for (int component = 0; component < tuplesize; ++component)
927  buffer[component] = this->template get<NotVoidType>(ai, component);
928 
929  if (!w.jsonUniformArray(tuplesize, buffer))
930  return false;
931  }
932  else
933  {
934  for (int component = 0; component < tuplesize; ++component)
935  {
936  NotVoidType v = this->template get<NotVoidType>(ai, component);
937 
938  if (!w.uniformWrite(v))
939  return false;
940  }
941  }
942  }
943  }
944  }
945 
// Per-tuple mode closed each small array as it went; nothing to end here.
946  if (ARRAY_OF_ARRAYS)
947  return true;
948 
949  return w.endUniformArray();
950 }
951 
// Maps a GA_Storage enum value to the corresponding UT_JID (binary JSON
// type id); unsupported storages (invalid, dict) map to UT_JID_NULL.
// NOTE(review): the line carrying the qualified function name and its
// parameter (original line 954, presumably GAStorageToJID(GA_Storage
// storage), judging by the switch variable and the call site in jsonSave)
// was lost in extraction -- confirm against the original header.
952 template<typename DATA_T,exint TSIZE,bool TABLEHARDENED,bool PAGESHARDENED>
953 UT_JID
955 {
956  switch (storage)
957  {
958  case GA_STORE_BOOL:
959  return UT_JID_BOOL;
960  case GA_STORE_INVALID:
961  return UT_JID_NULL;
962  case GA_STORE_DICT:
963  return UT_JID_NULL;
964  case GA_STORE_STRING:
965  return UT_JID_STRING;
966  case GA_STORE_INT8:
967  return UT_JID_INT8;
968  case GA_STORE_UINT8:
969  return UT_JID_UINT8;
970  case GA_STORE_INT16:
971  return UT_JID_INT16;
972  case GA_STORE_INT32:
973  return UT_JID_INT32;
974  case GA_STORE_INT64:
975  return UT_JID_INT64;
976  case GA_STORE_REAL16:
977  return UT_JID_REAL16;
978  case GA_STORE_REAL32:
979  return UT_JID_REAL32;
980  case GA_STORE_REAL64:
981  return UT_JID_REAL64;
982  }
// All enumerators handled above; reaching here means a new GA_Storage
// value was added without updating this switch.
983  UT_ASSERT_MSG_P(0, "Unhandled GA_Storage value!");
984  return UT_JID_NULL;
985 }
986 
987 template<typename DATA_T,exint TSIZE,bool TABLEHARDENED,bool PAGESHARDENED>
988 bool
990  UT_JSONParser &p,
991  const GA_LoadMap &map,
992  GA_AttributeOwner owner)
993 {
994  if constexpr (SYSisSame<DATA_T,void>())
995  {
996  bool success = false;
997  // Hard case, where the storage type is not known at compile time.
998  UT_Storage storage = Base::getStorage();
999  switch (storage)
1000  {
1001  case UT_Storage::INT8:
1002  success = castType<int8>().jsonLoad(p, map, owner); break;
1003  case UT_Storage::UINT8:
1004  success = castType<uint8>().jsonLoad(p, map, owner); break;
1005  case UT_Storage::INT16:
1006  success = castType<int16>().jsonLoad(p, map, owner); break;
1007  case UT_Storage::INT32:
1008  success = castType<int32>().jsonLoad(p, map, owner); break;
1009  case UT_Storage::INT64:
1010  success = castType<int64>().jsonLoad(p, map, owner); break;
1011  case UT_Storage::REAL16:
1012  success = castType<fpreal16>().jsonLoad(p, map, owner); break;
1013  case UT_Storage::REAL32:
1014  success = castType<fpreal32>().jsonLoad(p, map, owner); break;
1015  case UT_Storage::REAL64:
1016  success = castType<fpreal64>().jsonLoad(p, map, owner); break;
1017  case UT_Storage::INVALID:
1018  UT_ASSERT_MSG(0, "Can't have a GA_PageArray with invalid storage!");
1019  success = false;
1020  break;
1021  }
1022 
1023  return success;
1024  }
1025 
1026  int64 tuple_size = getTupleSize();
1027 
1028  // Cast to optimize for small tuple sizes
1029  if constexpr (TSIZE == -1)
1030  {
1031  if (tuple_size <= 3 && tuple_size >= 1)
1032  {
1033  bool success;
1034  if (tuple_size == 3)
1035  success = this->castTupleSize<3>().jsonLoad(p, map, owner);
1036  else if (tuple_size == 1)
1037  success = this->castTupleSize<1>().jsonLoad(p, map, owner);
1038  else
1039  {
1040  UT_ASSERT_P(tuple_size == 2);
1041  success = this->castTupleSize<2>().jsonLoad(p, map, owner);
1042  }
1043  return success;
1044  }
1045  }
1046 
1047  UT_WorkBuffer key;
1048  int64 page_size = -1;
1049  GA_Storage ga_storage = GA_STORE_INVALID;
1050  UT_StackBuffer<int> packing(tuple_size);
1051  int n_packing_entries = 0;
1052  UT_StackBuffer<UT_UniquePtr<UT_BitArray> > constant_page_flags(tuple_size);
1053  bool constant_page_flags_init = false;
1054 
1055  bool ok = true;
1056  bool done = false;
1057  for (UT_JSONParser::traverser mi = p.beginArray(); ok && !mi.atEnd(); ++mi)
1058  {
1059  if (!mi.getLowerKey(key))
1060  {
1061  ok = false;
1062  break;
1063  }
1064  switch (GA_PageArrayIO::getJSONTokenID(key.buffer()))
1065  {
1067  {
1068  int64 local_tuple_size = -1;
1069  ok = p.parseInteger(local_tuple_size);
1070  if (ok && local_tuple_size != tuple_size)
1071  {
1072  p.addWarning("Inconsistent tuple size specification");
1073  ok = false;
1074  }
1075  break;
1076  }
1078  ok = p.parseString(key);
1079  if (ok)
1080  ga_storage = GAstorage(key.buffer());
1081  break;
1083  // Tuple size and storage type are supposed to have already been set.
1084  if (done || tuple_size != getTupleSize() || ga_storage != getStorage())
1085  {
1086  p.addWarning("Bad data type/size specification");
1087  ok = p.skipNextObject();
1088  }
1089  else
1090  {
1091  // Load as a struct of arrays
1093  for (exint component = 0; ok && !ai.atEnd(); ++component, ++ai)
1094  {
1095  if (component < tuple_size)
1096  {
1097  GA_Offset startoff = map.getLoadOffset(owner);
1098  LoadComponentArrayFunctor op(*this, startoff, component);
1099  if (GAisIntStorage(ga_storage))
1101  else
1103  }
1104  else
1105  {
1106  if (component == tuple_size)
1107  p.addWarning("Too many tuple items in data array");
1108  ok = p.skipNextObject();
1109  }
1110  }
1111  done = true;
1112  }
1113  break;
1115  if (done || tuple_size != getTupleSize() || ga_storage != getStorage())
1116  {
1117  p.addWarning("Bad data type/size specification");
1118  ok = p.skipNextObject();
1119  }
1120  else
1121  {
1122  // Load as an array of structs
1124  GA_Offset offset = map.getLoadOffset(owner);
1125  GA_Size ppage = GAgetPageNum(offset);
1126  for ( ; ok && !ai.atEnd(); ++offset, ++ai)
1127  {
1128  GA_PageNum newpagenum = GAgetPageNum(offset);
1129  if (newpagenum != ppage)
1130  {
1131  // We compress previous page
1132  tryCompressPage(ppage);
1133  ppage = newpagenum;
1134  }
1135 
1136  UT_StackBuffer<NotVoidType> buffer(tuple_size);
1137  exint nread = p.parseUniformArray<NotVoidType>(buffer, tuple_size);
1138  if (nread < tuple_size)
1139  {
1140  ok = false;
1141  break;
1142  }
1143  if (nread > tuple_size)
1144  p.addWarning("Extra data found in array tuple");
1145 
1146  if (TSIZE >= 1)
1147  {
1148  setVector(offset, *(const UT_FixedVector<NotVoidType,theSafeTupleSize>*)buffer.array());
1149  }
1150  else
1151  {
1152  for (int component = 0; component < tuple_size; ++component)
1153  set(offset, component, buffer[component]);
1154  }
1155  }
1156  tryCompressPage(ppage);
1157  done = true;
1158  }
1159  break;
1161  ok = p.parseInteger(page_size);
1162  break;
1164  if (tuple_size != getTupleSize())
1165  {
1166  p.addWarning("Packing requires valid size specification");
1167  ok = p.skipNextObject();
1168  }
1169  else
1170  {
1171  // NB: p.parseUniformArray() might return a greater value
1172  // than expected, but it won't write the extra values
1173  // to packing.array().
1174  n_packing_entries = p.parseUniformArray(packing.array(), tuple_size);
1175 
1176  if (constant_page_flags_init && n_packing_entries != (tuple_size > 0 ? 1 : 0))
1177  {
1178  p.addWarning("Non-trivial packing specification must come before constant page flags");
1179  n_packing_entries = 0;
1180  ok = false;
1181  }
1182  else if (n_packing_entries >= 0)
1183  {
1184  int total_packed_size = 0;
1185  for (int i = 0; i < n_packing_entries; ++i)
1186  {
1187  total_packed_size += packing[i];
1188  }
1189  if (total_packed_size != tuple_size ||
1190  n_packing_entries > tuple_size)
1191  {
1192  p.addWarning("Invalid packing specification");
1193  n_packing_entries = -1;
1194  ok = false;
1195  }
1196  }
1197  }
1198  break;
1200  if (tuple_size != getTupleSize() ||
1201  page_size <= 0 || n_packing_entries < 0)
1202  {
1203  p.addWarning("Bad data type/size specification");
1204  ok = p.skipNextObject();
1205  }
1206  else
1207  {
1208  int i = 0;
1209  UT_BitArray scratch_array;
1210 
1211  int n_arrays = n_packing_entries ? n_packing_entries
1212  : (tuple_size > 0 ? 1 : 0);
1213  int64 n_input_pages = (map.getLoadCount(owner)+page_size-1) / page_size;
1214 
1215  for (UT_JSONParser::traverser it = p.beginArray(); !it.atEnd(); ++it, ++i)
1216  {
1217  if (i < n_arrays)
1218  {
1219  int64 n_loaded = p.parseUniformBoolArray(scratch_array, n_input_pages);
1220 
1221  // We allow an empty array when no pages are constant.
1222  if (n_loaded == 0)
1223  {
1224  constant_page_flags[i].reset(nullptr);
1225  }
1226  else
1227  {
1228  constant_page_flags[i].reset(new UT_BitArray());
1229  scratch_array.swap(*constant_page_flags[i]);
1230  }
1231  }
1232  else
1233  {
1234  p.skipNextObject();
1235  UT_ASSERT(0);
1236  }
1237  }
1238  ok = (i == n_arrays);
1239  constant_page_flags_init = true;
1240  }
1241  break;
1243  // Load as an array of structs with tuples whose pages may be compressed
1244  if (done || tuple_size != getTupleSize() || ga_storage != getStorage() ||
1245  page_size <= 0 || n_packing_entries < 0)
1246  {
1247  p.addWarning("Bad data type/size specification");
1248  ok = p.skipNextObject();
1249  }
1250  else
1251  {
1252  // We default to a full vector when a GA_JDTUPLE_PACKING
1253  // field is missing.
1254  if (n_packing_entries == 0 && tuple_size > 0)
1255  {
1256  packing[0] = tuple_size;
1257  n_packing_entries = 1;
1258  }
1259  done = true;
1260  ok = jsonLoadRawPageData(p, map, owner,
1261  GA_Size(page_size),
1262  packing.array(), n_packing_entries,
1263  constant_page_flags.array());
1264  }
1265  break;
1266  default:
1267  p.addWarning("Data Array Tuple unknown key '%s'", key.buffer());
1268  break;
1269  }
1270  }
1271  if (!done)
1272  p.addWarning("Missing data for data array");
1273  return ok;
1274 }
1275 
// Helper functor constructed in jsonLoad()'s struct-of-arrays branch (see
// original lines 1097-1098: LoadComponentArrayFunctor op(*this, startoff,
// component)). It receives parsed values for ONE tuple component and writes
// them into the destination page array starting at myStartOffset, with all
// writes bounds-checked against myDestSize.
// NOTE(review): this listing came from a docs extractor that dropped
// hyperlinked lines — the constructor's signature (original line 1282) and
// the member declarations (original lines 1325-1328) are missing here; the
// remaining code is unmodified.
1276 template<typename DATA_T,exint TSIZE,bool TABLEHARDENED,bool PAGESHARDENED>
1277 class GA_PageArray<DATA_T, TSIZE, TABLEHARDENED, PAGESHARDENED>::LoadComponentArrayFunctor
1278 {
1279 public:
1281 
1283  : myDest(dest)
1284  , myStartOffset(startoff)
1285  , myComponent(component)
         // Cache the writable element count from startoff so set()/setArray()
         // can bounds-check without re-querying dest.size() per value.
1286  , myDestSize(dest.size()-startoff)
1287  {}
1288 
         // Write a single value at input index i into component myComponent.
         // Returns false (and writes nothing) if i would land past the end
         // of the destination.
1289  template <typename T> SYS_FORCE_INLINE bool
1290  set(int64 i, T val) const
1291  {
1292  if (GA_Offset(i) >= myDestSize)
1293  return false;
1294  myDest.set(myStartOffset+(GA_Size)i, myComponent, val);
1295  return true;
1296  }
1297 
         // Write a contiguous run of `size` values starting at myStartOffset,
         // clamping to the destination size. Returns false when the input had
         // to be truncated (i.e. it did not all fit in the destination).
1298  template <typename T> SYS_FORCE_INLINE bool
1299  setArray(const T *data, int64 size) const
1300  {
1301  bool outofbounds = false;
1302  if (GA_Offset(size) > myDestSize)
1303  {
1304  size = int64(myDestSize);
1305  outofbounds = true;
1306  }
1307 
1308  // Fast path for single component
1309  if (TSIZE == 1)
1310  {
1311  myDest.setRange(myStartOffset, GA_Offset(size), data);
1312  return !outofbounds;
1313  }
1314 
1315  GA_Offset end = myStartOffset + GA_Size(size);
1316 
         // Multi-component case: scatter one value per element into the
         // single component this functor is responsible for.
1317  for (GA_Offset off = myStartOffset; off < end; ++off, ++data)
1318  {
1319  myDest.set(off, myComponent, *data);
1320  }
1321 
1322  return !outofbounds;
1323  }
1324 
1329 };
1330 
// Load this array's raw page data from a uniform JSON array written by the
// matching save path.
//   p                   - JSON parser positioned at the raw-data array
//   map/owner           - where in the destination index map to load
//   page_size           - page size the WRITER used (may differ from
//                         thePageSize)
//   packing             - n_packing_entries sub-tuple sizes; jsonLoad()
//                         validated that they sum to getTupleSize()
//   constant_page_flags - per-packing-entry bit arrays marking which input
//                         pages were saved as a single constant tuple
// Returns false on any read/parse failure, otherwise it.atEnd() (i.e. true
// only if the input array was fully consumed).
// NOTE(review): this listing came from a docs extractor that dropped
// hyperlinked lines — the function-name line (original line 1333) and the
// declaration of the array traverser `it` (original lines 1343-1344) are
// missing here; the code below is otherwise unmodified.
1331 template<typename DATA_T,exint TSIZE,bool TABLEHARDENED,bool PAGESHARDENED>
1332 bool
1334  UT_JSONParser &p,
1335  const GA_LoadMap &map,
1336  GA_AttributeOwner owner,
1337  GA_Size page_size,
1338  const int *packing,
1339  int n_packing_entries,
1340  const UT_UniquePtr<UT_BitArray> *const constant_page_flags)
1341 {
     // DATA_T must be concrete here; the void-typed wrapper dispatches
     // elsewhere before reaching this point.
1342  UT_ASSERT_P(!(SYSisSame<DATA_T,void>()));
1344 
1345  if (it.getErrorState())
1346  return false;
1347 
     // When the stream's uniform-array element type matches our storage we
     // can read values straight into memory (readUniformArray); otherwise
     // fall back to parseArrayValues, which converts element-by-element.
1348  UT_JID jid = p.getUniformArrayType();
1349  bool istypematch = (jid == GAStorageToJID(getStorage()));
1350 
1351  GA_Size num_input_elements = map.getLoadCount(owner);
1352  GA_Offset load_offset = map.getLoadOffset(owner);
1353  const GA_PageNum start_page_num = GAgetPageNum(load_offset);
1354  GA_PageOff page_offset = GAgetPageOff(load_offset);
1355  const exint tuple_size = getTupleSize();
1356  const exint num_page_values = tuple_size*thePageSize;
1357 
     // Scratch tuple used whenever a constant page is read (one tuple
     // represents the whole page on disk).
1358  UT_StackBuffer<NotVoidType> single_tuple(tuple_size);
1359 
     // Fast path: a single packing entry spanning the whole tuple AND the
     // writer's page size equals ours, so input pages line up with our pages.
1360  if (n_packing_entries == 1 && page_size == thePageSize)
1361  {
1362  UT_ASSERT(packing[0] == tuple_size);
1363 
1364  const UT_BitArray *constpagebits = constant_page_flags[0].get();
1365 
1366  if (page_offset == GA_PageOff(0))
1367  {
1368  // Loading at the beginning of a page, making things much simpler
1369  GA_Size num_full_new_pages = (num_input_elements >> GA_PAGE_BITS);
1370  GA_PageOff end_page_offset = GAgetPageOff(GA_Offset(num_input_elements));
1371 
1372  // First, fill in all complete, full-size pages
1373  GA_PageNum pagenum = start_page_num;
1374  for (GA_Size input_pagei = 0; input_pagei < num_full_new_pages; ++input_pagei, ++pagenum)
1375  {
     // Constant input page: read one tuple and mark the whole
     // destination page constant — no per-element writes needed.
1376  if (constpagebits && constpagebits->getBitFast(input_pagei))
1377  {
1378  if (istypematch)
1379  {
1380  if (!it.readUniformArray(single_tuple.array(), tuple_size))
1381  return false;
1382  }
1383  else
1384  {
1385  if (p.parseArrayValues(it, single_tuple.array(), tuple_size) != tuple_size)
1386  return false;
1387  }
1388  setPageConstant(pagenum, single_tuple.array());
1389  }
1390  else
1391  {
     // Varying page: harden without initializing (we overwrite every
     // value) and read the full page worth of data directly into it.
1392  NotVoidType *data = hardenPageNoInit(pagenum);
1393  if (istypematch)
1394  {
1395  if (!it.readUniformArray(data, num_page_values))
1396  return false;
1397  }
1398  else
1399  {
1400  if (p.parseArrayValues(it, data, num_page_values) != num_page_values)
1401  return false;
1402  }
1403  }
1404  }
1405 
1406  // Handle any final incomplete or not-full-size page
1407  if (end_page_offset != GA_PageOff(0))
1408  {
1409  if (constpagebits && constpagebits->getBitFast(num_full_new_pages))
1410  {
1411  if (istypematch)
1412  {
1413  if (!it.readUniformArray(single_tuple.array(), tuple_size))
1414  return false;
1415  }
1416  else
1417  {
1418  if (p.parseArrayValues(it, single_tuple.array(), tuple_size) != tuple_size)
1419  return false;
1420  }
     // If the load reaches the very end of the array, the trailing
     // partial page can simply be marked constant.
1421  if (load_offset+num_input_elements == size())
1422  setPageConstant(pagenum, single_tuple.array());
1423  else
1424  {
1425  // I don't know if this path will ever be taken; I'm guessing not.
1426 
     // Existing data lives past the loaded range in this page, so we
     // can only keep it constant if the new tuple matches the page's
     // current constant value (nullptr page data == implicit zero).
1427  bool equal = false;
1428  if (isPageConstant(pagenum))
1429  {
1430  const NotVoidType *current_tuple = getPageData(pagenum);
1431  if (current_tuple)
1432  {
1433  if (isEqual(single_tuple.array(),current_tuple,tuple_size))
1434  equal = true;
1435  }
1436  else
1437  {
1438  if (isZero(single_tuple.array(),tuple_size))
1439  equal = true;
1440  }
1441  }
1442  if (!equal)
1443  {
     // Otherwise harden and replicate the tuple over just the
     // loaded prefix of the page.
1444  NotVoidType *data = hardenPage(pagenum);
1445  for (GA_PageOff pageoff(0); pageoff < end_page_offset; ++pageoff)
1446  {
1447  for (exint component = 0; component < tuple_size; ++component, ++data)
1448  {
1449  *data = single_tuple[component];
1450  }
1451  }
1452  }
1453  }
1454  }
1455  else
1456  {
1457  // This could be optimized to avoid a bit of redundant initialization,
1458  // but hopefully it's not too much of an issue.
1459  NotVoidType *data = hardenPage(pagenum);
1460  const exint num_left_values = tuple_size*end_page_offset;
1461  if (istypematch)
1462  {
1463  if (!it.readUniformArray(data, num_left_values))
1464  return false;
1465  }
1466  else
1467  {
1468  if (p.parseArrayValues(it, data, num_left_values) != num_left_values)
1469  return false;
1470  }
1471  }
1472  }
1473  }
1474  else
1475  {
1476  // Loading with matching packing (only 1 tuple) and matching page size,
1477  // but not loading at a page boundary.
1478 
1479  // TODO: Optimize this case for that we know that pages are the same size,
1480  // e.g. to try to preserve constant pages or load directly into destination.
1481 
     // Read each input page into a scratch buffer, then scatter into the
     // destination with set(); constant pages replicate one tuple.
1482  UT_StackBuffer<NotVoidType> buffer(thePageSize*tuple_size);
1483  const exint num_input_pages = (num_input_elements + thePageSize-1) / thePageSize;
1484  for (GA_Size input_pagei = 0; input_pagei < num_input_pages; ++input_pagei)
1485  {
1486  exint inputi = thePageSize*input_pagei;
1487  // NB: Base::thePageSize is needed (as opposed to thePageSize) to avoid
1488  // MSVC 19.14.26428.1 from crashing with /permissive-
1489  const exint num_page_elements = SYSmin(Base::thePageSize, num_input_elements-inputi);
1490  const exint num_page_values = tuple_size*num_page_elements;
1491 
1492  const bool constant_page = constpagebits && constpagebits->getBitFast(input_pagei);
1493  if (constant_page)
1494  {
1495  if (istypematch)
1496  {
1497  if (!it.readUniformArray(buffer.array(), tuple_size))
1498  return false;
1499  }
1500  else
1501  {
1502  if (p.parseArrayValues(it, buffer.array(), tuple_size) != tuple_size)
1503  return false;
1504  }
1505 
1506  for (exint element = 0; element < num_page_elements; ++element, ++inputi)
1507  {
1508  for (exint component = 0; component < tuple_size; ++component)
1509  {
1510  set(load_offset+inputi, component, buffer[component]);
1511  }
1512  }
1513  }
1514  else
1515  {
1516  if (istypematch)
1517  {
1518  if (!it.readUniformArray(buffer.array(), num_page_values))
1519  return false;
1520  }
1521  else
1522  {
1523  if (p.parseArrayValues(it, buffer.array(), num_page_values) != num_page_values)
1524  return false;
1525  }
1526 
1527  exint i = 0;
1528  for (exint element = 0; element < num_page_elements; ++element, ++inputi)
1529  {
1530  for (exint component = 0; component < tuple_size; ++component, ++i)
1531  {
1532  set(load_offset+inputi, component, buffer[i]);
1533  }
1534  }
1535  }
1536  }
1537  }
1538  }
1539  else
1540  {
     // General path: arbitrary writer page size and/or multiple packing
     // sub-tuples. For each input page, the sub-tuples are stored one after
     // another; start_component tracks where each sub-tuple lands within
     // the destination tuple.
1541  UT_StackBuffer<NotVoidType> buffer(page_size*tuple_size);
1542  const exint num_input_pages = (num_input_elements + page_size-1) / page_size;
1543  for (GA_Size input_pagei = 0; input_pagei < num_input_pages; ++input_pagei)
1544  {
1545  exint start_component = 0;
1546  for (exint packingi = 0; packingi < n_packing_entries; ++packingi)
1547  {
1548  exint inputi = page_size*input_pagei;
1549  const exint num_page_elements = SYSmin(page_size, num_input_elements-inputi);
1550  const exint input_tuple_size = packing[packingi];
1551  const exint num_page_values = input_tuple_size*num_page_elements;
1552 
     // Each packing entry carries its own constant-page flag array.
1553  const UT_BitArray *constpagebits = constant_page_flags[packingi].get();
1554 
1555  const bool constant_page = constpagebits && constpagebits->getBitFast(input_pagei);
1556  if (constant_page)
1557  {
1558  if (istypematch)
1559  {
1560  if (!it.readUniformArray(buffer.array(), input_tuple_size))
1561  return false;
1562  }
1563  else
1564  {
1565  if (p.parseArrayValues(it, buffer.array(), input_tuple_size) != input_tuple_size)
1566  return false;
1567  }
1568 
1569  for (exint element = 0; element < num_page_elements; ++element, ++inputi)
1570  {
1571  for (exint component = 0; component < input_tuple_size; ++component)
1572  {
1573  set(load_offset+inputi, start_component+component, buffer[component]);
1574  }
1575  }
1576  }
1577  else
1578  {
1579  if (istypematch)
1580  {
1581  if (!it.readUniformArray(buffer.array(), num_page_values))
1582  return false;
1583  }
1584  else
1585  {
1586  if (p.parseArrayValues(it, buffer.array(), num_page_values) != num_page_values)
1587  return false;
1588  }
1589 
1590  exint i = 0;
1591  for (exint element = 0; element < num_page_elements; ++element, ++inputi)
1592  {
1593  for (exint component = 0; component < input_tuple_size; ++component, ++i)
1594  {
1595  set(load_offset+inputi, start_component+component, buffer[i]);
1596  }
1597  }
1598  }
1599 
1600  start_component += input_tuple_size;
1601  }
1602  }
1603  }
1604 
     // Success only if we consumed the entire input array.
1605  return it.atEnd();
1606 }
1607 
1608 #endif
bool uniformWrite(bool value)
The following byte represents an 8 bit integer.
bool beginUniformArray(int64 length, UT_JID id)
const_iterator begin() const
bool parseString(UT_WorkBuffer &v)
UT_ASSERT_COMPILETIME(BRAY_EVENT_MAXFLAGS<=32)
No data follows the NULL token.
int int32
Definition: SYS_Types.h:39
GLenum GLint * range
Definition: glcorearb.h:1925
UT_Storage
Definition: UT_Storage.h:28
The following 4 bytes represent an 32 bit real (float)
SYS_FORCE_INLINE bool setArray(const T *data, int64 size) const
GA_API JDTupleToken getJSONTokenID(const char *token)
GA_Size GA_PageOff
Definition: GA_Types.h:650
bool getBitFast(exint index) const
Definition: UT_BitArray.h:333
Iteration over a range of elements.
Definition: GA_Iterator.h:29
bool jsonKeyToken(const UT_StringRef &value)
UT_JID
The UT_JID enums are used in byte-stream encoding of binary JSON.
bool getBinary() const
Return whether writing binary or ASCII JSON.
Class which stores the default values for a GA_Attribute.
Definition: GA_Defaults.h:35
getFileOption("OpenEXR:storage") storage
Definition: HDK_Image.dox:276
bool jsonSave(UT_JSONWriter &w, const GA_Range &range, const GA_SaveOptions *options=nullptr, const UT_IntArray *map=nullptr, int defvalue=-1) const
GLboolean * data
Definition: glcorearb.h:131
const GLdouble * v
Definition: glcorearb.h:837
int64 parseUniformBoolArray(UT_BitArray &data, int64 len)
void setAllBits(bool value)
bool blockAdvance(GA_Offset &start, GA_Offset &end)
GLuint start
Definition: glcorearb.h:475
0x23 and 0x24 are reserved for future use (32/64 bit unsigned)
int64 parseArrayValues(iterator &it, T *data, int64 len)
And then you can find out if it's done
Definition: thread.h:622
GA_API const char * getJSONToken(JDTupleToken tokenID)
The merge map keeps track of information when merging details.
Definition: GA_MergeMap.h:53
int64 exint
Definition: SYS_Types.h:125
SYS_FORCE_INLINE const char * buffer() const
bool parseInteger(int64 &v)
bool jsonStringToken(const UT_StringRef &value)
GLboolean GLboolean GLboolean GLboolean a
Definition: glcorearb.h:1222
GLuint GLsizei GLsizei * length
Definition: glcorearb.h:795
iterator beginArray()
IMATH_HOSTDEVICE constexpr bool equal(T1 a, T2 b, T3 t) IMATH_NOEXCEPT
Definition: ImathFun.h:105
JSON reader class which handles parsing of JSON or bJSON files.
Definition: UT_JSONParser.h:87
The following byte represents an unsigned 8 bit integer.
void defragment(const GA_Defragment &defrag)
Include GA_PageArrayImpl.h to call this.
#define GA_API
Definition: GA_API.h:14
Class which writes ASCII or binary JSON streams.
Definition: UT_JSONWriter.h:37
#define UT_ASSERT_MSG_P(ZZ,...)
Definition: UT_Assert.h:158
#define UT_IF_ASSERT(ZZ)
Definition: UT_Assert.h:174
bool jsonLoad(UT_JSONParser &p, const GA_LoadMap &map, GA_AttributeOwner owner)
GA_Offset getDestEnd(GA_AttributeOwner owner) const
Definition: GA_MergeMap.h:130
SYS_FORCE_INLINE bool GAisValid(GA_Size v)
Definition: GA_Types.h:655
GLuint buffer
Definition: glcorearb.h:660
exint size() const
Definition: UT_Array.h:646
GA_Size getLoadCount(GA_AttributeOwner owner) const
This method returns the number of elements being loaded of each type.
exint GA_Size
Defines the bit width for index and offset types in GA.
Definition: GA_Types.h:236
SYS_FORCE_INLINE GA_PageOff GAgetPageOff(GA_Offset v)
Definition: GA_Types.h:669
exint numBitsSet() const
#define GA_INVALID_OFFSET
Definition: GA_Types.h:687
A range of elements in an index-map.
Definition: GA_Range.h:42
std::unique_ptr< T, Deleter > UT_UniquePtr
A smart pointer for unique ownership of dynamically allocated objects.
Definition: UT_UniquePtr.h:39
double fpreal64
Definition: SYS_Types.h:201
#define UT_ASSERT_MSG(ZZ,...)
Definition: UT_Assert.h:159
unsigned char uint8
Definition: SYS_Types.h:36
GA_Size GA_Offset
Definition: GA_Types.h:646
The following 8 bytes represent an 64 bit real (float)
The following 8 bytes represent an 64 bit integer.
GA_API const char * GAstorage(GA_Storage store)
Lookup the storage name from the storage type.
GLdouble n
Definition: glcorearb.h:2008
GLintptr offset
Definition: glcorearb.h:665
Definition: core.h:760
bool uniformBlockWrite(const int8 *value, int64 count)
Write a block of 8 bit integer values to the uniform array.
The following 2 bytes represent an 16 bit integer.
GA_PageArray< DATA_T, TSIZE, TABLEHARDENED, PAGESHARDENED > PageArray
#define UT_ASSERT_P(ZZ)
Definition: UT_Assert.h:155
GLuint GLuint end
Definition: glcorearb.h:475
SYS_FORCE_INLINE GA_PageNum GAgetPageNum(GA_Offset v)
Definition: GA_Types.h:664
#define SYS_FORCE_INLINE
Definition: SYS_Inline.h:45
Traverse an array object in the parser.
bool skipNextObject()
Simple convenience method to skip the next object in the stream.
exint size() const
Definition: UT_BitArray.h:46
long long int64
Definition: SYS_Types.h:116
Options during loading.
Definition: GA_LoadMap.h:42
bool getErrorState() const
void void addWarning(const char *fmt,...) SYS_PRINTF_CHECK_ATTRIBUTE(2
Defragmentation of IndexMaps.
Definition: GA_Defragment.h:45
bool jsonEndArray(bool newline=true)
The following 4 bytes represent an 32 bit integer.
GLboolean GLboolean GLboolean b
Definition: glcorearb.h:1222
void mergeGrowArrayAndCopy(const GA_MergeMap &map, GA_AttributeOwner owner, const GA_PageArray< SRC_DATA_T, SRC_TSIZE, SRC_TABLEHARDENED, SRC_PAGESHARDENED > &src, const GA_Defaults &defaults)
Include GA_PageArrayImpl.h to call this.
#define GA_PAGE_SIZE
Definition: GA_Types.h:225
exint append()
Definition: UT_Array.h:142
bool setBit(exint index, bool value)
Definition: UT_BitArray.h:288
ga_SubPageBlock(GA_PageNum page, GA_PageOff start, GA_PageOff end)
exint entries() const
Alias of size(). size() is preferred.
Definition: UT_Array.h:648
int64 parseUniformArray(T *data, int64 len)
GLsizeiptr size
Definition: glcorearb.h:664
GA_AttributeOwner
Definition: GA_Types.h:35
GA_Offset getLoadOffset(GA_AttributeOwner owner) const
Definition: GA_LoadMap.h:154
GA_Size getEntries() const
Get an accurate count of the entries in the range.
Definition: GA_Range.h:252
GA_Size GA_PageNum
Definition: GA_Types.h:649
#define GA_PAGE_BITS
Attributes may partition their data in pages of GA_PAGE_SIZE offsets.
Definition: GA_Types.h:224
bool loadPODArray(OP_TYPE &op)
GLuint GLfloat * val
Definition: glcorearb.h:1608
bool readUniformArray(T *buffer, int64 size)
bool jsonBeginArray()
Begin a generic array object.
LoadComponentArrayFunctor(PageArray &dest, GA_Offset startoff, exint component)
GLubyte GLubyte GLubyte GLubyte w
Definition: glcorearb.h:857
#define UT_ASSERT(ZZ)
Definition: UT_Assert.h:156
Definition: core.h:1131
bool jsonUniformArray(int64 length, const int8 *value)
Efficent method of writing a uniform array of int8 values.
bool endUniformArray(int64 *nwritten=0)
SYS_FORCE_INLINE bool set(int64 i, T val) const
GA_Offset getDestCapacity(GA_AttributeOwner owner) const
Convenience method to get new destination size.
Definition: GA_MergeMap.h:121
#define SYSmin(a, b)
Definition: SYS_Math.h:1571
GA_Storage
Definition: GA_Types.h:51
GA_Offset getDestStart(GA_AttributeOwner owner) const
Definition: GA_MergeMap.h:128
GLint GLsizei count
Definition: glcorearb.h:405
bool isZero(const Type &x)
Return true if x is exactly equal to zero.
Definition: Math.h:337
Definition: format.h:895
bool jsonInt(int32 value)
Write an integer value.
void swap(UT_BitArray &other)
GLenum src
Definition: glcorearb.h:1793
GA_Offset getDestInitCapacity(GA_AttributeOwner owner) const
Convenience method to get old destination size.
Definition: GA_MergeMap.h:117