HDK
 All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Macros Groups Pages
UT_PageArrayImpl.h
Go to the documentation of this file.
1 /*
2  * PROPRIETARY INFORMATION. This software is proprietary to
3  * Side Effects Software Inc., and is not to be reproduced,
4  * transmitted, or disclosed in any way without written permission.
5  *
6  * NAME: UT_PageArrayImpl.h (UT Library, C++)
7  *
8  * COMMENTS: Implementations of functions of UT_PageArray that
9  * aren't needed in most places that use it.
10  */
11 
12 #pragma once
13 
14 #ifndef __UT_PageArrayImpl__
15 #define __UT_PageArrayImpl__
16 
17 #include "UT_PageArray.h"
18 
19 #include "UT_Defaults.h"
20 #include "UT_MemoryCounter.h"
21 #include "UT_StackBuffer.h"
22 #include "UT_Storage.h"
23 #include "UT_Swap.h"
24 
25 #include <SYS/SYS_Types.h>
26 
27 
// Resizes the array to newsize elements, hardening the page table first so
// it is safe to write. Elements in [oldsize, newsize) are filled with
// initval -- either via the compile-time tuple size (TSIZE >= 1) or the
// runtime tuple size (TSIZE == -1).
// NOTE(review): the qualified signature line (Doxygen line 30) is missing
// from this extract; presumably
// UT_PageArray<...>::setSize(IDX_T newsize, NotVoidType initval) -- confirm.
28 template<typename DATA_T,exint TSIZE,bool TABLEHARDENED,bool PAGESHARDENED,exint THEPAGEBITS,typename IDX_T>
29 void
31 {
32  UT_ASSERT_P(newsize >= IDX_T(0));
34 
// Ensure backing capacity exists before touching the (hardened) table.
35  setCapacityIfNeeded(newsize);
36  hardenTable();
37  PageTable *pages = myImpl.getPages();
38  UT_ASSERT_P(pages || newsize == IDX_T(0));
39  if (pages)
40  {
41  IDX_T oldsize = pages->size();
42 
// When pages are pre-hardened and the tuple size is only known at runtime
// (TSIZE < 0), setSize needs the tuple size to harden new pages.
43  if (!PAGESHARDENED || TSIZE >= 0)
44  pages->setSize(newsize);
45  else
46  pages->setSize(newsize, myImpl.getTupleSize());
47 
// Only newly-exposed elements are initialized; shrinking needs no work.
48  if (newsize > oldsize)
49  {
50  if (TSIZE >= 1)
51  pages->fill(oldsize, newsize, initval);
52  else if (TSIZE == -1 && myImpl.getTupleSize() > 0)
53  pages->fill(oldsize, newsize, initval, myImpl.getTupleSize());
54  }
55  }
56 }
57 
// Resize variant for the fast case: storage must be valid and the tuple
// size fixed at compile time (TSIZE >= 1), so the fill needs no runtime
// tuple-size argument.
// NOTE(review): the qualified signature line (Doxygen line 60) is missing
// from this extract; name/parameters inferred from the body -- confirm.
58 template<typename DATA_T,exint TSIZE,bool TABLEHARDENED,bool PAGESHARDENED,exint THEPAGEBITS,typename IDX_T>
59 void
61 {
62  UT_ASSERT_P(newsize >= IDX_T(0));
64  UT_ASSERT_P(getStorage() != UT_Storage::INVALID);
65  UT_ASSERT_P(TSIZE >= 1);
66 
67  setCapacityIfNeeded(newsize);
68  hardenTable();
69  PageTable *pages = myImpl.getPages();
70  UT_ASSERT_P(pages || newsize == IDX_T(0));
71  if (pages)
72  {
73  IDX_T oldsize = pages->size();
74 
75  // No need to destruct if smaller, since it's a POD type.
76 
77  pages->setSize(newsize);
78 
// Initialize only the newly-exposed tail of the array.
79  if (newsize > oldsize)
80  pages->fill(oldsize, newsize, initval);
81  }
82 }
83 
// Resize variant that initializes newly-exposed elements via setConstant,
// which handles runtime storage types and multi-component defaults.
// NOTE(review): the qualified signature line (Doxygen line 86) is missing
// from this extract; presumably
// UT_PageArray<...>::setSize(IDX_T newsize, const UT_Defaults &initval),
// given the delegation to setConstant -- confirm.
84 template<typename DATA_T,exint TSIZE,bool TABLEHARDENED,bool PAGESHARDENED,exint THEPAGEBITS,typename IDX_T>
85 void
87 {
88  UT_ASSERT_P(newsize >= IDX_T(0));
89 
90  setCapacityIfNeeded(newsize);
91  hardenTable();
92  PageTable *pages = myImpl.getPages();
93  UT_ASSERT_P(pages || newsize == IDX_T(0));
94  if (pages)
95  {
96  IDX_T oldsize = pages->size();
97 
98  // No need to destruct if smaller, since it's a POD type.
99 
// Runtime tuple size with hardened pages needs the tuple size passed in.
100  if (!PAGESHARDENED || TSIZE >= 0)
101  pages->setSize(newsize);
102  else
103  pages->setSize(newsize, myImpl.getTupleSize());
104 
// setConstant handles storage dispatch and per-component defaults.
105  if (newsize > oldsize)
106  setConstant(oldsize, newsize, initval);
107  }
108 }
109 
// Fills [start, end) with the single value v, replicated across the tuple.
// No-op for empty ranges or when the page table doesn't exist yet.
// NOTE(review): the qualified signature line (Doxygen line 112) and one
// further line (117) are missing from this extract; presumably
// UT_PageArray<...>::setConstant(IDX_T start, IDX_T end, NotVoidType v)
// -- confirm against the original header.
110 template<typename DATA_T,exint TSIZE,bool TABLEHARDENED,bool PAGESHARDENED,exint THEPAGEBITS,typename IDX_T>
111 void
113 {
114  UT_ASSERT_P(end >= start);
115  UT_ASSERT_P(start >= IDX_T(0));
116  UT_ASSERT_P(end <= capacity());
118 
119  if (end <= start)
120  return;
121 
122  hardenTable();
123  PageTable *pages = myImpl.getPages();
124  if (!pages)
125  return;
126 
// Compile-time tuple size takes the fast path; runtime tuple size must be
// positive for the fill to have anything to write.
127  if (TSIZE >= 1)
128  pages->fill(start, end, v);
129  else if (TSIZE == -1 && myImpl.getTupleSize() > 0)
130  pages->fill(start, end, v, myImpl.getTupleSize());
131 }
132 
// setConstant variant restricted to a compile-time tuple size (TSIZE >= 1),
// so the fill needs no runtime tuple-size argument.
// NOTE(review): the qualified signature line (Doxygen line 135) and one
// further line (140, likely a storage assertion) are missing from this
// extract; name/parameters inferred from the body -- confirm.
133 template<typename DATA_T,exint TSIZE,bool TABLEHARDENED,bool PAGESHARDENED,exint THEPAGEBITS,typename IDX_T>
134 void
136 {
137  UT_ASSERT_P(end >= start);
138  UT_ASSERT_P(start >= IDX_T(0));
139  UT_ASSERT_P(end <= capacity());
141  UT_ASSERT_P(TSIZE >= 1);
142 
143  if (end <= start)
144  return;
145 
146  hardenTable();
147  PageTable *pages = myImpl.getPages();
148  if (!pages)
149  return;
150  pages->fill(start, end, v);
151 }
152 
// Fills [start, end) from a UT_Defaults value. When DATA_T is void (storage
// only known at runtime), the switch casts to the concrete type and
// recurses; the concrete instantiation then does the actual fill, with a
// fast path for scalar defaults and a stack buffer for full tuples.
// NOTE(review): the qualified signature line (Doxygen line 155) is missing
// from this extract; presumably
// UT_PageArray<...>::setConstant(IDX_T start, IDX_T end, const UT_Defaults &v)
// -- confirm against the original header.
153 template<typename DATA_T,exint TSIZE,bool TABLEHARDENED,bool PAGESHARDENED,exint THEPAGEBITS,typename IDX_T>
154 void
156 {
157  UT_ASSERT_P(end >= start);
158  UT_ASSERT_P(start >= IDX_T(0));
159  UT_ASSERT_P(end <= capacity());
160 
// Harden once up front; the returned reference carries the hardened state
// in its template parameters so recursive calls don't re-harden.
161  auto &hard = hardenTable();
162 
163  UT_Storage storage = getStorage();
164 
165  // If the storage type is not known at compile time,
166  // switch, cast, and call again.
167  if (SYSisSame<DATA_T,void>())
168  {
169  switch (storage)
170  {
171  case UT_Storage::INT8:
172  hard.template castType<int8>().setConstant(start, end, v); return;
173  case UT_Storage::UINT8:
174  hard.template castType<uint8>().setConstant(start, end, v); return;
175  case UT_Storage::INT16:
176  hard.template castType<int16>().setConstant(start, end, v); return;
177  case UT_Storage::INT32:
178  hard.template castType<int32>().setConstant(start, end, v); return;
179  case UT_Storage::INT64:
180  hard.template castType<int64>().setConstant(start, end, v); return;
181  case UT_Storage::REAL16:
182  hard.template castType<fpreal16>().setConstant(start, end, v); return;
183  case UT_Storage::REAL32:
184  hard.template castType<fpreal32>().setConstant(start, end, v); return;
185  case UT_Storage::REAL64:
186  hard.template castType<fpreal64>().setConstant(start, end, v); return;
187  case UT_Storage::INVALID:
188  UT_ASSERT_MSG(0, "Can't have a UT_PageArray with DATA_T void and invalid storage!");
189  break;
190  }
191  return;
192  }
193 
194  if (end <= start)
195  return;
196 
197  PageTable *pages = myImpl.getPages();
198  if (!pages)
199  return;
200 
201  const exint tuplesize = getTupleSize();
202  if (tuplesize == 0)
203  return;
204 
205  // UT_Defaults is almost always tuple size 1, so have a special case for it.
206  if (v.getTupleSize() == 1 || tuplesize == 1)
207  {
// Scalar default: pick the integer or float accessor to match the storage
// class, then replicate the single converted value across the range.
208  if (TSIZE >= 1)
209  {
210  if (UTisIntStorage(storage))
211  pages->fill(start, end, NotVoidType(v.getI(0)));
212  else
213  pages->fill(start, end, NotVoidType(v.getF(0)));
214  }
215  else
216  {
217  if (UTisIntStorage(storage))
218  pages->fill(start, end, NotVoidType(v.getI(0)), tuplesize);
219  else
220  pages->fill(start, end, NotVoidType(v.getF(0)), tuplesize);
221  }
222  }
223  else
224  {
// NOTE(review): Doxygen line 225 is missing from this extract -- it
// presumably declared the stack buffer, e.g.
// UT_StackBuffer<NotVoidType> buf(tuplesize); -- confirm.
226  if (UTisIntStorage(storage))
227  {
228  for (exint i = 0; i < tuplesize; ++i)
229  buf[i] = NotVoidType(v.getI(i));
230  }
231  else
232  {
233  for (exint i = 0; i < tuplesize; ++i)
234  buf[i] = NotVoidType(v.getF(i));
235  }
236  pages->fill(start, end, buf, tuplesize);
237  }
238 }
239 
// Fills [start, end) from a caller-supplied tuple of SRC_T values (one per
// component). As above, a void DATA_T is resolved at runtime via the
// storage switch; once concrete, the values are either used directly (when
// SRC_T matches DATA_T) or converted component-wise into a stack buffer.
// NOTE(review): the qualified signature line (Doxygen line 243) is missing
// from this extract; presumably
// UT_PageArray<...>::setConstant(IDX_T start, IDX_T end, const SRC_T *values)
// -- confirm against the original header.
240 template<typename DATA_T,exint TSIZE,bool TABLEHARDENED,bool PAGESHARDENED,exint THEPAGEBITS,typename IDX_T>
241 template<typename SRC_T>
242 void
244 {
245  UT_ASSERT_P(end >= start);
246  UT_ASSERT_P(start >= IDX_T(0));
247  UT_ASSERT_P(end <= capacity());
248 
249  auto &hard = hardenTable();
250 
251  UT_Storage storage = getStorage();
252 
253  // If the storage type is not known at compile time,
254  // switch, cast, and call again.
255  if (SYSisSame<DATA_T,void>())
256  {
257  switch (storage)
258  {
259  case UT_Storage::INT8:
260  hard.template castType<int8>().setConstant(start, end, values); return;
261  case UT_Storage::UINT8:
262  hard.template castType<uint8>().setConstant(start, end, values); return;
263  case UT_Storage::INT16:
264  hard.template castType<int16>().setConstant(start, end, values); return;
265  case UT_Storage::INT32:
266  hard.template castType<int32>().setConstant(start, end, values); return;
267  case UT_Storage::INT64:
268  hard.template castType<int64>().setConstant(start, end, values); return;
269  case UT_Storage::REAL16:
270  hard.template castType<fpreal16>().setConstant(start, end, values); return;
271  case UT_Storage::REAL32:
272  hard.template castType<fpreal32>().setConstant(start, end, values); return;
273  case UT_Storage::REAL64:
274  hard.template castType<fpreal64>().setConstant(start, end, values); return;
275  case UT_Storage::INVALID:
276  UT_ASSERT_MSG(0, "Can't have a UT_PageArray with DATA_T void and invalid storage!");
277  break;
278  }
279  return;
280  }
281 
282  if (end <= start)
283  return;
284 
285  PageTable *pages = myImpl.getPages();
286  if (!pages)
287  return;
288 
289  const exint tuplesize = getTupleSize();
290  if (tuplesize == 0)
291  return;
292 
293  if (SYSisSame<DATA_T,SRC_T>())
294  {
295  // NOTE: The cast is just for the compiler. SRC_T should be NotVoidType,
296  // since it is DATA_T, which is not void.
297  pages->fill(start, end, (const NotVoidType*)values, tuplesize);
298  }
299  else
300  {
// NOTE(review): Doxygen line 301 is missing from this extract -- it
// presumably declared the stack buffer, e.g.
// UT_StackBuffer<NotVoidType> buf(tuplesize); -- confirm.
302  for (exint i = 0; i < tuplesize; ++i)
303  buf[i] = UTconvertStorage<NotVoidType>(values[i]);
304 
305  pages->fill(start, end, buf, tuplesize);
306  }
307 }
308 
// Changes the runtime storage type (only legal when DATA_T is void).
// Converts by building a whole new array with the new storage, moving the
// data across, releasing this array's old page table, and adopting the new
// one. No-op when the storage is unchanged or invalid.
// NOTE(review): the qualified signature line (Doxygen line 311) is missing
// from this extract; presumably
// UT_PageArray<...>::setStorage(UT_Storage newstorage) -- confirm.
309 template<typename DATA_T,exint TSIZE,bool TABLEHARDENED,bool PAGESHARDENED,exint THEPAGEBITS,typename IDX_T>
310 void
312 {
313  const UT_Storage oldstorage = getStorage();
314  UT_ASSERT_P(newstorage != UT_Storage::INVALID);
315  UT_ASSERT_MSG_P((SYS_IsSame<DATA_T,void>::value) || (newstorage == oldstorage), "Can't change the storage of an array whose type is fixed.");
316 
317  // Nothing to do if same type, or bad type
318  if (newstorage == oldstorage || newstorage == UT_Storage::INVALID)
319  return;
320 
321  PageTable *const oldpages = myImpl.getPages();
322 
323  // If there's no data, we only need to set the storage.
324  exint tuplesize = getTupleSize();
325  if (tuplesize == 0 || !oldpages)
326  {
327  myImpl.setStorage(newstorage);
328  return;
329  }
330 
331  UT_ASSERT_P(numPages(oldpages->capacity()) >= 1);
332 
333  // Copy the data into a new array with the new storage type
334  ThisType newarray(getTupleSize(), newstorage);
335  newarray.setCapacity(capacity());
336  IDX_T n = size();
337  newarray.setSize(n);
338  newarray.moveRange(*this,IDX_T(0),IDX_T(0),IDX_T(n));
339 
340  // decRef depends on knowing the type
341  switch (oldstorage)
342  {
343  case UT_Storage::INT8:
344  castType<int8>().myImpl.getPages()->decRef(tuplesize); break;
345  case UT_Storage::UINT8:
346  castType<uint8>().myImpl.getPages()->decRef(tuplesize); break;
347  case UT_Storage::INT16:
348  castType<int16>().myImpl.getPages()->decRef(tuplesize); break;
349  case UT_Storage::INT32:
350  castType<int32>().myImpl.getPages()->decRef(tuplesize); break;
351  case UT_Storage::INT64:
352  castType<int64>().myImpl.getPages()->decRef(tuplesize); break;
353  case UT_Storage::REAL16:
354  castType<fpreal16>().myImpl.getPages()->decRef(tuplesize); break;
355  case UT_Storage::REAL32:
356  castType<fpreal32>().myImpl.getPages()->decRef(tuplesize); break;
357  case UT_Storage::REAL64:
358  castType<fpreal64>().myImpl.getPages()->decRef(tuplesize); break;
359  case UT_Storage::INVALID:
360  // NOTE: Can't have a UT_PageArray with DATA_T void and invalid storage.
361  myImpl.getPages()->decRef(tuplesize); break;
362  }
363 
364  // Take ownership of the page table.
// incRef here balances the decRef that newarray's destructor will perform
// when it goes out of scope at the end of this function.
365  PageTable *newpages = newarray.myImpl.getPages();
366  UT_ASSERT_P(newpages);
367  newpages->incRef();
368 
369  myImpl.setStorage(newstorage);
370  myImpl.getPages() = newpages;
371 }
372 
// Changes the runtime tuple size (only legal when TSIZE == -1). Mirrors
// setStorage: builds a new array with the new tuple size (new components
// initialized from v), moves the data over, releases the old page table,
// and adopts the new one. No-op when the size is unchanged or negative.
// NOTE(review): the qualified signature line (Doxygen line 375) is missing
// from this extract; presumably
// UT_PageArray<...>::setTupleSize(exint newtuplesize, const UT_Defaults &v)
// -- confirm against the original header.
373 template<typename DATA_T,exint TSIZE,bool TABLEHARDENED,bool PAGESHARDENED,exint THEPAGEBITS,typename IDX_T>
374 void
376 {
377  exint oldtuplesize = getTupleSize();
378  UT_ASSERT_P(newtuplesize >= 0);
379  UT_ASSERT_MSG_P((TSIZE == -1) || (newtuplesize == oldtuplesize), "Can't change the tuple size of an array whose tuple size is fixed.");
380 
381  // Nothing to do if same size, or bad size
382  if (newtuplesize == oldtuplesize || newtuplesize < 0)
383  return;
384 
385  PageTable *const oldpages = myImpl.getPages();
386 
387  // If there's no data, we only need to set the tuple size.
388  if (!oldpages)
389  {
390  myImpl.setTupleSize(newtuplesize);
391  return;
392  }
393 
394  UT_ASSERT_P(numPages(oldpages->capacity()) >= 1);
395 
396  // Copy the data into a new array with the new storage type
397  ThisType newarray(newtuplesize, getStorage());
398  newarray.setCapacity(capacity());
399  IDX_T n = size();
400  newarray.setSize(n, v);
401  newarray.moveRange(*this,IDX_T(0),IDX_T(0),IDX_T(n));
402 
403  // decRef depends on knowing the type
404  switch (getStorage())
405  {
406  case UT_Storage::INT8:
407  castType<int8>().myImpl.getPages()->decRef(oldtuplesize); break;
408  case UT_Storage::UINT8:
409  castType<uint8>().myImpl.getPages()->decRef(oldtuplesize); break;
410  case UT_Storage::INT16:
411  castType<int16>().myImpl.getPages()->decRef(oldtuplesize); break;
412  case UT_Storage::INT32:
413  castType<int32>().myImpl.getPages()->decRef(oldtuplesize); break;
414  case UT_Storage::INT64:
415  castType<int64>().myImpl.getPages()->decRef(oldtuplesize); break;
416  case UT_Storage::REAL16:
417  castType<fpreal16>().myImpl.getPages()->decRef(oldtuplesize); break;
418  case UT_Storage::REAL32:
419  castType<fpreal32>().myImpl.getPages()->decRef(oldtuplesize); break;
420  case UT_Storage::REAL64:
421  castType<fpreal64>().myImpl.getPages()->decRef(oldtuplesize); break;
422  case UT_Storage::INVALID:
423  // NOTE: Can't have a UT_PageArray with DATA_T void and invalid storage.
424  myImpl.getPages()->decRef(oldtuplesize); break;
425  }
426 
427  // Take ownership of the page table.
// incRef balances the decRef that newarray's destructor performs on exit.
428  PageTable *newpages = newarray.myImpl.getPages();
429  UT_ASSERT_P(newpages);
430  newpages->incRef();
431 
432  myImpl.setTupleSize(newtuplesize);
433  myImpl.getPages() = newpages;
434 }
435 
// Returns the memory used by this array in bytes: optionally sizeof(*this),
// plus the page-table entry array, plus each page's own memory. A single
// non-constant page is special-cased because it may be a small-capacity
// page (capacity below a full page).
// NOTE(review): the qualified signature line (Doxygen line 438) is missing
// from this extract; presumably
// UT_PageArray<...>::getMemoryUsage(bool inclusive) const -- confirm.
436 template<typename DATA_T,exint TSIZE,bool TABLEHARDENED,bool PAGESHARDENED,exint THEPAGEBITS,typename IDX_T>
437 int64
439 {
440  int64 mem = inclusive ? sizeof(*this) : 0;
441 
442  const PageTable *pages = myImpl.getPages();
443  if (!pages)
444  return mem;
445 
446  UT_PageNum npages = numPages(pages->capacity());
447  mem += exint(npages) * sizeof(PageTableEntry);
448 
// Bytes per tuple: storage size when known, else the element type's size.
449  exint tuplebytes = ((getStorage() != UT_Storage::INVALID) ? UTstorageSize(getStorage()) : sizeof(NotVoidType))*getTupleSize();
450 
451  // Case for a single, possibly small page
452  if (npages == UT_PageNum(1) && !pages->getFirstPage()->isConstant())
453  {
// A real (non-constant) page carries a refcount header plus its data.
454  mem += sizeof(SYS_AtomicCounter) + tuplebytes*exint(pages->capacity());
455  return mem;
456  }
457 
458  for (UT_PageNum i(0); i < npages; ++i)
459  {
460  const PageTableEntry *const page = pages->getPPage(i);
461  mem += page->getMemoryUsage(tuplebytes);
462  }
463 
464  return mem;
465 }
466 
// Reports this array's memory to a UT_MemoryCounter, distinguishing
// shared (refcounted) allocations from unshared ones so shared memory is
// attributed once per owner. Mirrors getMemoryUsage's structure: the
// object itself, the page-table entry array, then each page.
// NOTE(review): the qualified signature line (Doxygen line 469) is missing
// from this extract; presumably
// UT_PageArray<...>::countMemory(UT_MemoryCounter &counter, bool inclusive) const
// -- confirm against the original header.
467 template<typename DATA_T,exint TSIZE,bool TABLEHARDENED,bool PAGESHARDENED,exint THEPAGEBITS,typename IDX_T>
468 void
470 {
471  if (counter.mustCountUnshared() && inclusive)
472  {
473  UT_MEMORY_DEBUG_LOG("UT_PageArray",int64(sizeof(*this)));
474  counter.countUnshared(sizeof(*this));
475  }
476 
477  const PageTable *pages = myImpl.getPages();
478  if (!pages)
479  return;
480 
481  UT_PageNum npages = numPages(pages->capacity());
482  int64 tablemem = exint(npages) * sizeof(PageTableEntry);
483  if (!pages->isShared())
484  {
485  if (counter.mustCountUnshared())
486  {
487  UT_MEMORY_DEBUG_LOG("UT_PageArray::PageTable",int64(tablemem));
488  counter.countUnshared(tablemem);
489  }
490  }
491  else
492  {
493  UT_ASSERT_MSG_P(pages->getRefCount() > 1, "Why is something unref'ing data while we're counting memory?");
494  if (counter.mustCountShared())
495  {
496  UT_MEMORY_DEBUG_LOG_SHARED("UT_PageArray::PageTable",int64(tablemem),pages,pages->getRefCount());
497  bool already_counted = counter.countShared(tablemem, pages->getRefCount(), pages);
498 
499  // If this counter has already counted a reference to this page
500  // table, it's also already counted its pages, below, and since
501  // this is the *same* set of references it's already counted,
502  // not different references to the same pages we'd get incorrect
503  // reference count tracking if we counted the pages again, so we
504  // just return.
505  if (already_counted)
506  return;
507  }
508  }
509 
// Bytes per tuple: storage size when known, else the element type's size.
510  exint tuplebytes = ((getStorage() != UT_Storage::INVALID) ? UTstorageSize(getStorage()) : sizeof(NotVoidType))*getTupleSize();
511 
512  // Case for a single, possibly small page
513  if (npages == UT_PageNum(1) && !pages->getFirstPage()->isConstant())
514  {
515  const PageTableEntry *const page = pages->getFirstPage();
516  int64 pagemem = sizeof(SYS_AtomicCounter) + tuplebytes*exint(pages->capacity());
517  if (!page->isShared())
518  {
519  if (counter.mustCountUnshared())
520  {
521  UT_MEMORY_DEBUG_LOG("UT_PageArray::Page0",int64(pagemem));
522  counter.countUnshared(pagemem);
523  }
524  }
525  else
526  {
527  UT_ASSERT_MSG_P(page->getRefCount() > 1, "Why is something unref'ing data while we're counting memory?");
528  if (counter.mustCountShared())
529  {
// Use the pointer that identifies the shared allocation as the key for
// reference-count bookkeeping in the counter.
530  const void *masked = page->isConstant() ? page->getMaskedPtrVoid() : page->getFirstPtrVoid();
531  UT_MEMORY_DEBUG_LOG_SHARED("UT_PageArray::Page0",int64(pagemem),masked,page->getRefCount());
532  counter.countShared(pagemem, page->getRefCount(), masked);
533  }
534  }
535  return;
536  }
537 
538  for (UT_PageNum i(0); i < npages; ++i)
539  {
540  const PageTableEntry *const page = pages->getPPage(i);
541  int64 pagemem = page->getMemoryUsage(tuplebytes);
// Zero-cost pages (e.g. inline-constant pages) have nothing to report.
542  if (!pagemem)
543  continue;
544 
545  if (!page->isShared())
546  {
547  if (counter.mustCountUnshared())
548  {
549  UT_MEMORY_DEBUG_LOG("UT_PageArray::Page",int64(pagemem));
550  counter.countUnshared(pagemem);
551  }
552  }
553  else
554  {
555  UT_ASSERT_P(page->getRefCount() > 1);
556  if (counter.mustCountShared())
557  {
558  const void *masked = page->isConstant() ? page->getMaskedPtrVoid() : page->getFirstPtrVoid();
559  UT_MEMORY_DEBUG_LOG_SHARED("UT_PageArray::Page",int64(pagemem),masked,page->getRefCount());
560  counter.countShared(pagemem, page->getRefCount(), masked);
561  }
562  }
563  }
564 }
565 
// In-array range move: delegates to the general cross-array moveRange with
// this array as both source and destination.
// NOTE(review): the qualified signature line (Doxygen line 568) is missing
// from this extract; presumably
// UT_PageArray<...>::moveRange(IDX_T srcstart, IDX_T deststart, IDX_T nelements)
// -- confirm against the original header.
566 template<typename DATA_T,exint TSIZE,bool TABLEHARDENED,bool PAGESHARDENED,exint THEPAGEBITS,typename IDX_T>
567 void
569 {
570  // Just defer to the general function.
571  // It handles overlapping ranges and constant pages correctly.
572  moveRange(*this, srcstart, deststart, nelements);
573 }
574 
575 // This is a very big function, but don't let it scare you.
576 // Much of the code is only applicable to particular template types.
577 // If it weren't for constant pages, this would be *much* simpler.
578 template<typename DATA_T,exint TSIZE,bool TABLEHARDENED,bool PAGESHARDENED,exint THEPAGEBITS,typename IDX_T>
579 template<typename SRC_DATA_T,exint SRC_TSIZE,bool SRC_TABLEHARDENED,bool SRC_PAGESHARDENED>
580 void
583  IDX_T srcstart, IDX_T deststart, IDX_T nelements)
584 {
586  typedef typename SrcType::PageTable SrcPageTable;
587  typedef typename SrcType::PageTableEntry SrcPageTableEntry;
588  typedef typename SrcType::NotVoidType SrcNotVoidType;
589 
590  UT_ASSERT_P(nelements >= IDX_T(0));
591  UT_ASSERT_P(srcstart >= IDX_T(0) && deststart >= IDX_T(0));
592  UT_ASSERT_P(srcstart+nelements <= src.size() && deststart+nelements <= size());
593 
594  UT_ASSERT_P((SYSisSame<DATA_T,SRC_DATA_T>()) || (getStorage() != UT_Storage::INVALID && src.getStorage() != UT_Storage::INVALID));
595 
596  // If there's even a chance we might write values, we should harden the
597  // table and record it in the templates so that we don't harden again.
598  auto &hard = hardenTable();
599 
600  // If the destination storage type is not known at compile time,
601  // switch, cast, and call again.
602  if (SYSisSame<DATA_T,void>())
603  {
604  UT_Storage storage = getStorage();
605  switch (storage)
606  {
607  case UT_Storage::INT8:
608  hard.template castType<int8>().moveRange(src, srcstart, deststart, nelements); return;
609  case UT_Storage::UINT8:
610  hard.template castType<uint8>().moveRange(src, srcstart, deststart, nelements); return;
611  case UT_Storage::INT16:
612  hard.template castType<int16>().moveRange(src, srcstart, deststart, nelements); return;
613  case UT_Storage::INT32:
614  hard.template castType<int32>().moveRange(src, srcstart, deststart, nelements); return;
615  case UT_Storage::INT64:
616  hard.template castType<int64>().moveRange(src, srcstart, deststart, nelements); return;
617  case UT_Storage::REAL16:
618  hard.template castType<fpreal16>().moveRange(src, srcstart, deststart, nelements); return;
619  case UT_Storage::REAL32:
620  hard.template castType<fpreal32>().moveRange(src, srcstart, deststart, nelements); return;
621  case UT_Storage::REAL64:
622  hard.template castType<fpreal64>().moveRange(src, srcstart, deststart, nelements); return;
623  case UT_Storage::INVALID:
624  UT_ASSERT_MSG(0, "Can't have a UT_PageArray with DATA_T void and invalid storage!");
625  break;
626  }
627  return;
628  }
629 
630  // If the source storage type is not known at compile time,
631  // switch, cast, and call again.
632  if (SYSisSame<SRC_DATA_T,void>())
633  {
634  // Avoid switch on storage type if src is dest.
635  if ((const void*)&src==(void*)this)
636  {
637  hard.moveRange(src.template castType<DATA_T>(), srcstart, deststart, nelements);
638  return;
639  }
640 
641  UT_Storage src_storage = src.getStorage();
642  switch (src_storage)
643  {
644  case UT_Storage::INT8:
645  hard.moveRange(src.template castType<int8>(), srcstart, deststart, nelements); return;
646  case UT_Storage::UINT8:
647  hard.moveRange(src.template castType<uint8>(), srcstart, deststart, nelements); return;
648  case UT_Storage::INT16:
649  hard.moveRange(src.template castType<int16>(), srcstart, deststart, nelements); return;
650  case UT_Storage::INT32:
651  hard.moveRange(src.template castType<int32>(), srcstart, deststart, nelements); return;
652  case UT_Storage::INT64:
653  hard.moveRange(src.template castType<int64>(), srcstart, deststart, nelements); return;
654  case UT_Storage::REAL16:
655  hard.moveRange(src.template castType<fpreal16>(), srcstart, deststart, nelements); return;
656  case UT_Storage::REAL32:
657  hard.moveRange(src.template castType<fpreal32>(), srcstart, deststart, nelements); return;
658  case UT_Storage::REAL64:
659  hard.moveRange(src.template castType<fpreal64>(), srcstart, deststart, nelements); return;
660  case UT_Storage::INVALID:
661  UT_ASSERT_MSG(0, "Can't have a UT_PageArray with DATA_T void and invalid storage!");
662  }
663  return;
664  }
665 
666  // We now have both the source type and the destination type known at compile time.
667  UT_ASSERT_P((!SYSisSame<DATA_T,void>()) && (!SYSisSame<SRC_DATA_T,void>()));
668 
669  // Check if zero elements or moving data to location it's already in.
670  if (nelements <= IDX_T(0) || (SYSisSame<DATA_T,SRC_DATA_T>() && (const void*)&src==(void*)this && srcstart == deststart))
671  return;
672 
673  UT_PageOff srcoff = pageOff(srcstart);
674  UT_PageOff destoff = pageOff(deststart);
675 
676  // Just copy the minimum of the tuple sizes.
677  // Hopefully the compiler optimizes approriately if the values are
678  // known at compile time.
679  const exint srctuplesize = src.getTupleSize();
680  const exint desttuplesize = getTupleSize();
681 
682  // Nothing to do if either tuple size is zero.
683  if (srctuplesize == 0 || desttuplesize == 0)
684  return;
685 
686  const SrcPageTable *srcpagetable = src.myImpl.getPages();
687  PageTable *destpagetable = myImpl.getPages();
688 
689  // Since nelements is > 0, srcpagetable and destpagetable should be non-NULL.
690  UT_ASSERT_P(srcpagetable && destpagetable);
691 
692  UT_PageNum srcpagenum = pageNum(srcstart);
693  UT_PageNum destpagenum = pageNum(deststart);
694 
695  // NOTE: Shouldn't need to check for smaller first page here
696  // (until below), since that page boundary isn't allowed
697  // to be crossed by the ranges.
698  if (srcoff+UT_PageOff(exint(nelements)) <= UT_PageOff(thePageSize) && destoff+UT_PageOff(exint(nelements)) <= UT_PageOff(thePageSize))
699  {
700  // *************************************************************
701  // * CASE 1: Source and dest each confined to 1 page *
702  // *************************************************************
703 
704  // NOTE: We can dereference here because we don't pass any address into srcpage
705  // outside of this scope.
706  const SrcPageTableEntry *const srcpage = srcpagetable->getPPage(srcpagenum);
707  PageTableEntry *destpage = destpagetable->getPPage(destpagenum);
708 
709  // This is the only case that can have both srcpage and destpage be small pages.
710  bool issmalldestpage = destpagetable->capacity() < IDX_T(thePageSize);
711  UT_PageOff destpagecapacity(thePageSize);
712  if (issmalldestpage)
713  destpagecapacity = destpagetable->capacity();
714 
715  // If dest is a full page and src is also a full page or constant, just use replacePage.
716  bool isfullpage = (nelements == IDX_T(thePageSize));
717  if (!isfullpage && destoff == UT_PageOff(0) && deststart+nelements == size())
718  {
719  // If srcpage and destpage aren't the same capacity, destpage can't reference srcpage,
720  // even if size() is much less than the capacity of either.
721  bool issmallsrcpage = srcpagetable->capacity() < IDX_T(thePageSize);
722  bool samecapacity = (!issmalldestpage && !issmallsrcpage) ||
723  (issmalldestpage && issmallsrcpage && destpagetable->capacity() == srcpagetable->capacity());
724 
725  // destpage is a full destination page, but may not be replaceable by srcpage.
726  // srcpage lines up if srcoff == 0, and always implicitly lines up if constant.
727  // If either src or dest is small page and can't reference due to different capacity,
728  // fall through to copyPartialPage, which won't reference.
729 
730  isfullpage = ((srcoff == UT_PageOff(0) && samecapacity) || srcpage->isConstant());
731  }
732  if (isfullpage)
733  {
734  replacePage<SrcType>(destpage, srcpage, desttuplesize, srctuplesize, nelements, destpagecapacity);
735  return;
736  }
737 
738  // If it's a partial page, just copy that part
739  // NOTE: This handles overlapping ranges correctly.
740  copyPartialPage<SrcType>(destpage, srcpage, desttuplesize, srctuplesize, destoff, srcoff, nelements, destpagecapacity);
741  return;
742  }
743 
744  // If overlapping and moving data to later addresses,
745  // we need to do a backward loop, which is a big pain.
746  bool isoverlappingmovelater = (
747  SYSisSame<DATA_T,SRC_DATA_T>() &&
748  (void*)this == (const void *)&src &&
749  (deststart > srcstart && deststart < srcstart+nelements)
750  );
751 
752  if (srcoff == destoff)
753  {
754  // *************************************************************
755  // * CASE 2: Source and dest pages line up and at least one *
756  // * page boundary is crossed. *
757  // *************************************************************
758  // Example for following along:
759  // THEPAGEBITS is 3, so 8-tuple pages.
760  // src and to are dest 5.
761  // src [# # #|# # # # # # # #|# # # # # # # #|# # # # # #]
762  // dest [# # #|# # # # # # # #|# # # # # # # #|# # # # # #]
763 
764  // In this case, src and dest don't have any small pages,
765  // because both have page boundary crossings.
766 
767  const SrcPageTableEntry *psrcpagep = srcpagetable->getPPage(srcpagenum);
768  PageTableEntry *pdestpagep = destpagetable->getPPage(destpagenum);
769 
770  // If overlapping and moving data to later addresses,
771  // we need to do a backward loop, which is a big pain.
772  // It's not a very common case, so it doesn't have to be as optimized.
773  if (isoverlappingmovelater)
774  {
775  UT_ASSERT_P(desttuplesize == srctuplesize);
776 
777  UT_PageOff ntuplesfirstpage(0);
778  if (destoff != UT_PageOff(0))
779  {
780  ntuplesfirstpage = UT_PageOff(thePageSize)-destoff;
781  nelements -= IDX_T(exint(ntuplesfirstpage));
782  }
783 
784  // (nelements is now 3 less)
785  // NOTE: Not numPages, since that'd include any partial page at end
786  UT_PageNum nfullpages = pageNum(nelements);
787  ++psrcpagep;
788  ++pdestpagep;
789  PageTableEntry *pdestend = pdestpagep + nfullpages;
790  const SrcPageTableEntry *psrcend = psrcpagep + nfullpages;
791 
792  // Since backward, first, copy any incomplete last page
793  // src [ | | |# # # # # #]
794  // dest [ | | |# # # # # #]
795  UT_PageOff nleftover = pageOff(nelements);
796  if (nleftover != UT_PageOff(0))
797  {
798  const SrcPageTableEntry *srcpage = psrcend;
799  PageTableEntry *destpage = pdestend;
800 
801  // Remember that it may be effectively complete, if
802  // the last page within the size of the array and nleftover is
803  // the number of elements less than size() in that page.
804  // If it's really a full page, just use replacePage.
805  bool isfullpage = deststart+nelements == size();
806  if (isfullpage)
807  replacePage<SrcType>(destpage, srcpage, desttuplesize, srctuplesize, nleftover, thePageSize);
808  else
809  copyPartialPage<SrcType>(destpage, srcpage, desttuplesize, srctuplesize, UT_PageOff(0), UT_PageOff(0), nleftover, thePageSize);
810  }
811 
812  // Still backward, copy/reference whole pages next:
813  // src [ |# # # # # # # #|# # # # # # # #| ]
814  // dest [ |# # # # # # # #|# # # # # # # #| ]
815  while (pdestpagep != pdestend)
816  {
817  --psrcend;
818  --pdestend;
819  const SrcPageTableEntry *srcpage = psrcend;
820  PageTableEntry *destpage = pdestend;
821 
822  replacePage<SrcType>(destpage, srcpage, desttuplesize, srctuplesize, thePageSize, thePageSize);
823  }
824 
825  // Still backward, lastly, copy any incomplete first page:
826  // src [# # #| | | ]
827  // dest [# # #| | | ]
828  if (destoff != UT_PageOff(0))
829  {
830  --psrcpagep;
831  --pdestpagep;
832  const SrcPageTableEntry *srcpage = psrcpagep;
833  PageTableEntry *destpage = pdestpagep;
834 
835  copyPartialPage<SrcType>(destpage, srcpage, desttuplesize, srctuplesize, destoff, srcoff, ntuplesfirstpage, thePageSize);
836  }
837 
838  return;
839  }
840 
841  // In the common case of case 2, src and dest aren't overlapping,
842  // or src is later than dest, so we can go forward.
843 
844  // First, copy any incomplete first page:
845  // src [# # #| | | ]
846  // dest [# # #| | | ]
847  if (destoff != UT_PageOff(0))
848  {
849  const SrcPageTableEntry *srcpage = psrcpagep;
850  PageTableEntry *destpage = pdestpagep;
851 
852  UT_PageOff ntuplesfirstpage = UT_PageOff(thePageSize)-destoff;
853  copyPartialPage<SrcType>(destpage, srcpage, desttuplesize, srctuplesize, destoff, srcoff, ntuplesfirstpage, thePageSize);
854 
855  nelements -= IDX_T(exint(ntuplesfirstpage));
856  ++psrcpagep;
857  ++pdestpagep;
858  }
859 
860  // Copy/reference whole pages next:
861  // src [ |# # # # # # # #|# # # # # # # #| ]
862  // dest [ |# # # # # # # #|# # # # # # # #| ]
863  // (nelements is now 3 less)
864  // NOTE: Not numPages, since that'd include any partial page at end
865  UT_PageNum nfullpages = pageNum(nelements);
866  PageTableEntry *pdestend = pdestpagep + nfullpages;
867  for (; pdestpagep != pdestend; ++psrcpagep, ++pdestpagep)
868  {
869  const SrcPageTableEntry *srcpage = psrcpagep;
870  PageTableEntry *destpage = pdestpagep;
871 
872  replacePage<SrcType>(destpage, srcpage, desttuplesize, srctuplesize, thePageSize, thePageSize);
873  }
874 
875  // Lastly, copy any incomplete last page
876  // src [ | | |# # # # # #]
877  // dest [ | | |# # # # # #]
878  UT_PageOff nleftover = pageOff(nelements);
879  if (nleftover != UT_PageOff(0))
880  {
881  const SrcPageTableEntry *srcpage = psrcpagep;
882  PageTableEntry *destpage = pdestpagep;
883 
884  // Remember that it may be effectively complete, if
885  // the last page within the size of the array and nleftover is
886  // the number of elements less than size() in that page.
887  // If it's really a full page, just use replacePage.
888  bool isfullpage = deststart+nelements == size();
889  if (isfullpage)
890  replacePage<SrcType>(destpage, srcpage, desttuplesize, srctuplesize, nleftover, thePageSize);
891  else
892  copyPartialPage<SrcType>(destpage, srcpage, desttuplesize, srctuplesize, UT_PageOff(0), UT_PageOff(0), nleftover, thePageSize);
893  }
894  return;
895  }
896  else
897  {
898  // *************************************************************
899  // * CASE 3: Source and dest pages don't line up and at least *
900  // * one page boundary is crossed. *
901  // *************************************************************
902  // Example for following along:
903  // THEPAGEBITS is 3, so 8-tuple pages.
904  // src is 5; dest is 3.
905  // src [# # #|# # # # # # # #|# # # # # # # #|# # # # # #]
906  // dest [# # # # #|# # # # # # # #|# # # # # # # #|# # # #]
907  // |<----6---->|<2>|
908  // spagestartind dpagestartins
909  UT_PageOff spagestartind = pageOff(deststart-srcstart);
910  UT_PageOff dpagestartins = pageOff(srcstart-deststart);
911 
912  // Because of the misalignment, we don't have to worry about
913  // referencing pages, though we do have to worry about constant
914  // pages. If both src pages covering a full dest page are constant
915  // and the same value, we can use makeConstantFrom using either
916  // of the source pages.
917 
918  // REMEMBER: This case could have a small first page in either
919  // src or dest, but not both.
920  // REMEMBER: Must handle overlapping ranges!
921 
922  const SrcPageTableEntry *psrcpagep = srcpagetable->getPPage(srcpagenum);
923  PageTableEntry *pdestpagep = destpagetable->getPPage(destpagenum);
924 
925  // Case 3.0:
926  // Overlapping range
927 
928  const SrcPageTableEntry *srcpage0 = psrcpagep;
929 
930  const exint mintuplesize = SYSmin(srctuplesize,desttuplesize);
931 
932  // Case 3.1:
933  // src [# # #|# #]
934  // dest [# # # # #]
935  // dest is in a single page; it could be a small page.
936  // src is across two pages; they can't be small-capacity pages.
937  if (destoff+UT_PageOff(exint(nelements)) <= UT_PageOff(thePageSize))
938  {
939  PageTableEntry *destpage = pdestpagep;
940 
941  bool issmalldestpage = destpagetable->capacity() < IDX_T(thePageSize);
942  UT_PageOff destpagecapacity(thePageSize);
943  if (issmalldestpage)
944  destpagecapacity = destpagetable->capacity();
945 
946  const SrcPageTableEntry *srcpage1 = psrcpagep + 1;
947 
948  if (!PAGESHARDENED && srcpage0->isConstant() && srcpage1->isConstant())
949  {
950  const SrcNotVoidType *stuple0 = SrcType::getConstantPtr(srcpage0, 0, srctuplesize);
951  const SrcNotVoidType *stuple1 = SrcType::getConstantPtr(srcpage1, 0, srctuplesize);
952  if (SrcType::isEqualConst(stuple0, stuple1, srctuplesize))
953  {
954  // If dest page is already constant and equal to both src pages, nothing to do.
955  if (destpage->isConstant() && isEqualConst(getConstantPtr(destpage, 0, desttuplesize), stuple0, mintuplesize))
956  return;
957 
958  // If both src pages are constant and equal, and dest is a full
959  // page, make dest constant.
960  bool isfullpage = (nelements == IDX_T(thePageSize)) || (destoff == UT_PageOff(0) && deststart+nelements == size());
961  if (isfullpage)
962  {
963  makeConstantFrom<SrcType>(destpage, srcpage0, desttuplesize, srctuplesize);
964  return;
965  }
966  }
967  }
968 
969  if (!PAGESHARDENED && destpage->isConstant())
970  hardenConstantPage(destpage, destpagecapacity, desttuplesize);
971  else if (!PAGESHARDENED && destpage->isShared())
972  hardenSharedPage(destpage, destpagecapacity, desttuplesize);
973 
974  UT_PageOff n0 = UT_PageOff(thePageSize)-srcoff;
975  if (isoverlappingmovelater)
976  {
977  copyPartialPage<SrcType>(destpage, srcpage1, desttuplesize, srctuplesize, destoff+n0, UT_PageOff(0), nelements-n0, destpagecapacity);
978  copyPartialPage<SrcType>(destpage, srcpage0, desttuplesize, srctuplesize, destoff, srcoff, n0, destpagecapacity);
979  }
980  else
981  {
982  copyPartialPage<SrcType>(destpage, srcpage0, desttuplesize, srctuplesize, destoff, srcoff, n0, destpagecapacity);
983  copyPartialPage<SrcType>(destpage, srcpage1, desttuplesize, srctuplesize, destoff+n0, UT_PageOff(0), nelements-n0, destpagecapacity);
984  }
985 
986  return;
987  }
988 
989  // There is at least one dest page boundary, so dest has full-capacity pages.
990 
991  if (isoverlappingmovelater)
992  {
993  // FIXME: Implement this!!!
994  UT_ASSERT_MSG(0, "Implement this!!! It should be like the code below, only copying chunks in reverse order.");
995 
996  return;
997  }
998 
999  // Deal with tuples before the first full destination page.
1000  if (destoff > UT_PageOff(0))
1001  {
1002  PageTableEntry *destpage = pdestpagep;
1003 
1004  if (destoff < spagestartind)
1005  {
1006  // srcpage0 srcpage1
1007  // src [# # #|# # ...
1008  // dest [# # # # #|...
1009  // |<--->|<->|
1010  // spagestartind-destoff dpagestartins
1011 
1012  UT_PageOff n0 = spagestartind - destoff;
1013  copyPartialPage<SrcType>(destpage, srcpage0, desttuplesize, srctuplesize, destoff, srcoff, n0, UT_PageOff(thePageSize));
1014 
1015  srcoff = UT_PageOff(0);
1016  destoff += n0;
1017  ++psrcpagep;
1018  srcpage0 = psrcpagep;
1019  nelements -= IDX_T(exint(n0));
1020  }
1021 
1022  // srcpage0
1023  // src [# # # #...
1024  // dest [# #|# #...
1025  // |<->|
1026  // thePageSize-destoff
1027  UT_PageOff n0 = UT_PageOff(thePageSize) - destoff;
1028  copyPartialPage<SrcType>(destpage, srcpage0, desttuplesize, srctuplesize, destoff, srcoff, n0, UT_PageOff(thePageSize));
1029  ++pdestpagep;
1030  nelements -= IDX_T(exint(n0));
1031  }
1032 
1033  // Middle full destination pages
1034  for (; nelements >= IDX_T(thePageSize); nelements -= IDX_T(thePageSize), ++pdestpagep, ++psrcpagep, (srcpage0 = psrcpagep))
1035  {
1036  PageTableEntry *destpage = pdestpagep;
1037 
1038  // srcpage0 srcpage1
1039  // src [ # # # # # #|# # ]
1040  // dest [ |# # # # # # # #| ]
1041  // |<--------->|<->|
1042  // spagestartind dpagestartins
1043 
1044  const SrcPageTableEntry *srcpage1 = psrcpagep + 1;
1045 
1046  if (!PAGESHARDENED && srcpage0->isConstant() && srcpage1->isConstant())
1047  {
1048  const SrcNotVoidType *stuple0 = SrcType::getConstantPtr(srcpage0, 0, srctuplesize);
1049  const SrcNotVoidType *stuple1 = SrcType::getConstantPtr(srcpage1, 0, srctuplesize);
1050  if (SrcType::isEqualConst(stuple0, stuple1, srctuplesize))
1051  {
1052  // If dest page is already constant and equal to both src pages, nothing to do.
1053  if (destpage->isConstant() && isEqualConst(getConstantPtr(destpage, 0, desttuplesize), stuple0, mintuplesize))
1054  continue;
1055 
1056  // If both src pages are constant and equal, and dest is a full
1057  // page, make dest constant.
1058  makeConstantFrom<SrcType>(destpage, srcpage0, desttuplesize, srctuplesize);
1059  continue;
1060  }
1061  }
1062 
1063  if (!PAGESHARDENED && destpage->isConstant())
1064  hardenConstantPage(destpage, UT_PageOff(thePageSize), desttuplesize);
1065  else if (!PAGESHARDENED && destpage->isShared())
1066  hardenSharedPage(destpage, UT_PageOff(thePageSize), desttuplesize);
1067 
1068  copyPartialPage<SrcType>(destpage, srcpage0, desttuplesize, srctuplesize, UT_PageOff(0), dpagestartins, spagestartind, UT_PageOff(thePageSize));
1069  copyPartialPage<SrcType>(destpage, srcpage1, desttuplesize, srctuplesize, spagestartind, UT_PageOff(0), dpagestartins, UT_PageOff(thePageSize));
1070  }
1071 
1072  // Final partial page, though may reach size()
1073  if (nelements > IDX_T(0))
1074  {
1075  PageTableEntry *destpage = pdestpagep;
1076 
1077  const bool isfullmaybeconstpage = !PAGESHARDENED && (deststart+nelements == size());
1078 
1079  if (nelements > IDX_T(exint(spagestartind)))
1080  {
1081  // srcpage0 srcpage1
1082  // src [ # # # # # #|#]
1083  // dest [ |# # # # # # #]
1084  // |<--------->|-|
1085  // spagestartind nelements-spagestartind
1086 
1087  const SrcPageTableEntry *srcpage1 = psrcpagep + 1;
1088 
1089  if (isfullmaybeconstpage && srcpage0->isConstant() && srcpage1->isConstant())
1090  {
1091  const SrcNotVoidType *stuple0 = SrcType::getConstantPtr(srcpage0, 0, srctuplesize);
1092  const SrcNotVoidType *stuple1 = SrcType::getConstantPtr(srcpage1, 0, srctuplesize);
1093  if (SrcType::isEqualConst(stuple0, stuple1, srctuplesize))
1094  {
1095  // If dest page is already constant and equal to both src pages, nothing to do.
1096  if (destpage->isConstant() && isEqualConst(getConstantPtr(destpage, 0, desttuplesize), stuple0, mintuplesize))
1097  return;
1098 
1099  // If both src pages are constant and equal, and dest is a full
1100  // page, make dest constant.
1101  makeConstantFrom<SrcType>(destpage, srcpage0, desttuplesize, srctuplesize);
1102  return;
1103  }
1104  }
1105 
1106  if (!PAGESHARDENED && destpage->isConstant())
1107  hardenConstantPage(destpage, UT_PageOff(thePageSize), desttuplesize);
1108  else if (!PAGESHARDENED && destpage->isShared())
1109  hardenSharedPage(destpage, UT_PageOff(thePageSize), desttuplesize);
1110 
1111  copyPartialPage<SrcType>(destpage, srcpage0, desttuplesize, srctuplesize, UT_PageOff(0), dpagestartins, spagestartind, UT_PageOff(thePageSize));
1112  copyPartialPage<SrcType>(destpage, srcpage1, desttuplesize, srctuplesize, spagestartind, UT_PageOff(0), UT_PageOff(exint(nelements))-spagestartind, UT_PageOff(thePageSize));
1113  }
1114  else
1115  {
1116  // srcpage0
1117  // src [ # # # # #]
1118  // dest [ |# # # # #]
1119  // |<------->|
1120  // nelements
1121 
1122  if (isfullmaybeconstpage && srcpage0->isConstant())
1123  {
1124  const SrcNotVoidType *stuple0 = SrcType::getConstantPtr(srcpage0, 0, srctuplesize);
1125  // If dest page is already constant and equal to both src pages, nothing to do.
1126  if (destpage->isConstant() && isEqualConst(getConstantPtr(destpage, 0, desttuplesize), stuple0, mintuplesize))
1127  return;
1128 
1129  // If both src pages are constant and equal, and dest is a full
1130  // page, make dest constant.
1131  makeConstantFrom<SrcType>(destpage, srcpage0, desttuplesize, srctuplesize);
1132  return;
1133  }
1134 
1135  if (!PAGESHARDENED && destpage->isConstant())
1136  hardenConstantPage(destpage, UT_PageOff(thePageSize), desttuplesize);
1137  else if (!PAGESHARDENED && destpage->isShared())
1138  hardenSharedPage(destpage, UT_PageOff(thePageSize), desttuplesize);
1139 
1140  copyPartialPage<SrcType>(destpage, srcpage0, desttuplesize, srctuplesize, UT_PageOff(0), dpagestartins, UT_PageOff(exint(nelements)), UT_PageOff(thePageSize));
1141  }
1142  }
1143  }
1144 }
1145 
// Swaps the values in the range [astart, astart+nelements) with the values
// in [bstart, bstart+nelements), component by component.
// The two ranges must be the same length and must not overlap.
template<typename DATA_T,exint TSIZE,bool TABLEHARDENED,bool PAGESHARDENED,exint THEPAGEBITS,typename IDX_T>
void
// NOTE(review): the qualified signature line, presumably
// UT_PageArray<...>::swapRange(IDX_T astart, IDX_T bstart, IDX_T nelements),
// appears to have been lost in extraction — confirm against the shipped header.
{
    UT_ASSERT_P(nelements >= IDX_T(0));
    UT_ASSERT_P(astart >= IDX_T(0) && bstart >= IDX_T(0));
    UT_ASSERT_P(astart+nelements <= size() && bstart+nelements <= size());
    UT_ASSERT_MSG_P(astart >= bstart+nelements || bstart >= astart+nelements, "Ranges can't overlap when swapping!");
    // Empty range: nothing to swap.
    if (nelements <= IDX_T(0))
        return;
    // Make sure the page table isn't shared before writing through it.
    auto &hard = hardenTable();
    if (!SYSisSame<DATA_T,void>())
    {
        // Easy case, where the storage type is known at compile time.
        exint tuplesize = getTupleSize();
        for (IDX_T i(0); i < nelements; ++i)
        {
            for (exint component = 0; component < tuplesize; ++component)
            {
                UTswap(hard(astart+i, component), hard(bstart+i, component));
            }
        }
        return;
    }

    // Hard case, where the storage type is not known at compile time.
    // Cast to the concrete runtime storage type and recurse, so the
    // compile-time-typed path above performs the actual swapping.
    UT_Storage storage = getStorage();
    switch (storage)
    {
        case UT_Storage::INT8:
            hard.template castType<int8>().swapRange(astart, bstart, nelements); return;
        case UT_Storage::UINT8:
            hard.template castType<uint8>().swapRange(astart, bstart, nelements); return;
        case UT_Storage::INT16:
            hard.template castType<int16>().swapRange(astart, bstart, nelements); return;
        case UT_Storage::INT32:
            hard.template castType<int32>().swapRange(astart, bstart, nelements); return;
        case UT_Storage::INT64:
            hard.template castType<int64>().swapRange(astart, bstart, nelements); return;
        case UT_Storage::REAL16:
            hard.template castType<fpreal16>().swapRange(astart, bstart, nelements); return;
        case UT_Storage::REAL32:
            hard.template castType<fpreal32>().swapRange(astart, bstart, nelements); return;
        case UT_Storage::REAL64:
            hard.template castType<fpreal64>().swapRange(astart, bstart, nelements); return;
        case UT_Storage::INVALID:
            UT_ASSERT_MSG(0, "Can't have a UT_PageArray with DATA_T void and invalid storage!");
            break;
    }
}
1196 
// Replaces the entire contents of the destination page with the contents of
// the source page, converting storage type (DATA_T vs. SrcType::DataType)
// and tuple size as needed.  Avoids copying where possible: a constant
// source may constant-compress the destination, and a same-type,
// same-tuple-size source page may simply be referenced (shared).
// destpagesize is the number of tuples to fill when actually writing data;
// destpagecapacity is the destination page's allocated tuple capacity.
template<typename DATA_T,exint TSIZE,bool TABLEHARDENED,bool PAGESHARDENED,exint THEPAGEBITS,typename IDX_T>
template<typename SrcType>
void
// NOTE(review): the qualified name line (UT_PageArray<...>::replacePage(,
// opening the parameter list) appears to have been lost in extraction.
    PageTableEntry *dest, const typename SrcType::PageTableEntry *src, exint desttuplesize, exint srctuplesize, UT_PageOff destpagesize, UT_PageOff destpagecapacity)
{
    typedef typename SrcType::DataType SRC_DATA_T;
    typedef typename SrcType::NotVoidType SrcNotVoidType;
    UT_IF_ASSERT_P(const exint SRC_TSIZE = SrcType::theTupleSize;)
    UT_ASSERT_P(!(SYSisSame<DATA_T,void>()));
    UT_ASSERT_P(!(SYSisSame<SRC_DATA_T,void>()));
    UT_ASSERT_P(thePageBits == SrcType::thePageBits);
    UT_ASSERT_P((SYSisSame<IndexType, typename SrcType::IndexType>()));
    UT_ASSERT_P(desttuplesize > 0 && srctuplesize > 0);
    UT_ASSERT_P((TSIZE == -1 || TSIZE == desttuplesize) && (SRC_TSIZE == -1 || SRC_TSIZE == srctuplesize));

    // If the source page is constant,
    if (src->isConstant())
    {
        // Constant-compress the destination too, unless it's a non-constant
        // page with a larger tuple size (whose upper components may not be
        // constant) or pages are forced hardened.
        if (!PAGESHARDENED && (dest->isConstant() || desttuplesize <= srctuplesize))
            makeConstantFrom<SrcType>(dest, src, desttuplesize, srctuplesize);
        else
        {
            // This codepath is primarily for the awkward case where we can't
            // easily make the destination page constant, because
            // it's not currently constant and the tuple size is larger.
            // However, it's also used for filling a page that isn't allowed to be
            // constant-compressed with the tuple from a constant-compressed source.
            UT_ASSERT_P(PAGESHARDENED || (!dest->isConstant() && desttuplesize > srctuplesize));

            // Can't write into a page that other owners still reference.
            if (!PAGESHARDENED && dest->isShared())
                hardenSharedPage(dest, destpagecapacity, desttuplesize);

            // Fill range in dest with value from src.
            NotVoidType *destpagedata = dest->getFirstPtr();
            // NOTE: This is destpagesize instead of capacity, because it's just used for filling in data.
            NotVoidType *destpageend = destpagedata + (desttuplesize*destpagesize);

            const SrcNotVoidType *stuple = SrcType::getConstantPtr(src, 0, srctuplesize);

            const exint mintuplesize = SYSmin(srctuplesize,desttuplesize);
            const exint desttupleextra = desttuplesize-mintuplesize;

            fillNonConstWithConst(destpagedata, destpageend, stuple, mintuplesize, desttupleextra);
        }
    }
    else if (!PAGESHARDENED && SYSisSame<DATA_T,SRC_DATA_T>() && desttuplesize == srctuplesize)
    {
        // Same type, same tuple size: share the source page instead of copying.
        // Nothing to do if already referencing the same data.
        // This pointer comparison works because we know that
        // the types and tuple sizes are the same,
        // and the src is non-constant, (so if dest is constant,
        // it won't be equal).
        if (src->getFirstPtrVoid() == dest->getFirstPtrVoidUnsafe())
            return;

        // Release our claim on whatever dest currently holds.
        exint bytesize = desttuplesize*sizeof(NotVoidType);
        if (dest->isRefd(bytesize))
            dest->decRef();

        // Reference the source page
        SYSconst_cast(src)->incRef();

        // Still need to cast to PageTableEntry*, because the compiler needs to
        // compile this line when the condition is false.
        *dest = *(const PageTableEntry *)src;
    }
    else
    {
        // General case: uncompress/unshare the destination, then copy and
        // convert the data tuple by tuple.
        if (!PAGESHARDENED && dest->isConstant())
            hardenConstantPage(dest, destpagecapacity, desttuplesize);
        else if (!PAGESHARDENED && dest->isShared())
            hardenSharedPage(dest, destpagecapacity, desttuplesize);

        // Copy data from src to dest
        NotVoidType *destpagedata = dest->getFirstPtr();
        const SrcNotVoidType *srcpagedata = src->getFirstPtr();
        // NOTE: This must be destpagesize instead of capacity, else it might access the source out of bounds.
        copyNonConst(destpagedata, srcpagedata, desttuplesize, srctuplesize, destpagesize);
    }
}
1279 
// Copies ntuples tuples from offset srcoff of the source page to offset
// destoff of the destination page, converting storage type and tuple size
// as needed.  Handles overlapping source/destination ranges within the
// same physical page.  Unlike replacePage, this never shares pages or
// constant-compresses the destination; callers handle those cases.
template<typename DATA_T,exint TSIZE,bool TABLEHARDENED,bool PAGESHARDENED,exint THEPAGEBITS,typename IDX_T>
template<typename SrcType>
void
// NOTE(review): the qualified name line (UT_PageArray<...>::copyPartialPage(,
// opening the parameter list) appears to have been lost in extraction.
    PageTableEntry *dest, const typename SrcType::PageTableEntry *src, exint desttuplesize, exint srctuplesize,
    UT_PageOff destoff, UT_PageOff srcoff, UT_PageOff ntuples, UT_PageOff destpagecapacity)
{
    typedef typename SrcType::DataType SRC_DATA_T;
    typedef typename SrcType::NotVoidType SrcNotVoidType;
    UT_IF_ASSERT_P(const exint SRC_TSIZE = SrcType::theTupleSize;)
    UT_ASSERT_P(!(SYSisSame<DATA_T,void>()));
    UT_ASSERT_P(!(SYSisSame<SRC_DATA_T,void>()));
    UT_ASSERT_P(thePageBits == SrcType::thePageBits);
    UT_ASSERT_P((SYSisSame<IndexType, typename SrcType::IndexType>()));
    UT_ASSERT_P(desttuplesize > 0 && srctuplesize > 0);
    UT_ASSERT_P((TSIZE == -1 || TSIZE == desttuplesize) && (SRC_TSIZE == -1 || SRC_TSIZE == srctuplesize));
    UT_ASSERT_P(ntuples > UT_PageOff(0));

    // NOTE: Don't have to check for full page. The caller is responsible
    // for that if they want referencing or constant overwriting.

    // If the source page is constant,
    if (src->isConstant())
    {
        const SrcNotVoidType *stuple = SrcType::getConstantPtr(src, 0, srctuplesize);

        const exint mintuplesize = SYSmin(srctuplesize,desttuplesize);

        // If the destination page is constant,
        if (dest->isConstant())
        {
            const NotVoidType *tuple = getConstantPtr(dest, 0, desttuplesize);

            // If the const pages are equal, there's nothing to do.
            if (isEqualConst(tuple, stuple, mintuplesize))
                return;

            // Differ: expand dest into a real page we can partially fill.
            hardenConstantPage(dest, destpagecapacity, desttuplesize);
        }

        if (!PAGESHARDENED && dest->isShared())
            hardenSharedPage(dest, destpagecapacity, desttuplesize);

        // Fill range in dest with value from src.
        NotVoidType *destpagedata = dest->getFirstPtr() + (desttuplesize*destoff);
        NotVoidType *destpageend = destpagedata + (desttuplesize*ntuples);

        const exint desttupleextra = desttuplesize-mintuplesize;

        fillNonConstWithConst(destpagedata, destpageend, stuple, mintuplesize, desttupleextra);

        return;
    }

    if (!PAGESHARDENED && dest->isConstant())
        hardenConstantPage(dest, destpagecapacity, desttuplesize);
    else if (!PAGESHARDENED && dest->isShared())
        hardenSharedPage(dest, destpagecapacity, desttuplesize);

    // Remember that the ranges could overlap if same page
    // NOTE: Since dest was hardened if shared, dest can only equal src if same table.
    if (SYSisSame<DATA_T,SRC_DATA_T>() && dest->getFirstPtrVoidUnsafe() == src->getFirstPtrVoid() && (srcoff < destoff+UT_PageOff(exint(ntuples)) && destoff < srcoff+UT_PageOff(exint(ntuples))))
    {
        // Overlapping, so be careful!

        UT_ASSERT_P(desttuplesize == srctuplesize);

        // Nothing to do if exactly same range.
        // This could happen even if caller checked the global offsets,
        // and even if they're separate arrays, because the same page
        // can appear in multiple locations.
        if (srcoff == destoff)
            return;

        NotVoidType *destpagedata = dest->getFirstPtr();
        destpagedata += desttuplesize*destoff;
        NotVoidType *destend = destpagedata + desttuplesize*ntuples;

        const SrcNotVoidType *srcpagedata = src->getFirstPtr();
        srcpagedata += srctuplesize*srcoff;

        // If moving to earlier addresses, can copy in forward loop
        if (destoff < srcoff)
        {
            do
            {
                *destpagedata = *srcpagedata;
                ++srcpagedata;
                ++destpagedata;
            } while (destpagedata != destend);
        }
        // If moving to later addresses, must copy in backward loop
        else
        {
            const SrcNotVoidType *srcend = srcpagedata + srctuplesize*ntuples;
            do
            {
                --srcend;
                --destend;
                *destend = *srcend;
            } while (destpagedata != destend);
        }
    }
    else
    {
        // The two ranges don't overlap, so just copy
        NotVoidType *destpagedata = dest->getFirstPtr();
        destpagedata += desttuplesize*destoff;

        const SrcNotVoidType *srcpagedata = src->getFirstPtr();
        srcpagedata += srctuplesize*srcoff;

        copyNonConst(destpagedata, srcpagedata, desttuplesize, srctuplesize, UT_PageOff(ntuples));
    }
}
1396 
// Makes dest a constant-compressed page whose tuple value comes from the
// (already constant) source page, converting storage type and tuple size
// as needed.  Where the types and tuple sizes match exactly, the source's
// constant page is referenced instead of re-allocated.  When
// desttuplesize > srctuplesize, dest must already be constant so its upper
// components can be preserved.
template<typename DATA_T,exint TSIZE,bool TABLEHARDENED,bool PAGESHARDENED,exint THEPAGEBITS,typename IDX_T>
template<typename SrcType>
void
// NOTE(review): the qualified name line (UT_PageArray<...>::makeConstantFrom(,
// opening the parameter list) appears to have been lost in extraction.
    PageTableEntry *dest, const typename SrcType::PageTableEntry *src, exint desttuplesize, exint srctuplesize)
{
    typedef typename SrcType::DataType SRC_DATA_T;
    //typedef UT_PageArray<SRC_DATA_T,SRC_TSIZE,SRC_TABLEHARDENED,SRC_PAGESHARDENED,THEPAGEBITS,IDX_T> SrcType;
    typedef typename SrcType::PageTableEntry SrcPageTableEntry;
    typedef typename SrcType::NotVoidType SrcNotVoidType;

    const exint SRC_TSIZE = SrcType::theTupleSize;
    UT_ASSERT_P(!(SYSisSame<DATA_T,void>()));
    UT_ASSERT_P(!(SYSisSame<SRC_DATA_T,void>()));
    UT_ASSERT_P(src->isConstant());
    UT_ASSERT_P(!PAGESHARDENED);
    UT_ASSERT_P(thePageBits == SrcType::thePageBits);
    UT_ASSERT_P((SYSisSame<IndexType, typename SrcType::IndexType>()));
    UT_ASSERT_P(desttuplesize > 0 && srctuplesize > 0);
    UT_ASSERT_P((TSIZE == -1 || TSIZE == desttuplesize) && (SRC_TSIZE == -1 || SRC_TSIZE == srctuplesize));
    UT_ASSERT_MSG_P(desttuplesize <= srctuplesize || dest->isConstant(), "The higher component values may not have been constant!");

    if (PAGESHARDENED)
        return;

    // Do nothing in very common case of copying zero to zero.
    // NOTE: It may seem like this could be generalized to
    // if (*src == *dest)
    // but, they could be different types or tuple sizes,
    // so, for example, one could be an inline constant page
    // that just happens to equal a pointer for a non-inline
    // constant page, or two inline tuples may be different
    // but produce equal pointers, e.g. (-2,-2) in int16
    // would match (-65538) in int32.
    const bool issrczero = src->isConstantAndZeroSafe();
    if (dest->isConstantAndZeroSafe() && issrczero)
        return;

    // A non-constant dest owns real page data; drop our reference to it.
    bool wasconst = dest->isConstant();
    if (!wasconst)
        dest->decRef();

    // Common case of exact match can just reference the same constant page
    if (((TSIZE == SRC_TSIZE && TSIZE != -1) || (desttuplesize == srctuplesize)) && SYSisSame<DATA_T,SRC_DATA_T>())
    {
        // Now that we know that the types and tuple sizes are the same,
        // we can just check the pointers to see if they're the same (constant) page.
        // The typecast on src is just so that it will compile when the types don't match.
        if (*dest == *(const PageTableEntry*)src)
            return;

        // Non-inline constant pages are refcounted; release the old one.
        const bool typefitsinline = PageTableEntry::typeFitsInline(desttuplesize);
        if (!typefitsinline && wasconst && !dest->isConstantZero())
        {
            dest->decRef();
        }

        // Still need to cast to PageTableEntry*, because the compiler needs to
        // compile this line when the condition is false.
        *dest = *(const PageTableEntry*)src;

        // ... and take a reference on the newly shared constant page.
        if (!typefitsinline && !dest->isConstantZero())
        {
            dest->incRef();
        }

        return;
    }

    // Either the type doesn't match or the tuple size doesn't match.

    const SrcNotVoidType *const srctuple = SrcType::getConstantPtr(src, 0, srctuplesize);

    const exint mintuplesize = SYSmin(desttuplesize, srctuplesize);

    // Easy for inline case, checked at compile-time.
    if (PageTableEntry::typeFitsInline(desttuplesize))
    {
        // If dest was already constant, we don't need to write
        // theConstantPageBit, and we don't want to blow away any
        // components that are between srctuplesize and desttuplesize, in the
        // unlikely event that desttuplesize > srctuplesize.
        if (!wasconst)
        {
            // This sets the constant bit and makes sure that the
            // space between that bit and tuple component 0 is zeroed.
            dest->initZero();

            // Since initZero sets everything to zero, if src
            // is all zero, we can early exit.
            if (issrczero)
                return;
        }
        NotVoidType *tuple = dest->getInlinePtr(desttuplesize);
        if (issrczero)
        {
            memset(tuple, 0, mintuplesize*sizeof(NotVoidType));
        }
        else
        {
            for (exint i = 0; i < mintuplesize; ++i)
                tuple[i] = UTconvertStorage<NotVoidType>(srctuple[i]);
        }
        return;
    }

    // In other cases, we may or may not have to unref constant page
    if (wasconst)
    {
        if (dest->isConstantZero())
        {
            // Fairly common case: already zero, making zero.
            if (issrczero)
                return;
        }
        else
        {
            if (desttuplesize <= srctuplesize && issrczero)
            {
                // No longer need this old constant page
                dest->decRef();
            }
            else if (dest->isShared())
            {
                // Shared constant page: can't modify it in place.
                NotVoidType *tuple = dest->getMaskedPtr();
                bool equal = true;
                if (desttuplesize > srctuplesize && issrczero)
                {
                    equal = isZero(tuple, mintuplesize);
                }
                else
                {
                    for (exint i = 0; i < mintuplesize; ++i)
                    {
                        if (tuple[i] != UTconvertStorage<NotVoidType>(srctuple[i]))
                        {
                            equal = false;
                            break;
                        }
                    }
                }

                if (equal)
                {
                    // Already equal; nothing to do
                    return;
                }

                // Need to allocate new constant page before ditching the old one
                // if desttuplesize is larger, because some elements need to be kept.
                if (desttuplesize > srctuplesize)
                {
                    // Need to save the pointers so that we can decRef below
                    // after calling alloc.
                    PageTableEntry orig(*dest);

                    dest->alloc(UT_PageOff(1), desttuplesize);
                    NotVoidType *newtuple = dest->getFirstPtr();
                    // Copy lower components from src
                    if (issrczero)
                    {
                        memset(newtuple, 0, srctuplesize*sizeof(NotVoidType));
                    }
                    else
                    {
                        for (exint i = 0; i < srctuplesize; ++i)
                            newtuple[i] = UTconvertStorage<NotVoidType>(srctuple[i]);
                    }
                    // Copy higher components from dest
                    for (exint i = srctuplesize; i < desttuplesize; ++i)
                        newtuple[i] = tuple[i];

                    orig.decRef();
                    dest->setConstantBit();
                    return;
                }

                // No longer need this old constant page
                dest->decRef();
            }
            else
            {
                // Reuse the unshared constant page
                NotVoidType *tuple = dest->getMaskedPtr();
                if (issrczero)
                {
                    memset(tuple, 0, mintuplesize*sizeof(NotVoidType));
                }
                else
                {
                    for (exint i = 0; i < mintuplesize; ++i)
                        tuple[i] = UTconvertStorage<NotVoidType>(srctuple[i]);
                }
                return;
            }
        }
    }

    // All-zero constant fits in the inline zero representation.
    if (desttuplesize <= srctuplesize && issrczero)
    {
        dest->initZero();
        return;
    }

    // Need to allocate new constant page
    dest->alloc(UT_PageOff(1), desttuplesize);
    NotVoidType *tuple = dest->getFirstPtr();
    if (issrczero)
    {
        memset(tuple, 0, desttuplesize*sizeof(NotVoidType));
    }
    else
    {
        for (exint i = 0; i < mintuplesize; ++i)
            tuple[i] = UTconvertStorage<NotVoidType>(srctuple[i]);

        if (desttuplesize > srctuplesize)
        {
            // dest was already zero when here, or !wasconst, so zero out the extra components not copied from src.
            memset(tuple+srctuplesize, 0, (desttuplesize-srctuplesize)*sizeof(NotVoidType));
        }
    }
    dest->setConstantBit();
}
1622 
// Fills the non-constant destination range [destpagedata, destpageend) with
// the single constant tuple stuple.  Only the first mintuplesize components
// of each destination tuple are written; desttupleextra components are
// skipped after each tuple (left untouched).  A null stuple means
// "fill with zero/default".
template<typename DATA_T,exint TSIZE,bool TABLEHARDENED,bool PAGESHARDENED,exint THEPAGEBITS,typename IDX_T>
template<typename SrcNotVoidType>
void
// NOTE(review): the qualified name line
// (UT_PageArray<...>::fillNonConstWithConst() appears to have been lost in
// extraction.
    NotVoidType *__restrict destpagedata,
    NotVoidType *destpageend,
    const SrcNotVoidType *__restrict stuple,
    exint mintuplesize,
    exint desttupleextra)
{
    UT_ASSERT_P(!(SYSisSame<DATA_T,void>()));

    // Fill range in dest with value from stuple.
    if (stuple)
    {
        if (!SYSisSame<NotVoidType,SrcNotVoidType>())
        {
            // Differing storage types: convert each component on write.
            do
            {
                for (exint i = 0; i < mintuplesize; ++i, ++destpagedata)
                    *destpagedata = UTconvertStorage<NotVoidType>(stuple[i]);
                destpagedata += desttupleextra;
            } while(destpagedata < destpageend);
        }
        else
        {
            // Same storage type: assign components directly.
            do
            {
                for (exint i = 0; i < mintuplesize; ++i, ++destpagedata)
                    *destpagedata = stuple[i];
                destpagedata += desttupleextra;
            } while(destpagedata < destpageend);
        }
    }
    else
    {
        // Null source tuple: fill with default/zero values.
        if (!SYSisSame<NotVoidType,SrcNotVoidType>())
        {
            do
            {
                for (exint i = 0; i < mintuplesize; ++i, ++destpagedata)
                    *destpagedata = NotVoidType();
                destpagedata += desttupleextra;
            } while(destpagedata < destpageend);
        }
        else
        {
            do
            {
                if (SYSisPOD<NotVoidType>())
                {
                    // POD types can be zeroed bytewise.
                    for (exint i = 0; i < mintuplesize; ++i, ++destpagedata)
                        ::memset(destpagedata, 0, sizeof(NotVoidType));
                }
                else
                {
                    // NOTE(review): a declaration of `v` (presumably
                    // NotVoidType v = NotVoidType();) appears to have been
                    // lost in extraction here; `v` is otherwise undeclared.
                    for (exint i = 0; i < mintuplesize; ++i, ++destpagedata)
                        *destpagedata = v;
                }
                destpagedata += desttupleextra;
            } while(destpagedata < destpageend);
        }
    }
}
1688 
1689 template<typename DATA_T,exint TSIZE,bool TABLEHARDENED,bool PAGESHARDENED,exint THEPAGEBITS,typename IDX_T>
1690 template<typename SrcNotVoidType>
1691 void
1693  NotVoidType *__restrict destpagedata,
1694  const SrcNotVoidType *__restrict srcpagedata,
1695  const exint desttuplesize,
1696  const exint srctuplesize,
1697  UT_PageOff ntuples)
1698 {
1699  UT_ASSERT_P(!(SYSisSame<DATA_T,void>()));
1700 
1701  NotVoidType *destpageend = destpagedata + ntuples*desttuplesize;
1702 
1703  if (desttuplesize == srctuplesize)
1704  {
1705  // Copy values from srcpagedata to destpagedata.
1706  if (SYSisSame<NotVoidType,SrcNotVoidType>())
1707  {
1708  do
1709  {
1710  *destpagedata = *srcpagedata;
1711  ++srcpagedata;
1712  ++destpagedata;
1713  } while(destpagedata < destpageend);
1714  }
1715  else
1716  {
1717  do
1718  {
1719  *destpagedata = UTconvertStorage<NotVoidType>(*srcpagedata);
1720  ++srcpagedata;
1721  ++destpagedata;
1722  } while(destpagedata < destpageend);
1723  }
1724  }
1725  else
1726  {
1727  const exint mintuplesize = SYSmin(desttuplesize, srctuplesize);
1728  const exint srctupleextra = srctuplesize - mintuplesize;
1729  const exint desttupleextra = desttuplesize - mintuplesize;
1730 
1731  // Copy values from srcpagedata to destpagedata.
1732  if (SYSisSame<NotVoidType,SrcNotVoidType>())
1733  {
1734  do
1735  {
1736  for (exint i = 0; i < mintuplesize; ++i, ++srcpagedata, ++destpagedata)
1737  *destpagedata = *srcpagedata;
1738  destpagedata += desttupleextra;
1739  srcpagedata += srctupleextra;
1740  } while(destpagedata < destpageend);
1741  }
1742  else
1743  {
1744  do
1745  {
1746  for (exint i = 0; i < mintuplesize; ++i, ++srcpagedata, ++destpagedata)
1747  *destpagedata = UTconvertStorage<NotVoidType>(*srcpagedata);
1748  destpagedata += desttupleextra;
1749  srcpagedata += srctupleextra;
1750  } while(destpagedata < destpageend);
1751  }
1752  }
1753 }
1754 
1755 template<typename DATA_T,exint TSIZE,bool TABLEHARDENED,bool PAGESHARDENED,exint THEPAGEBITS,typename IDX_T>
1756 template<typename SrcNotVoidType>
1757 bool
// isEqualConst: compares the first 'mintuplesize' components of the
// destination tuple 'tuple' against the source tuple 'stuple', and returns
// true iff assigning stuple onto tuple would leave tuple unchanged (so the
// caller can skip the write/harden).  Identical pointers - including both
// NULL, i.e. two implicit-zero constant pages - compare equal; exactly one
// NULL pointer compares unequal.
// NOTE(review): the declaration line is missing from this view; the member
// index declares it as:
//   static bool isEqualConst(const NotVoidType *tuple,
//                            const SrcNotVoidType *stuple,
//                            exint mintuplesize)
1759 {
1760  UT_ASSERT_P(!(SYSisSame<DATA_T,void>()));
1761 
1762  // If they're the same page, or they're both NULL,
1763  // there's nothing to do.
1764  if ((const void*)stuple == (void*)tuple)
1765  return true;
1766 
// One side constant-zero (NULL) and the other not: treat as unequal
// without inspecting components.
1767  if (!stuple || !tuple)
1768  return false;
1769 
1770  UT_ASSERT_P(mintuplesize > 0);
1771 
1772  bool isequal;
1773  if (!SYSisSame<NotVoidType,SrcNotVoidType>())
1774  {
1775  // Cast to the destination type, since it's
1776  // supposed to represent whether the destination
1777  // wouldn't change if assigned.
1778  isequal = (tuple[0] == UTconvertStorage<NotVoidType>(stuple[0]));
1779  for (exint i = 1; i < mintuplesize; ++i)
1780  isequal &= (tuple[i] == UTconvertStorage<NotVoidType>(stuple[i]));
1781  }
1782  else
1783  {
1784  // NOTE: Don't want to copy-construct non-POD types
1785  // unnecessarily by casting to NotVoidType.
1786  isequal = (tuple[0] == stuple[0]);
1787  for (exint i = 1; i < mintuplesize; ++i)
1788  isequal &= (tuple[i] == stuple[i]);
1789  }
1790  // If they're equal, nothing to do
1791  return isequal;
1792 }
1793 
1794 template<typename DATA_T,exint TSIZE,bool TABLEHARDENED,bool PAGESHARDENED,exint THEPAGEBITS,typename IDX_T>
1795 void
// fill (scalar): sets every component of every tuple in index range
// [start, end) to the scalar 'val'.  Requires a compile-time tuple size
// (TSIZE >= 1) and a table that is already hardened (sole reference).
// Partial first/last pages are written in place after hardening any
// constant/shared page whose contents would actually change; fully-covered
// middle pages are collapsed to constant pages when !PAGESHARDENED, which
// avoids per-element writes entirely.
// NOTE(review): the function's signature line is missing from this view.
1797 {
1799  UT_ASSERT_P(start < end);
1800  UT_ASSERT_P(start >= IDX_T(0));
1801  UT_ASSERT_P(end <= myCapacity);
1802  UT_ASSERT_P(TSIZE >= 1);
1803  UT_ASSERT_MSG_P(myRefCount.relaxedLoad() == 1, "The table must already be hardened before we modify it!");
1804 
1805  UT_PageNum startpage = pageNum(start);
1806  UT_PageOff startoff = pageOff(start);
1807  UT_PageNum endpage = pageNum(end);
1808  UT_PageOff endoff = pageOff(end);
1809 
// valiszero is only consulted when a partial first/last page is an
// implicit-zero constant page (tuple pointer NULL below), so only compute
// isZero(val) when such a partial page can exist.
1810  bool valiszero = !PAGESHARDENED && (startoff != UT_PageOff(0) || endoff != UT_PageOff(0));
1811  if (valiszero)
1812  {
1813  valiszero = isZero(val);
1814  }
1815 
// A lone first page of a small array may be allocated with less than a
// full page's capacity; harden with the true capacity in that case.
1816  UT_PageOff pagecapacity(thePageSize);
1817  if (startpage == UT_PageNum(0) && endpage == UT_PageNum(0) && exint(capacity()) < thePageSize)
1818  pagecapacity = capacity();
1819 
1820  // Handle incomplete first page
1821  if (startoff != UT_PageOff(0))
1822  {
1823  PageTableEntry *page = getPPage(startpage);
1824  bool equal = false;
1825  if (!PAGESHARDENED)
1826  {
1827  if (page->isConstant())
1828  {
1829  const NotVoidType *tuple = getConstantPtr(page);
1830  // Nothing to do if equal already.
1831  if (tuple)
1832  {
1833  equal = true;
1834  for (exint i = 0; i < TSIZE; ++i)
1835  equal &= (tuple[i] == val);
1836  }
1837  else
1838  {
// NULL constant pointer means the page is implicitly all zero.
1839  equal = valiszero;
1840  }
1841  if (!equal)
1842  hardenConstantPage(page, pagecapacity);
1843  }
1844  else if (page->isShared())
1845  hardenSharedPage(page, pagecapacity);
1846  }
1847  if (!equal)
1848  {
1849  UT_ASSERT_P(!page->isConstant());
1850  UT_ASSERT_P(!page->isShared());
1851  NotVoidType *data = page->getFirstPtr();
// If the range ends inside this same page, stop at endoff instead of
// the page capacity.
1852  NotVoidType *end = data + TSIZE*((endpage != startpage) ? pagecapacity : endoff);
1853  data += TSIZE*startoff;
1854  for (; data != end; ++data)
1855  *data = val;
1856  }
1857  if (endpage == startpage)
1858  return;
1859 
1860  ++startpage;
1861  }
1862 
1863  // Handle complete middle pages
1864  for (; startpage < endpage; ++startpage)
1865  {
1866  PageTableEntry *page = getPPage(startpage);
1867  // FIXME: Need a makeConstant that takes a single value for non-POD types
1868  if (!PAGESHARDENED)
1869  makeConstant(page, val);
1870  else
1871  {
1872  NotVoidType *data = page->getFirstPtr();
1873  // NOTE: This isn't a small page, so we can use thePageSize
1874  NotVoidType *end = data + TSIZE*thePageSize;
1875  for (; data != end; ++data)
1876  *data = val;
1877  }
1878  }
1879 
1880  // Handle incomplete last page
1881  if (endoff != UT_PageOff(0))
1882  {
1883  PageTableEntry *page = getPPage(startpage);
1884  // If end page, and goes to end, can still make constant.
1885  if (!PAGESHARDENED && startpage >= numPages(mySize)-1 && endoff >= pageOff(mySize-1)+1)
1886  {
1887  makeConstant(page, val);
1888  return;
1889  }
1890  bool equal = false;
1891  if (!PAGESHARDENED)
1892  {
1893  if (page->isConstant())
1894  {
1895  const NotVoidType *tuple = getConstantPtr(page);
1896  // Nothing to do if equal already.
1897  if (tuple)
1898  {
1899  equal = true;
1900  for (exint i = 0; i < TSIZE; ++i)
1901  equal &= (tuple[i] == val);
1902  }
1903  else
1904  {
1905  equal = valiszero;
1906  }
1907  if (!equal)
1908  hardenConstantPage(page, pagecapacity);
1909  }
1910  else if (page->isShared())
1911  hardenSharedPage(page, pagecapacity);
1912  }
1913  if (!equal)
1914  {
1915  UT_ASSERT_P(!page->isConstant());
1916  UT_ASSERT_P(!page->isShared());
1917  NotVoidType *data = page->getFirstPtr();
1918  NotVoidType *end = data + TSIZE*endoff;
1919  for (; data != end; ++data)
1920  *data = val;
1921  }
1922  }
1923 }
1924 
1925 template<typename DATA_T,exint TSIZE,bool TABLEHARDENED,bool PAGESHARDENED,exint THEPAGEBITS,typename IDX_T>
1926 void
// fill (scalar, runtime tuple size): same contract as the TSIZE>=1 overload
// above, but for tables whose tuple size is only known at runtime
// (TSIZE == -1): every component of every tuple in [start, end) is set to
// the scalar 'val'.  The table must already be hardened (sole reference).
// NOTE(review): the function's signature line is missing from this view.
1928 {
1930  UT_ASSERT_P(start < end);
1931  UT_ASSERT_P(start >= IDX_T(0));
1932  UT_ASSERT_P(end <= myCapacity);
1933  UT_ASSERT_P(TSIZE == -1);
1934  UT_ASSERT_P(tuplesize >= 1);
1935  UT_ASSERT_MSG_P(myRefCount.relaxedLoad() == 1, "The table must already be hardened before we modify it!");
1936 
1937  // Fast paths for small sizes.
// Each branch re-dispatches to the fill() above with the tuple size baked
// in at compile time.  NOTE(review): the cast expressions preceding each
// call (presumably reinterpret_casts of 'this' to the fixed-tuple-size
// table type) are on lines missing from this view - confirm against the
// original header.
1938  if (tuplesize <= 4)
1939  {
1940  if (tuplesize == 1)
1941  {
1943  this)->fill(start, end, val);
1944  }
1945  else if (tuplesize == 2)
1946  {
1948  this)->fill(start, end, val);
1949  }
1950  else if (tuplesize == 3)
1951  {
1953  this)->fill(start, end, val);
1954  }
1955  else //if (tuplesize == 4)
1956  {
1958  this)->fill(start, end, val);
1959  }
1960  return;
1961  }
1962 
1963  UT_PageNum startpage = pageNum(start);
1964  UT_PageOff startoff = pageOff(start);
1965  UT_PageNum endpage = pageNum(end);
1966  UT_PageOff endoff = pageOff(end);
1967 
// Only compute isZero(val) if a partial first/last page might be an
// implicit-zero constant page.
1968  bool valiszero = !PAGESHARDENED && (startoff != UT_PageOff(0) || endoff != UT_PageOff(0));
1969  if (valiszero)
1970  {
1971  valiszero = isZero(val);
1972  }
1973 
// A lone first page of a small array may have less than full-page capacity.
1974  UT_PageOff pagecapacity(thePageSize);
1975  if (startpage == UT_PageNum(0) && endpage == UT_PageNum(0) && exint(capacity()) < thePageSize)
1976  pagecapacity = capacity();
1977 
1978  // Handle incomplete first page
1979  if (startoff != UT_PageOff(0))
1980  {
1981  PageTableEntry *page = getPPage(startpage);
1982  bool equal = false;
1983  if (!PAGESHARDENED)
1984  {
1985  if (page->isConstant())
1986  {
1987  const NotVoidType *tuple = getConstantPtr(page, 0, tuplesize);
1988  // Nothing to do if equal already.
1989  if (tuple)
1990  {
1991  equal = true;
1992  for (exint i = 0; i < tuplesize; ++i)
1993  equal &= (tuple[i] == val);
1994  }
1995  else
1996  {
// NULL constant pointer == implicit all-zero page.
1997  equal = valiszero;
1998  }
1999  if (!equal)
2000  hardenConstantPage(page, pagecapacity, tuplesize);
2001  }
2002  else if (page->isShared())
2003  hardenSharedPage(page, pagecapacity, tuplesize);
2004  }
2005  if (!equal)
2006  {
2007  UT_ASSERT_P(!page->isConstant());
2008  UT_ASSERT_P(!page->isShared());
2009  NotVoidType *data = page->getFirstPtr();
2010  NotVoidType *end = data + tuplesize*((endpage != startpage) ? pagecapacity : endoff);
2011  data += tuplesize*startoff;
2012  for (; data != end; ++data)
2013  *data = val;
2014  }
2015  if (endpage == startpage)
2016  return;
2017  ++startpage;
2018  }
2019 
2020  // Handle complete middle pages
2021  for (; startpage < endpage; ++startpage)
2022  {
2023  PageTableEntry *page = getPPage(startpage);
2024  if (!PAGESHARDENED)
2025  makeConstant(page, val, tuplesize);
2026  else
2027  {
2028  NotVoidType *data = page->getFirstPtr();
2029  // NOTE: This isn't a small page, so we can use thePageSize
2030  NotVoidType *end = data + tuplesize*thePageSize;
2031  for (; data != end; ++data)
2032  *data = val;
2033  }
2034  }
2035 
2036  // Handle incomplete last page
2037  if (endoff != UT_PageOff(0))
2038  {
2039  PageTableEntry *page = getPPage(startpage);
2040  // If end page, and goes to end, can still make constant.
2041  if (!PAGESHARDENED && startpage >= numPages(mySize)-1 && endoff >= pageOff(mySize-1)+1)
2042  {
2043  makeConstant(page, val, tuplesize);
2044  return;
2045  }
2046  bool equal = false;
2047  if (!PAGESHARDENED)
2048  {
2049  if (page->isConstant())
2050  {
2051  const NotVoidType *tuple = getConstantPtr(page, 0, tuplesize);
2052  // Nothing to do if equal already.
2053  if (tuple)
2054  {
2055  equal = true;
2056  for (exint i = 0; i < tuplesize; ++i)
2057  equal &= (tuple[i] == val);
2058  }
2059  else
2060  {
2061  equal = valiszero;
2062  }
2063  if (!equal)
2064  hardenConstantPage(page, pagecapacity, tuplesize);
2065  }
2066  else if (page->isShared())
2067  hardenSharedPage(page, pagecapacity, tuplesize);
2068  }
2069  if (!equal)
2070  {
2071  UT_ASSERT_P(!page->isConstant());
2072  UT_ASSERT_P(!page->isShared());
2073  NotVoidType *data = page->getFirstPtr();
2074  NotVoidType *end = data + tuplesize*endoff;
2075  for (; data != end; ++data)
2076  *data = val;
2077  }
2078  }
2079 }
2080 
2081 template<typename DATA_T,exint TSIZE,bool TABLEHARDENED,bool PAGESHARDENED,exint THEPAGEBITS,typename IDX_T>
2082 void
// fill (per-component): sets every tuple in [start, end) to the tuple given
// by the 'values' array, which must hold 'tuplesize' components (indexed
// values[0..tuplesize-1] below).  Works for both runtime (TSIZE == -1) and
// matching compile-time tuple sizes.  The table must already be hardened.
// NOTE(review): the function's signature line is missing from this view.
2084 {
2086  UT_ASSERT_P(start < end);
2087  UT_ASSERT_P(start >= IDX_T(0));
2088  UT_ASSERT_P(end <= myCapacity);
2089  UT_ASSERT_P(TSIZE == -1 || TSIZE==tuplesize);
2090  UT_ASSERT_P(tuplesize >= 1);
2091  UT_ASSERT_MSG_P(myRefCount.relaxedLoad() == 1, "The table must already be hardened before we modify it!");
2092 
2093  UT_PageNum startpage = pageNum(start);
2094  UT_PageOff startoff = pageOff(start);
2095  UT_PageNum endpage = pageNum(end);
2096  UT_PageOff endoff = pageOff(end);
2097 
// A lone first page of a small array may have less than full-page capacity.
2098  UT_PageOff pagecapacity(thePageSize);
2099  if (startpage == UT_PageNum(0) && endpage == UT_PageNum(0) && exint(capacity()) < thePageSize)
2100  pagecapacity = capacity();
2101 
2102  // Handle incomplete first page
2103  if (startoff != UT_PageOff(0))
2104  {
2105  PageTableEntry *page = getPPage(startpage);
2106  bool equal = false;
2107  if (!PAGESHARDENED)
2108  {
2109  if (page->isConstant())
2110  {
2111  const NotVoidType *tuple = getConstantPtr(page, 0, tuplesize);
2112  // Nothing to do if equal already.
2113  equal = true;
2114  if (tuple)
2115  {
2116  for (exint i = 0; i < tuplesize; ++i)
2117  equal &= (tuple[i] == values[i]);
2118  }
2119  else
2120  {
// NULL constant pointer == implicit all-zero page: compare against zero.
2121  for (exint i = 0; i < tuplesize; ++i)
2122  equal &= (NotVoidType(0) == values[i]);
2123  }
2124  if (!equal)
2125  hardenConstantPage(page, pagecapacity, tuplesize);
2126  }
2127  else if (page->isShared())
2128  hardenSharedPage(page, pagecapacity, tuplesize);
2129  }
2130  if (!equal)
2131  {
2132  UT_ASSERT_P(!page->isConstant());
2133  UT_ASSERT_P(!page->isShared());
2134  NotVoidType *data = page->getFirstPtr();
2135  NotVoidType *end = data + tuplesize*((endpage != startpage) ? pagecapacity : endoff);
2136  data += tuplesize*startoff;
2137  while (data != end)
2138  {
2139  for (exint i = 0; i < tuplesize; ++i, ++data)
2140  *data = values[i];
2141  }
2142  }
2143  if (endpage == startpage)
2144  return;
2145  ++startpage;
2146  }
2147 
2148  // Handle complete middle pages
2149  for (; startpage < endpage; ++startpage)
2150  {
2151  PageTableEntry *page = getPPage(startpage);
2152  if (!PAGESHARDENED)
2153  makeConstant(page, values, tuplesize);
2154  else
2155  {
2156  NotVoidType *data = page->getFirstPtr();
2157  // NOTE: This isn't a small page, so we can use thePageSize
2158  NotVoidType *end = data + tuplesize*thePageSize;
2159  while (data != end)
2160  {
2161  for (exint i = 0; i < tuplesize; ++i, ++data)
2162  *data = values[i];
2163  }
2164  }
2165  }
2166 
2167  // Handle incomplete last page
2168  if (endoff != UT_PageOff(0))
2169  {
2170  PageTableEntry *page = getPPage(startpage);
2171  // If end page, and goes to end, can still make constant.
2172  if (!PAGESHARDENED && startpage >= numPages(mySize)-1 && endoff >= pageOff(mySize-1)+1)
2173  {
2174  makeConstant(page, values, tuplesize);
2175  return;
2176  }
2177  bool equal = false;
2178  if (!PAGESHARDENED)
2179  {
2180  if (page->isConstant())
2181  {
2182  const NotVoidType *tuple = getConstantPtr(page, 0, tuplesize);
2183  // Nothing to do if equal already.
2184  equal = true;
2185  if (tuple)
2186  {
2187  for (exint i = 0; i < tuplesize; ++i)
2188  equal &= (tuple[i] == values[i]);
2189  }
2190  else
2191  {
2192  for (exint i = 0; i < tuplesize; ++i)
2193  equal &= (NotVoidType(0) == values[i]);
2194  }
2195  if (!equal)
2196  hardenConstantPage(page, pagecapacity, tuplesize);
2197  }
2198  else if (page->isShared())
2199  hardenSharedPage(page, pagecapacity, tuplesize);
2200  }
2201  if (!equal)
2202  {
2203  UT_ASSERT_P(!page->isConstant());
2204  UT_ASSERT_P(!page->isShared());
2205  NotVoidType *data = page->getFirstPtr();
2206  NotVoidType *end = data + tuplesize*endoff;
2207  while (data != end)
2208  {
2209  for (exint i = 0; i < tuplesize; ++i, ++data)
2210  *data = values[i];
2211  }
2212  }
2213  }
2214 }
2215 
2216 template<typename DATA_T,exint TSIZE,bool TABLEHARDENED,bool PAGESHARDENED,exint THEPAGEBITS,typename IDX_T>
2217 void
// fill (UT_FixedVector): sets every tuple in [start, end) to the fixed-size
// vector 'val'.  Requires a compile-time tuple size (TSIZE >= 1); raw page
// data is addressed as UT_FixedVector<NotVoidType,theSafeTupleSize>* so
// whole tuples are assigned at once.  The table must already be hardened.
// NOTE(review): the signature line and several declaration lines (the
// getConstantPtr() results at the constant-page checks and the
// getFirstPtr()-based 'data' declarations) are missing from this view -
// confirm against the original header before modifying.
2219 {
2221  UT_ASSERT_P(start < end);
2222  UT_ASSERT_P(start >= IDX_T(0));
2223  UT_ASSERT_P(end <= myCapacity);
2224  UT_ASSERT_P(TSIZE >= 1);
2225  UT_ASSERT_MSG_P(myRefCount.relaxedLoad() == 1, "The table must already be hardened before we modify it!");
2226 
2227  UT_PageNum startpage = pageNum(start);
2228  UT_PageOff startoff = pageOff(start);
2229  UT_PageNum endpage = pageNum(end);
2230  UT_PageOff endoff = pageOff(end);
2231 
// A lone first page of a small array may have less than full-page capacity.
2232  UT_PageOff pagecapacity(thePageSize);
2233  if (startpage == UT_PageNum(0) && endpage == UT_PageNum(0) && exint(capacity()) < thePageSize)
2234  pagecapacity = capacity();
2235 
2236  // Handle incomplete first page
2237  if (startoff != UT_PageOff(0))
2238  {
2239  PageTableEntry *page = getPPage(startpage);
2240  bool equal = false;
2241  if (!PAGESHARDENED)
2242  {
2243  if (page->isConstant())
2244  {
2246  // Nothing to do if equal already.
// NULL constant tuple == implicit all-zero page, hence the isZero(val) arm.
2247  equal = tuple ? (*tuple == val) : isZero(val);
2248  if (!equal)
2249  hardenConstantPage(page, pagecapacity);
2250  }
2251  else if (page->isShared())
2252  hardenSharedPage(page, pagecapacity);
2253  }
2254  if (!equal)
2255  {
2256  UT_ASSERT_P(!page->isConstant());
2257  UT_ASSERT_P(!page->isShared());
2259  UT_FixedVector<NotVoidType,theSafeTupleSize> *end = data + ((endpage != startpage) ? pagecapacity : endoff);
2260  data += startoff;
2261  for (; data != end; ++data)
2262  *data = val;
2263  }
2264  if (endpage == startpage)
2265  return;
2266 
2267  ++startpage;
2268  }
2269 
2270  // Handle complete middle pages
2271  for (; startpage < endpage; ++startpage)
2272  {
2273  PageTableEntry *page = getPPage(startpage);
2274  if (!PAGESHARDENED)
2275  makeConstant(page, val);
2276  else
2277  {
2279  // NOTE: This isn't a small page, so we can use thePageSize
2281  for (; data != end; ++data)
2282  *data = val;
2283  }
2284  }
2285 
2286  // Handle incomplete last page
2287  if (endoff != UT_PageOff(0))
2288  {
2289  PageTableEntry *page = getPPage(startpage);
2290  // If end page, and goes to end, can still make constant.
2291  if (!PAGESHARDENED && startpage >= numPages(mySize)-1 && endoff >= pageOff(mySize-1)+1)
2292  {
2293  makeConstant(page, val);
2294  return;
2295  }
2296  bool equal = false;
2297  if (!PAGESHARDENED)
2298  {
2299  if (page->isConstant())
2300  {
2302  // Nothing to do if equal already.
2303  equal = tuple ? (*tuple == val) : isZero(val);
2304  if (!equal)
2305  hardenConstantPage(page, pagecapacity);
2306  }
2307  else if (page->isShared())
2308  hardenSharedPage(page, pagecapacity);
2309  }
2310  if (!equal)
2311  {
2312  UT_ASSERT_P(!page->isConstant());
2313  UT_ASSERT_P(!page->isShared());
2315  UT_FixedVector<NotVoidType,theSafeTupleSize> *end = data + endoff;
2316  for (; data != end; ++data)
2317  *data = val;
2318  }
2319  }
2320 }
2321 
2322 template<typename DATA_T,exint TSIZE,bool TABLEHARDENED,bool PAGESHARDENED,exint THEPAGEBITS,typename IDX_T>
2323 template<typename DEST_DATA_T,exint DEST_TSIZE>
2324 void
2326 {
2327  // If the source storage type is not known at compile time,
2328  // switch, cast, and call again.
2329  if (SYSisSame<DATA_T,void>())
2330  {
2332 
2333  // Probably matches destination type
2335  {
2336  castType<DEST_DATA_T>().getVectorRange(srcstart, nelements, dest);
2337  return;
2338  }
2339 
2340  switch (storage)
2341  {
2342  case UT_Storage::INT8:
2343  castType<int8>().getVectorRange(srcstart, nelements, dest); return;
2344  case UT_Storage::UINT8:
2345  castType<uint8>().getVectorRange(srcstart, nelements, dest); return;
2346  case UT_Storage::INT16:
2347  castType<int16>().getVectorRange(srcstart, nelements, dest); return;
2348  case UT_Storage::INT32:
2349  castType<int32>().getVectorRange(srcstart, nelements, dest); return;
2350  case UT_Storage::INT64:
2351  castType<int64>().getVectorRange(srcstart, nelements, dest); return;
2352  case UT_Storage::REAL16:
2353  castType<fpreal16>().getVectorRange(srcstart, nelements, dest); return;
2354  case UT_Storage::REAL32:
2355  castType<fpreal32>().getVectorRange(srcstart, nelements, dest); return;
2356  case UT_Storage::REAL64:
2357  castType<fpreal64>().getVectorRange(srcstart, nelements, dest); return;
2358  case UT_Storage::INVALID:
2359  UT_ASSERT_MSG(0, "Can't have a UT_PageArray with DATA_T void and invalid storage!");
2360  break;
2361  }
2362  return;
2363  }
2364 
2365  // We now have both the source type and the destination type known at compile time.
2366  UT_ASSERT_P((!SYSisSame<DATA_T,void>()));
2367 
2368  // Tuple size probably matches
2369  if (TSIZE == -1 && myImpl.getTupleSize() == DEST_TSIZE)
2370  {
2371  castTupleSize<DEST_TSIZE>().getVectorRange(srcstart, nelements, dest);
2372  return;
2373  }
2374 
2375  auto vdest = reinterpret_cast<UT_FixedVector<DEST_DATA_T,DEST_TSIZE> *>(dest);
2376 
2377  // TODO: Implement this more efficiently, e.g. only check once whether each page is constant or shared.
2378  for (IDX_T srcend(srcstart+nelements); srcstart < srcend; ++srcstart, ++vdest)
2379  *vdest = getVector<DEST_DATA_T,DEST_TSIZE>(srcstart);
2380 }
2381 
2384 template<typename SRC_DATA_T,exint SRC_TSIZE>
2385 void
// setVectorRange: writes 'nelements' vectors from 'src' into this array
// starting at index 'deststart'.  Mirrors getVectorRange: hardens the table
// first (copy-on-write), then, when the destination storage type is only
// known at runtime (DATA_T == void), switches on the runtime storage and
// recurses on the hardened, castType'd view; recasts the tuple size when it
// matches SRC_TSIZE.  No-op for nelements <= 0.
// NOTE(review): the 'storage' declaration and the storage-matches-source
// test are on lines missing from this view.
2386 {
2387  if (nelements <= IDX_T(0))
2388  return;
2389 
// Ensure we hold the sole reference before any write; all recursive
// dispatches below go through 'hard' so they skip re-hardening.
2390  auto &hard = hardenTable();
2391 
2392  UT_ASSERT_MSG_P((!SYSisSame<SRC_DATA_T,void>()), "Source type must be known.");
2393 
2394  // If the destination storage type is not known at compile time,
2395  // switch, cast, and call again.
2396  if (SYSisSame<DATA_T,void>())
2397  {
2399 
2400  // Probably matches source type
2402  {
2403  hard.template castType<SRC_DATA_T>().setVectorRange(deststart, nelements, src);
2404  return;
2405  }
2406 
2407  switch (storage)
2408  {
2409  case UT_Storage::INT8:
2410  hard.template castType<int8>().setVectorRange(deststart, nelements, src); return;
2411  case UT_Storage::UINT8:
2412  hard.template castType<uint8>().setVectorRange(deststart, nelements, src); return;
2413  case UT_Storage::INT16:
2414  hard.template castType<int16>().setVectorRange(deststart, nelements, src); return;
2415  case UT_Storage::INT32:
2416  hard.template castType<int32>().setVectorRange(deststart, nelements, src); return;
2417  case UT_Storage::INT64:
2418  hard.template castType<int64>().setVectorRange(deststart, nelements, src); return;
2419  case UT_Storage::REAL16:
2420  hard.template castType<fpreal16>().setVectorRange(deststart, nelements, src); return;
2421  case UT_Storage::REAL32:
2422  hard.template castType<fpreal32>().setVectorRange(deststart, nelements, src); return;
2423  case UT_Storage::REAL64:
2424  hard.template castType<fpreal64>().setVectorRange(deststart, nelements, src); return;
2425  case UT_Storage::INVALID:
2426  UT_ASSERT_MSG(0, "Can't have a UT_PageArray with DATA_T void and invalid storage!");
2427  break;
2428  }
2429  return;
2430  }
2431 
2432  // We now have both the source type and the destination type known at compile time.
2433  UT_ASSERT_P((!SYSisSame<DATA_T,void>()));
2434 
2435  // Tuple size probably matches
2436  if (TSIZE == -1 && myImpl.getTupleSize() == SRC_TSIZE)
2437  {
2438  hard.template castTupleSize<SRC_TSIZE>().setVectorRange(deststart, nelements, src);
2439  return;
2440  }
2441 
2442  // TODO: Implement this more efficiently, e.g. only check once whether each page is constant or shared.
// Generic fallback: one setVector call per element.
2443  for (IDX_T destend(deststart+nelements); deststart < destend; ++deststart, ++src)
2444  setVector(deststart, *src);
2445 }
2446 
2447 template<typename DATA_T,exint TSIZE,bool TABLEHARDENED,bool PAGESHARDENED,exint THEPAGEBITS,typename IDX_T>
2448 bool
// hasNanInRange: returns true iff any component of any tuple in index range
// [start, end) is NaN.  Immediately returns false for non-floating-point
// storage (integers can't be NaN).  When the storage type is only known at
// runtime (DATA_T == void), switches on it and recurses on a castType'd
// view.  Constant pages are checked via their single stored tuple;
// implicit-zero constant pages (NULL pointer) are skipped since zero is
// never NaN.
// NOTE(review): the signature line and the 'storage' declaration are on
// lines missing from this view.
2450 {
2451  if (!UTisFloatStorage(getStorage()))
2452  return false;
2453 
2454  // If the storage type is not known at compile time,
2455  // switch, cast, and call again.
2456  if (SYSisSame<DATA_T,void>())
2457  {
2459  switch (storage)
2460  {
2461  case UT_Storage::REAL16:
2462  return castType<fpreal16>().hasNanInRange(start, end);
2463  case UT_Storage::REAL32:
2464  return castType<fpreal32>().hasNanInRange(start, end);
2465  case UT_Storage::REAL64:
2466  return castType<fpreal64>().hasNanInRange(start, end);
2467  default:
2468  UT_ASSERT_MSG(0, "Only 16-bit, 32-bit, and 64-bit floats should be considered float types!");
2469  break;
2470  }
2471  return false;
2472  }
2473 
2474  UT_ASSERT_P(start >= IDX_T(0) && start <= size());
2475  UT_ASSERT_P(end >= IDX_T(0) && end <= size());
2476  UT_ASSERT_P(start <= end);
2477 
2478  if (start >= end)
2479  return false;
2480 
2481  const PageTable *pages = myImpl.getPages();
2482  UT_ASSERT_P(pages);
2483 
2484  UT_PageNum pagenum = pageNum(start);
2485  UT_PageOff pageoff = pageOff(start);
2486  UT_PageNum endpagenum = pageNum(end);
2487  UT_PageOff endpageoff = pageOff(end);
2488 
2489  exint tuplesize = getTupleSize();
2490 
// Normalize an exclusive end that falls exactly on a page boundary so the
// loop below treats the previous page as the (full) last page.
2491  if (endpageoff == UT_PageOff(0))
2492  {
2493  --endpagenum;
2494  endpageoff = UT_PageOff(thePageSize);
2495  }
2496 
2497  for (; pagenum <= endpagenum; ++pagenum)
2498  {
2499  const PageTableEntry *const page = pages->getPPage(pagenum);
2500  if (page->isConstant())
2501  {
2502  const NotVoidType *data = getConstantPtr(page, 0, tuplesize);
2503  // Special case for zero page is always a number.
2504  if (!data)
2505  continue;
2506  for (exint i = 0; i < tuplesize; ++i)
2507  {
2508  if (SYSisNan(data[i]))
2509  return true;
2510  }
2511  }
2512  else
2513  {
2514  const NotVoidType *data = page->getFirstPtr();
// NOTE(review): 'endpageoff' and 'pageoff' are tuple offsets elsewhere in
// this file (cf. fill(), which scales them by the tuple size before adding
// to a component pointer), yet here they are added unscaled to a component
// pointer while the full-page case uses thePageSize*tuplesize.  Verify
// against the original source whether these should be multiplied by
// tuplesize; as written, tail components of partially-covered pages may be
// skipped when tuplesize > 1.
2515  const NotVoidType *end = data + ((pagenum == endpagenum) ? endpageoff : thePageSize*tuplesize);
2516  data += pageoff;
2517  for (; data != end; ++data)
2518  {
2519  if (SYSisNan(*data))
2520  return true;
2521  }
2522  }
// Only the first page starts mid-page; subsequent pages scan from offset 0.
2523  pageoff = UT_PageOff(0);
2524  }
2525 
2526  return false;
2527 }
2528 
2529 
2530 #endif
static void copyNonConst(NotVoidType *destpagedata, const SrcNotVoidType *srcpagedata, exint desttuplesize, exint srctuplesize, UT_PageOff ntuples)
GLenum GLuint GLenum GLsizei const GLchar * buf
Definition: glcorearb.h:2540
static SYS_FORCE_INLINE const NotVoidType * getConstantPtr(const PageTableEntry *page, exint component=0, exint tuplesize=TSIZE)
SYS_FORCE_INLINE void setSize(IDX_T newsize)
SYS_FORCE_INLINE void * getFirstPtrVoid()
Returns the data pointer, if not a constant page.
UT_Storage
Definition: UT_Storage.h:28
NotVoid< DATA_T >::type NotVoidType
Definition: UT_PageArray.h:239
void UTswap(T &a, T &b)
Definition: UT_Swap.h:35
void setTupleSize(exint newtuplesize, const UT_Defaults &v)
getFileOption("OpenEXR:storage") storage
Definition: HDK_Image.dox:276
GLboolean * data
Definition: glcorearb.h:131
const GLdouble * v
Definition: glcorearb.h:837
GLuint start
Definition: glcorearb.h:475
int64 getI(exint i=0) const
Definition: UT_Defaults.h:268
SYS_FORCE_INLINE IDX_T size() const
Definition: UT_PageArray.h:899
SYS_FORCE_INLINE T * SYSconst_cast(const T *foo)
Definition: SYS_Types.h:136
SYS_FORCE_INLINE bool isShared() const
void countMemory(UT_MemoryCounter &counter, bool inclusive) const
int64 exint
Definition: SYS_Types.h:125
constexpr bool SYSisNan(const F f)
Definition: SYS_Math.h:184
SYS_FORCE_INLINE PageTableEntry * getFirstPage()
void setConstant(IDX_T start, IDX_T end, NotVoidType v)
exint UT_PageNum
Definition: UT_PageArray.h:35
SYS_FORCE_INLINE const PageTableEntry * getPPage(UT_PageNum i) const
IMATH_HOSTDEVICE constexpr bool equal(T1 a, T2 b, T3 t) IMATH_NOEXCEPT
Definition: ImathFun.h:105
SYS_FORCE_INLINE exint getTupleSize() const
virtual bool countShared(size_t size, exint refcount, const void *p)
#define UT_ASSERT_MSG_P(ZZ,...)
Definition: UT_Assert.h:158
#define UT_MEMORY_DEBUG_LOG_SHARED(m, s, p, r)
SYS_FORCE_INLINE constexpr bool UTisFloatStorage(UT_Storage storage)
Returns true iff the given storage type represents a floating-point number.
Definition: UT_Storage.h:50
SYS_FORCE_INLINE IDX_T size() const
NOTE: This is the size of the full array, not the number of pages.
#define UT_IF_ASSERT_P(ZZ)
Definition: UT_Assert.h:182
SYS_FORCE_INLINE UT_Storage getStorage() const
SYS_FORCE_INLINE exint getRefCount() const
bool hasNanInRange(IDX_T start, IDX_T end) const
void setVectorRange(IDX_T deststart, IDX_T nelements, const UT_FixedVector< SRC_DATA_T, SRC_TSIZE > *src)
SYS_FORCE_INLINE bool isShared() const
void fill(IDX_T start, IDX_T end, const NotVoidType &val)
static bool isEqualConst(const NotVoidType *tuple, const SrcNotVoidType *stuple, exint mintuplesize)
SYS_FORCE_INLINE void setConstantBit()
#define UT_ASSERT_MSG(ZZ,...)
Definition: UT_Assert.h:159
SYS_FORCE_INLINE exint getRefCount() const
SYS_FORCE_INLINE bool isConstantZero() const
This is only valid to call if the type doesn't fit inline.
GLdouble n
Definition: glcorearb.h:2008
static SYS_FORCE_INLINE UT_PageOff pageOff(IDX_T i)
static SYS_FORCE_INLINE UT_PageNum numPages(IDX_T nelements)
SYS_FORCE_INLINE bool isRefd(exint tuplebytes) const
#define UT_ASSERT_P(ZZ)
Definition: UT_Assert.h:155
SYS_FORCE_INLINE IDX_T capacity() const
NOTE: This is the capacity of the full array, not the capacity of pages.
static SYS_FORCE_INLINE bool isZero(const T &val)
SYS_FORCE_INLINE IDX_T capacity() const
Definition: UT_PageArray.h:907
GLuint GLuint end
Definition: glcorearb.h:475
bool mustCountUnshared() const
#define UT_MEMORY_DEBUG_LOG(m, s)
SYS_FORCE_INLINE NotVoidType * getInlinePtr(exint tuplesize)
Returns the data pointer, if an inline constant page.
void setCapacity(IDX_T newcapacity)
Definition: UT_PageArray.h:935
long long int64
Definition: SYS_Types.h:116
SYS_FORCE_INLINE void * getMaskedPtrVoid()
static void copyPartialPage(PageTableEntry *dest, const typename SrcType::PageTableEntry *src, exint desttuplesize, exint srctuplesize, UT_PageOff destoff, UT_PageOff srcoff, UT_PageOff ntuples, UT_PageOff destpagecapacity)
SYS_FORCE_INLINE bool isConstantAndZeroSafe() const
exint UT_PageOff
Definition: UT_PageArray.h:36
SYS_FORCE_INLINE void incRef()
static void makeConstantFrom(PageTableEntry *dest, const typename SrcType::PageTableEntry *src, exint desttuplesize, exint srctuplesize)
SYS_FORCE_INLINE T relaxedLoad() const
void moveRange(IDX_T srcstart, IDX_T deststart, IDX_T nelements)
void countUnshared(size_t size)
GLsizeiptr size
Definition: glcorearb.h:664
SYS_FORCE_INLINE int64 getMemoryUsage(exint tuplebytes) const
SYS_AtomicInt< int32 > SYS_AtomicCounter
void setSize(IDX_T newsize)
Definition: UT_PageArray.h:959
fpreal64 getF(exint i=0) const
Definition: UT_Defaults.h:244
GLenum GLsizei GLsizei GLint * values
Definition: glcorearb.h:1602
void swapRange(IDX_T astart, IDX_T bstart, IDX_T nelements)
SYS_FORCE_INLINE void * getFirstPtrVoidUnsafe()
void setStorage(const UT_Storage newstorage)
static void fillNonConstWithConst(NotVoidType *destpagedata, NotVoidType *destpageend, const SrcNotVoidType *stuple, exint mintuplesize, exint desttupleextra)
SYS_FORCE_INLINE NotVoidType * getMaskedPtr()
SYS_FORCE_INLINE bool isConstant() const
This is always valid to call.
static void hardenSharedPage(PageTableEntry *page, UT_PageOff pagecapacity, exint tuplesize=TSIZE)
bool mustCountShared() const
GLuint GLfloat * val
Definition: glcorearb.h:1608
static SYS_FORCE_INLINE void makeConstant(PageTableEntry *page, const UT_FixedVector< NotVoidType, theSafeTupleSize > &val)
SYS_FORCE_INLINE UT_PageArray< DATA_T, TSIZE, true, PAGESHARDENED, THEPAGEBITS, IDX_T > & hardenTable()
static const exint thePageSize
Definition: UT_PageArray.h:251
SYS_FORCE_INLINE void alloc(UT_PageOff nelements, exint tuplesize=TSIZE)
static void hardenConstantPage(PageTableEntry *page, UT_PageOff pagecapacity, exint tuplesize=TSIZE)
int64 getMemoryUsage(bool inclusive) const
static SYS_FORCE_INLINE UT_PageNum pageNum(IDX_T i)
SYS_FORCE_INLINE void setVector(IDX_T i, const TS &as)
Definition: UT_PageArray.h:731
void getVectorRange(IDX_T srcstart, IDX_T nelements, UT_FixedVector< DEST_DATA_T, DEST_TSIZE > *dest) const
SYS_FORCE_INLINE constexpr bool UTisIntStorage(UT_Storage storage)
Returns true iff the given storage type represents an integer.
Definition: UT_Storage.h:43
#define SYSmin(a, b)
Definition: SYS_Math.h:1571
SYS_FORCE_INLINE constexpr int UTstorageSize(UT_Storage storage)
Returns the number of bytes in the given storage type.
Definition: UT_Storage.h:57
SYS_FORCE_INLINE void initZero()
SYS_FORCE_INLINE exint getTupleSize() const
Definition: UT_Defaults.h:239
SYS_FORCE_INLINE NotVoidType * getFirstPtr()
Returns the data pointer, if not a constant page.
bool isZero(const Type &x)
Return true if x is exactly equal to zero.
Definition: Math.h:337
Definition: format.h:895
SYS_FORCE_INLINE void decRef()
static void replacePage(PageTableEntry *dest, const typename SrcType::PageTableEntry *src, exint desttuplesize, exint srctuplesize, UT_PageOff destpagesize, UT_PageOff destpagecapacity)
GLenum src
Definition: glcorearb.h:1793