All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Macros Groups Pages
instantiatePool.h
1 //
2 // Copyright 2019 Pixar
3 //
4 // Licensed under the Apache License, Version 2.0 (the "Apache License")
5 // with the following modification; you may not use this file except in
6 // compliance with the Apache License and the following modification to it:
7 // Section 6. Trademarks. is deleted and replaced with:
8 //
9 // 6. Trademarks. This License does not grant permission to use the trade
10 // names, trademarks, service marks, or product names of the Licensor
11 // and its affiliates, except as required to comply with Section 4(c) of
12 // the License and to reproduce the content of the NOTICE file.
13 //
14 // You may obtain a copy of the Apache License at
15 //
16 // http://www.apache.org/licenses/LICENSE-2.0
17 //
18 // Unless required by applicable law or agreed to in writing, software
19 // distributed under the Apache License with the above modification is
20 // distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
21 // KIND, either express or implied. See the Apache License for the specific
22 // language governing permissions and limitations under the Apache License.
23 //
24 /*
25  * This header is not meant to be included in a .h file.
26  * Complain if we see this header twice through.
27  */
28 
29 #ifdef SDF_INSTANTIATEPOOL_H
30 #error This file should only be included once in any given source (.cpp) file.
31 #endif
32 
33 #define SDF_INSTANTIATEPOOL_H
34 
35 #include "pxr/pxr.h"
36 #include "pxr/usd/sdf/pool.h"
37 
38 PXR_NAMESPACE_OPEN_SCOPE
39 
// Helper to reserve a region of virtual address space of \p numBytes bytes.
// The reserved range is address space only -- it is not committed; use
// Sdf_PoolCommitRange() to make sub-ranges read/writable before touching them.
SDF_API char *
Sdf_PoolReserveRegion(size_t numBytes);
43 
// Helper to commit and make read/writable a range of bytes [\p start, \p end)
// previously reserved via Sdf_PoolReserveRegion.
// NOTE(review): presumably returns true on success -- confirm against the
// platform-specific implementation in the corresponding .cpp file.
SDF_API bool
Sdf_PoolCommitRange(char *start, char *end);
48 
// Definition of the per-thread data for each pool instantiation.  Each
// thread's data holds its local free list and its current allocation span
// (see Allocate()/Free() below).
template <class Tag,
          unsigned ElemSize, unsigned RegionBits, unsigned ElemsPerSpan>
typename Sdf_Pool<Tag, ElemSize, RegionBits, ElemsPerSpan>::_ThreadData
Sdf_Pool<Tag, ElemSize, RegionBits, ElemsPerSpan>::_threadData;
53 
// Base addresses of the reserved memory regions.  Slot 0 is unused: regions
// are numbered starting at 1 (see _ReserveSpan(), which first populates
// _regionStarts[1]), hence the NumRegions+1 array size.
template <class Tag,
          unsigned ElemSize, unsigned RegionBits, unsigned ElemsPerSpan>
char *
Sdf_Pool<Tag, ElemSize, RegionBits, ElemsPerSpan>::_regionStarts[NumRegions+1];
58 
// Shared allocation cursor: a packed (region, index) pair advanced by CAS in
// _ReserveSpan().  May also hold the special "locked" sentinel while one
// thread initializes the pool or allocates a new region.
template <class Tag,
          unsigned ElemSize, unsigned RegionBits, unsigned ElemsPerSpan>
std::atomic<typename Sdf_Pool<
    Tag, ElemSize, RegionBits, ElemsPerSpan>::_RegionState>
Sdf_Pool<Tag, ElemSize, RegionBits, ElemsPerSpan>::_regionState;
64 
// Queue of free lists published by threads whose local lists grew large
// (see Free()); other threads adopt them via _TakeSharedFreeList() in
// Allocate().  Wrapped in TfStaticData to avoid static-initialization-order
// issues.
template <class Tag,
          unsigned ElemSize, unsigned RegionBits, unsigned ElemsPerSpan>
TfStaticData<tbb::concurrent_queue<
    typename Sdf_Pool<Tag, ElemSize, RegionBits, ElemsPerSpan>::_FreeList>>
Sdf_Pool<Tag, ElemSize, RegionBits, ElemsPerSpan>::_sharedFreeLists;
70 
71 
72 template <class Tag,
73  unsigned ElemSize, unsigned RegionBits, unsigned ElemsPerSpan>
74 typename Sdf_Pool<Tag, ElemSize, RegionBits, ElemsPerSpan>::_RegionState
75 Sdf_Pool<Tag, ElemSize, RegionBits, ElemsPerSpan>::_RegionState::
76 Reserve(unsigned num) const
77 {
78  // Make a new state. If reserving \p num leaves no free elements, then
79  // return the LockedState, since a new region will need to be allocated.
80  uint32_t index = GetIndex();
81  unsigned region = GetRegion();
82  uint32_t avail = MaxIndex - index + 1;
83  _RegionState ret;
84  if (ARCH_UNLIKELY(avail <= num)) {
85  ret._state = LockedState;
86  }
87  else {
88  ret = _RegionState(region, index + num);
89  }
90  return ret;
91 }
92 
93 template <class Tag,
94  unsigned ElemSize, unsigned RegionBits, unsigned ElemsPerSpan>
95 typename Sdf_Pool<Tag, ElemSize, RegionBits, ElemsPerSpan>::Handle
96 Sdf_Pool<Tag, ElemSize, RegionBits, ElemsPerSpan>::Allocate()
97 {
98  _PerThreadData &threadData = _threadData.Get();
99 
100  // Check local free-list, or try to take a shared one.
101  Handle alloc = threadData.freeList.head;
102  if (alloc) {
103  threadData.freeList.Pop();
104  }
105  else if (!threadData.span.empty()) {
106  // Allocate new from local span.
107  alloc = threadData.span.Alloc();
108  }
109  else if (_TakeSharedFreeList(threadData.freeList)) {
110  // Nothing local. Try to take a shared free list.
111  alloc = threadData.freeList.head;
112  threadData.freeList.Pop();
113  }
114  else {
115  // No shared free list -- reserve a new span and allocate from it.
116  _ReserveSpan(threadData.span);
117  alloc = threadData.span.Alloc();
118  }
119  return alloc;
120 }
121 
122 template <class Tag,
123  unsigned ElemSize, unsigned RegionBits, unsigned ElemsPerSpan>
124 void
125 Sdf_Pool<Tag, ElemSize, RegionBits, ElemsPerSpan>::Free(Handle h)
126 {
127  _PerThreadData &threadData = _threadData.Get();
128 
129  // Add to local free list.
130  threadData.freeList.Push(h);
131 
132  // If our free list is big and we have >=25% span space, share the free list
133  // for use by other threads.
134  if (threadData.freeList.size >= ElemsPerSpan &&
135  threadData.span.size() >= ElemsPerSpan/4) {
136  _ShareFreeList(threadData.freeList);
137  }
138 }
139 
// Reserve a fresh span of elements for the calling thread, writing its
// bounds into \p out.  Thread-safe: span space is claimed by a CAS loop on
// the shared _regionState cursor, and a special "locked" sentinel state
// serializes first-time initialization and new-region allocation.
template <class Tag,
          unsigned ElemSize, unsigned RegionBits, unsigned ElemsPerSpan>
void
Sdf_Pool<Tag, ElemSize, RegionBits, ElemsPerSpan>::_ReserveSpan(_PoolSpan &out)
{
    // Read current state.  The state will either be locked, or will have
    // some remaining space available.
    _RegionState state = _regionState.load(std::memory_order_relaxed);
    _RegionState newState;

    // If we read the "init" state, which is region=0, index=0, then try to
    // move to the locked state.  If we take it, then do the initialization
    // and unlock.  If we don't take it, then someone else has done it or is
    // doing it, so we just continue.
    if (state == _RegionState::GetInitState()) {
        // Try to lock.  On CAS failure, 'state' is refreshed with the value
        // another thread installed, which the loop below handles.
        newState = _RegionState::GetLockedState();
        if (_regionState.compare_exchange_strong(state, newState)) {
            // We took the lock to initialize.  Create the first region and
            // unlock.  Indexes start at 1 to avoid hash collisions when
            // multiple pool indexes are combined in a single hash.
            _regionStarts[1] =
                Sdf_PoolReserveRegion(ElemsPerRegion * ElemSize);
            _regionState = state = _RegionState(1, 1);
        }
    }

    while (true) {
        // If we're locked, just wait and retry.
        if (ARCH_UNLIKELY(state.IsLocked())) {
            std::this_thread::yield();
            state = _regionState.load(std::memory_order_relaxed);
            continue;
        }

        // Try to take space for the span.  If this would consume all
        // remaining space, Reserve() yields the locked state, and a
        // successful CAS below both claims the tail of the region and
        // locks out other threads while we allocate the next region.
        newState = state.Reserve(ElemsPerSpan);

        if (_regionState.compare_exchange_weak(state, newState)) {
            // We allocated our span.
            break;
        }
    }

    // Now newState is either a normal region & index, or is locked for
    // allocation.  If locked, then allocate a new region and update
    // _regionState.
    if (newState.IsLocked()) {
        // Allocate the next region, or die if out of regions...
        unsigned newRegion = state.GetRegion() + 1;
        if (ARCH_UNLIKELY(newRegion > NumRegions)) {
            TF_FATAL_ERROR("Out of memory in '%s'.",
                           ArchGetDemangled<Sdf_Pool>().c_str());
        }
        _regionStarts[newRegion] =
            Sdf_PoolReserveRegion(ElemsPerRegion * ElemSize);
        // Set the new state accordingly, and unlock.  Indexes start at 1 to
        // avoid hash collisions when multiple pool indexes are combined in
        // a single hash.
        newState = _RegionState(newRegion, 1);
        _regionState.store(newState);
    }

    // Now our span space is indicated by state & newState.  Update the
    // \p out span and ensure the span space is committed (think
    // mprotect(PROT_READ | PROT_WRITE) on posixes).  If newState moved to a
    // different (or locked->new) region, our span runs to the end of the
    // old region.
    out.region = state.GetRegion();
    out.beginIndex = state.GetIndex();
    out.endIndex = newState.GetRegion() == out.region ?
        newState.GetIndex() : MaxIndex;

    // Ensure the new span is committed & read/writable.
    char *startAddr = _GetPtr(out.region, out.beginIndex);
    char *endAddr = _GetPtr(out.region, out.endIndex);
    Sdf_PoolCommitRange(startAddr, endAddr);
}
217 
// Source file definition of an Sdf_Pool instantiation.  Intended for use in
// a .cpp file (this header errors out if included twice in one translation
// unit).  ElemsPerSpan is not passed, so it takes its default from the
// Sdf_Pool class template declaration in pool.h.
#define SDF_INSTANTIATE_POOL(Tag, ElemSize, RegionBits) \
    template class PXR_NS_GLOBAL::Sdf_Pool<Tag, ElemSize, RegionBits>
221 
222 
223 PXR_NAMESPACE_CLOSE_SCOPE
Create or return a previously created object instance of global data.
Definition: staticData.h:113
#define TF_FATAL_ERROR(fmt, args)
Issue a fatal error and end the program.
Definition: diagnostic.h:111