1 mike 1.1 /*
2 **==============================================================================
3 **
4 ** Open Management Infrastructure (OMI)
5 **
6 ** Copyright (c) Microsoft Corporation
7 **
8 ** Licensed under the Apache License, Version 2.0 (the "License"); you may not
9 ** use this file except in compliance with the License. You may obtain a copy
10 ** of the License at
11 **
12 ** http://www.apache.org/licenses/LICENSE-2.0
13 **
14 ** THIS CODE IS PROVIDED *AS IS* BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
15 ** KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION ANY IMPLIED
16 ** WARRANTIES OR CONDITIONS OF TITLE, FITNESS FOR A PARTICULAR PURPOSE,
17 ** MERCHANTABLITY OR NON-INFRINGEMENT.
18 **
19 ** See the Apache 2 License for the specific language governing permissions
20 ** and limitations under the License.
21 **
22 mike 1.1 **==============================================================================
23 */
24
25 #include <assert.h>
26 #include "batch.h"
27 #include "strings.h"
28
/* Total bytes per shared page (Page header + data). BLOCK_SIZE is used
 * here before its definition below; that is fine for object-like macros,
 * which expand at the point of use, not at definition. */
#define BATCH_PAGE_SIZE ((size_t)(BLOCK_SIZE * 2))
/* Largest request (after rounding up to 8) served from a shared page;
 * larger requests get their own "independent" heap block. */
#define BLOCK_SIZE ((size_t)512)
31
/*
 * Allocate 'size' bytes of batch-owned memory. Small requests (at most
 * BLOCK_SIZE bytes after rounding) are carved from the current page; a new
 * page is allocated when the current one is exhausted, as long as fewer
 * than self->maxPages pages exist. Any other request gets its own
 * "independent" heap block, which Batch_Put can free individually.
 * All memory is ultimately released by Batch_Destroy.
 * Returns NULL on allocation failure.
 */
void* Batch_Get(
    Batch* self,
    size_t size)
{
    size_t size8;

    /* Round request size to a multiple of eight (keeps blocks aligned) */
    size8 = (size + 7) & ~7;

    /* Attempt to obtain the block from the available storage */
    if (size8 <= BLOCK_SIZE && size8 <= (size_t)(self->end - self->avail))
    {
        char* ptr = self->avail;
        self->avail += size8;
        return ptr;
    }

    /* Allocate a new page and carve new block from that page */
    if (size8 <= BLOCK_SIZE && self->numPages < self->maxPages)
    {
        Page* page = malloc(BATCH_PAGE_SIZE);
        char* ptr;

        if (!page)
            return NULL;

        /* Zero only the page header; the data region stays uninitialized */
        memset(page, 0, sizeof(page->u.s));
        page->u.s.independent = 0;
        page->u.s.size = BATCH_PAGE_SIZE - sizeof(Page);
#ifdef CONFIG_ENABLE_DEBUG
        /* Debug builds: fill fresh storage with a recognizable pattern */
        memset(page + 1,0xAA, page->u.s.size);
#endif

        /* Link new page onto list */
        page->u.s.next = self->pages;
        self->pages = page;

        /* Calculate pointer (data region starts just past the Page header) */
        ptr = (char*)(page + 1);

        /* Set the available memory pointers */
        self->avail = ptr + size8;
        self->end = (char*)page + BATCH_PAGE_SIZE;

        /* Update number of pages */
        self->numPages++;

        /* Return pointer to memory */
        return ptr;
    }

    /* Chunk is larger than the block size or maximum batch size has been
     * exceeded (so obtain it from the heap and place it on the individual
     * blocks list)
     */
    {
        Page* page = malloc(sizeof(Page) + size8);

        if (!page)
            return NULL;

        /* Set the size of the page (header zeroed, size is the data size) */
        memset(page, 0, sizeof(page->u.s));
        page->u.s.independent = 1;
        page->u.s.size = (unsigned int)size8;
#ifdef CONFIG_ENABLE_DEBUG
        memset(page + 1,0xAA, page->u.s.size);
#endif

        page->u.s.next = self->pages;
        self->pages = page;

        /* Return pointer to memory */
        return page + 1;
    }
}
108
109 void Batch_Destroy(
110 Batch* self)
111 {
112 Page* p;
113 void* selfPtr = NULL;
114
115 /* Free batch pages */
116 for (p = self->pages; p; )
117 {
118 Page* next = p->u.s.next;
119
120 /* If memory object contains this batch, delete it last */
121 if ((char*)self>=(char*)(p + 1) && (char*)self<(char*)p + p->u.s.size)
122 selfPtr = p;
123 else
124 free(p);
125
126 p = next;
127 mike 1.1 }
128
129 if (selfPtr)
130 free(selfPtr);
131 }
132
133 void Batch_InitFromBuffer(
134 Batch* self,
135 void* data,
136 size_t size,
137 size_t maxPages)
138 {
139 self->avail = data;
140 self->end = (char*)data + size;
141 self->pages = NULL;
142 self->numPages = 0;
143 self->maxPages = maxPages;
144 }
145
146 Batch* Batch_New(
147 size_t maxPages)
148 mike 1.1 {
149 Batch batch;
150 Batch* self;
151
152 /* Construct batch on the stack. Next allocate space for batch within
153 * the batch itself. Finally, copy the batch from the stack onto the
154 * batch itself. Now the batch contains itself. This avoids an extra
155 * allocation.
156 */
157
158 Batch_Init(&batch, maxPages);
159 self = (Batch*)Batch_Get(&batch, sizeof(Batch));
160
161 if (!self)
162 {
163 Batch_Destroy(&batch);
164 return NULL;
165 }
166
167 return memcpy(self, &batch, sizeof(Batch));
168 }
169 mike 1.1
170 void Batch_Delete(
171 Batch* self)
172 {
173 /* Destroying the batch frees the batch object on heap (since batch
174 * contains itself.
175 */
176 Batch_Destroy(self);
177 }
178
/*
 * Return a block obtained from Batch_Get to the batch. Only blocks that
 * occupy their own "independent" page are actually freed; blocks carved
 * from a shared page are silently retained until Batch_Destroy. A NULL
 * pointer is ignored. In debug builds, a pointer that belongs to no page
 * of this batch trips DEBUG_ASSERT (caller bug).
 */
void Batch_Put(
    Batch* self,
    void* ptr)
{
    Page* p;
    Page* page;
    Page* prev = NULL;
#if defined (CONFIG_ENABLE_DEBUG)
    int found = 0;
#endif

    if (!ptr)
        return;

    /* Get pointer to start of page (Batch_Get returned page + 1) */
    page = (Page*)ptr - 1;

    /* Free independent pages */
    for (p = self->pages; p; p = p->u.s.next)
    {
        if (p == page)
        {
            /* Skip if page is not independent (block shares its page) */
            if (!page->u.s.independent)
                return;

            /* Remove from linked list */
            if (prev)
                prev->u.s.next = p->u.s.next;
            else
                self->pages = p->u.s.next;

            free(p);
            return;
        }
#if defined (CONFIG_ENABLE_DEBUG)
        else
        {
            /* Debug builds: remember whether ptr at least points into the
             * interior of some page (a shared-page sub-allocation) */
            void* start = (char*)p + sizeof(Page);
            void* end = (char*)start + p->u.s.size;

            if (ptr >= start && ptr < end)
                found = 1;
        }
#endif


        prev = p;
    }

#if defined (CONFIG_ENABLE_DEBUG)
    /* ptr did not fall inside any page of this batch */
    if (!found)
        DEBUG_ASSERT(0);
#endif
}
236
237 MI_Char* Batch_Zdup(
238 Batch* self,
239 const MI_Char* str)
240 {
241 MI_Char* p;
242 size_t size;
243
244 size = (Zlen(str) + 1) * sizeof(MI_Char);
245
246 p = Batch_Get(self, size);
247
248 if (!p)
249 return NULL;
250
251 return memcpy(p, str, size);
252 }
253 mike 1.1
254 char* Batch_Strdup(
255 Batch* self,
256 const char* str)
257 {
258 char* p;
259 size_t size;
260
261 size = (Strlen(str) + 1);
262
263 p = Batch_Get(self, size);
264
265 if (!p)
266 return NULL;
267
268 return memcpy(p, str, size);
269 }
270
271 MI_Char* Batch_Strdup2(
272 Batch* self,
273 const char* s)
274 mike 1.1 {
275 size_t size = strlen(s) + 1;
276 MI_Char* p;
277 size_t i;
278
279 p = Batch_Get(self, size * sizeof(MI_Char));
280
281 if (!p)
282 return NULL;
283
284 for (i = 0; i < size; i++)
285 p[i] = (MI_Char)s[i];
286
287 return p;
288 }
289
290 size_t Batch_GetPageCount(
291 Batch* self)
292 {
293 size_t res = 0;
294 Page* p;
295 mike 1.1
296 for (p = self->pages; p; p = p->u.s.next)
297 {
298 res++;
299 }
300
301 return res;
302 }
303
304 size_t Batch_GetPageInfo(
305 Batch* self,
306 Header_BatchInfoItem* buffer)
307 {
308 size_t res = 0;
309 Page* p;
310
311 for (p = self->pages; p; p = p->u.s.next)
312 {
313 buffer->pagePointer = p+1;
314
315 /* for current page we can send only part of it */
316 mike 1.1 if (self->end == ((char*)p + BATCH_PAGE_SIZE))
317 buffer->pageSize = self->avail - (char*)(p + 1);
318 else
319 buffer->pageSize = p->u.s.size;
320
321 buffer++;
322 res++;
323 }
324
325 return res;
326 }
327
328 void* Batch_GetPageByIndex(
329 Batch* self,
330 size_t index)
331 {
332 Page* p;
333
334 for (p = self->pages; p && index != 0; p = p->u.s.next)
335 {
336 index--;
337 mike 1.1 }
338
339 return p ? p + 1 : 0;
340 }
341
/*
 * Rebuild a batch from serialized page descriptors (as produced by
 * Batch_GetPageInfo): one page is allocated per buffer[] entry and linked
 * onto *self. If *self is NULL on entry, the Batch object itself is
 * co-allocated at the tail of the first page's malloc block, so
 * Batch_Destroy later releases both together. Entries are processed
 * last-to-first so the resulting LIFO page list matches the original
 * order. On allocation failure, any partially built batch is destroyed,
 * *self is reset to NULL, and MI_FALSE is returned.
 */
MI_Boolean Batch_CreateBatchByPageInfo(
    Batch** self,
    const Header_BatchInfoItem* buffer,
    size_t pages)
{
    Page* page;

    while (pages)
    {
        pages--;

        /* put batch inside first page's allocation */
        if (!(*self))
        {
            size_t size = sizeof(Page) + buffer[pages].pageSize;
            size_t size8;

            /* Round request size to a multiple of eight so the Batch
             * appended after the page region is suitably aligned */
            size8 = (size + 7) & ~7;

            page = malloc(size8 + sizeof(Batch));

            if (!page)
                return MI_FALSE;

            /* Batch lives immediately after the (rounded) page region */
            *self = (Batch*)(((char*)page) + size8);
            Batch_Init(*self, BATCH_MAX_PAGES);
        }
        else
            page = malloc(sizeof(Page) + buffer[pages].pageSize);

        if (!page)
        {
            /* Tear down everything built so far, including the page that
             * carries the Batch object itself */
            Batch_Destroy((*self));
            (*self) = 0;
            return MI_FALSE;
        }

        /* Zero the header; pages are not independent (freed only by
         * Batch_Destroy, not Batch_Put) */
        memset(page, 0, sizeof(page->u.s));
        page->u.s.independent = 0;
        page->u.s.size = (unsigned int)buffer[pages].pageSize;
#ifdef CONFIG_ENABLE_DEBUG
        memset(page + 1,0xAA,page->u.s.size);
#endif

        /* Link new page onto list */
        page->u.s.next = (*self)->pages;
        (*self)->pages = page;
    }

    return MI_TRUE;
}
402
/*
 * Translate *ptrInOut, a pointer valid in the sender's old address layout
 * (described by ptrAdjustmentInfo[]), into the corresponding address
 * inside this batch's reconstructed pages. On success *ptrInOut is
 * rewritten and MI_TRUE returned; MI_FALSE means the pointer fell inside
 * none of the described pages.
 */
MI_Boolean Batch_FixPointer(
    Batch* self,
    const Header_BatchInfoItem* ptrAdjustmentInfo,
    size_t ptrAdjustmentInfoCount,
    void** ptrInOut)
{
    /* see if pointer matches old blocks */
    size_t index;
    char* old_ptr = *ptrInOut;
    Page* p;

    /* since we may have now more pages than initially, skip new pages:
     * position on the first of the last ptrAdjustmentInfoCount pages */
    p = Batch_GetPageByIndex(self,Batch_GetPageCount(self) - ptrAdjustmentInfoCount);

    if (!p)
        return MI_FALSE;
    /* Batch_GetPageByIndex returned the data region (page + 1); step back
     * to the Page header so the list can be walked via u.s.next */
    p--;

    /* Walk old page descriptors and current pages in parallel; both are
     * assumed to be in the same order */
    for (index = 0; p && index < ptrAdjustmentInfoCount; p = p->u.s.next, index++)
    {
        char* old_page = ptrAdjustmentInfo[index].pagePointer;

        if ( old_ptr >= old_page &&
            old_ptr < (old_page + ptrAdjustmentInfo[index].pageSize) )
        {
            /* Preserve the offset within the matching new page */
            *ptrInOut = ((char*)(p + 1)) + (old_ptr - old_page);
            return MI_TRUE;
        }
    }

    return MI_FALSE;
}
|