Print this page
XXXX update sendmail to 8.14.9
| Split |
Close |
| Expand all |
| Collapse all |
--- old/usr/src/cmd/sendmail/libsm/rpool.c
+++ new/usr/src/cmd/sendmail/libsm/rpool.c
1 1 /*
2 - * Copyright (c) 2000-2004 Sendmail, Inc. and its suppliers.
2 + * Copyright (c) 2000-2004 Proofpoint, Inc. and its suppliers.
3 3 * All rights reserved.
4 4 *
5 5 * By using this file, you agree to the terms and conditions set
6 6 * forth in the LICENSE file which can be found at the top level of
7 7 * the sendmail distribution.
8 8 */
9 9
10 -#pragma ident "%Z%%M% %I% %E% SMI"
11 -
12 10 #include <sm/gen.h>
13 -SM_RCSID("@(#)$Id: rpool.c,v 1.28 2004/08/03 20:44:04 ca Exp $")
11 +SM_RCSID("@(#)$Id: rpool.c,v 1.29 2013-11-22 20:51:43 ca Exp $")
14 12
15 13 /*
16 14 ** resource pools
17 15 ** For documentation, see rpool.html
18 16 */
19 17
20 18 #include <sm/exc.h>
21 19 #include <sm/heap.h>
22 20 #include <sm/rpool.h>
23 21 #include <sm/varargs.h>
24 22 #include <sm/conf.h>
25 23 #if _FFR_PERF_RPOOL
26 24 # include <syslog.h>
27 25 #endif /* _FFR_PERF_RPOOL */
28 26
/* Magic value stored in sm_magic to identify a live rpool object. */
const char SmRpoolMagic[] = "sm_rpool";

/*
**  Pool block header: links a malloc'ed block into the rpool's chain.
**  The union pads the header to SM_ALIGN_SIZE so that the usable
**  memory immediately following it is properly aligned.
*/

typedef union
{
	SM_POOLLINK_T	link;
	char		align[SM_ALIGN_SIZE];
} SM_POOLHDR_T;

static char *sm_rpool_allocblock_x __P((SM_RPOOL_T *, size_t));
static char *sm_rpool_allocblock __P((SM_RPOOL_T *, size_t));

/*
**  Tune this later
*/

/* default size of one pool block (including its SM_POOLHDR_T) */
#define POOLSIZE		4096

/* requests larger than poolsize/RATIO get their own private block */
#define BIG_OBJECT_RATIO	10
46 44
47 45 /*
48 46 ** SM_RPOOL_ALLOCBLOCK_X -- allocate a new block for an rpool.
49 47 **
50 48 ** Parameters:
51 49 ** rpool -- rpool to which the block should be added.
52 50 ** size -- size of block.
53 51 **
54 52 ** Returns:
55 53 ** Pointer to block.
56 54 **
57 55 ** Exceptions:
58 56 ** F:sm_heap -- out of memory
59 57 */
60 58
61 59 static char *
62 60 sm_rpool_allocblock_x(rpool, size)
63 61 SM_RPOOL_T *rpool;
64 62 size_t size;
65 63 {
66 64 SM_POOLLINK_T *p;
67 65
68 66 p = sm_malloc_x(sizeof(SM_POOLHDR_T) + size);
69 67 p->sm_pnext = rpool->sm_pools;
70 68 rpool->sm_pools = p;
71 69 return (char*) p + sizeof(SM_POOLHDR_T);
72 70 }
73 71
74 72 /*
75 73 ** SM_RPOOL_ALLOCBLOCK -- allocate a new block for an rpool.
76 74 **
77 75 ** Parameters:
78 76 ** rpool -- rpool to which the block should be added.
79 77 ** size -- size of block.
80 78 **
81 79 ** Returns:
82 80 ** Pointer to block, NULL on failure.
83 81 */
84 82
85 83 static char *
86 84 sm_rpool_allocblock(rpool, size)
87 85 SM_RPOOL_T *rpool;
88 86 size_t size;
89 87 {
90 88 SM_POOLLINK_T *p;
91 89
92 90 p = sm_malloc(sizeof(SM_POOLHDR_T) + size);
93 91 if (p == NULL)
94 92 return NULL;
95 93 p->sm_pnext = rpool->sm_pools;
96 94 rpool->sm_pools = p;
97 95 return (char*) p + sizeof(SM_POOLHDR_T);
98 96 }
99 97
100 98 /*
101 99 ** SM_RPOOL_MALLOC_TAGGED_X -- allocate memory from rpool
102 100 **
103 101 ** Parameters:
104 102 ** rpool -- rpool from which memory should be allocated;
105 103 ** can be NULL, use sm_malloc() then.
106 104 ** size -- size of block.
107 105 ** file -- filename.
108 106 ** line -- line number in file.
109 107 ** group -- heap group for debugging.
110 108 **
111 109 ** Returns:
112 110 ** Pointer to block.
113 111 **
114 112 ** Exceptions:
115 113 ** F:sm_heap -- out of memory
116 114 **
117 115 ** Notice: XXX
118 116 ** if size == 0 and the rpool is new (no memory
119 117 ** allocated yet) NULL is returned!
120 118 ** We could solve this by
121 119 ** - wasting 1 byte (size < avail)
122 120 ** - checking for rpool->sm_poolptr != NULL
123 121 ** - not asking for 0 sized buffer
124 122 */
125 123
126 124 void *
127 125 #if SM_HEAP_CHECK
128 126 sm_rpool_malloc_tagged_x(rpool, size, file, line, group)
129 127 SM_RPOOL_T *rpool;
130 128 size_t size;
131 129 char *file;
132 130 int line;
133 131 int group;
134 132 #else /* SM_HEAP_CHECK */
135 133 sm_rpool_malloc_x(rpool, size)
136 134 SM_RPOOL_T *rpool;
137 135 size_t size;
138 136 #endif /* SM_HEAP_CHECK */
139 137 {
140 138 char *ptr;
141 139
142 140 if (rpool == NULL)
143 141 return sm_malloc_tagged_x(size, file, line, group);
144 142
145 143 /* Ensure that size is properly aligned. */
146 144 if (size & SM_ALIGN_BITS)
147 145 size = (size & ~SM_ALIGN_BITS) + SM_ALIGN_SIZE;
148 146
149 147 /* The common case. This is optimized for speed. */
150 148 if (size <= rpool->sm_poolavail)
151 149 {
152 150 ptr = rpool->sm_poolptr;
153 151 rpool->sm_poolptr += size;
154 152 rpool->sm_poolavail -= size;
155 153 return ptr;
156 154 }
157 155
158 156 /*
159 157 ** The slow case: we need to call malloc.
160 158 ** The SM_REQUIRE assertion is deferred until now, for speed.
161 159 ** That's okay: we set rpool->sm_poolavail to 0 when we free an rpool,
162 160 ** so the common case code won't be triggered on a dangling pointer.
163 161 */
164 162
165 163 SM_REQUIRE(rpool->sm_magic == SmRpoolMagic);
166 164
167 165 /*
168 166 ** If size > sm_poolsize, then malloc a new block especially for
169 167 ** this request. Future requests will be allocated from the
170 168 ** current pool.
171 169 **
172 170 ** What if the current pool is mostly unallocated, and the current
173 171 ** request is larger than the available space, but < sm_poolsize?
174 172 ** If we discard the current pool, and start allocating from a new
175 173 ** pool, then we will be wasting a lot of space. For this reason,
176 174 ** we malloc a block just for the current request if size >
177 175 ** sm_bigobjectsize, where sm_bigobjectsize <= sm_poolsize.
178 176 ** Thus, the most space that we will waste at the end of a pool
179 177 ** is sm_bigobjectsize - 1.
180 178 */
181 179
182 180 if (size > rpool->sm_bigobjectsize)
183 181 {
184 182 #if _FFR_PERF_RPOOL
185 183 ++rpool->sm_nbigblocks;
186 184 #endif /* _FFR_PERF_RPOOL */
187 185 return sm_rpool_allocblock_x(rpool, size);
188 186 }
189 187 SM_ASSERT(rpool->sm_bigobjectsize <= rpool->sm_poolsize);
190 188 ptr = sm_rpool_allocblock_x(rpool, rpool->sm_poolsize);
191 189 rpool->sm_poolptr = ptr + size;
192 190 rpool->sm_poolavail = rpool->sm_poolsize - size;
193 191 #if _FFR_PERF_RPOOL
194 192 ++rpool->sm_npools;
195 193 #endif /* _FFR_PERF_RPOOL */
196 194 return ptr;
197 195 }
198 196
199 197 /*
200 198 ** SM_RPOOL_MALLOC_TAGGED -- allocate memory from rpool
201 199 **
202 200 ** Parameters:
203 201 ** rpool -- rpool from which memory should be allocated;
204 202 ** can be NULL, use sm_malloc() then.
205 203 ** size -- size of block.
206 204 ** file -- filename.
207 205 ** line -- line number in file.
208 206 ** group -- heap group for debugging.
209 207 **
210 208 ** Returns:
211 209 ** Pointer to block, NULL on failure.
212 210 **
213 211 ** Notice: XXX
214 212 ** if size == 0 and the rpool is new (no memory
215 213 ** allocated yet) NULL is returned!
216 214 ** We could solve this by
217 215 ** - wasting 1 byte (size < avail)
218 216 ** - checking for rpool->sm_poolptr != NULL
219 217 ** - not asking for 0 sized buffer
220 218 */
221 219
222 220 void *
223 221 #if SM_HEAP_CHECK
224 222 sm_rpool_malloc_tagged(rpool, size, file, line, group)
225 223 SM_RPOOL_T *rpool;
226 224 size_t size;
227 225 char *file;
228 226 int line;
229 227 int group;
230 228 #else /* SM_HEAP_CHECK */
231 229 sm_rpool_malloc(rpool, size)
232 230 SM_RPOOL_T *rpool;
233 231 size_t size;
234 232 #endif /* SM_HEAP_CHECK */
235 233 {
236 234 char *ptr;
237 235
238 236 if (rpool == NULL)
239 237 return sm_malloc_tagged(size, file, line, group);
240 238
241 239 /* Ensure that size is properly aligned. */
242 240 if (size & SM_ALIGN_BITS)
243 241 size = (size & ~SM_ALIGN_BITS) + SM_ALIGN_SIZE;
244 242
245 243 /* The common case. This is optimized for speed. */
246 244 if (size <= rpool->sm_poolavail)
247 245 {
248 246 ptr = rpool->sm_poolptr;
249 247 rpool->sm_poolptr += size;
250 248 rpool->sm_poolavail -= size;
251 249 return ptr;
252 250 }
253 251
254 252 /*
255 253 ** The slow case: we need to call malloc.
256 254 ** The SM_REQUIRE assertion is deferred until now, for speed.
257 255 ** That's okay: we set rpool->sm_poolavail to 0 when we free an rpool,
258 256 ** so the common case code won't be triggered on a dangling pointer.
259 257 */
260 258
261 259 SM_REQUIRE(rpool->sm_magic == SmRpoolMagic);
262 260
263 261 /*
264 262 ** If size > sm_poolsize, then malloc a new block especially for
265 263 ** this request. Future requests will be allocated from the
266 264 ** current pool.
267 265 **
268 266 ** What if the current pool is mostly unallocated, and the current
269 267 ** request is larger than the available space, but < sm_poolsize?
270 268 ** If we discard the current pool, and start allocating from a new
271 269 ** pool, then we will be wasting a lot of space. For this reason,
272 270 ** we malloc a block just for the current request if size >
273 271 ** sm_bigobjectsize, where sm_bigobjectsize <= sm_poolsize.
274 272 ** Thus, the most space that we will waste at the end of a pool
275 273 ** is sm_bigobjectsize - 1.
276 274 */
277 275
278 276 if (size > rpool->sm_bigobjectsize)
279 277 {
280 278 #if _FFR_PERF_RPOOL
281 279 ++rpool->sm_nbigblocks;
282 280 #endif /* _FFR_PERF_RPOOL */
283 281 return sm_rpool_allocblock(rpool, size);
284 282 }
285 283 SM_ASSERT(rpool->sm_bigobjectsize <= rpool->sm_poolsize);
286 284 ptr = sm_rpool_allocblock(rpool, rpool->sm_poolsize);
287 285 if (ptr == NULL)
288 286 return NULL;
289 287 rpool->sm_poolptr = ptr + size;
290 288 rpool->sm_poolavail = rpool->sm_poolsize - size;
291 289 #if _FFR_PERF_RPOOL
292 290 ++rpool->sm_npools;
293 291 #endif /* _FFR_PERF_RPOOL */
294 292 return ptr;
295 293 }
296 294
297 295 /*
298 296 ** SM_RPOOL_NEW_X -- create a new rpool.
299 297 **
300 298 ** Parameters:
301 299 ** parent -- pointer to parent rpool, can be NULL.
302 300 **
303 301 ** Returns:
304 302 ** Pointer to new rpool.
305 303 */
306 304
307 305 SM_RPOOL_T *
308 306 sm_rpool_new_x(parent)
309 307 SM_RPOOL_T *parent;
310 308 {
311 309 SM_RPOOL_T *rpool;
312 310
313 311 rpool = sm_malloc_x(sizeof(SM_RPOOL_T));
314 312 if (parent == NULL)
315 313 rpool->sm_parentlink = NULL;
316 314 else
317 315 {
318 316 SM_TRY
319 317 rpool->sm_parentlink = sm_rpool_attach_x(parent,
320 318 (SM_RPOOL_RFREE_T) sm_rpool_free,
321 319 (void *) rpool);
322 320 SM_EXCEPT(exc, "*")
323 321 sm_free(rpool);
324 322 sm_exc_raise_x(exc);
325 323 SM_END_TRY
326 324 }
327 325 rpool->sm_magic = SmRpoolMagic;
328 326
329 327 rpool->sm_poolsize = POOLSIZE - sizeof(SM_POOLHDR_T);
330 328 rpool->sm_bigobjectsize = rpool->sm_poolsize / BIG_OBJECT_RATIO;
331 329 rpool->sm_poolptr = NULL;
332 330 rpool->sm_poolavail = 0;
333 331 rpool->sm_pools = NULL;
334 332
335 333 rpool->sm_rptr = NULL;
336 334 rpool->sm_ravail = 0;
337 335 rpool->sm_rlists = NULL;
338 336 #if _FFR_PERF_RPOOL
339 337 rpool->sm_nbigblocks = 0;
340 338 rpool->sm_npools = 0;
341 339 #endif /* _FFR_PERF_RPOOL */
342 340
343 341 return rpool;
344 342 }
345 343
346 344 /*
347 345 ** SM_RPOOL_SETSIZES -- set sizes for rpool.
348 346 **
349 347 ** Parameters:
350 348 ** poolsize -- size of a single rpool block.
351 349 ** bigobjectsize -- if this size is exceeded, an individual
352 350 ** block is allocated (must be less or equal poolsize).
353 351 **
354 352 ** Returns:
355 353 ** none.
356 354 */
357 355
358 356 void
359 357 sm_rpool_setsizes(rpool, poolsize, bigobjectsize)
360 358 SM_RPOOL_T *rpool;
361 359 size_t poolsize;
362 360 size_t bigobjectsize;
363 361 {
364 362 SM_REQUIRE(poolsize >= bigobjectsize);
365 363 if (poolsize == 0)
366 364 poolsize = POOLSIZE - sizeof(SM_POOLHDR_T);
367 365 if (bigobjectsize == 0)
368 366 bigobjectsize = poolsize / BIG_OBJECT_RATIO;
369 367 rpool->sm_poolsize = poolsize;
370 368 rpool->sm_bigobjectsize = bigobjectsize;
371 369 }
372 370
373 371 /*
374 372 ** SM_RPOOL_FREE -- free an rpool and release all of its resources.
375 373 **
376 374 ** Parameters:
377 375 ** rpool -- rpool to free.
378 376 **
379 377 ** Returns:
380 378 ** none.
381 379 */
382 380
383 381 void
384 382 sm_rpool_free(rpool)
385 383 SM_RPOOL_T *rpool;
386 384 {
387 385 SM_RLIST_T *rl, *rnext;
388 386 SM_RESOURCE_T *r, *rmax;
389 387 SM_POOLLINK_T *pp, *pnext;
390 388
391 389 if (rpool == NULL)
392 390 return;
393 391
394 392 /*
395 393 ** It's important to free the resources before the memory pools,
396 394 ** because the resource free functions might modify the contents
397 395 ** of the memory pools.
398 396 */
399 397
400 398 rl = rpool->sm_rlists;
401 399 if (rl != NULL)
402 400 {
403 401 rmax = rpool->sm_rptr;
404 402 for (;;)
405 403 {
406 404 for (r = rl->sm_rvec; r < rmax; ++r)
407 405 {
408 406 if (r->sm_rfree != NULL)
409 407 r->sm_rfree(r->sm_rcontext);
410 408 }
411 409 rnext = rl->sm_rnext;
412 410 sm_free(rl);
413 411 if (rnext == NULL)
414 412 break;
415 413 rl = rnext;
416 414 rmax = &rl->sm_rvec[SM_RLIST_MAX];
417 415 }
418 416 }
419 417
420 418 /*
421 419 ** Now free the memory pools.
422 420 */
423 421
424 422 for (pp = rpool->sm_pools; pp != NULL; pp = pnext)
425 423 {
426 424 pnext = pp->sm_pnext;
427 425 sm_free(pp);
428 426 }
429 427
430 428 /*
431 429 ** Disconnect rpool from its parent.
432 430 */
433 431
434 432 if (rpool->sm_parentlink != NULL)
435 433 *rpool->sm_parentlink = NULL;
436 434
437 435 /*
438 436 ** Setting these fields to zero means that any future to attempt
439 437 ** to use the rpool after it is freed will cause an assertion failure.
440 438 */
441 439
442 440 rpool->sm_magic = NULL;
443 441 rpool->sm_poolavail = 0;
444 442 rpool->sm_ravail = 0;
445 443
446 444 #if _FFR_PERF_RPOOL
447 445 if (rpool->sm_nbigblocks > 0 || rpool->sm_npools > 1)
448 446 syslog(LOG_NOTICE,
449 447 "perf: rpool=%lx, sm_nbigblocks=%d, sm_npools=%d",
450 448 (long) rpool, rpool->sm_nbigblocks, rpool->sm_npools);
451 449 rpool->sm_nbigblocks = 0;
452 450 rpool->sm_npools = 0;
453 451 #endif /* _FFR_PERF_RPOOL */
454 452 sm_free(rpool);
455 453 }
456 454
457 455 /*
458 456 ** SM_RPOOL_ATTACH_X -- attach a resource to an rpool.
459 457 **
460 458 ** Parameters:
461 459 ** rpool -- rpool to which resource should be attached.
462 460 ** rfree -- function to call when rpool is freed.
463 461 ** rcontext -- argument for function to call when rpool is freed.
464 462 **
465 463 ** Returns:
466 464 ** Pointer to allocated function.
467 465 **
468 466 ** Exceptions:
469 467 ** F:sm_heap -- out of memory
470 468 */
471 469
472 470 SM_RPOOL_ATTACH_T
473 471 sm_rpool_attach_x(rpool, rfree, rcontext)
474 472 SM_RPOOL_T *rpool;
475 473 SM_RPOOL_RFREE_T rfree;
476 474 void *rcontext;
477 475 {
478 476 SM_RLIST_T *rl;
479 477 SM_RPOOL_ATTACH_T a;
480 478
481 479 SM_REQUIRE_ISA(rpool, SmRpoolMagic);
482 480
483 481 if (rpool->sm_ravail == 0)
484 482 {
485 483 rl = sm_malloc_x(sizeof(SM_RLIST_T));
486 484 rl->sm_rnext = rpool->sm_rlists;
487 485 rpool->sm_rlists = rl;
488 486 rpool->sm_rptr = rl->sm_rvec;
489 487 rpool->sm_ravail = SM_RLIST_MAX;
490 488 }
491 489
492 490 a = &rpool->sm_rptr->sm_rfree;
493 491 rpool->sm_rptr->sm_rfree = rfree;
494 492 rpool->sm_rptr->sm_rcontext = rcontext;
495 493 ++rpool->sm_rptr;
496 494 --rpool->sm_ravail;
497 495 return a;
498 496 }
499 497
#if DO_NOT_USE_STRCPY
/*
**  SM_RPOOL_STRDUP_X -- Create a copy of a C string
**
**	Parameters:
**		rpool -- rpool to use.
**		s -- the string to copy.
**
**	Returns:
**		pointer to newly allocated string.
**
**	Exceptions:
**		F:sm_heap -- out of memory
*/

char *
sm_rpool_strdup_x(rpool, s)
	SM_RPOOL_T *rpool;
	const char *s;
{
	size_t len;
	char *copy;

	len = strlen(s);
	SM_ASSERT(len + 1 > len);	/* guard against size_t overflow */
	copy = sm_rpool_malloc_x(rpool, len + 1);
	sm_strlcpy(copy, s, len + 1);
	return copy;
}
#endif /* DO_NOT_USE_STRCPY */
|
↓ open down ↓ |
503 lines elided |
↑ open up ↑ |
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX