/*
 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
 * Copyright (c) 1991-1995 by Xerox Corporation. All rights reserved.
 * Copyright 1996-1999 by Silicon Graphics. All rights reserved.
 * Copyright 1999 by Hewlett-Packard Company. All rights reserved.
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 */

/*
 * Note that this defines a large number of tuning hooks, which can
 * safely be ignored in nearly all cases. For normal use it suffices
 * to call only GC_MALLOC and perhaps GC_REALLOC.
 * For better performance, also look at GC_MALLOC_ATOMIC, and
 * GC_enable_incremental. If you need an action to be performed
 * immediately before an object is collected, look at GC_register_finalizer.
 * If you are using Solaris threads, look at the end of this file.
 * Everything else is best ignored unless you encounter performance
 * problems.
 */
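/*
 * A minimal usage sketch (illustrative only; the type "node" and the
 * function "make_list" are hypothetical client code, not part of this
 * interface). It shows the common case described above: allocate with
 * GC_MALLOC for pointer-containing data, GC_MALLOC_ATOMIC for
 * pointer-free data, never call free, and optionally enable incremental
 * collection once at startup. GC_MALLOC returns cleared memory, or 0 on
 * failure.
 *
 *   #include "gc.h"
 *
 *   struct node { struct node *next; int value; };
 *
 *   struct node *make_list(int n)
 *   {
 *       struct node *head = 0;
 *       while (n-- > 0) {
 *           struct node *p = (struct node *)GC_MALLOC(sizeof(struct node));
 *           if (p == 0) return head;
 *           p->value = n;
 *           p->next = head;
 *           head = p;
 *       }
 *       return head;
 *   }
 *
 *   int main(void)
 *   {
 *       char *buffer;
 *       GC_enable_incremental();
 *       buffer = (char *)GC_MALLOC_ATOMIC(4096);
 *       buffer[0] = 'x';
 *       make_list(1000);
 *       return 0;
 *   }
 *
 * Lists built by make_list become reclaimable automatically once no live
 * pointer reaches them; no explicit deallocation is needed.
 */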

#ifndef _GC_H

# define _GC_H

/*
 * Some tests for old macros. These violate our namespace rules and will
 * disappear shortly. Use the GC_ names.
 */
#if defined(SOLARIS_THREADS) || defined(_SOLARIS_THREADS)
# define GC_SOLARIS_THREADS
#endif
#if defined(_SOLARIS_PTHREADS)
# define GC_SOLARIS_PTHREADS
#endif
#if defined(IRIX_THREADS)
# define GC_IRIX_THREADS
#endif
#if defined(HPUX_THREADS)
# define GC_HPUX_THREADS
#endif
#if defined(OSF1_THREADS)
# define GC_OSF1_THREADS
#endif
#if defined(LINUX_THREADS)
# define GC_LINUX_THREADS
#endif
#if defined(WIN32_THREADS)
# define GC_WIN32_THREADS
#endif
#if defined(USE_LD_WRAP)
# define GC_USE_LD_WRAP
#endif

#if !defined(_REENTRANT) && (defined(GC_SOLARIS_THREADS) \
    || defined(GC_SOLARIS_PTHREADS) \
    || defined(GC_HPUX_THREADS) \
    || defined(GC_LINUX_THREADS))
# define _REENTRANT
        /* Better late than never. This fails if system headers that */
        /* depend on this were previously included. */
#endif

#if defined(GC_SOLARIS_PTHREADS) && !defined(GC_SOLARIS_THREADS)
# define GC_SOLARIS_THREADS
#endif

# if defined(GC_SOLARIS_PTHREADS) || defined(GC_FREEBSD_THREADS) || \
     defined(GC_IRIX_THREADS) || defined(GC_LINUX_THREADS) || \
     defined(GC_HPUX_THREADS) || defined(GC_OSF1_THREADS)
#   define GC_PTHREADS
# endif

# define __GC
# include <stddef.h>
# ifdef _WIN32_WCE
/* Yet more kluges for WinCE */
#   include <stdlib.h> /* size_t is defined here */
    typedef long ptrdiff_t; /* ptrdiff_t is not defined */
# endif

#if defined(__MINGW32__) && defined(_DLL) && !defined(GC_NOT_DLL)
# ifdef GC_BUILD
#   define GC_API __declspec(dllexport)
# else
#   define GC_API __declspec(dllimport)
# endif
#endif

#if (defined(__DMC__) || defined(_MSC_VER)) \
    && (defined(_DLL) && !defined(GC_NOT_DLL) \
        || defined(GC_DLL))
# ifdef GC_BUILD
#   define GC_API extern __declspec(dllexport)
# else
#   define GC_API __declspec(dllimport)
# endif
#endif

#if defined(__WATCOMC__) && defined(GC_DLL)
# ifdef GC_BUILD
#   define GC_API extern __declspec(dllexport)
# else
#   define GC_API extern __declspec(dllimport)
# endif
#endif

#ifndef GC_API
#define GC_API extern
#endif

# if defined(__STDC__) || defined(__cplusplus)
#   define GC_PROTO(args) args
    typedef void * GC_PTR;
#   define GC_CONST const
# else
#   define GC_PROTO(args) ()
    typedef char * GC_PTR;
#   define GC_CONST
# endif

# ifdef __cplusplus
    extern "C" {
# endif


/* Define word and signed_word to be unsigned and signed types of the */
/* same size as char * or void *. There seems to be no way to do this */
/* even semi-portably. The following is probably no better/worse */
/* than almost anything else. */
/* The ANSI standard suggests that size_t and ptrdiff_t might be */
/* better choices. But those appear to have incorrect definitions */
/* on many systems. Notably "typedef int size_t" seems to be both */
/* frequent and WRONG. */
typedef unsigned long GC_word;
typedef long GC_signed_word;

/* Public read-only variables */

GC_API GC_word GC_gc_no; /* Counter incremented per collection. */
        /* Includes empty GCs at startup. */

GC_API int GC_parallel; /* GC is parallelized for performance on */
        /* multiprocessors. Currently set only */
        /* implicitly if collector is built with */
        /* -DPARALLEL_MARK and if either: */
        /* Env variable GC_NPROC is set to > 1, or */
        /* GC_NPROC is not set and this is an MP. */
        /* If GC_parallel is set, incremental */
        /* collection is only partially functional, */
        /* and may not be desirable. */


/* Public R/W variables */

GC_API GC_PTR (*GC_oom_fn) GC_PROTO((size_t bytes_requested));
        /* When there is insufficient memory to satisfy */
        /* an allocation request, we return */
        /* (*GC_oom_fn)(). By default this just */
        /* returns 0. */
        /* If it returns, it must return 0 or a valid */
        /* pointer to a previously allocated heap */
        /* object. */
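/*
 * Illustrative sketch of installing a custom out-of-memory handler
 * (hypothetical client code; the name "oom_abort" is an example only,
 * and <stdio.h>/<stdlib.h> are assumed to be included by the client):
 *
 *   static GC_PTR oom_abort(size_t bytes_requested)
 *   {
 *       fprintf(stderr, "GC: out of memory (%lu bytes requested)\n",
 *               (unsigned long)bytes_requested);
 *       abort();
 *       return 0;
 *   }
 *
 *   ...
 *   GC_oom_fn = oom_abort;
 *
 * If the handler returns at all, it must return 0 or a valid pointer to
 * a previously allocated heap object, as noted above.
 */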

GC_API int GC_find_leak;
        /* Do not actually garbage collect, but simply */
        /* report inaccessible memory that was not */
        /* deallocated with GC_free. Initial value */
        /* is determined by FIND_LEAK macro. */

GC_API int GC_all_interior_pointers;
        /* Arrange for pointers to object interiors to */
        /* be recognized as valid. May not be changed */
        /* after GC initialization. */
        /* Initial value is determined by */
        /* -DALL_INTERIOR_POINTERS. */
        /* Unless DONT_ADD_BYTE_AT_END is defined, this */
        /* also affects whether sizes are increased by */
        /* at least a byte to allow "off the end" */
        /* pointer recognition. */
        /* MUST BE 0 or 1. */

GC_API int GC_quiet; /* Disable statistics output. Only matters if */
        /* collector has been compiled with statistics */
        /* enabled. This involves a performance cost, */
        /* and is thus not the default. */

GC_API int GC_finalize_on_demand;
        /* If nonzero, finalizers will only be run in */
        /* response to an explicit GC_invoke_finalizers */
        /* call. The default is determined by whether */
        /* the FINALIZE_ON_DEMAND macro is defined */
        /* when the collector is built. */

GC_API int GC_java_finalization;
        /* Mark objects reachable from finalizable */
        /* objects in a separate postpass. This makes */
        /* it a bit safer to use non-topologically- */
        /* ordered finalization. Default value is */
        /* determined by JAVA_FINALIZATION macro. */

GC_API void (* GC_finalizer_notifier)();
        /* Invoked by the collector when there are */
        /* objects to be finalized. Invoked at most */
        /* once per GC cycle. Never invoked unless */
        /* GC_finalize_on_demand is set. */
        /* Typically this will notify a finalization */
        /* thread, which will call GC_invoke_finalizers */
        /* in response. */

GC_API int GC_dont_gc; /* Don't collect unless explicitly requested, e.g. */
        /* because it's not safe. */

GC_API int GC_dont_expand;
        /* Don't expand heap unless explicitly requested */
        /* or forced to. */

GC_API int GC_use_entire_heap;
        /* Causes the nonincremental collector to use the */
        /* entire heap before collecting. This was the only */
        /* option for GC versions < 5.0. This sometimes */
        /* results in more large block fragmentation, since */
        /* very large blocks will tend to get broken up */
        /* during each GC cycle. It is likely to result in a */
        /* larger working set, but lower collection */
        /* frequencies, and hence fewer instructions executed */
        /* in the collector. */

GC_API int GC_full_freq; /* Number of partial collections between */
        /* full collections. Matters only if */
        /* GC_incremental is set. */
        /* Full collections are also triggered if */
        /* the collector detects a substantial */
        /* increase in the number of in-use heap */
        /* blocks. Values in the tens are now */
        /* perfectly reasonable, unlike for */
        /* earlier GC versions. */

GC_API GC_word GC_non_gc_bytes;
        /* Bytes not considered candidates for collection. */
        /* Used only to control scheduling of collections. */
        /* Updated by GC_malloc_uncollectable and GC_free. */
        /* Wizards only. */

GC_API int GC_no_dls;
        /* Don't register dynamic library data segments. */
        /* Wizards only. Should be used only if the */
        /* application explicitly registers all roots. */
        /* In Microsoft Windows environments, this will */
        /* usually also prevent registration of the */
        /* main data segment as part of the root set. */

GC_API GC_word GC_free_space_divisor;
        /* We try to make sure that we allocate at */
        /* least N/GC_free_space_divisor bytes between */
        /* collections, where N is the heap size plus */
        /* a rough estimate of the root set size. */
        /* Initially, GC_free_space_divisor = 4. */
        /* Increasing its value will use less space */
        /* but more collection time. Decreasing it */
        /* will appreciably decrease collection time */
        /* at the expense of space. */
        /* GC_free_space_divisor = 1 will effectively */
        /* disable collections. */

GC_API GC_word GC_max_retries;
        /* The maximum number of GCs attempted before */
        /* reporting out of memory after heap */
        /* expansion fails. Initially 0. */


GC_API char *GC_stackbottom; /* Cool end of user stack. */
        /* May be set in the client prior to */
        /* calling any GC_ routines. This */
        /* avoids some overhead, and */
        /* potentially some signals that can */
        /* confuse debuggers. Otherwise the */
        /* collector attempts to set it */
        /* automatically. */
        /* For multithreaded code, this is the */
        /* cold end of the stack for the */
        /* primordial thread. */

GC_API int GC_dont_precollect; /* Don't collect as part of */
        /* initialization. Should be set only */
        /* if the client wants a chance to */
        /* manually initialize the root set */
        /* before the first collection. */
        /* Interferes with blacklisting. */
        /* Wizards only. */

/* Public procedures */

/* Initialize the collector. This is only required when using thread-local
 * allocation, since unlike the regular allocation routines, GC_local_malloc
 * is not self-initializing. If you use GC_local_malloc you should arrange
 * to call this somehow (e.g. from a constructor) before doing any allocation.
 */
GC_API void GC_init GC_PROTO((void));

GC_API unsigned long GC_time_limit;
        /* If incremental collection is enabled, */
        /* we try to terminate collections */
        /* after this many milliseconds. Not a */
        /* hard time bound. Setting this to */
        /* GC_TIME_UNLIMITED will essentially */
        /* disable incremental collection while */
        /* leaving generational collection */
        /* enabled. */
# define GC_TIME_UNLIMITED 999999
        /* Setting GC_time_limit to this value */
        /* will disable the "pause time exceeded" */
        /* tests. */

/*
 * General purpose allocation routines, with roughly malloc calling conventions.
 * The atomic versions promise that no relevant pointers are contained
 * in the object. The nonatomic versions guarantee that the new object
 * is cleared. GC_malloc_stubborn promises that no changes to the object
 * will occur after GC_end_stubborn_change has been called on the
 * result of GC_malloc_stubborn. GC_malloc_uncollectable allocates an object
 * that is scanned for pointers to collectable objects, but is not itself
 * collectable. The object is scanned even if it does not appear to
 * be reachable. GC_malloc_uncollectable and GC_free called on the resulting
 * object implicitly update GC_non_gc_bytes appropriately.
 *
 * Note that the GC_malloc_stubborn support is stubbed out by default
 * starting in 6.0. GC_malloc_stubborn is an alias for GC_malloc unless
 * the collector is built with STUBBORN_ALLOC defined.
 */
GC_API GC_PTR GC_malloc GC_PROTO((size_t size_in_bytes));
GC_API GC_PTR GC_malloc_atomic GC_PROTO((size_t size_in_bytes));
GC_API GC_PTR GC_malloc_uncollectable GC_PROTO((size_t size_in_bytes));
GC_API GC_PTR GC_malloc_stubborn GC_PROTO((size_t size_in_bytes));
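/*
 * Illustrative sketch (hypothetical client code). GC_malloc_uncollectable
 * is the one routine here whose result is normally paired with GC_free:
 * the uncollectable object below serves as a manually managed root that
 * keeps a collectable object alive until it is explicitly freed.
 *
 *   typedef struct registry { GC_PTR current; } registry;
 *
 *   registry *r = (registry *)GC_malloc_uncollectable(sizeof(registry));
 *   r->current = GC_malloc(128);
 *   ...
 *   GC_free(r);
 *
 * While r exists, the 128-byte object is reachable through it and will
 * not be collected; after GC_free(r), it may be reclaimed once nothing
 * else references it.
 */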

/* The following is only defined if the library has been suitably */
/* compiled: */
GC_API GC_PTR GC_malloc_atomic_uncollectable GC_PROTO((size_t size_in_bytes));

/* Explicitly deallocate an object. Dangerous if used incorrectly. */
/* Requires a pointer to the base of an object. */
/* If the argument is stubborn, it should not be changeable when freed. */
/* An object should not be enabled for finalization when it is */
/* explicitly deallocated. */
/* GC_free(0) is a no-op, as required by ANSI C for free. */
GC_API void GC_free GC_PROTO((GC_PTR object_addr));

/*
 * Stubborn objects may be changed only if the collector is explicitly informed.
 * The collector is implicitly informed of a coming change when such
 * an object is first allocated. The following routines inform the
 * collector that an object will no longer be changed, or that it will
 * once again be changed. Only non-NULL pointer stores into the object
 * are considered to be changes. The argument to GC_end_stubborn_change
 * must be exactly the value returned by GC_malloc_stubborn or passed to
 * GC_change_stubborn. (In the second case it may be an interior pointer
 * within 512 bytes of the beginning of the object.)
 * There is a performance penalty for allowing more than
 * one stubborn object to be changed at once, but it is acceptable to
 * do so. The same applies to dropping stubborn objects that are still
 * changeable.
 */
GC_API void GC_change_stubborn GC_PROTO((GC_PTR));
GC_API void GC_end_stubborn_change GC_PROTO((GC_PTR));
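/*
 * Illustrative sketch of the stubborn-object protocol (hypothetical client
 * code; only meaningful if the collector was built with STUBBORN_ALLOC):
 *
 *   GC_PTR *table = (GC_PTR *)GC_malloc_stubborn(100 * sizeof(GC_PTR));
 *   int i;
 *   for (i = 0; i < 100; i++) table[i] = GC_malloc(16);
 *   GC_end_stubborn_change(table);
 *   ...
 *   GC_change_stubborn(table);
 *   table[0] = GC_malloc(32);
 *   GC_end_stubborn_change(table);
 *
 * The object may be written freely between allocation (or
 * GC_change_stubborn) and the matching GC_end_stubborn_change; outside
 * those windows it must not be modified.
 */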

/* Return a pointer to the base (lowest address) of an object given */
/* a pointer to a location within the object. */
/* I.e. map an interior pointer to the corresponding base pointer. */
/* Note that with debugging allocation, this returns a pointer to the */
/* actual base of the object, i.e. the debug information, not to */
/* the base of the user object. */
/* Return 0 if displaced_pointer doesn't point to within a valid */
/* object. */
GC_API GC_PTR GC_base GC_PROTO((GC_PTR displaced_pointer));

/* Given a pointer to the base of an object, return its size in bytes. */
/* The returned size may be slightly larger than what was originally */
/* requested. */
GC_API size_t GC_size GC_PROTO((GC_PTR object_addr));
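/*
 * Illustrative sketch (hypothetical client code): mapping an interior
 * pointer back to its containing object and querying the object size.
 *
 *   char *obj = (char *)GC_malloc(100);
 *   char *interior = obj + 37;
 *   GC_PTR base = GC_base((GC_PTR)interior);
 *   size_t sz = GC_size(base);
 *
 * Here base equals obj and sz is at least 100. GC_base returns 0 for a
 * pointer that does not fall within a heap object, so it can also serve
 * as a cheap "is this a collected-heap pointer?" test.
 */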

/* For compatibility with C library. This is occasionally faster than */
/* a malloc followed by a bcopy. But if you rely on that, either here */
/* or with the standard C library, your code is broken. In my */
/* opinion, it shouldn't have been invented, but now we're stuck. -HB */
/* The resulting object has the same kind as the original. */
/* If the argument is stubborn, the result will have changes enabled. */
/* It is an error to have changes enabled for the original object. */
/* Follows ANSI conventions for NULL old_object. */
GC_API GC_PTR GC_realloc
        GC_PROTO((GC_PTR old_object, size_t new_size_in_bytes));

/* Explicitly increase the heap size. */
/* Returns 0 on failure, 1 on success. */
GC_API int GC_expand_hp GC_PROTO((size_t number_of_bytes));

/* Limit the heap size to n bytes. Useful when you're debugging, */
/* especially on systems that don't handle running out of memory well. */
/* n == 0 ==> unbounded. This is the default. */
GC_API void GC_set_max_heap_size GC_PROTO((GC_word n));

/* Inform the collector that a certain section of statically allocated */
/* memory contains no pointers to garbage collected memory. Thus it */
/* need not be scanned. This is sometimes important if the application */
/* maps large read/write files into the address space, which could be */
/* mistaken for dynamic library data segments on some systems. */
GC_API void GC_exclude_static_roots GC_PROTO((GC_PTR start, GC_PTR finish));

/* Clear the set of root segments. Wizards only. */
GC_API void GC_clear_roots GC_PROTO((void));

/* Add a root segment. Wizards only. */
GC_API void GC_add_roots GC_PROTO((char * low_address,
                                   char * high_address_plus_1));

/* Add a displacement to the set of those considered valid by the */
/* collector. GC_register_displacement(n) means that if p was returned */
/* by GC_malloc, then (char *)p + n will be considered to be a valid */
/* pointer to p. N must be small and less than the size of p. */
/* (All pointers to the interior of objects from the stack are */
/* considered valid in any case. This applies to heap objects and */
/* static data.) */
/* Preferably, this should be called before any other GC procedures. */
/* Calling it later adds to the probability of excess memory */
/* retention. */
/* This is a no-op if the collector was compiled with recognition of */
/* arbitrary interior pointers enabled, which is now the default. */
GC_API void GC_register_displacement GC_PROTO((GC_word n));

/* The following version should be used if any debugging allocation is */
/* being done. */
GC_API void GC_debug_register_displacement GC_PROTO((GC_word n));

/* Explicitly trigger a full, world-stop collection. */
GC_API void GC_gcollect GC_PROTO((void));

/* Trigger a full world-stopped collection. Abort the collection if */
/* and when stop_func returns a nonzero value. Stop_func will be */
/* called frequently, and should be reasonably fast. This works even */
/* if virtual dirty bits, and hence incremental collection, is not */
/* available for this architecture. Collections can be aborted faster */
/* than normal pause times for incremental collection. However, */
/* aborted collections do no useful work; the next collection needs */
/* to start from the beginning. */
/* Return 0 if the collection was aborted, 1 if it succeeded. */
typedef int (* GC_stop_func) GC_PROTO((void));
GC_API int GC_try_to_collect GC_PROTO((GC_stop_func stop_func));
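/*
 * Illustrative sketch of an abortable collection (hypothetical client
 * code; the deadline mechanism shown, based on <time.h> clock(), is an
 * assumption, not something this interface provides):
 *
 *   #include <time.h>
 *
 *   static clock_t gc_deadline;
 *   static int aborted;
 *
 *   static int past_deadline(void)
 *   {
 *       return clock() > gc_deadline;
 *   }
 *
 *   ...
 *   gc_deadline = clock() + CLOCKS_PER_SEC / 10;
 *   aborted = !GC_try_to_collect(past_deadline);
 *
 * If aborted is nonzero the collection was abandoned and did no useful
 * work. The stop function is polled frequently during the collection,
 * so it should be cheap.
 */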

/* Return the number of bytes in the heap. Excludes collector private */
/* data structures. Includes empty blocks and fragmentation loss. */
/* Includes some pages that were allocated but never written. */
GC_API size_t GC_get_heap_size GC_PROTO((void));

/* Return a lower bound on the number of free bytes in the heap. */
GC_API size_t GC_get_free_bytes GC_PROTO((void));

/* Return the number of bytes allocated since the last collection. */
GC_API size_t GC_get_bytes_since_gc GC_PROTO((void));

/* Return the total number of bytes allocated in this process. */
/* Never decreases. */
GC_API size_t GC_get_total_bytes GC_PROTO((void));

/* Enable incremental/generational collection. */
/* Not advisable unless dirty bits are */
/* available or most heap objects are */
/* pointer-free (atomic) or immutable. */
/* Don't use in leak finding mode. */
/* Ignored if GC_dont_gc is true. */
/* Only the generational piece of this is */
/* functional if GC_parallel is TRUE. */
GC_API void GC_enable_incremental GC_PROTO((void));

/* Does incremental mode write-protect pages? Returns zero or */
/* more of the following, or'ed together: */
#define GC_PROTECTS_POINTER_HEAP 1 /* May protect non-atomic objs. */
#define GC_PROTECTS_PTRFREE_HEAP 2
#define GC_PROTECTS_STATIC_DATA 4 /* Currently never. */
#define GC_PROTECTS_STACK 8 /* Probably impractical. */

#define GC_PROTECTS_NONE 0
GC_API int GC_incremental_protection_needs GC_PROTO((void));

/* Perform some garbage collection work, if appropriate. */
/* Return 0 if there is no more work to be done. */
/* Typically performs an amount of work corresponding roughly */
/* to marking from one page. May do more work if further */
/* progress requires it, e.g. if incremental collection is */
/* disabled. It is reasonable to call this in a wait loop */
/* until it returns 0. */
GC_API int GC_collect_a_little GC_PROTO((void));
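/*
 * Illustrative sketch (hypothetical client code): an application with an
 * event loop can use idle time to make incremental progress, e.g.
 *
 *   while (!work_pending()) {
 *       if (GC_collect_a_little() == 0) break;
 *   }
 *
 * where work_pending() is an assumed application routine. Each call does
 * roughly a page's worth of marking, so the loop yields quickly once real
 * work arrives.
 */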

/* Allocate an object of size lb bytes. The client guarantees that */
/* as long as the object is live, it will be referenced by a pointer */
/* that points to somewhere within the first 256 bytes of the object. */
/* (This should normally be declared volatile to prevent the compiler */
/* from invalidating this assertion.) This routine is only useful */
/* if a large array is being allocated. It reduces the chance of */
/* accidentally retaining such an array as a result of scanning an */
/* integer that happens to be an address inside the array. (Actually, */
/* it reduces the chance of the allocator not finding space for such */
/* an array, since it will try hard to avoid introducing such a false */
/* reference.) On a SunOS 4.X or MS Windows system this is recommended */
/* for arrays likely to be larger than 100K or so. For other systems, */
/* or if the collector is not configured to recognize all interior */
/* pointers, the threshold is normally much higher. */
GC_API GC_PTR GC_malloc_ignore_off_page GC_PROTO((size_t lb));
GC_API GC_PTR GC_malloc_atomic_ignore_off_page GC_PROTO((size_t lb));
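/*
 * Illustrative sketch (hypothetical client code): allocating a large
 * buffer while keeping a pointer to its beginning, as required above.
 * The volatile qualifier keeps the compiler from optimizing the base
 * pointer away while the buffer is still in use.
 *
 *   double * volatile big = (double *)
 *       GC_malloc_ignore_off_page(1000000 * sizeof(double));
 *   double *cursor = big;
 *   ...
 *   cursor += 12345;
 *
 * As long as big itself remains live, interior pointers such as cursor
 * are safe even though the collector may not treat them as references
 * to the array.
 */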

#if defined(__sgi) && !defined(__GNUC__) && _COMPILER_VERSION >= 720
# define GC_ADD_CALLER
# define GC_RETURN_ADDR (GC_word)__return_address
#endif

#ifdef GC_ADD_CALLER
# define GC_EXTRAS GC_RETURN_ADDR, __FILE__, __LINE__
# define GC_EXTRA_PARAMS GC_word ra, GC_CONST char * s, int i
#else
# define GC_EXTRAS __FILE__, __LINE__
# define GC_EXTRA_PARAMS GC_CONST char * s, int i
#endif

/* Debugging (annotated) allocation. GC_gcollect will check */
/* objects allocated in this way for overwrites, etc. */
GC_API GC_PTR GC_debug_malloc
        GC_PROTO((size_t size_in_bytes, GC_EXTRA_PARAMS));
GC_API GC_PTR GC_debug_malloc_atomic
        GC_PROTO((size_t size_in_bytes, GC_EXTRA_PARAMS));
GC_API GC_PTR GC_debug_malloc_uncollectable
        GC_PROTO((size_t size_in_bytes, GC_EXTRA_PARAMS));
GC_API GC_PTR GC_debug_malloc_stubborn
        GC_PROTO((size_t size_in_bytes, GC_EXTRA_PARAMS));
GC_API void GC_debug_free GC_PROTO((GC_PTR object_addr));
GC_API GC_PTR GC_debug_realloc
        GC_PROTO((GC_PTR old_object, size_t new_size_in_bytes,
                  GC_EXTRA_PARAMS));

GC_API void GC_debug_change_stubborn GC_PROTO((GC_PTR));
GC_API void GC_debug_end_stubborn_change GC_PROTO((GC_PTR));
# ifdef GC_DEBUG
#   define GC_MALLOC(sz) GC_debug_malloc(sz, GC_EXTRAS)
#   define GC_MALLOC_ATOMIC(sz) GC_debug_malloc_atomic(sz, GC_EXTRAS)
#   define GC_MALLOC_UNCOLLECTABLE(sz) GC_debug_malloc_uncollectable(sz, \
                                                        GC_EXTRAS)
#   define GC_REALLOC(old, sz) GC_debug_realloc(old, sz, GC_EXTRAS)
#   define GC_FREE(p) GC_debug_free(p)
#   define GC_REGISTER_FINALIZER(p, f, d, of, od) \
        GC_debug_register_finalizer(p, f, d, of, od)
#   define GC_REGISTER_FINALIZER_IGNORE_SELF(p, f, d, of, od) \
        GC_debug_register_finalizer_ignore_self(p, f, d, of, od)
#   define GC_REGISTER_FINALIZER_NO_ORDER(p, f, d, of, od) \
        GC_debug_register_finalizer_no_order(p, f, d, of, od)
#   define GC_MALLOC_STUBBORN(sz) GC_debug_malloc_stubborn(sz, GC_EXTRAS)
#   define GC_CHANGE_STUBBORN(p) GC_debug_change_stubborn(p)
#   define GC_END_STUBBORN_CHANGE(p) GC_debug_end_stubborn_change(p)
#   define GC_GENERAL_REGISTER_DISAPPEARING_LINK(link, obj) \
        GC_general_register_disappearing_link(link, GC_base(obj))
#   define GC_REGISTER_DISPLACEMENT(n) GC_debug_register_displacement(n)
# else
#   define GC_MALLOC(sz) GC_malloc(sz)
#   define GC_MALLOC_ATOMIC(sz) GC_malloc_atomic(sz)
#   define GC_MALLOC_UNCOLLECTABLE(sz) GC_malloc_uncollectable(sz)
#   define GC_REALLOC(old, sz) GC_realloc(old, sz)
#   define GC_FREE(p) GC_free(p)
#   define GC_REGISTER_FINALIZER(p, f, d, of, od) \
        GC_register_finalizer(p, f, d, of, od)
#   define GC_REGISTER_FINALIZER_IGNORE_SELF(p, f, d, of, od) \
        GC_register_finalizer_ignore_self(p, f, d, of, od)
#   define GC_REGISTER_FINALIZER_NO_ORDER(p, f, d, of, od) \
        GC_register_finalizer_no_order(p, f, d, of, od)
#   define GC_MALLOC_STUBBORN(sz) GC_malloc_stubborn(sz)
#   define GC_CHANGE_STUBBORN(p) GC_change_stubborn(p)
#   define GC_END_STUBBORN_CHANGE(p) GC_end_stubborn_change(p)
#   define GC_GENERAL_REGISTER_DISAPPEARING_LINK(link, obj) \
        GC_general_register_disappearing_link(link, obj)
#   define GC_REGISTER_DISPLACEMENT(n) GC_register_displacement(n)
# endif
/* The following are included because they are often convenient, and */
/* reduce the chance for a misspecified size argument. But calls may */
/* expand to something syntactically incorrect if t is a complicated */
/* type expression. */
# define GC_NEW(t) (t *)GC_MALLOC(sizeof (t))
# define GC_NEW_ATOMIC(t) (t *)GC_MALLOC_ATOMIC(sizeof (t))
# define GC_NEW_STUBBORN(t) (t *)GC_MALLOC_STUBBORN(sizeof (t))
# define GC_NEW_UNCOLLECTABLE(t) (t *)GC_MALLOC_UNCOLLECTABLE(sizeof (t))
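/*
 * Illustrative sketch (hypothetical client code): portable clients
 * normally allocate through the upper-case macros, so that rebuilding
 * with GC_DEBUG defined (before including this file) transparently
 * switches every allocation to the checked, source-location-recording
 * variants:
 *
 *   #define GC_DEBUG
 *   #include "gc.h"
 *
 *   typedef struct point { double x, y; } point;
 *
 *   point *p = GC_NEW(point);
 *   point *q = GC_NEW_ATOMIC(point);
 *   p = (point *)GC_REALLOC(p, 2 * sizeof(point));
 *
 * With GC_DEBUG defined, GC_gcollect will additionally check such
 * objects for overwrites; without it the macros expand to the plain
 * GC_malloc family.
 */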

/* Finalization. Some of these primitives are grossly unsafe. */
/* The idea is to make them both cheap, and sufficient to build */
/* a safer layer, closer to PCedar finalization. */
/* The interface represents my conclusions from a long discussion */
/* with Alan Demers, Dan Greene, Carl Hauser, Barry Hayes, */
/* Christian Jacobi, and Russ Atkinson. It's not perfect, and */
/* probably nobody else agrees with it. Hans-J. Boehm 3/13/92 */
typedef void (*GC_finalization_proc)
        GC_PROTO((GC_PTR obj, GC_PTR client_data));

GC_API void GC_register_finalizer
        GC_PROTO((GC_PTR obj, GC_finalization_proc fn, GC_PTR cd,
                  GC_finalization_proc *ofn, GC_PTR *ocd));
GC_API void GC_debug_register_finalizer
        GC_PROTO((GC_PTR obj, GC_finalization_proc fn, GC_PTR cd,
                  GC_finalization_proc *ofn, GC_PTR *ocd));
        /* When obj is no longer accessible, invoke */
        /* (*fn)(obj, cd). If a and b are inaccessible, and */
        /* a points to b (after disappearing links have been */
        /* made to disappear), then only a will be */
        /* finalized. (If this does not create any new */
        /* pointers to b, then b will be finalized after the */
        /* next collection.) Any finalizable object that */
        /* is reachable from itself by following one or more */
        /* pointers will not be finalized (or collected). */
        /* Thus cycles involving finalizable objects should */
        /* be avoided, or broken by disappearing links. */
        /* All but the last finalizer registered for an object */
        /* are ignored. */
        /* Finalization may be removed by passing 0 as fn. */
        /* Finalizers are implicitly unregistered just before */
        /* they are invoked. */
        /* The old finalizer and client data are stored in */
        /* *ofn and *ocd. */
        /* Fn is never invoked on an accessible object, */
        /* provided hidden pointers are converted to real */
        /* pointers only if the allocation lock is held, and */
        /* such conversions are not performed by finalization */
        /* routines. */
        /* If GC_register_finalizer is aborted as a result of */
        /* a signal, the object may be left with no */
        /* finalization, even if neither the old nor new */
        /* finalizer were NULL. */
        /* Obj should be the non-NULL starting address of an */
        /* object allocated by GC_malloc or friends. */
        /* Note that any garbage collectable object referenced */
        /* by cd will be considered accessible until the */
        /* finalizer is invoked. */
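/*
 * Illustrative sketch of finalization (hypothetical client code): a
 * wrapper object that closes its file when the wrapper becomes
 * unreachable. <stdio.h> is assumed to be included by the client;
 * "file_handle", "close_file" and "data.txt" are examples only.
 *
 *   struct file_handle { FILE *fp; };
 *
 *   static void close_file(GC_PTR obj, GC_PTR client_data)
 *   {
 *       struct file_handle *h = (struct file_handle *)obj;
 *       if (h->fp != 0) fclose(h->fp);
 *   }
 *
 *   GC_finalization_proc old_fn;
 *   GC_PTR old_cd;
 *   struct file_handle *h =
 *       (struct file_handle *)GC_MALLOC(sizeof(struct file_handle));
 *   h->fp = fopen("data.txt", "r");
 *   GC_REGISTER_FINALIZER(h, close_file, 0, &old_fn, &old_cd);
 *
 * The finalizer runs at some point after h becomes unreachable (or when
 * GC_invoke_finalizers is called, if GC_finalize_on_demand is set); it is
 * not guaranteed to run at process exit.
 */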

/* Another version of the above follows. It ignores */
/* self-cycles, i.e. pointers from a finalizable object to */
/* itself. There is a stylistic argument that this is wrong, */
/* but it's unavoidable for C++, since the compiler may */
/* silently introduce these. It's also benign in that specific */
/* case. */
/* Note that cd will still be viewed as accessible, even if it */
/* refers to the object itself. */
GC_API void GC_register_finalizer_ignore_self
        GC_PROTO((GC_PTR obj, GC_finalization_proc fn, GC_PTR cd,
                  GC_finalization_proc *ofn, GC_PTR *ocd));
GC_API void GC_debug_register_finalizer_ignore_self
        GC_PROTO((GC_PTR obj, GC_finalization_proc fn, GC_PTR cd,
                  GC_finalization_proc *ofn, GC_PTR *ocd));

/* Another version of the above. It ignores all cycles. */
/* It should probably only be used by Java implementations. */
/* Note that cd will still be viewed as accessible, even if it */
/* refers to the object itself. */
GC_API void GC_register_finalizer_no_order
        GC_PROTO((GC_PTR obj, GC_finalization_proc fn, GC_PTR cd,
                  GC_finalization_proc *ofn, GC_PTR *ocd));
GC_API void GC_debug_register_finalizer_no_order
        GC_PROTO((GC_PTR obj, GC_finalization_proc fn, GC_PTR cd,
                  GC_finalization_proc *ofn, GC_PTR *ocd));


/* The following routine may be used to break cycles between */
/* finalizable objects, thus causing cyclic finalizable */
/* objects to be finalized in the correct order. Standard */
/* use involves calling GC_register_disappearing_link(&p), */
/* where p is a pointer that is not followed by finalization */
/* code, and should not be considered in determining */
/* finalization order. */
GC_API int GC_register_disappearing_link GC_PROTO((GC_PTR * /* link */));
        /* Link should point to a field of a heap allocated */
        /* object obj. *link will be cleared when obj is */
        /* found to be inaccessible. This happens BEFORE any */
        /* finalization code is invoked, and BEFORE any */
        /* decisions about finalization order are made. */
        /* This is useful in telling the finalizer that */
        /* some pointers are not essential for proper */
        /* finalization. This may avoid finalization cycles. */
        /* Note that obj may be resurrected by another */
        /* finalizer, and thus the clearing of *link may */
        /* be visible to non-finalization code. */
        /* There's an argument that an arbitrary action should */
        /* be allowed here, instead of just clearing a pointer. */
        /* But this causes problems if that action alters, or */
        /* examines connectivity. */
        /* Returns 1 if link was already registered, 0 */
        /* otherwise. */
        /* Only exists for backward compatibility. See below: */

GC_API int GC_general_register_disappearing_link
        GC_PROTO((GC_PTR * /* link */, GC_PTR obj));
        /* A slight generalization of the above. *link is */
        /* cleared when obj first becomes inaccessible. This */
        /* can be used to implement weak pointers easily and */
        /* safely. Typically link will point to a location */
        /* holding a disguised pointer to obj. (A pointer */
        /* inside an "atomic" object is effectively */
        /* disguised.) In this way soft */
        /* pointers are broken before any object */
        /* reachable from them is finalized. Each link */
        /* may be registered only once, i.e. with one obj */
        /* value. This was added after a long email discussion */
        /* with John Ellis. */
        /* Obj must be a pointer to the first word of an object */
        /* we allocated. It is unsafe to explicitly deallocate */
        /* the object containing link. Explicitly deallocating */
        /* obj may or may not cause link to eventually be */
        /* cleared. */
GC_API int GC_unregister_disappearing_link GC_PROTO((GC_PTR * /* link */));
        /* Returns 0 if link was not actually registered. */
        /* Undoes a registration by either of the above two */
        /* routines. */
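/*
 * Illustrative sketch (hypothetical client code): breaking a finalization
 * cycle. Suppose parent and child objects point at each other and both
 * have finalizers; registering the child's back pointer as a disappearing
 * link removes it from finalization-ordering decisions. The structures
 * and the routines finalize_parent/finalize_child are assumed client code.
 *
 *   struct child { struct parent *up; };
 *   struct parent { struct child *down; };
 *
 *   GC_finalization_proc ofn; GC_PTR ocd;
 *   struct parent *p = (struct parent *)GC_MALLOC(sizeof(struct parent));
 *   struct child *c = (struct child *)GC_MALLOC(sizeof(struct child));
 *   p->down = c;
 *   c->up = p;
 *   GC_REGISTER_FINALIZER(p, finalize_parent, 0, &ofn, &ocd);
 *   GC_REGISTER_FINALIZER(c, finalize_child, 0, &ofn, &ocd);
 *   GC_GENERAL_REGISTER_DISAPPEARING_LINK((GC_PTR *)&c->up, p);
 *
 * Once the pair becomes inaccessible, c->up is cleared before finalization
 * ordering is computed, so the cycle no longer prevents finalization.
 */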

/* Auxiliary fns to make finalization work correctly with displaced */
/* pointers introduced by the debugging allocators. */
GC_API GC_PTR GC_make_closure GC_PROTO((GC_finalization_proc fn, GC_PTR data));
GC_API void GC_debug_invoke_finalizer GC_PROTO((GC_PTR obj, GC_PTR data));

/* Returns !=0 if GC_invoke_finalizers has something to do. */
GC_API int GC_should_invoke_finalizers GC_PROTO((void));

GC_API int GC_invoke_finalizers GC_PROTO((void));
        /* Run finalizers for all objects that are ready to */
        /* be finalized. Return the number of finalizers */
        /* that were run. Normally this is also called */
        /* implicitly during some allocations. If */
        /* GC_finalize_on_demand is nonzero, it must be called */
        /* explicitly. */

/* GC_set_warn_proc can be used to redirect or filter warning messages. */
/* p may not be a NULL pointer. */
typedef void (*GC_warn_proc) GC_PROTO((char *msg, GC_word arg));
GC_API GC_warn_proc GC_set_warn_proc GC_PROTO((GC_warn_proc p));
        /* Returns old warning procedure. */
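/*
 * Illustrative sketch (hypothetical client code): routing collector
 * warnings through an application log file instead of stderr. The msg
 * argument is a printf-style format taking the single GC_word argument;
 * "my_log_file" and "old_proc" are assumed client variables.
 *
 *   static void log_gc_warning(char *msg, GC_word arg)
 *   {
 *       fprintf(my_log_file, msg, arg);
 *   }
 *
 *   ...
 *   old_proc = GC_set_warn_proc(log_gc_warning);
 *
 * The previous procedure is returned so it can later be restored or
 * chained to.
 */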

/* The following is intended to be used by a higher level */
/* (e.g. Java-like) finalization facility. It is expected */
/* that finalization code will arrange for hidden pointers to */
/* disappear. Otherwise objects can be accessed after they */
/* have been collected. */
/* Note that putting pointers in atomic objects or in */
/* nonpointer slots of "typed" objects is equivalent to */
/* disguising them in this way, and may have other advantages. */
# if defined(I_HIDE_POINTERS) || defined(GC_I_HIDE_POINTERS)
typedef GC_word GC_hidden_pointer;
# define HIDE_POINTER(p) (~(GC_hidden_pointer)(p))
# define REVEAL_POINTER(p) ((GC_PTR)(HIDE_POINTER(p)))
/* Converting a hidden pointer to a real pointer requires verifying */
/* that the object still exists. This involves acquiring the */
/* allocator lock to avoid a race with the collector. */
# endif /* I_HIDE_POINTERS */

typedef GC_PTR (*GC_fn_type) GC_PROTO((GC_PTR client_data));
GC_API GC_PTR GC_call_with_alloc_lock
        GC_PROTO((GC_fn_type fn, GC_PTR client_data));

/* The following routines are primarily intended for use with a */
/* preprocessor which inserts calls to check C pointer arithmetic. */

/* Check that p and q point to the same object. */
/* Fail conspicuously if they don't. */
/* Returns the first argument. */
/* Succeeds if neither p nor q points to the heap. */
/* May succeed if both p and q point between heap objects. */
GC_API GC_PTR GC_same_obj GC_PROTO((GC_PTR p, GC_PTR q));

/* Checked pointer pre- and post- increment operations. Note that */
/* the second argument is in units of bytes, not multiples of the */
/* object size. This should either be invoked from a macro, or the */
/* call should be automatically generated. */
GC_API GC_PTR GC_pre_incr GC_PROTO((GC_PTR *p, size_t how_much));
GC_API GC_PTR GC_post_incr GC_PROTO((GC_PTR *p, size_t how_much));

/* Check that p is visible */
/* to the collector as a possible pointer-containing location. */
/* If it isn't, fail conspicuously. */
/* Returns the argument in all cases. May erroneously succeed */
/* in hard cases. (This is intended for debugging use with */
/* untyped allocations. The idea is that it should be possible, though */
/* slow, to add such a call to all indirect pointer stores.) */
/* Currently useless for multithreaded worlds. */
GC_API GC_PTR GC_is_visible GC_PROTO((GC_PTR p));

/* Check that if p is a pointer to a heap page, then it points to */
/* a valid displacement within a heap object. */
/* Fail conspicuously if this property does not hold. */
/* Uninteresting with GC_all_interior_pointers. */
/* Always returns its argument. */
GC_API GC_PTR GC_is_valid_displacement GC_PROTO((GC_PTR p));

/* Safer, but slow, pointer addition. Probably useful mainly with */
/* a preprocessor. Useful only for heap pointers. */
#ifdef GC_DEBUG
# define GC_PTR_ADD3(x, n, type_of_result) \
        ((type_of_result)GC_same_obj((x)+(n), (x)))
# define GC_PRE_INCR3(x, n, type_of_result) \
        ((type_of_result)GC_pre_incr(&(x), (n)*sizeof(*x)))
# define GC_POST_INCR2(x, type_of_result) \
        ((type_of_result)GC_post_incr(&(x), sizeof(*x)))
# ifdef __GNUC__
#   define GC_PTR_ADD(x, n) \
        GC_PTR_ADD3(x, n, typeof(x))
#   define GC_PRE_INCR(x, n) \
        GC_PRE_INCR3(x, n, typeof(x))
#   define GC_POST_INCR(x, n) \
        GC_POST_INCR2(x, typeof(x))
# else
        /* We can't do this right without typeof, which ANSI */
        /* decided was not sufficiently useful. Repeatedly */
        /* mentioning the arguments seems too dangerous to be */
        /* useful. So does omitting the cast on the result. */
#   define GC_PTR_ADD(x, n) ((x)+(n))
# endif
#else /* !GC_DEBUG */
# define GC_PTR_ADD3(x, n, type_of_result) ((x)+(n))
# define GC_PTR_ADD(x, n) ((x)+(n))
# define GC_PRE_INCR3(x, n, type_of_result) ((x) += (n))
# define GC_PRE_INCR(x, n) ((x) += (n))
# define GC_POST_INCR2(x, type_of_result) ((x)++)
# define GC_POST_INCR(x, n) ((x)++)
#endif

/* Safer assignment of a pointer to a nonstack location. */
#ifdef GC_DEBUG
# ifdef __STDC__
#   define GC_PTR_STORE(p, q) \
        (*(void **)GC_is_visible(p) = GC_is_valid_displacement(q))
# else
#   define GC_PTR_STORE(p, q) \
        (*(char **)GC_is_visible(p) = GC_is_valid_displacement(q))
# endif
#else /* !GC_DEBUG */
# define GC_PTR_STORE(p, q) (*(p) = (q))
#endif

/* Functions called to report pointer checking errors */
GC_API void (*GC_same_obj_print_proc) GC_PROTO((GC_PTR p, GC_PTR q));

GC_API void (*GC_is_valid_displacement_print_proc)
        GC_PROTO((GC_PTR p));

GC_API void (*GC_is_visible_print_proc)
        GC_PROTO((GC_PTR p));


/* For pthread support, we generally need to intercept a number of */
/* thread library calls. We do that here by macro defining them. */

#if !defined(GC_USE_LD_WRAP) && \
    (defined(GC_PTHREADS) || defined(GC_SOLARIS_THREADS))
# include "gc_pthread_redirects.h"
#endif

# if defined(PCR) || defined(GC_SOLARIS_THREADS) || \
     defined(GC_PTHREADS) || defined(GC_WIN32_THREADS)
        /* Any flavor of threads except SRC_M3. */
/* This returns a list of objects, linked through their first */
/* word. Its use can greatly reduce lock contention problems, since */
/* the allocation lock can be acquired and released many fewer times. */
/* lb must be large enough to hold the pointer field. */
/* It is used internally by gc_local_alloc.h, which provides a simpler */
/* programming interface on Linux. */
GC_PTR GC_malloc_many(size_t lb);
#define GC_NEXT(p) (*(GC_PTR *)(p)) /* Retrieve the next element */
        /* in returned list. */
extern void GC_thr_init(); /* Needed for Solaris/X86 */
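/*
 * Illustrative sketch (hypothetical client code): consuming the list
 * returned by GC_malloc_many to hand out a batch of fixed-size objects
 * while touching the allocation lock only once. "use_object" is an
 * assumed client routine that takes ownership of one element.
 *
 *   GC_PTR batch = GC_malloc_many(64);
 *   while (batch != 0) {
 *       GC_PTR next = GC_NEXT(batch);
 *       use_object(batch);
 *       batch = next;
 *   }
 *
 * Each element is a 64-byte object (the lb argument); its first word
 * initially holds the link to the next element and should be overwritten
 * by the client before use.
 */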

#endif /* THREADS && !SRC_M3 */

#if defined(GC_WIN32_THREADS)
# include <windows.h>

/*
 * All threads must be created using GC_CreateThread, so that they will be
 * recorded in the thread table.
 */
HANDLE WINAPI GC_CreateThread(
    LPSECURITY_ATTRIBUTES lpThreadAttributes,
    DWORD dwStackSize, LPTHREAD_START_ROUTINE lpStartAddress,
    LPVOID lpParameter, DWORD dwCreationFlags, LPDWORD lpThreadId );

# if defined(_WIN32_WCE)
/*
 * win32_threads.c implements the real WinMain, which will start a new thread
 * to call GC_WinMain after initializing the garbage collector.
 */
int WINAPI GC_WinMain(
    HINSTANCE hInstance,
    HINSTANCE hPrevInstance,
    LPWSTR lpCmdLine,
    int nCmdShow );

#  ifndef GC_BUILD
#    define WinMain GC_WinMain
#    define CreateThread GC_CreateThread
#  endif
# endif /* defined(_WIN32_WCE) */

#endif /* defined(GC_WIN32_THREADS) */

/*
 * If you are planning on putting
 * the collector in a SunOS 5 dynamic library, you need to call GC_INIT()
 * from the statically loaded program section.
 * This circumvents a Solaris 2.X (X<=4) linker bug.
 */
#if defined(sparc) || defined(__sparc)
# define GC_INIT() { extern end, etext; \
                     GC_noop(&end, &etext); }
#else
# if (defined(__CYGWIN32__) && defined(GC_USE_DLL)) || defined (_AIX)
    /*
     * Similarly gnu-win32 DLLs need explicit initialization
     */
#   define GC_INIT() { GC_add_roots(DATASTART, DATAEND); }
# else
#   define GC_INIT()
# endif
#endif

#if !defined(_WIN32_WCE) \
    && ((defined(_MSDOS) || defined(_MSC_VER)) && (_M_IX86 >= 300) \
        || defined(_WIN32) && !defined(__CYGWIN32__) && !defined(__CYGWIN__))
/* Win32s may not free all resources on process exit. */
/* This explicitly deallocates the heap. */
GC_API void GC_win32_free_heap ();
#endif

#if ( defined(_AMIGA) && !defined(GC_AMIGA_MAKINGLIB) )
/* Allocation really goes through GC_amiga_allocwrapper_do */
# include "gc_amiga_redirects.h"
#endif

#if defined(GC_REDIRECT_TO_LOCAL) && !defined(GC_LOCAL_ALLOC_H)
# include "gc_local_alloc.h"
#endif

#ifdef __cplusplus
    } /* end of extern "C" */
#endif

#endif /* _GC_H */