/* SPDX-License-Identifier: bzip2-1.0.6 */
/*
   This file is part of MemCheck, a heavyweight Valgrind tool for
   detecting memory errors.

   Copyright (C) 2000-2017 Julian Seward.  All rights reserved.
*/
8
#ifndef __MEMCHECK_H
#define __MEMCHECK_H


/* This file is for inclusion into client (your!) code.

   You can use these macros to manipulate and query memory permissions
   inside your own programs.

   See comment near the top of valgrind.h on how to use them.
*/

#include "valgrind.h"

/* !! ABIWARNING !! ABIWARNING !! ABIWARNING !! ABIWARNING !!
   This enum comprises an ABI exported by Valgrind to programs
   which use client requests. DO NOT CHANGE THE ORDER OF THESE
   ENTRIES, NOR DELETE ANY -- add new ones at the end. */
typedef
   enum {
      /* Memory-state manipulation requests. */
      VG_USERREQ__MAKE_MEM_NOACCESS = VG_USERREQ_TOOL_BASE('M','C'),
      VG_USERREQ__MAKE_MEM_UNDEFINED,
      VG_USERREQ__MAKE_MEM_DEFINED,
      VG_USERREQ__DISCARD,
      /* Memory-state query requests. */
      VG_USERREQ__CHECK_MEM_IS_ADDRESSABLE,
      VG_USERREQ__CHECK_MEM_IS_DEFINED,
      /* Leak-checking requests. */
      VG_USERREQ__DO_LEAK_CHECK,
      VG_USERREQ__COUNT_LEAKS,

      /* Read/write the validity (V) bit metadata for a range. */
      VG_USERREQ__GET_VBITS,
      VG_USERREQ__SET_VBITS,

      VG_USERREQ__CREATE_BLOCK,

      VG_USERREQ__MAKE_MEM_DEFINED_IF_ADDRESSABLE,

      /* Not next to VG_USERREQ__COUNT_LEAKS because it was added later. */
      VG_USERREQ__COUNT_LEAK_BLOCKS,

      VG_USERREQ__ENABLE_ADDR_ERROR_REPORTING_IN_RANGE,
      VG_USERREQ__DISABLE_ADDR_ERROR_REPORTING_IN_RANGE,

      /* This is just for memcheck's internal use - don't use it */
      _VG_USERREQ__MEMCHECK_RECORD_OVERLAP_ERROR
         = VG_USERREQ_TOOL_BASE('M','C') + 256
   } Vg_MemCheckClientRequest;
55
56
57
58/* Client-code macros to manipulate the state of memory. */
59
/* Mark the _mc_len bytes starting at _mc_addr as unaddressable:
   Memcheck will flag any subsequent access to them. */
#define VALGRIND_MAKE_MEM_NOACCESS(_mc_addr, _mc_len)                 \
   VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__MAKE_MEM_NOACCESS,     \
                                   (_mc_addr), (_mc_len), 0, 0, 0)
65
/* Mark the _mc_len bytes starting at _mc_addr as addressable, but
   with undefined (uninitialised) contents. */
#define VALGRIND_MAKE_MEM_UNDEFINED(_mc_addr, _mc_len)                \
   VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__MAKE_MEM_UNDEFINED,    \
                                   (_mc_addr), (_mc_len), 0, 0, 0)
72
/* Mark the _mc_len bytes starting at _mc_addr as addressable and
   holding fully defined (initialised) data. */
#define VALGRIND_MAKE_MEM_DEFINED(_mc_addr, _mc_len)                  \
   VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__MAKE_MEM_DEFINED,      \
                                   (_mc_addr), (_mc_len), 0, 0, 0)
79
/* Like VALGRIND_MAKE_MEM_DEFINED, but leaves addressability alone:
   bytes that are already addressable become defined, while bytes
   that are unaddressable stay exactly as they were. */
#define VALGRIND_MAKE_MEM_DEFINED_IF_ADDRESSABLE(_mc_addr, _mc_len)   \
   VALGRIND_DO_CLIENT_REQUEST_STMT(                                   \
                          VG_USERREQ__MAKE_MEM_DEFINED_IF_ADDRESSABLE, \
                          (_mc_addr), (_mc_len), 0, 0, 0)
87
/* Attach a block description to a memory range.  _mc_desc is an
   ASCII string that Memcheck quotes in any message concerning an
   address inside [_mc_addr, _mc_addr + _mc_len).  The range's
   addressability and definedness are not affected. */
#define VALGRIND_CREATE_BLOCK(_mc_addr, _mc_len, _mc_desc)            \
   VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__CREATE_BLOCK,          \
                                   (_mc_addr), (_mc_len),             \
                                   (_mc_desc), 0, 0)
97
/* Discard a block-description handle.  Evaluates to 1 for an invalid
   handle and 0 for a valid one (also 0 when not running under
   Valgrind). */
#define VALGRIND_DISCARD(_mc_blkindex)                                \
   VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */,            \
                                   VG_USERREQ__DISCARD,               \
                                   0, (_mc_blkindex), 0, 0, 0)
104
105
106/* Client-code macros to check the state of memory. */
107
/* Verify that the _mc_len bytes starting at _mc_addr are addressable.
   If they are not, Valgrind emits an error message and the expression
   yields the address of the first offending byte; otherwise it yields
   zero. */
#define VALGRIND_CHECK_MEM_IS_ADDRESSABLE(_mc_addr, _mc_len)          \
   VALGRIND_DO_CLIENT_REQUEST_EXPR(0,                                 \
                                 VG_USERREQ__CHECK_MEM_IS_ADDRESSABLE, \
                                 (_mc_addr), (_mc_len), 0, 0, 0)
116
/* Verify that the _mc_len bytes starting at _mc_addr are both
   addressable and defined.  On failure Valgrind emits an error
   message and the expression yields the address of the first
   offending byte; otherwise it yields zero. */
#define VALGRIND_CHECK_MEM_IS_DEFINED(_mc_addr, _mc_len)              \
   VALGRIND_DO_CLIENT_REQUEST_EXPR(0,                                 \
                                   VG_USERREQ__CHECK_MEM_IS_DEFINED,  \
                                   (_mc_addr), (_mc_len), 0, 0, 0)
125
/* Force a definedness-and-addressability check on an lvalue.  If the
   check fails, Valgrind prints an error message and the expression
   yields the address of the first offending byte; otherwise it yields
   zero.

   NOTE: the parameter was renamed from '__lvalue' to '_qzz_lvalue'.
   Identifiers containing a double underscore are reserved for the
   implementation (C11 7.1.3); the rename is invisible to callers. */
#define VALGRIND_CHECK_VALUE_IS_DEFINED(_qzz_lvalue)               \
   VALGRIND_CHECK_MEM_IS_DEFINED(                                  \
      (volatile unsigned char *)&(_qzz_lvalue),                    \
      (unsigned long)(sizeof (_qzz_lvalue)))
135
136
/* Run a full memory leak check immediately, exactly as the
   --leak-check=full option would do at exit. */
#define VALGRIND_DO_LEAK_CHECK                                        \
   VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__DO_LEAK_CHECK,         \
                                   0, 0, 0, 0, 0)
141
/* Like VALGRIND_DO_LEAK_CHECK, except the report is restricted to
   entries whose leaked byte count or leaked block count grew since
   the previous leak search. */
#define VALGRIND_DO_ADDED_LEAK_CHECK                                  \
   VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__DO_LEAK_CHECK,         \
                                   0, 1, 0, 0, 0)
148
/* Like VALGRIND_DO_ADDED_LEAK_CHECK, but reports entries whose
   leaked bytes/blocks either grew or shrank since the previous leak
   search. */
#define VALGRIND_DO_CHANGED_LEAK_CHECK                                \
   VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__DO_LEAK_CHECK,         \
                                   0, 2, 0, 0, 0)
155
/* Run a summary-only leak check immediately, as the
   --leak-check=summary option would do at exit. */
#define VALGRIND_DO_QUICK_LEAK_CHECK                                  \
   VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__DO_LEAK_CHECK,         \
                                   1, 0, 0, 0, 0)
160
/* Store into the four lvalue arguments the number of leaked, dubious,
   reachable and suppressed BYTES found by all previous leak checks.

   For safety on 64-bit platforms the results are first received in
   private 'unsigned long' temporaries and then assigned to the
   caller's lvalues, so the arguments may be of any integer type.  The
   temporaries are initialised to zero because VG_USERREQ__COUNT_LEAKS
   does not mark the values it returns as defined.

   FIX: the body is now wrapped in do { ... } while (0) instead of a
   bare { ... } block, so `VALGRIND_COUNT_LEAKS(a,b,c,d);` works as the
   body of an unbraced if/else (previously the trailing semicolon
   became a stray empty statement and broke compilation). */
#define VALGRIND_COUNT_LEAKS(leaked, dubious, reachable, suppressed)  \
   do {                                                               \
      unsigned long _qzz_leaked    = 0, _qzz_dubious    = 0;          \
      unsigned long _qzz_reachable = 0, _qzz_suppressed = 0;          \
      VALGRIND_DO_CLIENT_REQUEST_STMT(                                \
                                 VG_USERREQ__COUNT_LEAKS,             \
                                 &_qzz_leaked, &_qzz_dubious,         \
                                 &_qzz_reachable, &_qzz_suppressed,   \
                                 0);                                  \
      leaked     = _qzz_leaked;                                       \
      dubious    = _qzz_dubious;                                      \
      reachable  = _qzz_reachable;                                    \
      suppressed = _qzz_suppressed;                                   \
   } while (0)
182
/* Store into the four lvalue arguments the number of leaked, dubious,
   reachable and suppressed BLOCKS found by all previous leak checks.
   (The original comment said "bytes" — a copy-paste from
   VALGRIND_COUNT_LEAKS; this request counts blocks, per its name.)

   For safety on 64-bit platforms the results are first received in
   private 'unsigned long' temporaries and then assigned to the
   caller's lvalues, so the arguments may be of any integer type.  The
   temporaries are initialised to zero because the request does not
   mark the values it returns as defined.

   FIX: the body is now wrapped in do { ... } while (0) instead of a
   bare { ... } block, so `VALGRIND_COUNT_LEAK_BLOCKS(a,b,c,d);` works
   as the body of an unbraced if/else (previously the trailing
   semicolon became a stray empty statement and broke compilation). */
#define VALGRIND_COUNT_LEAK_BLOCKS(leaked, dubious, reachable, suppressed) \
   do {                                                               \
      unsigned long _qzz_leaked    = 0, _qzz_dubious    = 0;          \
      unsigned long _qzz_reachable = 0, _qzz_suppressed = 0;          \
      VALGRIND_DO_CLIENT_REQUEST_STMT(                                \
                                 VG_USERREQ__COUNT_LEAK_BLOCKS,       \
                                 &_qzz_leaked, &_qzz_dubious,         \
                                 &_qzz_reachable, &_qzz_suppressed,   \
                                 0);                                  \
      leaked     = _qzz_leaked;                                       \
      dubious    = _qzz_dubious;                                      \
      reachable  = _qzz_reachable;                                    \
      suppressed = _qzz_suppressed;                                   \
   } while (0)
204
205
/* Copy the validity (V) bits for the _mc_nbytes addresses starting at
   _mc_addr into the _mc_vbits array.  Result codes:
      0  not running on Valgrind
      1  success
      2  [previously indicated unaligned arrays; these are now allowed]
      3  some part of _mc_addr/_mc_vbits is not addressable
   No metadata is copied in cases 0, 2 or 3, so the call should never
   be able to segfault your program. */
#define VALGRIND_GET_VBITS(_mc_addr, _mc_vbits, _mc_nbytes)           \
   (unsigned)VALGRIND_DO_CLIENT_REQUEST_EXPR(0,                       \
                                   VG_USERREQ__GET_VBITS,             \
                                   (const char*)(_mc_addr),           \
                                   (char*)(_mc_vbits),                \
                                   (_mc_nbytes), 0, 0)
221
/* Set the validity (V) bits for the _mc_nbytes addresses starting at
   _mc_addr, copying them from the _mc_vbits array.  Result codes:
      0  not running on Valgrind
      1  success
      2  [previously indicated unaligned arrays; these are now allowed]
      3  some part of _mc_addr/_mc_vbits is not addressable
   No metadata is copied in cases 0, 2 or 3, so the call should never
   be able to segfault your program. */
#define VALGRIND_SET_VBITS(_mc_addr, _mc_vbits, _mc_nbytes)           \
   (unsigned)VALGRIND_DO_CLIENT_REQUEST_EXPR(0,                       \
                                   VG_USERREQ__SET_VBITS,             \
                                   (const char*)(_mc_addr),           \
                                   (const char*)(_mc_vbits),          \
                                   (_mc_nbytes), 0, 0 )
237
/* Stop Memcheck from reporting addressing errors for the _mc_len
   bytes starting at _mc_addr (pair with the ENABLE variant to
   restore reporting). */
#define VALGRIND_DISABLE_ADDR_ERROR_REPORTING_IN_RANGE(_mc_addr, _mc_len) \
   VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */,            \
                   VG_USERREQ__DISABLE_ADDR_ERROR_REPORTING_IN_RANGE, \
                   (_mc_addr), (_mc_len), 0, 0, 0)
244
/* Re-enable reporting of addressing errors for the _mc_len bytes
   starting at _mc_addr (undoes the DISABLE variant). */
#define VALGRIND_ENABLE_ADDR_ERROR_REPORTING_IN_RANGE(_mc_addr, _mc_len) \
   VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */,            \
                   VG_USERREQ__ENABLE_ADDR_ERROR_REPORTING_IN_RANGE,  \
                   (_mc_addr), (_mc_len), 0, 0, 0)
249
250#endif
251