/* -*- c -*-
   ----------------------------------------------------------------
   Notice that the following BSD-style license applies to this one
   file (valgrind.h) only. The rest of Valgrind is licensed under the
   terms of the GNU General Public License, version 2, unless
   otherwise indicated. See the COPYING file in the source
   distribution for details.
   ----------------------------------------------------------------
   This file is part of Valgrind, a dynamic binary instrumentation
   framework.
   Copyright (C) 2000-2017 Julian Seward. All rights reserved.
   Redistribution and use in source and binary forms, with or without
   modification, are permitted provided that the following conditions
   are met:
   1. Redistributions of source code must retain the above copyright
   notice, this list of conditions and the following disclaimer.
   2. The origin of this software must not be misrepresented; you must
   not claim that you wrote the original software. If you use this
   software in a product, an acknowledgment in the product
   documentation would be appreciated but is not required.
   3. Altered source versions must be plainly marked as such, and must
   not be misrepresented as being the original software.
   4. The name of the author may not be used to endorse or promote
   products derived from this software without specific prior written
   permission.
   THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
   OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
   WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
   DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
   GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
   WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
   NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
   SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
   ----------------------------------------------------------------
   Notice that the above BSD-style license applies to this one file
   (valgrind.h) only. The entire rest of Valgrind is licensed under
   the terms of the GNU General Public License, version 2. See the
   COPYING file in the source distribution for details.
   ----------------------------------------------------------------
*/
/* This file is for inclusion into client (your!) code.
   You can use these macros to manipulate and query Valgrind's
   execution inside your own programs.
   The resulting executables will still run without Valgrind, just a
   little bit more slowly than they otherwise would, but otherwise
   unchanged. When not running on valgrind, each client request
   consumes very few (eg. 7) instructions, so the resulting performance
   loss is negligible unless you plan to execute client requests
   millions of times per second. Nevertheless, if that is still a
   problem, you can compile with the NVALGRIND symbol defined (gcc
   -DNVALGRIND) so that client requests are not even compiled in. */
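/* For example, a minimal sketch of the trade-off described above (the
   RUNNING_ON_VALGRIND macro used here is defined further down this
   file): run natively, the test below costs a handful of instructions;
   built with gcc -DNVALGRIND, it compiles down to a test of the
   constant 0.

      #include "valgrind.h"

      int main ( void )
      {
         if (RUNNING_ON_VALGRIND)
            return 1;
         return 0;
      }
*/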
#ifndef __VALGRIND_H
#define __VALGRIND_H
/* ------------------------------------------------------------------ */
/* VERSION NUMBER OF VALGRIND                                          */
/* ------------------------------------------------------------------ */
/* Specify Valgrind's version number, so that user code can
   conditionally compile based on our version number. Note that these
   were introduced at version 3.6 and so do not exist in version 3.5
   or earlier. The recommended way to use them to check for "version
   X.Y or later" is (eg)
   #if defined(__VALGRIND_MAJOR__) && defined(__VALGRIND_MINOR__) \
       && (__VALGRIND_MAJOR__ > 3 \
           || (__VALGRIND_MAJOR__ == 3 && __VALGRIND_MINOR__ >= 6))
*/
#define __VALGRIND_MAJOR__    3
#define __VALGRIND_MINOR__    15
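/* For example, a complete sketch of such a guard; the request inside
   the guard is arbitrary, chosen only to illustrate the pattern:

      #if defined(__VALGRIND_MAJOR__) && defined(__VALGRIND_MINOR__) \
          && (__VALGRIND_MAJOR__ > 3 \
              || (__VALGRIND_MAJOR__ == 3 && __VALGRIND_MINOR__ >= 6))
         VALGRIND_VEX_INJECT_IR();
      #endif
*/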
#include <stdarg.h>
/* Nb: this file might be included in a file compiled with -ansi. So
   we can't use C++ style "//" comments nor the "asm" keyword (instead
   use "__asm__"). */
/* Derive some tags indicating what the target platform is. Note
   that in this file we're using the compiler's CPP symbols for
   identifying architectures, which are different to the ones we use
   within the rest of Valgrind. Note, __powerpc__ is active for both
   32 and 64-bit PPC, whereas __powerpc64__ is only active for the
   latter (on Linux, that is).
   Misc note: how to find out what's predefined in gcc by default:
   gcc -Wp,-dM somefile.c
*/
#undef PLAT_x86_darwin
#undef PLAT_amd64_darwin
#undef PLAT_x86_win32
#undef PLAT_amd64_win64
#undef PLAT_x86_linux
#undef PLAT_amd64_linux
#undef PLAT_ppc32_linux
#undef PLAT_ppc64be_linux
#undef PLAT_ppc64le_linux
#undef PLAT_arm_linux
#undef PLAT_arm64_linux
#undef PLAT_s390x_linux
#undef PLAT_mips32_linux
#undef PLAT_mips64_linux
#undef PLAT_x86_solaris
#undef PLAT_amd64_solaris
#if defined(__APPLE__) && defined(__i386__)
#  define PLAT_x86_darwin 1
#elif defined(__APPLE__) && defined(__x86_64__)
#  define PLAT_amd64_darwin 1
#elif (defined(__MINGW32__) && !defined(__MINGW64__)) \
      || defined(__CYGWIN32__) \
      || (defined(_WIN32) && defined(_M_IX86))
#  define PLAT_x86_win32 1
#elif defined(__MINGW64__) \
      || (defined(_WIN64) && defined(_M_X64))
#  define PLAT_amd64_win64 1
#elif defined(__linux__) && defined(__i386__)
#  define PLAT_x86_linux 1
#elif defined(__linux__) && defined(__x86_64__) && !defined(__ILP32__)
#  define PLAT_amd64_linux 1
#elif defined(__linux__) && defined(__powerpc__) && !defined(__powerpc64__)
#  define PLAT_ppc32_linux 1
#elif defined(__linux__) && defined(__powerpc__) && defined(__powerpc64__) && _CALL_ELF != 2
/* Big Endian uses ELF version 1 */
#  define PLAT_ppc64be_linux 1
#elif defined(__linux__) && defined(__powerpc__) && defined(__powerpc64__) && _CALL_ELF == 2
/* Little Endian uses ELF version 2 */
#  define PLAT_ppc64le_linux 1
#elif defined(__linux__) && defined(__arm__) && !defined(__aarch64__)
#  define PLAT_arm_linux 1
#elif defined(__linux__) && defined(__aarch64__) && !defined(__arm__)
#  define PLAT_arm64_linux 1
#elif defined(__linux__) && defined(__s390__) && defined(__s390x__)
#  define PLAT_s390x_linux 1
#elif defined(__linux__) && defined(__mips__) && (__mips==64)
#  define PLAT_mips64_linux 1
#elif defined(__linux__) && defined(__mips__) && (__mips!=64)
#  define PLAT_mips32_linux 1
#elif defined(__sun) && defined(__i386__)
#  define PLAT_x86_solaris 1
#elif defined(__sun) && defined(__x86_64__)
#  define PLAT_amd64_solaris 1
#else
/* If we're not compiling for our target platform, don't generate
   any inline asms. */
#  if !defined(NVALGRIND)
#    define NVALGRIND 1
#  endif
#endif
/* ------------------------------------------------------------------ */
/* ARCHITECTURE SPECIFICS for SPECIAL INSTRUCTIONS. There is nothing  */
/* in here of use to end-users -- skip to the next section.           */
/* ------------------------------------------------------------------ */
/*
 * VALGRIND_DO_CLIENT_REQUEST(): a statement that invokes a Valgrind client
 * request. Accepts both pointers and integers as arguments.
 *
 * VALGRIND_DO_CLIENT_REQUEST_STMT(): a statement that invokes a Valgrind
 * client request that does not return a value.
 * VALGRIND_DO_CLIENT_REQUEST_EXPR(): a C expression that invokes a Valgrind
 * client request and whose value equals the client request result. Accepts
 * both pointers and integers as arguments. Note that such calls are not
 * necessarily pure functions -- they may have side effects.
 */
#define VALGRIND_DO_CLIENT_REQUEST(_zzq_rlval, _zzq_default, \
_zzq_request, _zzq_arg1, _zzq_arg2, \
_zzq_arg3, _zzq_arg4, _zzq_arg5) \
do { (_zzq_rlval) = VALGRIND_DO_CLIENT_REQUEST_EXPR((_zzq_default), \
(_zzq_request), (_zzq_arg1), (_zzq_arg2), \
(_zzq_arg3), (_zzq_arg4), (_zzq_arg5)); } while (0)
#define VALGRIND_DO_CLIENT_REQUEST_STMT(_zzq_request, _zzq_arg1, \
_zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \
do { (void) VALGRIND_DO_CLIENT_REQUEST_EXPR(0, \
(_zzq_request), (_zzq_arg1), (_zzq_arg2), \
(_zzq_arg3), (_zzq_arg4), (_zzq_arg5)); } while (0)
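/* For example, a sketch of issuing a request through the expression
   form; 'MY_REQUEST_CODE', 'ptr' and 'len' are placeholders, not names
   defined by this file. The first argument (0 here) is what the
   expression evaluates to when the program is not running under
   Valgrind:

      unsigned long result =
         VALGRIND_DO_CLIENT_REQUEST_EXPR(0, MY_REQUEST_CODE,
                                         ptr, len, 0, 0, 0);
*/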
#if defined(NVALGRIND)
/* Define NVALGRIND to completely remove the Valgrind magic sequence
   from the compiled code (analogous to NDEBUG's effects on
   assert()) */
#define VALGRIND_DO_CLIENT_REQUEST_EXPR( \
_zzq_default, _zzq_request, \
_zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \
(_zzq_default)
#else /* ! NVALGRIND */
/* The following defines the magic code sequences which the JITter
   spots and handles magically. Don't look too closely at them as
   they will rot your brain.
   The assembly code sequences for all architectures are in this one
   file. This is because this file must be stand-alone, and we don't
   want to have multiple files.
   For VALGRIND_DO_CLIENT_REQUEST, we must ensure that the default
   value gets put in the return slot, so that everything works when
   this is executed not under Valgrind. Args are passed in a memory
   block, and so there's no intrinsic limit to the number that could
   be passed, but it's currently five.
   The macro args are:
   _zzq_rlval    result lvalue
   _zzq_default  default value (result returned when running on real CPU)
   _zzq_request  request code
   _zzq_arg1..5  request params
   The other two macros are used to support function wrapping, and are
   a lot simpler. VALGRIND_GET_NR_CONTEXT returns the value of the
   guest's NRADDR pseudo-register and whatever other information is
   needed to safely call the original from the wrapper: on
   ppc64-linux, the R2 value at the divert point is also needed. This
   information is abstracted into a user-visible type, OrigFn.
   VALGRIND_CALL_NOREDIR_* behaves the same as the following on the
   guest, but guarantees that the branch instruction will not be
   redirected: x86: call *%eax, amd64: call *%rax, ppc32/ppc64:
   branch-and-link-to-r11. VALGRIND_CALL_NOREDIR is just text, not a
   complete inline asm, since it needs to be combined with more magic
   inline asm stuff to be useful.
*/
/* ----------------- x86-{linux,darwin,solaris} ---------------- */
#if defined(PLAT_x86_linux) || defined(PLAT_x86_darwin) \
    || (defined(PLAT_x86_win32) && defined(__GNUC__)) \
    || defined(PLAT_x86_solaris)
typedef
struct {
unsigned int nraddr; /* where's the code? */
}
OrigFn;
#define __SPECIAL_INSTRUCTION_PREAMBLE \
"roll $3, %%edi ; roll $13, %%edi\n\t" \
"roll $29, %%edi ; roll $19, %%edi\n\t"
#define VALGRIND_DO_CLIENT_REQUEST_EXPR( \
_zzq_default, _zzq_request, \
_zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \
__extension__ \
({volatile unsigned int _zzq_args[6]; \
volatile unsigned int _zzq_result; \
_zzq_args[0] = (unsigned int)(_zzq_request); \
_zzq_args[1] = (unsigned int)(_zzq_arg1); \
_zzq_args[2] = (unsigned int)(_zzq_arg2); \
_zzq_args[3] = (unsigned int)(_zzq_arg3); \
_zzq_args[4] = (unsigned int)(_zzq_arg4); \
_zzq_args[5] = (unsigned int)(_zzq_arg5); \
__asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
/* %EDX = client_request ( %EAX ) */ \
"xchgl %%ebx,%%ebx" \
: "=d" (_zzq_result) \
: "a" (&_zzq_args[0]), "0" (_zzq_default) \
: "cc", "memory" \
); \
_zzq_result; \
})
#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \
{ volatile OrigFn* _zzq_orig = &(_zzq_rlval); \
volatile unsigned int __addr; \
__asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
/* %EAX = guest_NRADDR */ \
"xchgl %%ecx,%%ecx" \
: "=a" (__addr) \
: \
: "cc", "memory" \
); \
_zzq_orig->nraddr = __addr; \
}
#define VALGRIND_CALL_NOREDIR_EAX \
__SPECIAL_INSTRUCTION_PREAMBLE \
/* call-noredir *%EAX */ \
"xchgl %%edx,%%edx\n\t"
#define VALGRIND_VEX_INJECT_IR() \
do { \
__asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
"xchgl %%edi,%%edi\n\t" \
: : : "cc", "memory" \
); \
} while (0)
#endif /* PLAT_x86_linux || PLAT_x86_darwin || (PLAT_x86_win32 && __GNUC__)
          || PLAT_x86_solaris */
/* ------------------------- x86-Win32 ------------------------- */
#if defined(PLAT_x86_win32) && !defined(__GNUC__)
typedef
struct {
unsigned int nraddr; /* where's the code? */
}
OrigFn;
#if defined(_MSC_VER)
#define __SPECIAL_INSTRUCTION_PREAMBLE \
__asm rol edi, 3 __asm rol edi, 13 \
__asm rol edi, 29 __asm rol edi, 19
#define VALGRIND_DO_CLIENT_REQUEST_EXPR( \
_zzq_default, _zzq_request, \
_zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \
valgrind_do_client_request_expr((uintptr_t)(_zzq_default), \
(uintptr_t)(_zzq_request), (uintptr_t)(_zzq_arg1), \
(uintptr_t)(_zzq_arg2), (uintptr_t)(_zzq_arg3), \
(uintptr_t)(_zzq_arg4), (uintptr_t)(_zzq_arg5))
static __inline uintptr_t
valgrind_do_client_request_expr(uintptr_t _zzq_default, uintptr_t _zzq_request,
uintptr_t _zzq_arg1, uintptr_t _zzq_arg2,
uintptr_t _zzq_arg3, uintptr_t _zzq_arg4,
uintptr_t _zzq_arg5)
{
volatile uintptr_t _zzq_args[6];
volatile unsigned int _zzq_result;
_zzq_args[0] = (uintptr_t)(_zzq_request);
_zzq_args[1] = (uintptr_t)(_zzq_arg1);
_zzq_args[2] = (uintptr_t)(_zzq_arg2);
_zzq_args[3] = (uintptr_t)(_zzq_arg3);
_zzq_args[4] = (uintptr_t)(_zzq_arg4);
_zzq_args[5] = (uintptr_t)(_zzq_arg5);
__asm { __asm lea eax, _zzq_args __asm mov edx, _zzq_default
__SPECIAL_INSTRUCTION_PREAMBLE
/* %EDX = client_request ( %EAX ) */
__asm xchg ebx,ebx
__asm mov _zzq_result, edx
}
return _zzq_result;
}
#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \
{ volatile OrigFn* _zzq_orig = &(_zzq_rlval); \
volatile unsigned int __addr; \
__asm { __SPECIAL_INSTRUCTION_PREAMBLE \
/* %EAX = guest_NRADDR */ \
__asm xchg ecx,ecx \
__asm mov __addr, eax \
} \
_zzq_orig->nraddr = __addr; \
}
#define VALGRIND_CALL_NOREDIR_EAX ERROR
#define VALGRIND_VEX_INJECT_IR() \
do { \
__asm { __SPECIAL_INSTRUCTION_PREAMBLE \
__asm xchg edi,edi \
} \
} while (0)
#else
#error Unsupported compiler.
#endif
#endif /* PLAT_x86_win32 */
/* ----------------- amd64-{linux,darwin,solaris} --------------- */
#if defined(PLAT_amd64_linux) || defined(PLAT_amd64_darwin) \
    || defined(PLAT_amd64_solaris) \
    || (defined(PLAT_amd64_win64) && defined(__GNUC__))
typedef
struct {
unsigned long int nraddr; /* where's the code? */
}
OrigFn;
#define __SPECIAL_INSTRUCTION_PREAMBLE \
"rolq $3, %%rdi ; rolq $13, %%rdi\n\t" \
"rolq $61, %%rdi ; rolq $51, %%rdi\n\t"
#define VALGRIND_DO_CLIENT_REQUEST_EXPR( \
_zzq_default, _zzq_request, \
_zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \
__extension__ \
({ volatile unsigned long int _zzq_args[6]; \
volatile unsigned long int _zzq_result; \
_zzq_args[0] = (unsigned long int)(_zzq_request); \
_zzq_args[1] = (unsigned long int)(_zzq_arg1); \
_zzq_args[2] = (unsigned long int)(_zzq_arg2); \
_zzq_args[3] = (unsigned long int)(_zzq_arg3); \
_zzq_args[4] = (unsigned long int)(_zzq_arg4); \
_zzq_args[5] = (unsigned long int)(_zzq_arg5); \
__asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
/* %RDX = client_request ( %RAX ) */ \
"xchgq %%rbx,%%rbx" \
: "=d" (_zzq_result) \
: "a" (&_zzq_args[0]), "0" (_zzq_default) \
: "cc", "memory" \
); \
_zzq_result; \
})
#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \
{ volatile OrigFn* _zzq_orig = &(_zzq_rlval); \
volatile unsigned long int __addr; \
__asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
/* %RAX = guest_NRADDR */ \
"xchgq %%rcx,%%rcx" \
: "=a" (__addr) \
: \
: "cc", "memory" \
); \
_zzq_orig->nraddr = __addr; \
}
#define VALGRIND_CALL_NOREDIR_RAX \
__SPECIAL_INSTRUCTION_PREAMBLE \
/* call-noredir *%RAX */ \
"xchgq %%rdx,%%rdx\n\t"
#define VALGRIND_VEX_INJECT_IR() \
do { \
__asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
"xchgq %%rdi,%%rdi\n\t" \
: : : "cc", "memory" \
); \
} while (0)
#endif /* PLAT_amd64_linux || PLAT_amd64_darwin || PLAT_amd64_solaris
          || (PLAT_amd64_win64 && __GNUC__) */
/* ------------------------- amd64-Win64 ------------------------- */
#if defined(PLAT_amd64_win64) && !defined(__GNUC__)
#error Unsupported compiler.
#endif /* PLAT_amd64_win64 */
/* ------------------------ ppc32-linux ------------------------ */
#if defined(PLAT_ppc32_linux)
typedef
struct {
unsigned int nraddr; /* where's the code? */
}
OrigFn;
#define __SPECIAL_INSTRUCTION_PREAMBLE \
"rlwinm 0,0,3,0,31 ; rlwinm 0,0,13,0,31\n\t" \
"rlwinm 0,0,29,0,31 ; rlwinm 0,0,19,0,31\n\t"
#define VALGRIND_DO_CLIENT_REQUEST_EXPR( \
_zzq_default, _zzq_request, \
_zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \
\
__extension__ \
({ unsigned int _zzq_args[6]; \
unsigned int _zzq_result; \
unsigned int* _zzq_ptr; \
_zzq_args[0] = (unsigned int)(_zzq_request); \
_zzq_args[1] = (unsigned int)(_zzq_arg1); \
_zzq_args[2] = (unsigned int)(_zzq_arg2); \
_zzq_args[3] = (unsigned int)(_zzq_arg3); \
_zzq_args[4] = (unsigned int)(_zzq_arg4); \
_zzq_args[5] = (unsigned int)(_zzq_arg5); \
_zzq_ptr = _zzq_args; \
__asm__ volatile("mr 3,%1\n\t" /*default*/ \
"mr 4,%2\n\t" /*ptr*/ \
__SPECIAL_INSTRUCTION_PREAMBLE \
/* %R3 = client_request ( %R4 ) */ \
"or 1,1,1\n\t" \
"mr %0,3" /*result*/ \
: "=b" (_zzq_result) \
: "b" (_zzq_default), "b" (_zzq_ptr) \
: "cc", "memory", "r3", "r4"); \
_zzq_result; \
})
#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \
{ volatile OrigFn* _zzq_orig = &(_zzq_rlval); \
unsigned int __addr; \
__asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
/* %R3 = guest_NRADDR */ \
"or 2,2,2\n\t" \
"mr %0,3" \
: "=b" (__addr) \
: \
: "cc", "memory", "r3" \
); \
_zzq_orig->nraddr = __addr; \
}
#define VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
__SPECIAL_INSTRUCTION_PREAMBLE \
/* branch-and-link-to-noredir *%R11 */ \
"or 3,3,3\n\t"
#define VALGRIND_VEX_INJECT_IR() \
do { \
__asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
"or 5,5,5\n\t" \
); \
} while (0)
#endif /* PLAT_ppc32_linux */
/* ------------------------ ppc64-linux ------------------------ */
#if defined(PLAT_ppc64be_linux)
typedef
struct {
unsigned long int nraddr; /* where's the code? */
unsigned long int r2; /* what tocptr do we need? */
}
OrigFn;
#define __SPECIAL_INSTRUCTION_PREAMBLE \
"rotldi 0,0,3 ; rotldi 0,0,13\n\t" \
"rotldi 0,0,61 ; rotldi 0,0,51\n\t"
#define VALGRIND_DO_CLIENT_REQUEST_EXPR( \
_zzq_default, _zzq_request, \
_zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \
\
__extension__ \
({ unsigned long int _zzq_args[6]; \
unsigned long int _zzq_result; \
unsigned long int* _zzq_ptr; \
_zzq_args[0] = (unsigned long int)(_zzq_request); \
_zzq_args[1] = (unsigned long int)(_zzq_arg1); \
_zzq_args[2] = (unsigned long int)(_zzq_arg2); \
_zzq_args[3] = (unsigned long int)(_zzq_arg3); \
_zzq_args[4] = (unsigned long int)(_zzq_arg4); \
_zzq_args[5] = (unsigned long int)(_zzq_arg5); \
_zzq_ptr = _zzq_args; \
__asm__ volatile("mr 3,%1\n\t" /*default*/ \
"mr 4,%2\n\t" /*ptr*/ \
__SPECIAL_INSTRUCTION_PREAMBLE \
/* %R3 = client_request ( %R4 ) */ \
"or 1,1,1\n\t" \
"mr %0,3" /*result*/ \
: "=b" (_zzq_result) \
: "b" (_zzq_default), "b" (_zzq_ptr) \
: "cc", "memory", "r3", "r4"); \
_zzq_result; \
})
#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \
{ volatile OrigFn* _zzq_orig = &(_zzq_rlval); \
unsigned long int __addr; \
__asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
/* %R3 = guest_NRADDR */ \
"or 2,2,2\n\t" \
"mr %0,3" \
: "=b" (__addr) \
: \
: "cc", "memory", "r3" \
); \
_zzq_orig->nraddr = __addr; \
__asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
/* %R3 = guest_NRADDR_GPR2 */ \
"or 4,4,4\n\t" \
"mr %0,3" \
: "=b" (__addr) \
: \
: "cc", "memory", "r3" \
); \
_zzq_orig->r2 = __addr; \
}
#define VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
__SPECIAL_INSTRUCTION_PREAMBLE \
/* branch-and-link-to-noredir *%R11 */ \
"or 3,3,3\n\t"
#define VALGRIND_VEX_INJECT_IR() \
do { \
__asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
"or 5,5,5\n\t" \
); \
} while (0)
#endif /* PLAT_ppc64be_linux */
#if defined(PLAT_ppc64le_linux)
typedef
struct {
unsigned long int nraddr; /* where's the code? */
unsigned long int r2; /* what tocptr do we need? */
}
OrigFn;
#define __SPECIAL_INSTRUCTION_PREAMBLE \
"rotldi 0,0,3 ; rotldi 0,0,13\n\t" \
"rotldi 0,0,61 ; rotldi 0,0,51\n\t"
#define VALGRIND_DO_CLIENT_REQUEST_EXPR( \
_zzq_default, _zzq_request, \
_zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \
\
__extension__ \
({ unsigned long int _zzq_args[6]; \
unsigned long int _zzq_result; \
unsigned long int* _zzq_ptr; \
_zzq_args[0] = (unsigned long int)(_zzq_request); \
_zzq_args[1] = (unsigned long int)(_zzq_arg1); \
_zzq_args[2] = (unsigned long int)(_zzq_arg2); \
_zzq_args[3] = (unsigned long int)(_zzq_arg3); \
_zzq_args[4] = (unsigned long int)(_zzq_arg4); \
_zzq_args[5] = (unsigned long int)(_zzq_arg5); \
_zzq_ptr = _zzq_args; \
__asm__ volatile("mr 3,%1\n\t" /*default*/ \
"mr 4,%2\n\t" /*ptr*/ \
__SPECIAL_INSTRUCTION_PREAMBLE \
/* %R3 = client_request ( %R4 ) */ \
"or 1,1,1\n\t" \
"mr %0,3" /*result*/ \
: "=b" (_zzq_result) \
: "b" (_zzq_default), "b" (_zzq_ptr) \
: "cc", "memory", "r3", "r4"); \
_zzq_result; \
})
#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \
{ volatile OrigFn* _zzq_orig = &(_zzq_rlval); \
unsigned long int __addr; \
__asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
/* %R3 = guest_NRADDR */ \
"or 2,2,2\n\t" \
"mr %0,3" \
: "=b" (__addr) \
: \
: "cc", "memory", "r3" \
); \
_zzq_orig->nraddr = __addr; \
__asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
/* %R3 = guest_NRADDR_GPR2 */ \
"or 4,4,4\n\t" \
"mr %0,3" \
: "=b" (__addr) \
: \
: "cc", "memory", "r3" \
); \
_zzq_orig->r2 = __addr; \
}
#define VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R12 \
__SPECIAL_INSTRUCTION_PREAMBLE \
/* branch-and-link-to-noredir *%R12 */ \
"or 3,3,3\n\t"
#define VALGRIND_VEX_INJECT_IR() \
do { \
__asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
"or 5,5,5\n\t" \
); \
} while (0)
#endif /* PLAT_ppc64le_linux */
/* ------------------------- arm-linux ------------------------- */
#if defined(PLAT_arm_linux)
typedef
struct {
unsigned int nraddr; /* where's the code? */
}
OrigFn;
#define __SPECIAL_INSTRUCTION_PREAMBLE \
"mov r12, r12, ror #3 ; mov r12, r12, ror #13 \n\t" \
"mov r12, r12, ror #29 ; mov r12, r12, ror #19 \n\t"
#define VALGRIND_DO_CLIENT_REQUEST_EXPR( \
_zzq_default, _zzq_request, \
_zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \
\
__extension__ \
({volatile unsigned int _zzq_args[6]; \
volatile unsigned int _zzq_result; \
_zzq_args[0] = (unsigned int)(_zzq_request); \
_zzq_args[1] = (unsigned int)(_zzq_arg1); \
_zzq_args[2] = (unsigned int)(_zzq_arg2); \
_zzq_args[3] = (unsigned int)(_zzq_arg3); \
_zzq_args[4] = (unsigned int)(_zzq_arg4); \
_zzq_args[5] = (unsigned int)(_zzq_arg5); \
__asm__ volatile("mov r3, %1\n\t" /*default*/ \
"mov r4, %2\n\t" /*ptr*/ \
__SPECIAL_INSTRUCTION_PREAMBLE \
/* R3 = client_request ( R4 ) */ \
"orr r10, r10, r10\n\t" \
"mov %0, r3" /*result*/ \
: "=r" (_zzq_result) \
: "r" (_zzq_default), "r" (&_zzq_args[0]) \
: "cc","memory", "r3", "r4"); \
_zzq_result; \
})
#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \
{ volatile OrigFn* _zzq_orig = &(_zzq_rlval); \
unsigned int __addr; \
__asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
/* R3 = guest_NRADDR */ \
"orr r11, r11, r11\n\t" \
"mov %0, r3" \
: "=r" (__addr) \
: \
: "cc", "memory", "r3" \
); \
_zzq_orig->nraddr = __addr; \
}
#define VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \
__SPECIAL_INSTRUCTION_PREAMBLE \
/* branch-and-link-to-noredir *%R4 */ \
"orr r12, r12, r12\n\t"
#define VALGRIND_VEX_INJECT_IR() \
do { \
__asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
"orr r9, r9, r9\n\t" \
: : : "cc", "memory" \
); \
} while (0)
#endif /* PLAT_arm_linux */
/* ------------------------ arm64-linux ------------------------- */
#if defined(PLAT_arm64_linux)
typedef
struct {
unsigned long int nraddr; /* where's the code? */
}
OrigFn;
#define __SPECIAL_INSTRUCTION_PREAMBLE \
"ror x12, x12, #3 ; ror x12, x12, #13 \n\t" \
"ror x12, x12, #51 ; ror x12, x12, #61 \n\t"
#define VALGRIND_DO_CLIENT_REQUEST_EXPR( \
_zzq_default, _zzq_request, \
_zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \
\
__extension__ \
({volatile unsigned long int _zzq_args[6]; \
volatile unsigned long int _zzq_result; \
_zzq_args[0] = (unsigned long int)(_zzq_request); \
_zzq_args[1] = (unsigned long int)(_zzq_arg1); \
_zzq_args[2] = (unsigned long int)(_zzq_arg2); \
_zzq_args[3] = (unsigned long int)(_zzq_arg3); \
_zzq_args[4] = (unsigned long int)(_zzq_arg4); \
_zzq_args[5] = (unsigned long int)(_zzq_arg5); \
__asm__ volatile("mov x3, %1\n\t" /*default*/ \
"mov x4, %2\n\t" /*ptr*/ \
__SPECIAL_INSTRUCTION_PREAMBLE \
/* X3 = client_request ( X4 ) */ \
"orr x10, x10, x10\n\t" \
"mov %0, x3" /*result*/ \
: "=r" (_zzq_result) \
: "r" ((unsigned long int)(_zzq_default)), \
"r" (&_zzq_args[0]) \
: "cc","memory", "x3", "x4"); \
_zzq_result; \
})
#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \
{ volatile OrigFn* _zzq_orig = &(_zzq_rlval); \
unsigned long int __addr; \
__asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
/* X3 = guest_NRADDR */ \
"orr x11, x11, x11\n\t" \
"mov %0, x3" \
: "=r" (__addr) \
: \
: "cc", "memory", "x3" \
); \
_zzq_orig->nraddr = __addr; \
}
#define VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_X8 \
__SPECIAL_INSTRUCTION_PREAMBLE \
/* branch-and-link-to-noredir X8 */ \
"orr x12, x12, x12\n\t"
#define VALGRIND_VEX_INJECT_IR() \
do { \
__asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
"orr x9, x9, x9\n\t" \
: : : "cc", "memory" \
); \
} while (0)
#endif /* PLAT_arm64_linux */
/* ------------------------ s390x-linux ------------------------ */
#if defined(PLAT_s390x_linux)
typedef
struct {
unsigned long int nraddr; /* where's the code? */
}
OrigFn;
/* __SPECIAL_INSTRUCTION_PREAMBLE will be used to identify Valgrind specific
 * code. This detection is implemented in platform specific toIR.c
 * (e.g. VEX/priv/guest_s390_toIR.c).
 */
#define __SPECIAL_INSTRUCTION_PREAMBLE \
"lr 15,15\n\t" \
"lr 1,1\n\t" \
"lr 2,2\n\t" \
"lr 3,3\n\t"
#define __CLIENT_REQUEST_CODE "lr 2,2\n\t"
#define __GET_NR_CONTEXT_CODE "lr 3,3\n\t"
#define __CALL_NO_REDIR_CODE "lr 4,4\n\t"
#define __VEX_INJECT_IR_CODE "lr 5,5\n\t"
#define VALGRIND_DO_CLIENT_REQUEST_EXPR( \
_zzq_default, _zzq_request, \
_zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \
__extension__ \
({volatile unsigned long int _zzq_args[6]; \
volatile unsigned long int _zzq_result; \
_zzq_args[0] = (unsigned long int)(_zzq_request); \
_zzq_args[1] = (unsigned long int)(_zzq_arg1); \
_zzq_args[2] = (unsigned long int)(_zzq_arg2); \
_zzq_args[3] = (unsigned long int)(_zzq_arg3); \
_zzq_args[4] = (unsigned long int)(_zzq_arg4); \
_zzq_args[5] = (unsigned long int)(_zzq_arg5); \
__asm__ volatile(/* r2 = args */ \
"lgr 2,%1\n\t" \
/* r3 = default */ \
"lgr 3,%2\n\t" \
__SPECIAL_INSTRUCTION_PREAMBLE \
__CLIENT_REQUEST_CODE \
/* results = r3 */ \
"lgr %0, 3\n\t" \
: "=d" (_zzq_result) \
: "a" (&_zzq_args[0]), "0" (_zzq_default) \
: "cc", "2", "3", "memory" \
); \
_zzq_result; \
})
#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \
{ volatile OrigFn* _zzq_orig = &(_zzq_rlval); \
volatile unsigned long int __addr; \
__asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
__GET_NR_CONTEXT_CODE \
"lgr %0, 3\n\t" \
: "=a" (__addr) \
: \
: "cc", "3", "memory" \
); \
_zzq_orig->nraddr = __addr; \
}
#define VALGRIND_CALL_NOREDIR_R1 \
__SPECIAL_INSTRUCTION_PREAMBLE \
__CALL_NO_REDIR_CODE
#define VALGRIND_VEX_INJECT_IR() \
do { \
__asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
__VEX_INJECT_IR_CODE); \
} while (0)
#endif /* PLAT_s390x_linux */
/* ------------------------- mips32-linux ---------------- */
#if defined(PLAT_mips32_linux)
typedef
struct {
unsigned int nraddr; /* where's the code? */
}
OrigFn;
/* .word 0x342
 * .word 0x742
 * .word 0xC2
 * .word 0x4C2*/
#define __SPECIAL_INSTRUCTION_PREAMBLE \
"srl $0, $0, 13\n\t" \
"srl $0, $0, 29\n\t" \
"srl $0, $0, 3\n\t" \
"srl $0, $0, 19\n\t"
#define VALGRIND_DO_CLIENT_REQUEST_EXPR( \
_zzq_default, _zzq_request, \
_zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \
__extension__ \
({ volatile unsigned int _zzq_args[6]; \
volatile unsigned int _zzq_result; \
_zzq_args[0] = (unsigned int)(_zzq_request); \
_zzq_args[1] = (unsigned int)(_zzq_arg1); \
_zzq_args[2] = (unsigned int)(_zzq_arg2); \
_zzq_args[3] = (unsigned int)(_zzq_arg3); \
_zzq_args[4] = (unsigned int)(_zzq_arg4); \
_zzq_args[5] = (unsigned int)(_zzq_arg5); \
__asm__ volatile("move $11, %1\n\t" /*default*/ \
"move $12, %2\n\t" /*ptr*/ \
__SPECIAL_INSTRUCTION_PREAMBLE \
/* T3 = client_request ( T4 ) */ \
"or $13, $13, $13\n\t" \
"move %0, $11\n\t" /*result*/ \
: "=r" (_zzq_result) \
: "r" (_zzq_default), "r" (&_zzq_args[0]) \
: "$11", "$12", "memory"); \
_zzq_result; \
})
#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \
{ volatile OrigFn* _zzq_orig = &(_zzq_rlval); \
volatile unsigned int __addr; \
__asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
/* %t9 = guest_NRADDR */ \
"or $14, $14, $14\n\t" \
"move %0, $11" /*result*/ \
: "=r" (__addr) \
: \
: "$11" \
); \
_zzq_orig->nraddr = __addr; \
}
#define VALGRIND_CALL_NOREDIR_T9 \
__SPECIAL_INSTRUCTION_PREAMBLE \
/* call-noredir *%t9 */ \
"or $15, $15, $15\n\t"
#define VALGRIND_VEX_INJECT_IR() \
do { \
__asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
"or $11, $11, $11\n\t" \
); \
} while (0)
#endif /* PLAT_mips32_linux */
/* ------------------------- mips64-linux ---------------- */
#if defined(PLAT_mips64_linux)
typedef
struct {
unsigned long nraddr; /* where's the code? */
}
OrigFn;
/* dsll $0,$0, 3
 * dsll $0,$0, 13
 * dsll $0,$0, 29
 * dsll $0,$0, 19*/
#define __SPECIAL_INSTRUCTION_PREAMBLE \
"dsll $0,$0, 3 ; dsll $0,$0,13\n\t" \
"dsll $0,$0,29 ; dsll $0,$0,19\n\t"
#define VALGRIND_DO_CLIENT_REQUEST_EXPR( \
_zzq_default, _zzq_request, \
_zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \
__extension__ \
({ volatile unsigned long int _zzq_args[6]; \
volatile unsigned long int _zzq_result; \
_zzq_args[0] = (unsigned long int)(_zzq_request); \
_zzq_args[1] = (unsigned long int)(_zzq_arg1); \
_zzq_args[2] = (unsigned long int)(_zzq_arg2); \
_zzq_args[3] = (unsigned long int)(_zzq_arg3); \
_zzq_args[4] = (unsigned long int)(_zzq_arg4); \
_zzq_args[5] = (unsigned long int)(_zzq_arg5); \
__asm__ volatile("move $11, %1\n\t" /*default*/ \
"move $12, %2\n\t" /*ptr*/ \
__SPECIAL_INSTRUCTION_PREAMBLE \
/* $11 = client_request ( $12 ) */ \
"or $13, $13, $13\n\t" \
"move %0, $11\n\t" /*result*/ \
: "=r" (_zzq_result) \
: "r" (_zzq_default), "r" (&_zzq_args[0]) \
: "$11", "$12", "memory"); \
_zzq_result; \
})
#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \
{ volatile OrigFn* _zzq_orig = &(_zzq_rlval); \
volatile unsigned long int __addr; \
__asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
/* $11 = guest_NRADDR */ \
"or $14, $14, $14\n\t" \
"move %0, $11" /*result*/ \
: "=r" (__addr) \
: \
: "$11"); \
_zzq_orig->nraddr = __addr; \
}
#define VALGRIND_CALL_NOREDIR_T9 \
__SPECIAL_INSTRUCTION_PREAMBLE \
/* call-noredir $25 */ \
"or $15, $15, $15\n\t"
#define VALGRIND_VEX_INJECT_IR() \
do { \
__asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
"or $11, $11, $11\n\t" \
); \
} while (0)
#endif /* PLAT_mips64_linux */
/* Insert assembly code for other platforms here... */
#endif /* NVALGRIND */
/* ------------------------------------------------------------------ */
/* PLATFORM SPECIFICS for FUNCTION WRAPPING. This is all very          */
/* ugly. It's the least-worst tradeoff I can think of.                 */
/* ------------------------------------------------------------------ */
/* This section defines magic (a.k.a appalling-hack) macros for doing
   guaranteed-no-redirection macros, so as to get from function
   wrappers to the functions they are wrapping. The whole point is to
   construct standard call sequences, but to do the call itself with a
   special no-redirect call pseudo-instruction that the JIT
   understands and handles specially. This section is long and
   repetitious, and I can't see a way to make it shorter.
   The naming scheme is as follows:
   CALL_FN_{W,v}_{v,W,WW,WWW,WWWW,5W,6W,7W,etc}
   'W' stands for "word" and 'v' for "void". Hence there are
   different macros for calling arity 0, 1, 2, 3, 4, etc, functions,
   and for each, the possibility of returning a word-typed result, or
   no result.
*/
/* Use these to write the name of your wrapper. NOTE: duplicates
   VG_WRAP_FUNCTION_Z{U,Z} in pub_tool_redir.h. NOTE also: inserts
   the default behaviour equivalence class tag "0000" into the name.
   See pub_tool_redir.h for details -- normally you don't need to
   think about this, though. */
/* Use an extra level of macroisation so as to ensure the soname/fnname
   args are fully macro-expanded before pasting them together. */
#define VG_CONCAT4(_aa,_bb,_cc,_dd) _aa##_bb##_cc##_dd
#define I_WRAP_SONAME_FNNAME_ZU(soname,fnname) \
VG_CONCAT4(_vgw00000ZU_,soname,_,fnname)
#define I_WRAP_SONAME_FNNAME_ZZ(soname,fnname) \
VG_CONCAT4(_vgw00000ZZ_,soname,_,fnname)
/* Use this macro from within a wrapper function to collect the
   context (address and possibly other info) of the original function.
   Once you have that you can then use it in one of the CALL_FN_
   macros. The type of the argument _lval is OrigFn. */
#define VALGRIND_GET_ORIG_FN(_lval) VALGRIND_GET_NR_CONTEXT(_lval)
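/* For example, a sketch (after the example in the Valgrind manual) of
   a wrapper for a two-argument function 'foo' in an object with an
   empty soname, such as the main executable ("NONE" matches such
   objects):

      int I_WRAP_SONAME_FNNAME_ZU(NONE,foo) ( int x, int y )
      {
         int    result;
         OrigFn fn;
         VALGRIND_GET_ORIG_FN(fn);
         CALL_FN_W_WW(result, fn, x, y);
         return result;
      }
*/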
/* Also provide end-user facilities for function replacement, rather
   than wrapping. A replacement function differs from a wrapper in
   that it has no way to get hold of the original function being
   called, and hence no way to call onwards to it. In a replacement
   function, VALGRIND_GET_ORIG_FN always returns zero. */
#define I_REPLACE_SONAME_FNNAME_ZU(soname,fnname) \
VG_CONCAT4(_vgr00000ZU_,soname,_,fnname)
#define I_REPLACE_SONAME_FNNAME_ZZ(soname,fnname) \
VG_CONCAT4(_vgr00000ZZ_,soname,_,fnname)
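/* A replacement for the same hypothetical 'foo' would be declared with
   the macro above instead; since there is no way to call onwards to
   the original, it must compute the result itself (sketch):

      int I_REPLACE_SONAME_FNNAME_ZU(NONE,foo) ( int x, int y )
      {
         return x + y;
      }
*/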
  942. /* Derivatives of the main macros below, for calling functions
  943. returning void. */
  944. #define CALL_FN_v_v(fnptr) \
  945. do { volatile unsigned long _junk; \
  946. CALL_FN_W_v(_junk,fnptr); } while (0)
  947. #define CALL_FN_v_W(fnptr, arg1) \
  948. do { volatile unsigned long _junk; \
  949. CALL_FN_W_W(_junk,fnptr,arg1); } while (0)
  950. #define CALL_FN_v_WW(fnptr, arg1,arg2) \
  951. do { volatile unsigned long _junk; \
  952. CALL_FN_W_WW(_junk,fnptr,arg1,arg2); } while (0)
  953. #define CALL_FN_v_WWW(fnptr, arg1,arg2,arg3) \
  954. do { volatile unsigned long _junk; \
  955. CALL_FN_W_WWW(_junk,fnptr,arg1,arg2,arg3); } while (0)
  956. #define CALL_FN_v_WWWW(fnptr, arg1,arg2,arg3,arg4) \
  957. do { volatile unsigned long _junk; \
  958. CALL_FN_W_WWWW(_junk,fnptr,arg1,arg2,arg3,arg4); } while (0)
  959. #define CALL_FN_v_5W(fnptr, arg1,arg2,arg3,arg4,arg5) \
  960. do { volatile unsigned long _junk; \
  961. CALL_FN_W_5W(_junk,fnptr,arg1,arg2,arg3,arg4,arg5); } while (0)
  962. #define CALL_FN_v_6W(fnptr, arg1,arg2,arg3,arg4,arg5,arg6) \
  963. do { volatile unsigned long _junk; \
  964. CALL_FN_W_6W(_junk,fnptr,arg1,arg2,arg3,arg4,arg5,arg6); } while (0)
  965. #define CALL_FN_v_7W(fnptr, arg1,arg2,arg3,arg4,arg5,arg6,arg7) \
  966. do { volatile unsigned long _junk; \
  967. CALL_FN_W_7W(_junk,fnptr,arg1,arg2,arg3,arg4,arg5,arg6,arg7); } while (0)
  968. /* ----------------- x86-{linux,darwin,solaris} ---------------- */
  969. #if defined(PLAT_x86_linux) || defined(PLAT_x86_darwin) \
  970. || defined(PLAT_x86_solaris)
  971. /* These regs are trashed by the hidden call. No need to mention eax
  972. as gcc can already see that, plus causes gcc to bomb. */
  973. #define __CALLER_SAVED_REGS /*"eax"*/ "ecx", "edx"
  974. /* Macros to save and align the stack before making a function
  975. call and restore it afterwards as gcc may not keep the stack
  976. pointer aligned if it doesn't realise calls are being made
  977. to other functions. */
  978. #define VALGRIND_ALIGN_STACK \
  979. "movl %%esp,%%edi\n\t" \
  980. "andl $0xfffffff0,%%esp\n\t"
  981. #define VALGRIND_RESTORE_STACK \
  982. "movl %%edi,%%esp\n\t"
  983. /* These CALL_FN_ macros assume that on x86-linux, sizeof(unsigned
  984. long) == 4. */
  985. #define CALL_FN_W_v(lval, orig) \
  986. do { \
  987. volatile OrigFn _orig = (orig); \
  988. volatile unsigned long _argvec[1]; \
  989. volatile unsigned long _res; \
  990. _argvec[0] = (unsigned long)_orig.nraddr; \
  991. __asm__ volatile( \
  992. VALGRIND_ALIGN_STACK \
  993. "movl (%%eax), %%eax\n\t" /* target->%eax */ \
  994. VALGRIND_CALL_NOREDIR_EAX \
  995. VALGRIND_RESTORE_STACK \
  996. : /*out*/ "=a" (_res) \
  997. : /*in*/ "a" (&_argvec[0]) \
  998. : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi" \
  999. ); \
  1000. lval = (__typeof__(lval)) _res; \
  1001. } while (0)
  1002. #define CALL_FN_W_W(lval, orig, arg1) \
  1003. do { \
  1004. volatile OrigFn _orig = (orig); \
  1005. volatile unsigned long _argvec[2]; \
  1006. volatile unsigned long _res; \
  1007. _argvec[0] = (unsigned long)_orig.nraddr; \
  1008. _argvec[1] = (unsigned long)(arg1); \
  1009. __asm__ volatile( \
  1010. VALGRIND_ALIGN_STACK \
  1011. "subl $12, %%esp\n\t" \
  1012. "pushl 4(%%eax)\n\t" \
  1013. "movl (%%eax), %%eax\n\t" /* target->%eax */ \
  1014. VALGRIND_CALL_NOREDIR_EAX \
  1015. VALGRIND_RESTORE_STACK \
  1016. : /*out*/ "=a" (_res) \
  1017. : /*in*/ "a" (&_argvec[0]) \
  1018. : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi" \
  1019. ); \
  1020. lval = (__typeof__(lval)) _res; \
  1021. } while (0)
  1022. #define CALL_FN_W_WW(lval, orig, arg1,arg2) \
  1023. do { \
  1024. volatile OrigFn _orig = (orig); \
  1025. volatile unsigned long _argvec[3]; \
  1026. volatile unsigned long _res; \
  1027. _argvec[0] = (unsigned long)_orig.nraddr; \
  1028. _argvec[1] = (unsigned long)(arg1); \
  1029. _argvec[2] = (unsigned long)(arg2); \
  1030. __asm__ volatile( \
  1031. VALGRIND_ALIGN_STACK \
  1032. "subl $8, %%esp\n\t" \
  1033. "pushl 8(%%eax)\n\t" \
  1034. "pushl 4(%%eax)\n\t" \
  1035. "movl (%%eax), %%eax\n\t" /* target->%eax */ \
  1036. VALGRIND_CALL_NOREDIR_EAX \
  1037. VALGRIND_RESTORE_STACK \
  1038. : /*out*/ "=a" (_res) \
  1039. : /*in*/ "a" (&_argvec[0]) \
  1040. : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi" \
  1041. ); \
  1042. lval = (__typeof__(lval)) _res; \
  1043. } while (0)
  1044. #define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3) \
  1045. do { \
  1046. volatile OrigFn _orig = (orig); \
  1047. volatile unsigned long _argvec[4]; \
  1048. volatile unsigned long _res; \
  1049. _argvec[0] = (unsigned long)_orig.nraddr; \
  1050. _argvec[1] = (unsigned long)(arg1); \
  1051. _argvec[2] = (unsigned long)(arg2); \
  1052. _argvec[3] = (unsigned long)(arg3); \
  1053. __asm__ volatile( \
  1054. VALGRIND_ALIGN_STACK \
  1055. "subl $4, %%esp\n\t" \
  1056. "pushl 12(%%eax)\n\t" \
  1057. "pushl 8(%%eax)\n\t" \
  1058. "pushl 4(%%eax)\n\t" \
  1059. "movl (%%eax), %%eax\n\t" /* target->%eax */ \
  1060. VALGRIND_CALL_NOREDIR_EAX \
  1061. VALGRIND_RESTORE_STACK \
  1062. : /*out*/ "=a" (_res) \
  1063. : /*in*/ "a" (&_argvec[0]) \
  1064. : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi" \
  1065. ); \
  1066. lval = (__typeof__(lval)) _res; \
  1067. } while (0)
  1068. #define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4) \
  1069. do { \
  1070. volatile OrigFn _orig = (orig); \
  1071. volatile unsigned long _argvec[5]; \
  1072. volatile unsigned long _res; \
  1073. _argvec[0] = (unsigned long)_orig.nraddr; \
  1074. _argvec[1] = (unsigned long)(arg1); \
  1075. _argvec[2] = (unsigned long)(arg2); \
  1076. _argvec[3] = (unsigned long)(arg3); \
  1077. _argvec[4] = (unsigned long)(arg4); \
  1078. __asm__ volatile( \
  1079. VALGRIND_ALIGN_STACK \
  1080. "pushl 16(%%eax)\n\t" \
  1081. "pushl 12(%%eax)\n\t" \
  1082. "pushl 8(%%eax)\n\t" \
  1083. "pushl 4(%%eax)\n\t" \
  1084. "movl (%%eax), %%eax\n\t" /* target->%eax */ \
  1085. VALGRIND_CALL_NOREDIR_EAX \
  1086. VALGRIND_RESTORE_STACK \
  1087. : /*out*/ "=a" (_res) \
  1088. : /*in*/ "a" (&_argvec[0]) \
  1089. : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi" \
  1090. ); \
  1091. lval = (__typeof__(lval)) _res; \
  1092. } while (0)
  1093. #define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5) \
  1094. do { \
  1095. volatile OrigFn _orig = (orig); \
  1096. volatile unsigned long _argvec[6]; \
  1097. volatile unsigned long _res; \
  1098. _argvec[0] = (unsigned long)_orig.nraddr; \
  1099. _argvec[1] = (unsigned long)(arg1); \
  1100. _argvec[2] = (unsigned long)(arg2); \
  1101. _argvec[3] = (unsigned long)(arg3); \
  1102. _argvec[4] = (unsigned long)(arg4); \
  1103. _argvec[5] = (unsigned long)(arg5); \
  1104. __asm__ volatile( \
  1105. VALGRIND_ALIGN_STACK \
  1106. "subl $12, %%esp\n\t" \
  1107. "pushl 20(%%eax)\n\t" \
  1108. "pushl 16(%%eax)\n\t" \
  1109. "pushl 12(%%eax)\n\t" \
  1110. "pushl 8(%%eax)\n\t" \
  1111. "pushl 4(%%eax)\n\t" \
  1112. "movl (%%eax), %%eax\n\t" /* target->%eax */ \
  1113. VALGRIND_CALL_NOREDIR_EAX \
  1114. VALGRIND_RESTORE_STACK \
  1115. : /*out*/ "=a" (_res) \
  1116. : /*in*/ "a" (&_argvec[0]) \
  1117. : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi" \
  1118. ); \
  1119. lval = (__typeof__(lval)) _res; \
  1120. } while (0)
  1121. #define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6) \
  1122. do { \
  1123. volatile OrigFn _orig = (orig); \
  1124. volatile unsigned long _argvec[7]; \
  1125. volatile unsigned long _res; \
  1126. _argvec[0] = (unsigned long)_orig.nraddr; \
  1127. _argvec[1] = (unsigned long)(arg1); \
  1128. _argvec[2] = (unsigned long)(arg2); \
  1129. _argvec[3] = (unsigned long)(arg3); \
  1130. _argvec[4] = (unsigned long)(arg4); \
  1131. _argvec[5] = (unsigned long)(arg5); \
  1132. _argvec[6] = (unsigned long)(arg6); \
  1133. __asm__ volatile( \
  1134. VALGRIND_ALIGN_STACK \
  1135. "subl $8, %%esp\n\t" \
  1136. "pushl 24(%%eax)\n\t" \
  1137. "pushl 20(%%eax)\n\t" \
  1138. "pushl 16(%%eax)\n\t" \
  1139. "pushl 12(%%eax)\n\t" \
  1140. "pushl 8(%%eax)\n\t" \
  1141. "pushl 4(%%eax)\n\t" \
  1142. "movl (%%eax), %%eax\n\t" /* target->%eax */ \
  1143. VALGRIND_CALL_NOREDIR_EAX \
  1144. VALGRIND_RESTORE_STACK \
  1145. : /*out*/ "=a" (_res) \
  1146. : /*in*/ "a" (&_argvec[0]) \
  1147. : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi" \
  1148. ); \
  1149. lval = (__typeof__(lval)) _res; \
  1150. } while (0)
  1151. #define CALL_FN_W_7W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
  1152. arg7) \
  1153. do { \
  1154. volatile OrigFn _orig = (orig); \
  1155. volatile unsigned long _argvec[8]; \
  1156. volatile unsigned long _res; \
  1157. _argvec[0] = (unsigned long)_orig.nraddr; \
  1158. _argvec[1] = (unsigned long)(arg1); \
  1159. _argvec[2] = (unsigned long)(arg2); \
  1160. _argvec[3] = (unsigned long)(arg3); \
  1161. _argvec[4] = (unsigned long)(arg4); \
  1162. _argvec[5] = (unsigned long)(arg5); \
  1163. _argvec[6] = (unsigned long)(arg6); \
  1164. _argvec[7] = (unsigned long)(arg7); \
  1165. __asm__ volatile( \
  1166. VALGRIND_ALIGN_STACK \
  1167. "subl $4, %%esp\n\t" \
  1168. "pushl 28(%%eax)\n\t" \
  1169. "pushl 24(%%eax)\n\t" \
  1170. "pushl 20(%%eax)\n\t" \
  1171. "pushl 16(%%eax)\n\t" \
  1172. "pushl 12(%%eax)\n\t" \
  1173. "pushl 8(%%eax)\n\t" \
  1174. "pushl 4(%%eax)\n\t" \
  1175. "movl (%%eax), %%eax\n\t" /* target->%eax */ \
  1176. VALGRIND_CALL_NOREDIR_EAX \
  1177. VALGRIND_RESTORE_STACK \
  1178. : /*out*/ "=a" (_res) \
  1179. : /*in*/ "a" (&_argvec[0]) \
  1180. : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi" \
  1181. ); \
  1182. lval = (__typeof__(lval)) _res; \
  1183. } while (0)
  1184. #define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
  1185. arg7,arg8) \
  1186. do { \
  1187. volatile OrigFn _orig = (orig); \
  1188. volatile unsigned long _argvec[9]; \
  1189. volatile unsigned long _res; \
  1190. _argvec[0] = (unsigned long)_orig.nraddr; \
  1191. _argvec[1] = (unsigned long)(arg1); \
  1192. _argvec[2] = (unsigned long)(arg2); \
  1193. _argvec[3] = (unsigned long)(arg3); \
  1194. _argvec[4] = (unsigned long)(arg4); \
  1195. _argvec[5] = (unsigned long)(arg5); \
  1196. _argvec[6] = (unsigned long)(arg6); \
  1197. _argvec[7] = (unsigned long)(arg7); \
  1198. _argvec[8] = (unsigned long)(arg8); \
  1199. __asm__ volatile( \
  1200. VALGRIND_ALIGN_STACK \
  1201. "pushl 32(%%eax)\n\t" \
  1202. "pushl 28(%%eax)\n\t" \
  1203. "pushl 24(%%eax)\n\t" \
  1204. "pushl 20(%%eax)\n\t" \
  1205. "pushl 16(%%eax)\n\t" \
  1206. "pushl 12(%%eax)\n\t" \
  1207. "pushl 8(%%eax)\n\t" \
  1208. "pushl 4(%%eax)\n\t" \
  1209. "movl (%%eax), %%eax\n\t" /* target->%eax */ \
  1210. VALGRIND_CALL_NOREDIR_EAX \
  1211. VALGRIND_RESTORE_STACK \
  1212. : /*out*/ "=a" (_res) \
  1213. : /*in*/ "a" (&_argvec[0]) \
  1214. : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi" \
  1215. ); \
  1216. lval = (__typeof__(lval)) _res; \
  1217. } while (0)
  1218. #define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
  1219. arg7,arg8,arg9) \
  1220. do { \
  1221. volatile OrigFn _orig = (orig); \
  1222. volatile unsigned long _argvec[10]; \
  1223. volatile unsigned long _res; \
  1224. _argvec[0] = (unsigned long)_orig.nraddr; \
  1225. _argvec[1] = (unsigned long)(arg1); \
  1226. _argvec[2] = (unsigned long)(arg2); \
  1227. _argvec[3] = (unsigned long)(arg3); \
  1228. _argvec[4] = (unsigned long)(arg4); \
  1229. _argvec[5] = (unsigned long)(arg5); \
  1230. _argvec[6] = (unsigned long)(arg6); \
  1231. _argvec[7] = (unsigned long)(arg7); \
  1232. _argvec[8] = (unsigned long)(arg8); \
  1233. _argvec[9] = (unsigned long)(arg9); \
  1234. __asm__ volatile( \
  1235. VALGRIND_ALIGN_STACK \
  1236. "subl $12, %%esp\n\t" \
  1237. "pushl 36(%%eax)\n\t" \
  1238. "pushl 32(%%eax)\n\t" \
  1239. "pushl 28(%%eax)\n\t" \
  1240. "pushl 24(%%eax)\n\t" \
  1241. "pushl 20(%%eax)\n\t" \
  1242. "pushl 16(%%eax)\n\t" \
  1243. "pushl 12(%%eax)\n\t" \
  1244. "pushl 8(%%eax)\n\t" \
  1245. "pushl 4(%%eax)\n\t" \
  1246. "movl (%%eax), %%eax\n\t" /* target->%eax */ \
  1247. VALGRIND_CALL_NOREDIR_EAX \
  1248. VALGRIND_RESTORE_STACK \
  1249. : /*out*/ "=a" (_res) \
  1250. : /*in*/ "a" (&_argvec[0]) \
  1251. : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi" \
  1252. ); \
  1253. lval = (__typeof__(lval)) _res; \
  1254. } while (0)
  1255. #define CALL_FN_W_10W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
  1256. arg7,arg8,arg9,arg10) \
  1257. do { \
  1258. volatile OrigFn _orig = (orig); \
  1259. volatile unsigned long _argvec[11]; \
  1260. volatile unsigned long _res; \
  1261. _argvec[0] = (unsigned long)_orig.nraddr; \
  1262. _argvec[1] = (unsigned long)(arg1); \
  1263. _argvec[2] = (unsigned long)(arg2); \
  1264. _argvec[3] = (unsigned long)(arg3); \
  1265. _argvec[4] = (unsigned long)(arg4); \
  1266. _argvec[5] = (unsigned long)(arg5); \
  1267. _argvec[6] = (unsigned long)(arg6); \
  1268. _argvec[7] = (unsigned long)(arg7); \
  1269. _argvec[8] = (unsigned long)(arg8); \
  1270. _argvec[9] = (unsigned long)(arg9); \
  1271. _argvec[10] = (unsigned long)(arg10); \
  1272. __asm__ volatile( \
  1273. VALGRIND_ALIGN_STACK \
  1274. "subl $8, %%esp\n\t" \
  1275. "pushl 40(%%eax)\n\t" \
  1276. "pushl 36(%%eax)\n\t" \
  1277. "pushl 32(%%eax)\n\t" \
  1278. "pushl 28(%%eax)\n\t" \
  1279. "pushl 24(%%eax)\n\t" \
  1280. "pushl 20(%%eax)\n\t" \
  1281. "pushl 16(%%eax)\n\t" \
  1282. "pushl 12(%%eax)\n\t" \
  1283. "pushl 8(%%eax)\n\t" \
  1284. "pushl 4(%%eax)\n\t" \
  1285. "movl (%%eax), %%eax\n\t" /* target->%eax */ \
  1286. VALGRIND_CALL_NOREDIR_EAX \
  1287. VALGRIND_RESTORE_STACK \
  1288. : /*out*/ "=a" (_res) \
  1289. : /*in*/ "a" (&_argvec[0]) \
  1290. : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi" \
  1291. ); \
  1292. lval = (__typeof__(lval)) _res; \
  1293. } while (0)
  1294. #define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5, \
  1295. arg6,arg7,arg8,arg9,arg10, \
  1296. arg11) \
  1297. do { \
  1298. volatile OrigFn _orig = (orig); \
  1299. volatile unsigned long _argvec[12]; \
  1300. volatile unsigned long _res; \
  1301. _argvec[0] = (unsigned long)_orig.nraddr; \
  1302. _argvec[1] = (unsigned long)(arg1); \
  1303. _argvec[2] = (unsigned long)(arg2); \
  1304. _argvec[3] = (unsigned long)(arg3); \
  1305. _argvec[4] = (unsigned long)(arg4); \
  1306. _argvec[5] = (unsigned long)(arg5); \
  1307. _argvec[6] = (unsigned long)(arg6); \
  1308. _argvec[7] = (unsigned long)(arg7); \
  1309. _argvec[8] = (unsigned long)(arg8); \
  1310. _argvec[9] = (unsigned long)(arg9); \
  1311. _argvec[10] = (unsigned long)(arg10); \
  1312. _argvec[11] = (unsigned long)(arg11); \
  1313. __asm__ volatile( \
  1314. VALGRIND_ALIGN_STACK \
  1315. "subl $4, %%esp\n\t" \
         "pushl 44(%%eax)\n\t" \
         "pushl 40(%%eax)\n\t" \
         "pushl 36(%%eax)\n\t" \
         "pushl 32(%%eax)\n\t" \
         "pushl 28(%%eax)\n\t" \
         "pushl 24(%%eax)\n\t" \
         "pushl 20(%%eax)\n\t" \
         "pushl 16(%%eax)\n\t" \
         "pushl 12(%%eax)\n\t" \
         "pushl 8(%%eax)\n\t" \
         "pushl 4(%%eax)\n\t" \
         "movl (%%eax), %%eax\n\t"  /* target->%eax */ \
         VALGRIND_CALL_NOREDIR_EAX \
         VALGRIND_RESTORE_STACK \
         : /*out*/ "=a" (_res) \
         : /*in*/ "a" (&_argvec[0]) \
         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi" \
      ); \
      lval = (__typeof__(lval)) _res; \
   } while (0)

#define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5, \
                      arg6,arg7,arg8,arg9,arg10, \
                      arg11,arg12) \
   do { \
      volatile OrigFn _orig = (orig); \
      volatile unsigned long _argvec[13]; \
      volatile unsigned long _res; \
      _argvec[0] = (unsigned long)_orig.nraddr; \
      _argvec[1] = (unsigned long)(arg1); \
      _argvec[2] = (unsigned long)(arg2); \
      _argvec[3] = (unsigned long)(arg3); \
      _argvec[4] = (unsigned long)(arg4); \
      _argvec[5] = (unsigned long)(arg5); \
      _argvec[6] = (unsigned long)(arg6); \
      _argvec[7] = (unsigned long)(arg7); \
      _argvec[8] = (unsigned long)(arg8); \
      _argvec[9] = (unsigned long)(arg9); \
      _argvec[10] = (unsigned long)(arg10); \
      _argvec[11] = (unsigned long)(arg11); \
      _argvec[12] = (unsigned long)(arg12); \
      __asm__ volatile( \
         VALGRIND_ALIGN_STACK \
         "pushl 48(%%eax)\n\t" \
         "pushl 44(%%eax)\n\t" \
         "pushl 40(%%eax)\n\t" \
         "pushl 36(%%eax)\n\t" \
         "pushl 32(%%eax)\n\t" \
         "pushl 28(%%eax)\n\t" \
         "pushl 24(%%eax)\n\t" \
         "pushl 20(%%eax)\n\t" \
         "pushl 16(%%eax)\n\t" \
         "pushl 12(%%eax)\n\t" \
         "pushl 8(%%eax)\n\t" \
         "pushl 4(%%eax)\n\t" \
         "movl (%%eax), %%eax\n\t"  /* target->%eax */ \
         VALGRIND_CALL_NOREDIR_EAX \
         VALGRIND_RESTORE_STACK \
         : /*out*/ "=a" (_res) \
         : /*in*/ "a" (&_argvec[0]) \
         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi" \
      ); \
      lval = (__typeof__(lval)) _res; \
   } while (0)

#endif /* PLAT_x86_linux || PLAT_x86_darwin || PLAT_x86_solaris */

/* ---------------- amd64-{linux,darwin,solaris} --------------- */

#if defined(PLAT_amd64_linux) || defined(PLAT_amd64_darwin) \
    || defined(PLAT_amd64_solaris)

/* ARGREGS: rdi rsi rdx rcx r8 r9 (the rest on stack in R-to-L order) */

/* These regs are trashed by the hidden call. */
#define __CALLER_SAVED_REGS /*"rax",*/ "rcx", "rdx", "rsi", \
                            "rdi", "r8", "r9", "r10", "r11"
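
/* Illustrative example (added): for a call f(a1,...,a8), the macros
   below place a1..a6 in rdi,rsi,rdx,rcx,r8,r9 and push a8 first,
   then a7, so that a7 ends up at the lower stack address, as the
   R-to-L order above requires. */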

/* This is all pretty complex.  It's so as to make stack unwinding
   work reliably.  See bug 243270.  The basic problem is the sub and
   add of 128 of %rsp in all of the following macros.  If gcc believes
   the CFA is in %rsp, then unwinding may fail, because what's at the
   CFA is not what gcc "expected" when it constructs the CFIs for the
   places where the macros are instantiated.

   But we can't just add a CFI annotation to increase the CFA offset
   by 128, to match the sub of 128 from %rsp, because we don't know
   whether gcc has chosen %rsp as the CFA at that point, or whether it
   has chosen some other register (eg, %rbp).  In the latter case,
   adding a CFI annotation to change the CFA offset is simply wrong.

   So the solution is to get hold of the CFA using
   __builtin_dwarf_cfa(), put it in a known register, and add a
   CFI annotation to say what the register is.  We choose %rbp for
   this (perhaps perversely), because:

   (1) %rbp is already subject to unwinding.  If a new register was
       chosen then the unwinder would have to unwind it in all stack
       traces, which is expensive, and

   (2) %rbp is already subject to precise exception updates in the
       JIT.  If a new register was chosen, we'd have to have precise
       exceptions for it too, which reduces performance of the
       generated code.

   However .. one extra complication.  We can't just whack the result
   of __builtin_dwarf_cfa() into %rbp and then add %rbp to the
   list of trashed registers at the end of the inline assembly
   fragments; gcc won't allow %rbp to appear in that list.  Hence
   instead we need to stash %rbp in %r15 for the duration of the asm,
   and say that %r15 is trashed instead.  gcc seems happy to go with
   that.

   Oh .. and this all needs to be conditionalised so that it is
   unchanged from before this commit, when compiled with older gccs
   that don't support __builtin_dwarf_cfa.  Furthermore, since
   this header file is freestanding, it has to be independent of
   config.h, and so the following conditionalisation cannot depend on
   configure time checks.

   Although it's not clear from
   'defined(__GNUC__) && defined(__GCC_HAVE_DWARF2_CFI_ASM)',
   this expression excludes Darwin.
   .cfi directives in Darwin assembly appear to be completely
   different and I haven't investigated how they work.

   For even more entertainment value, note we have to use the
   completely undocumented __builtin_dwarf_cfa(), which appears to
   really compute the CFA, whereas __builtin_frame_address(0) claims
   to but actually doesn't.  See
   https://bugs.kde.org/show_bug.cgi?id=243270#c47
*/
#if defined(__GNUC__) && defined(__GCC_HAVE_DWARF2_CFI_ASM)
#  define __FRAME_POINTER \
      ,"r"(__builtin_dwarf_cfa())
#  define VALGRIND_CFI_PROLOGUE \
      "movq %%rbp, %%r15\n\t" \
      "movq %2, %%rbp\n\t" \
      ".cfi_remember_state\n\t" \
      ".cfi_def_cfa rbp, 0\n\t"
#  define VALGRIND_CFI_EPILOGUE \
      "movq %%r15, %%rbp\n\t" \
      ".cfi_restore_state\n\t"
#else
#  define __FRAME_POINTER
#  define VALGRIND_CFI_PROLOGUE
#  define VALGRIND_CFI_EPILOGUE
#endif
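
/* Roughly, the DWARF-CFI branch above expands to this around each
   hidden call (illustrative sketch only):

      movq %rbp, %r15      ; stash rbp - gcc won't let it be clobber-listed
      movq <cfa>, %rbp     ; <cfa> is input %2, ie __builtin_dwarf_cfa()
      .cfi_remember_state
      .cfi_def_cfa rbp, 0  ; the unwinder now finds the CFA via rbp
      ... aligned call sequence ...
      movq %r15, %rbp      ; restore rbp
      .cfi_restore_state
*/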

/* Macros to save and align the stack before making a function
   call and restore it afterwards as gcc may not keep the stack
   pointer aligned if it doesn't realise calls are being made
   to other functions. */

#define VALGRIND_ALIGN_STACK \
   "movq %%rsp,%%r14\n\t" \
   "andq $0xfffffffffffffff0,%%rsp\n\t"
#define VALGRIND_RESTORE_STACK \
   "movq %%r14,%%rsp\n\t"
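
/* Worked example (added): if %rsp arrived at, say, 0x00007f0012345678,
   the andq masks off the low four bits, giving 0x00007f0012345670,
   which is 16-byte aligned; the original value is kept in %r14 and
   put back by VALGRIND_RESTORE_STACK. */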

/* These CALL_FN_ macros assume that on amd64-linux, sizeof(unsigned
   long) == 8. */

/* NB 9 Sept 07.  There is a nasty kludge here in all these CALL_FN_
   macros.  In order not to trash the stack redzone, we need to drop
   %rsp by 128 before the hidden call, and restore it afterwards.  The
   nastiness is that it is only by luck that the stack still appears
   to be unwindable during the hidden call - since then the behaviour
   of any routine using this macro does not match what the CFI data
   says.  Sigh.

   Why is this important?  Imagine that a wrapper has a stack-allocated
   local, and passes a pointer to it to the hidden call.  Because gcc
   does not know about the hidden call, it may allocate that local in
   the redzone.  Unfortunately the hidden call may then trash it before
   the wrapper comes to use it.  So we must step clear of the redzone,
   for the duration of the hidden call, to make it safe.

   Probably the same problem afflicts the other redzone-style ABIs too
   (ppc64-linux); but for those, the stack is self-describing (none of
   this CFI nonsense) so at least messing with the stack pointer
   doesn't give a danger of a non-unwindable stack. */
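
/* Illustrative hazard (added; hypothetical wrapper, where 'fn' is an
   OrigFn obtained via VALGRIND_GET_ORIG_FN):

      long wrapper(void) {
         long r, local = 42;    // gcc may place 'local' in the redzone
         CALL_FN_W_W(r, fn, (unsigned long)&local);
         return r + local;      // without the "subq $128" below, the
      }                         // hidden call could have trashed 'local'
*/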

#define CALL_FN_W_v(lval, orig) \
   do { \
      volatile OrigFn _orig = (orig); \
      volatile unsigned long _argvec[1]; \
      volatile unsigned long _res; \
      _argvec[0] = (unsigned long)_orig.nraddr; \
      __asm__ volatile( \
         VALGRIND_CFI_PROLOGUE \
         VALGRIND_ALIGN_STACK \
         "subq $128,%%rsp\n\t" \
         "movq (%%rax), %%rax\n\t"  /* target->%rax */ \
         VALGRIND_CALL_NOREDIR_RAX \
         VALGRIND_RESTORE_STACK \
         VALGRIND_CFI_EPILOGUE \
         : /*out*/ "=a" (_res) \
         : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r14", "r15" \
      ); \
      lval = (__typeof__(lval)) _res; \
   } while (0)

#define CALL_FN_W_W(lval, orig, arg1) \
   do { \
      volatile OrigFn _orig = (orig); \
      volatile unsigned long _argvec[2]; \
      volatile unsigned long _res; \
      _argvec[0] = (unsigned long)_orig.nraddr; \
      _argvec[1] = (unsigned long)(arg1); \
      __asm__ volatile( \
         VALGRIND_CFI_PROLOGUE \
         VALGRIND_ALIGN_STACK \
         "subq $128,%%rsp\n\t" \
         "movq 8(%%rax), %%rdi\n\t" \
         "movq (%%rax), %%rax\n\t"  /* target->%rax */ \
         VALGRIND_CALL_NOREDIR_RAX \
         VALGRIND_RESTORE_STACK \
         VALGRIND_CFI_EPILOGUE \
         : /*out*/ "=a" (_res) \
         : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r14", "r15" \
      ); \
      lval = (__typeof__(lval)) _res; \
   } while (0)
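
/* Example use of CALL_FN_W_W (added; 'foo' and the soname encoding
   'libfooZdsoZa' are hypothetical - see the function-wrapping macros
   earlier in this file):

      int I_WRAP_SONAME_FNNAME_ZU(libfooZdsoZa, foo)(int x)
      {
         int    r;
         OrigFn fn;
         VALGRIND_GET_ORIG_FN(fn);
         CALL_FN_W_W(r, fn, x);
         return r;
      }

   The macro copies x into _argvec[1], loads it into %rdi, calls the
   uninstrumented foo with redirection disabled, and leaves the %rax
   result in r. */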

#define CALL_FN_W_WW(lval, orig, arg1,arg2) \
   do { \
      volatile OrigFn _orig = (orig); \
      volatile unsigned long _argvec[3]; \
      volatile unsigned long _res; \
      _argvec[0] = (unsigned long)_orig.nraddr; \
      _argvec[1] = (unsigned long)(arg1); \
      _argvec[2] = (unsigned long)(arg2); \
      __asm__ volatile( \
         VALGRIND_CFI_PROLOGUE \
         VALGRIND_ALIGN_STACK \
         "subq $128,%%rsp\n\t" \
         "movq 16(%%rax), %%rsi\n\t" \
         "movq 8(%%rax), %%rdi\n\t" \
         "movq (%%rax), %%rax\n\t"  /* target->%rax */ \
         VALGRIND_CALL_NOREDIR_RAX \
         VALGRIND_RESTORE_STACK \
         VALGRIND_CFI_EPILOGUE \
         : /*out*/ "=a" (_res) \
         : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r14", "r15" \
      ); \
      lval = (__typeof__(lval)) _res; \
   } while (0)

#define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3) \
   do { \
      volatile OrigFn _orig = (orig); \
      volatile unsigned long _argvec[4]; \
      volatile unsigned long _res; \
      _argvec[0] = (unsigned long)_orig.nraddr; \
      _argvec[1] = (unsigned long)(arg1); \
      _argvec[2] = (unsigned long)(arg2); \
      _argvec[3] = (unsigned long)(arg3); \
      __asm__ volatile( \
         VALGRIND_CFI_PROLOGUE \
         VALGRIND_ALIGN_STACK \
         "subq $128,%%rsp\n\t" \
         "movq 24(%%rax), %%rdx\n\t" \
         "movq 16(%%rax), %%rsi\n\t" \
         "movq 8(%%rax), %%rdi\n\t" \
         "movq (%%rax), %%rax\n\t"  /* target->%rax */ \
         VALGRIND_CALL_NOREDIR_RAX \
         VALGRIND_RESTORE_STACK \
         VALGRIND_CFI_EPILOGUE \
         : /*out*/ "=a" (_res) \
         : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r14", "r15" \
      ); \
      lval = (__typeof__(lval)) _res; \
   } while (0)

#define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4) \
   do { \
      volatile OrigFn _orig = (orig); \
      volatile unsigned long _argvec[5]; \
      volatile unsigned long _res; \
      _argvec[0] = (unsigned long)_orig.nraddr; \
      _argvec[1] = (unsigned long)(arg1); \
      _argvec[2] = (unsigned long)(arg2); \
      _argvec[3] = (unsigned long)(arg3); \
      _argvec[4] = (unsigned long)(arg4); \
      __asm__ volatile( \
         VALGRIND_CFI_PROLOGUE \
         VALGRIND_ALIGN_STACK \
         "subq $128,%%rsp\n\t" \
         "movq 32(%%rax), %%rcx\n\t" \
         "movq 24(%%rax), %%rdx\n\t" \
         "movq 16(%%rax), %%rsi\n\t" \
         "movq 8(%%rax), %%rdi\n\t" \
         "movq (%%rax), %%rax\n\t"  /* target->%rax */ \
         VALGRIND_CALL_NOREDIR_RAX \
         VALGRIND_RESTORE_STACK \
         VALGRIND_CFI_EPILOGUE \
         : /*out*/ "=a" (_res) \
         : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r14", "r15" \
      ); \
      lval = (__typeof__(lval)) _res; \
   } while (0)

#define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5) \
   do { \
      volatile OrigFn _orig = (orig); \
      volatile unsigned long _argvec[6]; \
      volatile unsigned long _res; \
      _argvec[0] = (unsigned long)_orig.nraddr; \
      _argvec[1] = (unsigned long)(arg1); \
      _argvec[2] = (unsigned long)(arg2); \
      _argvec[3] = (unsigned long)(arg3); \
      _argvec[4] = (unsigned long)(arg4); \
      _argvec[5] = (unsigned long)(arg5); \
      __asm__ volatile( \
         VALGRIND_CFI_PROLOGUE \
         VALGRIND_ALIGN_STACK \
         "subq $128,%%rsp\n\t" \
         "movq 40(%%rax), %%r8\n\t" \
         "movq 32(%%rax), %%rcx\n\t" \
         "movq 24(%%rax), %%rdx\n\t" \
         "movq 16(%%rax), %%rsi\n\t" \
         "movq 8(%%rax), %%rdi\n\t" \
         "movq (%%rax), %%rax\n\t"  /* target->%rax */ \
         VALGRIND_CALL_NOREDIR_RAX \
         VALGRIND_RESTORE_STACK \
         VALGRIND_CFI_EPILOGUE \
         : /*out*/ "=a" (_res) \
         : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r14", "r15" \
      ); \
      lval = (__typeof__(lval)) _res; \
   } while (0)

#define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6) \
   do { \
      volatile OrigFn _orig = (orig); \
      volatile unsigned long _argvec[7]; \
      volatile unsigned long _res; \
      _argvec[0] = (unsigned long)_orig.nraddr; \
      _argvec[1] = (unsigned long)(arg1); \
      _argvec[2] = (unsigned long)(arg2); \
      _argvec[3] = (unsigned long)(arg3); \
      _argvec[4] = (unsigned long)(arg4); \
      _argvec[5] = (unsigned long)(arg5); \
      _argvec[6] = (unsigned long)(arg6); \
      __asm__ volatile( \
         VALGRIND_CFI_PROLOGUE \
         VALGRIND_ALIGN_STACK \
         "subq $128,%%rsp\n\t" \
         "movq 48(%%rax), %%r9\n\t" \
         "movq 40(%%rax), %%r8\n\t" \
         "movq 32(%%rax), %%rcx\n\t" \
         "movq 24(%%rax), %%rdx\n\t" \
         "movq 16(%%rax), %%rsi\n\t" \
         "movq 8(%%rax), %%rdi\n\t" \
         "movq (%%rax), %%rax\n\t"  /* target->%rax */ \
         VALGRIND_CALL_NOREDIR_RAX \
         VALGRIND_RESTORE_STACK \
         VALGRIND_CFI_EPILOGUE \
         : /*out*/ "=a" (_res) \
         : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r14", "r15" \
      ); \
      lval = (__typeof__(lval)) _res; \
   } while (0)

#define CALL_FN_W_7W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
                     arg7) \
   do { \
      volatile OrigFn _orig = (orig); \
      volatile unsigned long _argvec[8]; \
      volatile unsigned long _res; \
      _argvec[0] = (unsigned long)_orig.nraddr; \
      _argvec[1] = (unsigned long)(arg1); \
      _argvec[2] = (unsigned long)(arg2); \
      _argvec[3] = (unsigned long)(arg3); \
      _argvec[4] = (unsigned long)(arg4); \
      _argvec[5] = (unsigned long)(arg5); \
      _argvec[6] = (unsigned long)(arg6); \
      _argvec[7] = (unsigned long)(arg7); \
      __asm__ volatile( \
         VALGRIND_CFI_PROLOGUE \
         VALGRIND_ALIGN_STACK \
         "subq $136,%%rsp\n\t" \
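         /* note (added): 136 + one 8-byte push = 144, so %rsp \
            stays 16-aligned at the call */ \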
         "pushq 56(%%rax)\n\t" \
         "movq 48(%%rax), %%r9\n\t" \
         "movq 40(%%rax), %%r8\n\t" \
         "movq 32(%%rax), %%rcx\n\t" \
         "movq 24(%%rax), %%rdx\n\t" \
         "movq 16(%%rax), %%rsi\n\t" \
         "movq 8(%%rax), %%rdi\n\t" \
         "movq (%%rax), %%rax\n\t"  /* target->%rax */ \
         VALGRIND_CALL_NOREDIR_RAX \
         VALGRIND_RESTORE_STACK \
         VALGRIND_CFI_EPILOGUE \
         : /*out*/ "=a" (_res) \
         : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r14", "r15" \
      ); \
      lval = (__typeof__(lval)) _res; \
   } while (0)

#define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
                     arg7,arg8) \
   do { \
      volatile OrigFn _orig = (orig); \
      volatile unsigned long _argvec[9]; \
      volatile unsigned long _res; \
      _argvec[0] = (unsigned long)_orig.nraddr; \
      _argvec[1] = (unsigned long)(arg1); \
      _argvec[2] = (unsigned long)(arg2); \
      _argvec[3] = (unsigned long)(arg3); \
      _argvec[4] = (unsigned long)(arg4); \
      _argvec[5] = (unsigned long)(arg5); \
      _argvec[6] = (unsigned long)(arg6); \
      _argvec[7] = (unsigned long)(arg7); \
      _argvec[8] = (unsigned long)(arg8); \
      __asm__ volatile( \
         VALGRIND_CFI_PROLOGUE \
         VALGRIND_ALIGN_STACK \
         "subq $128,%%rsp\n\t" \
         "pushq 64(%%rax)\n\t" \
         "pushq 56(%%rax)\n\t" \
         "movq 48(%%rax), %%r9\n\t" \
         "movq 40(%%rax), %%r8\n\t" \
         "movq 32(%%rax), %%rcx\n\t" \
         "movq 24(%%rax), %%rdx\n\t" \
         "movq 16(%%rax), %%rsi\n\t" \
         "movq 8(%%rax), %%rdi\n\t" \
         "movq (%%rax), %%rax\n\t"  /* target->%rax */ \
         VALGRIND_CALL_NOREDIR_RAX \
         VALGRIND_RESTORE_STACK \
         VALGRIND_CFI_EPILOGUE \
         : /*out*/ "=a" (_res) \
         : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r14", "r15" \
      ); \
      lval = (__typeof__(lval)) _res; \
   } while (0)

#define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
                     arg7,arg8,arg9) \
   do { \
      volatile OrigFn _orig = (orig); \
      volatile unsigned long _argvec[10]; \
      volatile unsigned long _res; \
      _argvec[0] = (unsigned long)_orig.nraddr; \
      _argvec[1] = (unsigned long)(arg1); \
      _argvec[2] = (unsigned long)(arg2); \
      _argvec[3] = (unsigned long)(arg3); \
      _argvec[4] = (unsigned long)(arg4); \
      _argvec[5] = (unsigned long)(arg5); \
      _argvec[6] = (unsigned long)(arg6); \
      _argvec[7] = (unsigned long)(arg7); \
      _argvec[8] = (unsigned long)(arg8); \
      _argvec[9] = (unsigned long)(arg9); \
      __asm__ volatile( \
         VALGRIND_CFI_PROLOGUE \
         VALGRIND_ALIGN_STACK \
         "subq $136,%%rsp\n\t" \
         "pushq 72(%%rax)\n\t" \
         "pushq 64(%%rax)\n\t" \
         "pushq 56(%%rax)\n\t" \
         "movq 48(%%rax), %%r9\n\t" \
         "movq 40(%%rax), %%r8\n\t" \
         "movq 32(%%rax), %%rcx\n\t" \
         "movq 24(%%rax), %%rdx\n\t" \
         "movq 16(%%rax), %%rsi\n\t" \