loader.c 20 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814
  1. /* Copyright (C) 2001, 2009, 2010, 2011, 2012
  2. * 2013, 2014 Free Software Foundation, Inc.
  3. *
  4. * This library is free software; you can redistribute it and/or
  5. * modify it under the terms of the GNU Lesser General Public License
  6. * as published by the Free Software Foundation; either version 3 of
  7. * the License, or (at your option) any later version.
  8. *
  9. * This library is distributed in the hope that it will be useful, but
  10. * WITHOUT ANY WARRANTY; without even the implied warranty of
  11. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  12. * Lesser General Public License for more details.
  13. *
  14. * You should have received a copy of the GNU Lesser General Public
  15. * License along with this library; if not, write to the Free Software
  16. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
  17. * 02110-1301 USA
  18. */
  19. #if HAVE_CONFIG_H
  20. # include <config.h>
  21. #endif
  22. #include <string.h>
  23. #include <fcntl.h>
  24. #include <unistd.h>
  25. #ifdef HAVE_SYS_MMAN_H
  26. #include <sys/mman.h>
  27. #endif
  28. #include <sys/stat.h>
  29. #include <sys/types.h>
  30. #include <assert.h>
  31. #include <alignof.h>
  32. #include <byteswap.h>
  33. #include <verify.h>
  34. #include <full-read.h>
  35. #include "_scm.h"
  36. #include "elf.h"
  37. #include "programs.h"
  38. #include "loader.h"
  39. /* This file contains the loader for Guile's on-disk format: ELF with
  40. some custom tags in the dynamic segment. */
  41. #if SIZEOF_SCM_T_BITS == 4
  42. #define Elf_Half Elf32_Half
  43. #define Elf_Word Elf32_Word
  44. #define Elf_Ehdr Elf32_Ehdr
  45. #define ELFCLASS ELFCLASS32
  46. #define Elf_Phdr Elf32_Phdr
  47. #define Elf_Dyn Elf32_Dyn
  48. #elif SIZEOF_SCM_T_BITS == 8
  49. #define Elf_Half Elf64_Half
  50. #define Elf_Word Elf64_Word
  51. #define Elf_Ehdr Elf64_Ehdr
  52. #define ELFCLASS ELFCLASS64
  53. #define Elf_Phdr Elf64_Phdr
  54. #define Elf_Dyn Elf64_Dyn
  55. #else
  56. #error
  57. #endif
  58. #define DT_LOGUILE 0x37146000 /* Start of Guile-specific */
  59. #define DT_GUILE_GC_ROOT 0x37146000 /* Offset of GC roots */
  60. #define DT_GUILE_GC_ROOT_SZ 0x37146001 /* Size in machine words of GC
  61. roots */
  62. #define DT_GUILE_ENTRY 0x37146002 /* Address of entry thunk */
  63. #define DT_GUILE_VM_VERSION 0x37146003 /* Bytecode version */
  64. #define DT_GUILE_FRAME_MAPS 0x37146004 /* Frame maps */
  65. #define DT_HIGUILE 0x37146fff /* End of Guile-specific */
  66. #ifdef WORDS_BIGENDIAN
  67. #define ELFDATA ELFDATA2MSB
  68. #else
  69. #define ELFDATA ELFDATA2LSB
  70. #endif
  71. static void register_elf (char *data, size_t len, char *frame_maps);
/* The kinds of bytecode this loader knows how to turn into procedures.
   Currently only the Guile 2.2 format is recognized (see
   DT_GUILE_VM_VERSION handling in process_dynamic_segment).  */
enum bytecode_kind
  {
    BYTECODE_KIND_NONE,
    BYTECODE_KIND_GUILE_2_2
  };
  77. static SCM
  78. pointer_to_procedure (enum bytecode_kind bytecode_kind, char *ptr)
  79. {
  80. switch (bytecode_kind)
  81. {
  82. case BYTECODE_KIND_GUILE_2_2:
  83. {
  84. return scm_i_make_program ((scm_t_uint32 *) ptr);
  85. }
  86. case BYTECODE_KIND_NONE:
  87. default:
  88. abort ();
  89. }
  90. }
/* Validate the ELF header at HEADER against what this loader can
   consume: native word size and byte order, the standalone OS ABI, a
   shared-object (ET_DYN) image with no machine type, and header sizes
   matching our Elf_Ehdr/Elf_Phdr structs.  Returns NULL on success, or
   a static string describing the first failed check.  The order of the
   checks determines which error string a multiply-broken file gets.  */
static const char*
check_elf_header (const Elf_Ehdr *header)
{
  if (!(header->e_ident[EI_MAG0] == ELFMAG0
        && header->e_ident[EI_MAG1] == ELFMAG1
        && header->e_ident[EI_MAG2] == ELFMAG2
        && header->e_ident[EI_MAG3] == ELFMAG3))
    return "not an ELF file";

  if (header->e_ident[EI_CLASS] != ELFCLASS)
    return "ELF file does not have native word size";

  if (header->e_ident[EI_DATA] != ELFDATA)
    return "ELF file does not have native byte order";

  if (header->e_ident[EI_VERSION] != EV_CURRENT)
    return "bad ELF version";

  /* Guile's .go files claim the "standalone" OS ABI rather than that
     of any real operating system.  */
  if (header->e_ident[EI_OSABI] != ELFOSABI_STANDALONE)
    return "unexpected OS ABI";

  if (header->e_ident[EI_ABIVERSION] != 0)
    return "unexpected ABI version";

  if (header->e_type != ET_DYN)
    return "unexpected ELF type";

  /* EM_NONE: the contents are bytecode, not native machine code.  */
  if (header->e_machine != EM_NONE)
    return "unexpected machine";

  if (header->e_version != EV_CURRENT)
    return "unexpected ELF version";

  if (header->e_ehsize != sizeof *header)
    return "unexpected header size";

  if (header->e_phentsize != sizeof (Elf_Phdr))
    return "unexpected program header size";

  return NULL;
}
  121. #define IS_ALIGNED(offset, alignment) \
  122. (!((offset) & ((alignment) - 1)))
  123. #define ALIGN(offset, alignment) \
  124. ((offset + (alignment - 1)) & ~(alignment - 1))
  125. /* Return the alignment required by the ELF at DATA, of LEN bytes. */
  126. static size_t
  127. elf_alignment (const char *data, size_t len)
  128. {
  129. Elf_Ehdr *header;
  130. int i;
  131. size_t alignment = 8;
  132. if (len < sizeof(Elf_Ehdr))
  133. return alignment;
  134. header = (Elf_Ehdr *) data;
  135. if (header->e_phoff + header->e_phnum * header->e_phentsize >= len)
  136. return alignment;
  137. for (i = 0; i < header->e_phnum; i++)
  138. {
  139. Elf_Phdr *phdr;
  140. const char *phdr_addr = data + header->e_phoff + i * header->e_phentsize;
  141. if (!IS_ALIGNED ((scm_t_uintptr) phdr_addr, alignof_type (Elf_Phdr)))
  142. return alignment;
  143. phdr = (Elf_Phdr *) phdr_addr;
  144. if (phdr->p_align & (phdr->p_align - 1))
  145. return alignment;
  146. if (phdr->p_align > alignment)
  147. alignment = phdr->p_align;
  148. }
  149. return alignment;
  150. }
  151. /* This function leaks the memory that it allocates. */
  152. static char*
  153. alloc_aligned (size_t len, unsigned alignment)
  154. {
  155. char *ret;
  156. if (alignment == 8)
  157. {
  158. /* FIXME: Assert that we actually have an 8-byte-aligned malloc. */
  159. ret = malloc (len);
  160. }
  161. #if defined(HAVE_SYS_MMAN_H) && defined(MMAP_ANONYMOUS)
  162. else if (alignment == SCM_PAGE_SIZE)
  163. {
  164. ret = mmap (NULL, len, PROT_READ | PROT_WRITE, -1, 0);
  165. if (ret == MAP_FAILED)
  166. SCM_SYSERROR;
  167. }
  168. #endif
  169. else
  170. {
  171. if (len + alignment < len)
  172. abort ();
  173. ret = malloc (len + alignment - 1);
  174. if (!ret)
  175. abort ();
  176. ret = (char *) ALIGN ((scm_t_uintptr) ret, alignment);
  177. }
  178. return ret;
  179. }
  180. static char*
  181. copy_and_align_elf_data (const char *data, size_t len)
  182. {
  183. size_t alignment;
  184. char *copy;
  185. alignment = elf_alignment (data, len);
  186. copy = alloc_aligned (len, alignment);
  187. memcpy(copy, data, len);
  188. return copy;
  189. }
  190. #ifdef HAVE_SYS_MMAN_H
  191. static int
  192. segment_flags_to_prot (Elf_Word flags)
  193. {
  194. int prot = 0;
  195. if (flags & PF_X)
  196. prot |= PROT_EXEC;
  197. if (flags & PF_W)
  198. prot |= PROT_WRITE;
  199. if (flags & PF_R)
  200. prot |= PROT_READ;
  201. return prot;
  202. }
  203. #endif
/* Scan the PT_DYNAMIC segment described by DYN_PHDR within the image
   loaded at BASE, extracting the Guile-specific entries: the init
   thunk (DT_INIT), the GC roots (DT_GUILE_GC_ROOT / _SZ), the entry
   thunk (DT_GUILE_ENTRY), the bytecode version (DT_GUILE_VM_VERSION),
   and the frame maps (DT_GUILE_FRAME_MAPS).  On success, registers the
   GC roots, stores the init thunk (or #f) in *INIT_OUT, the entry
   thunk in *ENTRY_OUT, and the raw frame-maps pointer (possibly NULL)
   in *FRAME_MAPS_OUT, then returns NULL.  On failure returns a static
   error string.  */
static char*
process_dynamic_segment (char *base, Elf_Phdr *dyn_phdr,
                         SCM *init_out, SCM *entry_out, char **frame_maps_out)
{
  char *dyn_addr = base + dyn_phdr->p_vaddr;
  Elf_Dyn *dyn = (Elf_Dyn *) dyn_addr;
  size_t i, dyn_size = dyn_phdr->p_memsz / sizeof (Elf_Dyn);
  char *init = 0, *gc_root = 0, *entry = 0, *frame_maps = 0;
  scm_t_ptrdiff gc_root_size = 0;
  enum bytecode_kind bytecode_kind = BYTECODE_KIND_NONE;

  for (i = 0; i < dyn_size; i++)
    {
      /* DT_NULL terminates the dynamic section.  */
      if (dyn[i].d_tag == DT_NULL)
        break;

      switch (dyn[i].d_tag)
        {
        case DT_INIT:
          if (init)
            return "duplicate DT_INIT";
          init = base + dyn[i].d_un.d_val;
          break;
        case DT_GUILE_GC_ROOT:
          if (gc_root)
            return "duplicate DT_GUILE_GC_ROOT";
          gc_root = base + dyn[i].d_un.d_val;
          break;
        case DT_GUILE_GC_ROOT_SZ:
          if (gc_root_size)
            return "duplicate DT_GUILE_GC_ROOT_SZ";
          gc_root_size = dyn[i].d_un.d_val;
          break;
        case DT_GUILE_ENTRY:
          if (entry)
            return "duplicate DT_GUILE_ENTRY";
          entry = base + dyn[i].d_un.d_val;
          break;
        case DT_GUILE_VM_VERSION:
          if (bytecode_kind != BYTECODE_KIND_NONE)
            return "duplicate DT_GUILE_VM_VERSION";
          {
            /* The version word packs the major version in the high 16
               bits and the minor in the low 16.  */
            scm_t_uint16 major = dyn[i].d_un.d_val >> 16;
            scm_t_uint16 minor = dyn[i].d_un.d_val & 0xffff;

            switch (major)
              {
              case 0x0202:
                bytecode_kind = BYTECODE_KIND_GUILE_2_2;
                /* As we get closer to 2.2, we will allow for backwards
                   compatibility and we can change this test to ">"
                   instead of "!=".  However until then, to deal with VM
                   churn it's best to keep these things in
                   lock-step.  */
                if (minor != SCM_OBJCODE_MINOR_VERSION)
                  return "incompatible bytecode version";
                break;
              default:
                return "incompatible bytecode kind";
              }

            break;
          }
        case DT_GUILE_FRAME_MAPS:
          if (frame_maps)
            return "duplicate DT_GUILE_FRAME_MAPS";
          frame_maps = base + dyn[i].d_un.d_val;
          break;
        }
    }

  if (!entry)
    return "missing DT_GUILE_ENTRY";

  /* Bytecode must be 32-bit aligned for the VM.  */
  switch (bytecode_kind)
    {
    case BYTECODE_KIND_GUILE_2_2:
      if ((scm_t_uintptr) init % 4)
        return "unaligned DT_INIT";
      if ((scm_t_uintptr) entry % 4)
        return "unaligned DT_GUILE_ENTRY";
      break;
    case BYTECODE_KIND_NONE:
    default:
      return "missing DT_GUILE_VM_VERSION";
    }

  if (gc_root)
    GC_add_roots (gc_root, gc_root + gc_root_size);

  *init_out = init ? pointer_to_procedure (bytecode_kind, init) : SCM_BOOL_F;
  *entry_out = pointer_to_procedure (bytecode_kind, entry);
  *frame_maps_out = frame_maps;

  return NULL;
}
  291. #define ABORT(msg) do { err_msg = msg; goto cleanup; } while (0)
  292. static SCM
  293. load_thunk_from_memory (char *data, size_t len, int is_read_only)
  294. #define FUNC_NAME "load-thunk-from-memory"
  295. {
  296. Elf_Ehdr *header;
  297. Elf_Phdr *ph;
  298. const char *err_msg = 0;
  299. size_t n, alignment = 8;
  300. int i;
  301. int dynamic_segment = -1;
  302. SCM init = SCM_BOOL_F, entry = SCM_BOOL_F;
  303. char *frame_maps = 0;
  304. if (len < sizeof *header)
  305. ABORT ("object file too small");
  306. header = (Elf_Ehdr*) data;
  307. if ((err_msg = check_elf_header (header)))
  308. goto cleanup;
  309. if (header->e_phnum == 0)
  310. ABORT ("no loadable segments");
  311. n = header->e_phnum;
  312. if (len < header->e_phoff + n * sizeof (Elf_Phdr))
  313. ABORT ("object file too small");
  314. ph = (Elf_Phdr*) (data + header->e_phoff);
  315. /* Check that the segment table is sane. */
  316. for (i = 0; i < n; i++)
  317. {
  318. if (ph[i].p_filesz != ph[i].p_memsz)
  319. ABORT ("expected p_filesz == p_memsz");
  320. if (!ph[i].p_flags)
  321. ABORT ("expected nonzero segment flags");
  322. if (ph[i].p_align < alignment)
  323. {
  324. if (ph[i].p_align % alignment)
  325. ABORT ("expected new alignment to be multiple of old");
  326. alignment = ph[i].p_align;
  327. }
  328. if (ph[i].p_type == PT_DYNAMIC)
  329. {
  330. if (dynamic_segment >= 0)
  331. ABORT ("expected only one PT_DYNAMIC segment");
  332. dynamic_segment = i;
  333. }
  334. if (i == 0)
  335. {
  336. if (ph[i].p_vaddr != 0)
  337. ABORT ("first loadable vaddr is not 0");
  338. }
  339. else
  340. {
  341. if (ph[i].p_vaddr < ph[i-1].p_vaddr + ph[i-1].p_memsz)
  342. ABORT ("overlapping segments");
  343. if (ph[i].p_offset + ph[i].p_filesz > len)
  344. ABORT ("segment beyond end of byte array");
  345. }
  346. }
  347. if (dynamic_segment < 0)
  348. ABORT ("no PT_DYNAMIC segment");
  349. if (!IS_ALIGNED ((scm_t_uintptr) data, alignment))
  350. ABORT ("incorrectly aligned base");
  351. /* Allow writes to writable pages. */
  352. if (is_read_only)
  353. {
  354. #ifdef HAVE_SYS_MMAN_H
  355. for (i = 0; i < n; i++)
  356. {
  357. if (ph[i].p_flags == PF_R)
  358. continue;
  359. if (ph[i].p_align != 4096)
  360. continue;
  361. if (mprotect (data + ph[i].p_vaddr,
  362. ph[i].p_memsz,
  363. segment_flags_to_prot (ph[i].p_flags)))
  364. goto cleanup;
  365. }
  366. #else
  367. ABORT ("expected writable pages");
  368. #endif
  369. }
  370. if ((err_msg = process_dynamic_segment (data, &ph[dynamic_segment],
  371. &init, &entry, &frame_maps)))
  372. goto cleanup;
  373. if (scm_is_true (init))
  374. scm_call_0 (init);
  375. register_elf (data, len, frame_maps);
  376. /* Finally! Return the thunk. */
  377. return entry;
  378. cleanup:
  379. {
  380. if (errno)
  381. SCM_SYSERROR;
  382. scm_misc_error (FUNC_NAME, err_msg ? err_msg : "error loading ELF file",
  383. SCM_EOL);
  384. }
  385. }
  386. #undef FUNC_NAME
  387. #define SCM_PAGE_SIZE 4096
  388. static char*
  389. map_file_contents (int fd, size_t len, int *is_read_only)
  390. #define FUNC_NAME "load-thunk-from-file"
  391. {
  392. char *data;
  393. #ifdef HAVE_SYS_MMAN_H
  394. data = mmap (NULL, len, PROT_READ, MAP_PRIVATE, fd, 0);
  395. if (data == MAP_FAILED)
  396. SCM_SYSERROR;
  397. *is_read_only = 1;
  398. #else
  399. if (lseek (fd, 0, SEEK_START) < 0)
  400. {
  401. int errno_save = errno;
  402. (void) close (fd);
  403. errno = errno_save;
  404. SCM_SYSERROR;
  405. }
  406. /* Given that we are using the read fallback, optimistically assume
  407. that the .go files were made with 8-byte alignment.
  408. alignment. */
  409. data = malloc (end);
  410. if (!data)
  411. {
  412. (void) close (fd);
  413. scm_misc_error (FUNC_NAME, "failed to allocate ~A bytes",
  414. scm_list_1 (scm_from_size_t (end)));
  415. }
  416. if (full_read (fd, data, end) != end)
  417. {
  418. int errno_save = errno;
  419. (void) close (fd);
  420. errno = errno_save;
  421. if (errno)
  422. SCM_SYSERROR;
  423. scm_misc_error (FUNC_NAME, "short read while loading objcode",
  424. SCM_EOL);
  425. }
  426. /* If our optimism failed, fall back. */
  427. {
  428. unsigned alignment = sniff_elf_alignment (data, end);
  429. if (alignment != 8)
  430. {
  431. char *copy = copy_and_align_elf_data (data, end, alignment);
  432. free (data);
  433. data = copy;
  434. }
  435. }
  436. *is_read_only = 0;
  437. #endif
  438. return data;
  439. }
  440. #undef FUNC_NAME
  441. SCM_DEFINE (scm_load_thunk_from_file, "load-thunk-from-file", 1, 0, 0,
  442. (SCM filename),
  443. "")
  444. #define FUNC_NAME s_scm_load_thunk_from_file
  445. {
  446. char *c_filename;
  447. int fd, is_read_only;
  448. off_t end;
  449. char *data;
  450. SCM_VALIDATE_STRING (1, filename);
  451. c_filename = scm_to_locale_string (filename);
  452. fd = open (c_filename, O_RDONLY | O_BINARY | O_CLOEXEC);
  453. free (c_filename);
  454. if (fd < 0) SCM_SYSERROR;
  455. end = lseek (fd, 0, SEEK_END);
  456. if (end < 0)
  457. SCM_SYSERROR;
  458. data = map_file_contents (fd, end, &is_read_only);
  459. (void) close (fd);
  460. return load_thunk_from_memory (data, end, is_read_only);
  461. }
  462. #undef FUNC_NAME
  463. SCM_DEFINE (scm_load_thunk_from_memory, "load-thunk-from-memory", 1, 0, 0,
  464. (SCM bv),
  465. "")
  466. #define FUNC_NAME s_scm_load_thunk_from_memory
  467. {
  468. char *data;
  469. size_t len;
  470. SCM_VALIDATE_BYTEVECTOR (1, bv);
  471. data = (char *) SCM_BYTEVECTOR_CONTENTS (bv);
  472. len = SCM_BYTEVECTOR_LENGTH (bv);
  473. /* Copy data in order to align it, to trace its GC roots and
  474. writable sections, and to keep it in memory. */
  475. data = copy_and_align_elf_data (data, len);
  476. return load_thunk_from_memory (data, len, 0);
  477. }
  478. #undef FUNC_NAME
/* A record of one loaded ELF image, so that a code address can be
   mapped back to the image containing it.  */
struct mapped_elf_image
{
  char *start;       /* First byte of the image.  */
  char *end;         /* One past the last byte of the image.  */
  char *frame_maps;  /* DT_GUILE_FRAME_MAPS data, or NULL.  */
};

/* Dynamic array of all registered images, kept sorted by address;
   accessed under scm_i_misc_mutex (see register_elf and
   find_mapped_elf_image).  */
static struct mapped_elf_image *mapped_elf_images = NULL;
static size_t mapped_elf_images_count = 0;
static size_t mapped_elf_images_allocated = 0;
  488. static size_t
  489. find_mapped_elf_insertion_index (char *ptr)
  490. {
  491. /* "mapped_elf_images_count" must never be dereferenced. */
  492. size_t start = 0, end = mapped_elf_images_count;
  493. while (start < end)
  494. {
  495. size_t n = start + (end - start) / 2;
  496. if (ptr < mapped_elf_images[n].end)
  497. end = n;
  498. else
  499. start = n + 1;
  500. }
  501. return start;
  502. }
/* Record the image [DATA, DATA + LEN) together with its FRAME_MAPS
   pointer in the sorted mapped_elf_images array, growing the array if
   necessary.  Takes scm_i_misc_mutex.  */
static void
register_elf (char *data, size_t len, char *frame_maps)
{
  scm_i_pthread_mutex_lock (&scm_i_misc_mutex);
  {
    /* My kingdom for a generic growable sorted vector library.  */
    if (mapped_elf_images_count == mapped_elf_images_allocated)
      {
        struct mapped_elf_image *prev;
        size_t n;

        /* Double the capacity; start at 16 entries.  */
        if (mapped_elf_images_allocated)
          mapped_elf_images_allocated *= 2;
        else
          mapped_elf_images_allocated = 16;

        prev = mapped_elf_images;
        /* Pointerless GC allocation: the array's contents are not
           scanned for pointers by the collector.  The old array is
           left for the GC to reclaim.  */
        mapped_elf_images =
          scm_gc_malloc_pointerless (sizeof (*mapped_elf_images)
                                     * mapped_elf_images_allocated,
                                     "mapped elf images");

        for (n = 0; n < mapped_elf_images_count; n++)
          {
            mapped_elf_images[n].start = prev[n].start;
            mapped_elf_images[n].end = prev[n].end;
            mapped_elf_images[n].frame_maps = prev[n].frame_maps;
          }
      }

    {
      size_t end;
      size_t n = find_mapped_elf_insertion_index (data);

      /* Shift entries at and after index N up by one to make room,
         working from the back so nothing is overwritten.  */
      for (end = mapped_elf_images_count; n < end; end--)
        {
          const struct mapped_elf_image *prev = &mapped_elf_images[end - 1];
          mapped_elf_images[end].start = prev->start;
          mapped_elf_images[end].end = prev->end;
          mapped_elf_images[end].frame_maps = prev->frame_maps;
        }
      mapped_elf_images_count++;

      mapped_elf_images[n].start = data;
      mapped_elf_images[n].end = data + len;
      mapped_elf_images[n].frame_maps = frame_maps;
    }
  }
  scm_i_pthread_mutex_unlock (&scm_i_misc_mutex);
}
  547. static struct mapped_elf_image *
  548. find_mapped_elf_image_unlocked (char *ptr)
  549. {
  550. size_t n = find_mapped_elf_insertion_index ((char *) ptr);
  551. if (n < mapped_elf_images_count
  552. && mapped_elf_images[n].start <= ptr
  553. && ptr < mapped_elf_images[n].end)
  554. return &mapped_elf_images[n];
  555. return NULL;
  556. }
  557. static int
  558. find_mapped_elf_image (char *ptr, struct mapped_elf_image *image)
  559. {
  560. int result;
  561. scm_i_pthread_mutex_lock (&scm_i_misc_mutex);
  562. {
  563. struct mapped_elf_image *img = find_mapped_elf_image_unlocked (ptr);
  564. if (img)
  565. {
  566. memcpy (image, img, sizeof (*image));
  567. result = 1;
  568. }
  569. else
  570. result = 0;
  571. }
  572. scm_i_pthread_mutex_unlock (&scm_i_misc_mutex);
  573. return result;
  574. }
  575. static SCM
  576. scm_find_mapped_elf_image (SCM ip)
  577. {
  578. struct mapped_elf_image image;
  579. if (find_mapped_elf_image ((char *) scm_to_uintptr_t (ip), &image))
  580. {
  581. signed char *data = (signed char *) image.start;
  582. size_t len = image.end - image.start;
  583. return scm_c_take_gc_bytevector (data, len, SCM_BOOL_F);
  584. }
  585. return SCM_BOOL_F;
  586. }
  587. static SCM
  588. scm_all_mapped_elf_images (void)
  589. {
  590. SCM result = SCM_EOL;
  591. scm_i_pthread_mutex_lock (&scm_i_misc_mutex);
  592. {
  593. size_t n;
  594. for (n = 0; n < mapped_elf_images_count; n++)
  595. {
  596. signed char *data = (signed char *) mapped_elf_images[n].start;
  597. size_t len = mapped_elf_images[n].end - mapped_elf_images[n].start;
  598. result = scm_cons (scm_c_take_gc_bytevector (data, len, SCM_BOOL_F),
  599. result);
  600. }
  601. }
  602. scm_i_pthread_mutex_unlock (&scm_i_misc_mutex);
  603. return result;
  604. }
/* Layout of the DT_GUILE_FRAME_MAPS data: a prefix, followed by an
   array of headers, followed by the maps themselves.  */
struct frame_map_prefix
{
  scm_t_uint32 text_offset;  /* Added to the image start to get the base
                                address that header `addr' fields are
                                relative to.  */
  scm_t_uint32 maps_offset;  /* Offset (from the prefix) where the maps
                                begin, i.e. where the header array
                                ends.  */
};

struct frame_map_header
{
  scm_t_uint32 addr;         /* Code address, relative to the text
                                base.  */
  scm_t_uint32 map_offset;   /* Offset (from the prefix) of this
                                address's map.  */
};

/* The on-disk format fixes both records at 8 bytes each.  */
verify (sizeof (struct frame_map_prefix) == 8);
verify (sizeof (struct frame_map_header) == 8);
/* Given a code address IP inside some registered image, return the
   dead-slot map for that exact address, or NULL if IP is not in any
   image, the image has no frame maps, or no header matches.  The
   lookup is a binary search over the sorted frame_map_header array
   that follows the frame_map_prefix.
   NOTE(review): named _unlocked and reads mapped_elf_images without
   taking scm_i_misc_mutex -- presumably callers hold it; confirm at
   call sites.  */
const scm_t_uint8 *
scm_find_dead_slot_map_unlocked (const scm_t_uint32 *ip)
{
  struct mapped_elf_image *image;
  char *base;
  struct frame_map_prefix *prefix;
  struct frame_map_header *headers;
  scm_t_uintptr addr = (scm_t_uintptr) ip;
  size_t start, end;

  image = find_mapped_elf_image_unlocked ((char *) ip);
  if (!image || !image->frame_maps)
    return NULL;

  base = image->frame_maps;
  prefix = (struct frame_map_prefix *) base;
  headers = (struct frame_map_header *) (base + sizeof (*prefix));

  /* Header addresses are relative to the image's text base; anything
     below it cannot match.  */
  if (addr < ((scm_t_uintptr) image->start) + prefix->text_offset)
    return NULL;
  addr -= ((scm_t_uintptr) image->start) + prefix->text_offset;

  start = 0;
  /* The headers occupy the space between the prefix and maps_offset.  */
  end = (prefix->maps_offset - sizeof (*prefix)) / sizeof (*headers);
  if (end == 0 || addr > headers[end - 1].addr)
    return NULL;

  /* Binary search for an exact address match.  */
  while (start < end)
    {
      size_t n = start + (end - start) / 2;

      if (addr == headers[n].addr)
        return (const scm_t_uint8*) (base + headers[n].map_offset);
      else if (addr < headers[n].addr)
        end = n;
      else
        start = n + 1;
    }

  return NULL;
}
/* Register the loader extension so that scm_init_loader runs when the
   extension is loaded.  */
void
scm_bootstrap_loader (void)
{
  scm_c_register_extension ("libguile-" SCM_EFFECTIVE_VERSION,
                            "scm_init_loader",
                            (scm_t_extension_init_func)scm_init_loader, NULL);
}
/* Initialize the loader module: define the snarfed subrs from
   loader.x and register the image-introspection procedures.  */
void
scm_init_loader (void)
{
#ifndef SCM_MAGIC_SNARFER
#include "libguile/loader.x"
#endif

  scm_c_define_gsubr ("find-mapped-elf-image", 1, 0, 0,
                      (scm_t_subr) scm_find_mapped_elf_image);
  scm_c_define_gsubr ("all-mapped-elf-images", 0, 0, 0,
                      (scm_t_subr) scm_all_mapped_elf_images);
}
  669. /*
  670. Local Variables:
  671. c-file-style: "gnu"
  672. End:
  673. */