/*
 * xen_hyper.c
 *
 * Portions Copyright (C) 2006-2007 Fujitsu Limited
 * Portions Copyright (C) 2006-2007 VA Linux Systems Japan K.K.
 *
 * Authors: Itsuro Oda <oda@valinux.co.jp>
 *          Fumihiko Kakuma <kakuma@valinux.co.jp>
 *
 * This file is part of Xencrash.
 *
 * Xencrash is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * Xencrash is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with Xencrash; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 */
#include "defs.h"

#ifdef XEN_HYPERVISOR_ARCH
#include "xen_hyper_defs.h"

static void xen_hyper_schedule_init(void);

/*
 * Do initialization for Xen Hyper system here.
 */
void
xen_hyper_init(void)
{
	char *buf;
#if defined(X86) || defined(X86_64)
	long member_offset;
#endif

#ifdef X86_64
	xht->xen_virt_start = symbol_value("start");
	/*
	 * Xen virtual mapping is aligned to 1 GiB boundary.
	 * Image starts no more than 1 GiB below
	 * beginning of virtual address space.
	 */
	xht->xen_virt_start &= 0xffffffffc0000000;
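	/*
	 * Illustrative (hypothetical) example: if "start" resolved to
	 * 0xffff82d080201000, masking with 0xffffffffc0000000 clears the
	 * low 30 bits and yields 0xffff82d080000000, the enclosing
	 * 1 GiB-aligned virtual base.
	 */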
#endif

	if (machine_type("X86_64") &&
	    symbol_exists("xen_phys_start") && !xen_phys_start())
		error(WARNING,
		    "This hypervisor is relocatable; if initialization fails below, try\n"
		    "          using the \"--xen_phys_start <address>\" command line option.\n\n");

	if (symbol_exists("crashing_cpu")) {
		get_symbol_data("crashing_cpu", sizeof(xht->crashing_cpu),
			&xht->crashing_cpu);
	} else {
		xht->crashing_cpu = XEN_HYPER_PCPU_ID_INVALID;
	}
	machdep->get_smp_cpus();
	machdep->memory_size();

	if (symbol_exists("__per_cpu_offset")) {
		xht->flags |= XEN_HYPER_SMP;
		if ((xht->__per_cpu_offset = malloc(sizeof(ulong) * XEN_HYPER_MAX_CPUS())) == NULL) {
			error(FATAL, "cannot malloc __per_cpu_offset space.\n");
		}
		if (!readmem(symbol_value("__per_cpu_offset"), KVADDR,
		    xht->__per_cpu_offset, sizeof(ulong) * XEN_HYPER_MAX_CPUS(),
		    "__per_cpu_offset", RETURN_ON_ERROR)) {
			error(FATAL, "cannot read __per_cpu_offset.\n");
		}
	}

#if defined(X86) || defined(X86_64)
	if (symbol_exists("__per_cpu_shift")) {
		xht->percpu_shift = (int)symbol_value("__per_cpu_shift");
	} else if (xen_major_version() >= 3 && xen_minor_version() >= 3) {
		xht->percpu_shift = 13;
	} else {
		xht->percpu_shift = 12;
	}
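	/*
	 * Note (our reading of xen_hyper_per_cpu()): percpu_shift is the
	 * log2 stride between per-cpu data areas, i.e. 12 implies 4 KiB
	 * per cpu and 13 implies 8 KiB.
	 */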
	member_offset = MEMBER_OFFSET("cpuinfo_x86", "x86_model_id");
	buf = GETBUF(XEN_HYPER_SIZE(cpuinfo_x86));
	if (xen_hyper_test_pcpu_id(XEN_HYPER_CRASHING_CPU())) {
		xen_hyper_x86_fill_cpu_data(XEN_HYPER_CRASHING_CPU(), buf);
	} else {
		xen_hyper_x86_fill_cpu_data(xht->cpu_idxs[0], buf);
	}
	strncpy(xht->utsname.machine, (char *)(buf + member_offset),
		sizeof(xht->utsname.machine)-1);
	FREEBUF(buf);
#elif defined(IA64)
	buf = GETBUF(XEN_HYPER_SIZE(cpuinfo_ia64));
	if (xen_hyper_test_pcpu_id(XEN_HYPER_CRASHING_CPU())) {
		xen_hyper_ia64_fill_cpu_data(XEN_HYPER_CRASHING_CPU(), buf);
	} else {
		xen_hyper_ia64_fill_cpu_data(xht->cpu_idxs[0], buf);
	}
	strncpy(xht->utsname.machine, (char *)(buf + XEN_HYPER_OFFSET(cpuinfo_ia64_vendor)),
		sizeof(xht->utsname.machine)-1);
	FREEBUF(buf);
#endif

#ifndef IA64
	XEN_HYPER_STRUCT_SIZE_INIT(note_buf_t, "note_buf_t");
	XEN_HYPER_STRUCT_SIZE_INIT(crash_note_t, "crash_note_t");
	XEN_HYPER_MEMBER_OFFSET_INIT(crash_note_t_core, "crash_note_t", "core");
	XEN_HYPER_MEMBER_OFFSET_INIT(crash_note_t_xen, "crash_note_t", "xen");
	XEN_HYPER_MEMBER_OFFSET_INIT(crash_note_t_xen_regs, "crash_note_t", "xen_regs");
	XEN_HYPER_MEMBER_OFFSET_INIT(crash_note_t_xen_info, "crash_note_t", "xen_info");
	XEN_HYPER_STRUCT_SIZE_INIT(crash_note_core_t, "crash_note_core_t");
	XEN_HYPER_MEMBER_OFFSET_INIT(crash_note_core_t_note, "crash_note_core_t", "note");
	XEN_HYPER_MEMBER_OFFSET_INIT(crash_note_core_t_desc, "crash_note_core_t", "desc");
	XEN_HYPER_STRUCT_SIZE_INIT(crash_note_xen_t, "crash_note_xen_t");
	XEN_HYPER_MEMBER_OFFSET_INIT(crash_note_xen_t_note, "crash_note_xen_t", "note");
	XEN_HYPER_MEMBER_OFFSET_INIT(crash_note_xen_t_desc, "crash_note_xen_t", "desc");
	XEN_HYPER_STRUCT_SIZE_INIT(crash_note_xen_core_t, "crash_note_xen_core_t");
	XEN_HYPER_MEMBER_OFFSET_INIT(crash_note_xen_core_t_note, "crash_note_xen_core_t", "note");
	XEN_HYPER_MEMBER_OFFSET_INIT(crash_note_xen_core_t_desc, "crash_note_xen_core_t", "desc");
	XEN_HYPER_STRUCT_SIZE_INIT(crash_note_xen_info_t, "crash_note_xen_info_t");
	XEN_HYPER_MEMBER_OFFSET_INIT(crash_note_xen_info_t_note, "crash_note_xen_info_t", "note");
	XEN_HYPER_MEMBER_OFFSET_INIT(crash_note_xen_info_t_desc, "crash_note_xen_info_t", "desc");
	XEN_HYPER_STRUCT_SIZE_INIT(crash_xen_core_t, "crash_xen_core_t");
	XEN_HYPER_STRUCT_SIZE_INIT(crash_xen_info_t, "crash_xen_info_t");
	XEN_HYPER_STRUCT_SIZE_INIT(xen_crash_xen_regs_t, "xen_crash_xen_regs_t");
	XEN_HYPER_STRUCT_SIZE_INIT(ELF_Prstatus, "ELF_Prstatus");
	XEN_HYPER_MEMBER_OFFSET_INIT(ELF_Prstatus_pr_info, "ELF_Prstatus", "pr_info");
	XEN_HYPER_MEMBER_OFFSET_INIT(ELF_Prstatus_pr_cursig, "ELF_Prstatus", "pr_cursig");
	XEN_HYPER_MEMBER_OFFSET_INIT(ELF_Prstatus_pr_sigpend, "ELF_Prstatus", "pr_sigpend");
	XEN_HYPER_MEMBER_OFFSET_INIT(ELF_Prstatus_pr_sighold, "ELF_Prstatus", "pr_sighold");
	XEN_HYPER_MEMBER_OFFSET_INIT(ELF_Prstatus_pr_pid, "ELF_Prstatus", "pr_pid");
	XEN_HYPER_MEMBER_OFFSET_INIT(ELF_Prstatus_pr_ppid, "ELF_Prstatus", "pr_ppid");
	XEN_HYPER_MEMBER_OFFSET_INIT(ELF_Prstatus_pr_pgrp, "ELF_Prstatus", "pr_pgrp");
	XEN_HYPER_MEMBER_OFFSET_INIT(ELF_Prstatus_pr_sid, "ELF_Prstatus", "pr_sid");
	XEN_HYPER_MEMBER_OFFSET_INIT(ELF_Prstatus_pr_utime, "ELF_Prstatus", "pr_utime");
	XEN_HYPER_MEMBER_OFFSET_INIT(ELF_Prstatus_pr_stime, "ELF_Prstatus", "pr_stime");
	XEN_HYPER_MEMBER_OFFSET_INIT(ELF_Prstatus_pr_cutime, "ELF_Prstatus", "pr_cutime");
	XEN_HYPER_MEMBER_OFFSET_INIT(ELF_Prstatus_pr_cstime, "ELF_Prstatus", "pr_cstime");
	XEN_HYPER_MEMBER_OFFSET_INIT(ELF_Prstatus_pr_reg, "ELF_Prstatus", "pr_reg");
	XEN_HYPER_MEMBER_OFFSET_INIT(ELF_Prstatus_pr_fpvalid, "ELF_Prstatus", "pr_fpvalid");
	XEN_HYPER_MEMBER_OFFSET_INIT(ELF_Timeval_tv_sec, "ELF_Timeval", "tv_sec");
	XEN_HYPER_MEMBER_OFFSET_INIT(ELF_Timeval_tv_usec, "ELF_Timeval", "tv_usec");
	XEN_HYPER_STRUCT_SIZE_INIT(ELF_Signifo, "ELF_Signifo");
	XEN_HYPER_STRUCT_SIZE_INIT(ELF_Gregset, "ELF_Gregset");
	XEN_HYPER_STRUCT_SIZE_INIT(ELF_Timeval, "ELF_Timeval");
#endif
	XEN_HYPER_STRUCT_SIZE_INIT(domain, "domain");
	XEN_HYPER_STRUCT_SIZE_INIT(vcpu, "vcpu");
#ifndef IA64
	XEN_HYPER_STRUCT_SIZE_INIT(cpu_info, "cpu_info");
#endif
	XEN_HYPER_STRUCT_SIZE_INIT(cpu_user_regs, "cpu_user_regs");

	xht->idle_vcpu_size = get_array_length("idle_vcpu", NULL, 0);
	xht->idle_vcpu_array = (ulong *)malloc(xht->idle_vcpu_size * sizeof(ulong));
	if (xht->idle_vcpu_array == NULL) {
		error(FATAL, "cannot malloc idle_vcpu_array space.\n");
	}
	if (!readmem(symbol_value("idle_vcpu"), KVADDR, xht->idle_vcpu_array,
	    xht->idle_vcpu_size * sizeof(ulong), "idle_vcpu_array",
	    RETURN_ON_ERROR)) {
		error(FATAL, "cannot read idle_vcpu array.\n");
	}

	/*
	 * Do some initialization.
	 */
#ifndef IA64
	xen_hyper_dumpinfo_init();
#endif
	xhmachdep->pcpu_init();
	xen_hyper_domain_init();
	xen_hyper_vcpu_init();
	xen_hyper_misc_init();
	/*
	 * xen_hyper_post_init() has to be called after all of the above
	 * initialization functions have finished.
	 */
	xen_hyper_post_init();
}
/*
 * Do initialization for Domain of Xen Hyper system here.
 */
void
xen_hyper_domain_init(void)
{
	XEN_HYPER_MEMBER_OFFSET_INIT(domain_domain_id, "domain", "domain_id");
	XEN_HYPER_MEMBER_OFFSET_INIT(domain_tot_pages, "domain", "tot_pages");
	XEN_HYPER_MEMBER_OFFSET_INIT(domain_max_pages, "domain", "max_pages");
	XEN_HYPER_MEMBER_OFFSET_INIT(domain_xenheap_pages, "domain", "xenheap_pages");
	XEN_HYPER_MEMBER_OFFSET_INIT(domain_shared_info, "domain", "shared_info");
	XEN_HYPER_MEMBER_OFFSET_INIT(domain_sched_priv, "domain", "sched_priv");
	XEN_HYPER_MEMBER_OFFSET_INIT(domain_next_in_list, "domain", "next_in_list");
	XEN_HYPER_MEMBER_OFFSET_INIT(domain_domain_flags, "domain", "domain_flags");
	XEN_HYPER_MEMBER_OFFSET_INIT(domain_evtchn, "domain", "evtchn");
	XEN_HYPER_MEMBER_OFFSET_INIT(domain_is_hvm, "domain", "is_hvm");
	XEN_HYPER_MEMBER_OFFSET_INIT(domain_guest_type, "domain", "guest_type");
	XEN_HYPER_MEMBER_OFFSET_INIT(domain_is_privileged, "domain", "is_privileged");
	XEN_HYPER_MEMBER_OFFSET_INIT(domain_debugger_attached, "domain", "debugger_attached");
	/*
	 * Will be removed in Xen 4.4 (hg ae9b223a675d); check with
	 * XEN_HYPER_VALID_MEMBER() before using.
	 */
	XEN_HYPER_MEMBER_OFFSET_INIT(domain_is_polling, "domain", "is_polling");
	XEN_HYPER_MEMBER_OFFSET_INIT(domain_is_dying, "domain", "is_dying");
	XEN_HYPER_MEMBER_OFFSET_INIT(domain_is_paused_by_controller, "domain", "is_paused_by_controller");
	XEN_HYPER_MEMBER_OFFSET_INIT(domain_is_shutting_down, "domain", "is_shutting_down");
	XEN_HYPER_MEMBER_OFFSET_INIT(domain_is_shut_down, "domain", "is_shut_down");
	XEN_HYPER_MEMBER_OFFSET_INIT(domain_vcpu, "domain", "vcpu");
	XEN_HYPER_MEMBER_SIZE_INIT(domain_vcpu, "domain", "vcpu");
	XEN_HYPER_MEMBER_OFFSET_INIT(domain_max_vcpus, "domain", "max_vcpus");
	XEN_HYPER_MEMBER_OFFSET_INIT(domain_arch, "domain", "arch");

	XEN_HYPER_STRUCT_SIZE_INIT(arch_shared_info, "arch_shared_info");
	XEN_HYPER_MEMBER_OFFSET_INIT(arch_shared_info_max_pfn, "arch_shared_info", "max_pfn");
	XEN_HYPER_MEMBER_OFFSET_INIT(arch_shared_info_pfn_to_mfn_frame_list_list, "arch_shared_info", "pfn_to_mfn_frame_list_list");
	XEN_HYPER_MEMBER_OFFSET_INIT(arch_shared_info_nmi_reason, "arch_shared_info", "nmi_reason");

	XEN_HYPER_STRUCT_SIZE_INIT(shared_info, "shared_info");
	XEN_HYPER_MEMBER_OFFSET_INIT(shared_info_vcpu_info, "shared_info", "vcpu_info");
	XEN_HYPER_MEMBER_OFFSET_INIT(shared_info_evtchn_pending, "shared_info", "evtchn_pending");
	XEN_HYPER_MEMBER_OFFSET_INIT(shared_info_evtchn_mask, "shared_info", "evtchn_mask");
	XEN_HYPER_MEMBER_OFFSET_INIT(shared_info_arch, "shared_info", "arch");

	XEN_HYPER_STRUCT_SIZE_INIT(arch_domain, "arch_domain");
#ifdef IA64
	XEN_HYPER_MEMBER_OFFSET_INIT(arch_domain_mm, "arch_domain", "mm");
	XEN_HYPER_STRUCT_SIZE_INIT(mm_struct, "mm_struct");
	XEN_HYPER_MEMBER_OFFSET_INIT(mm_struct_pgd, "mm_struct", "pgd");
#endif

	if ((xhdt->domain_struct = malloc(XEN_HYPER_SIZE(domain))) == NULL) {
		error(FATAL, "cannot malloc domain struct space.\n");
	}
	if ((xhdt->domain_struct_verify = malloc(XEN_HYPER_SIZE(domain))) == NULL) {
		error(FATAL, "cannot malloc domain struct space for verification.\n");
	}

	xen_hyper_refresh_domain_context_space();
	xhdt->flags |= XEN_HYPER_DOMAIN_F_INIT;
}

/*
 * Do initialization for vcpu of Xen Hyper system here.
 */
void
xen_hyper_vcpu_init(void)
{
	XEN_HYPER_STRUCT_SIZE_INIT(timer, "timer");
	XEN_HYPER_MEMBER_OFFSET_INIT(timer_expires, "timer", "expires");
	XEN_HYPER_MEMBER_OFFSET_INIT(timer_cpu, "timer", "cpu");
	XEN_HYPER_MEMBER_OFFSET_INIT(timer_function, "timer", "function");
	XEN_HYPER_MEMBER_OFFSET_INIT(timer_data, "timer", "data");
	XEN_HYPER_MEMBER_OFFSET_INIT(timer_heap_offset, "timer", "heap_offset");
	XEN_HYPER_MEMBER_OFFSET_INIT(timer_killed, "timer", "killed");

	XEN_HYPER_STRUCT_SIZE_INIT(vcpu_runstate_info, "vcpu_runstate_info");
	XEN_HYPER_MEMBER_OFFSET_INIT(vcpu_runstate_info_state, "vcpu_runstate_info", "state");
	XEN_HYPER_MEMBER_OFFSET_INIT(vcpu_runstate_info_state_entry_time, "vcpu_runstate_info", "state_entry_time");
	XEN_HYPER_MEMBER_OFFSET_INIT(vcpu_runstate_info_time, "vcpu_runstate_info", "time");

	XEN_HYPER_MEMBER_OFFSET_INIT(vcpu_vcpu_id, "vcpu", "vcpu_id");
	XEN_HYPER_MEMBER_OFFSET_INIT(vcpu_processor, "vcpu", "processor");
	XEN_HYPER_MEMBER_OFFSET_INIT(vcpu_vcpu_info, "vcpu", "vcpu_info");
	XEN_HYPER_MEMBER_OFFSET_INIT(vcpu_domain, "vcpu", "domain");
	XEN_HYPER_MEMBER_OFFSET_INIT(vcpu_next_in_list, "vcpu", "next_in_list");
	XEN_HYPER_MEMBER_OFFSET_INIT(vcpu_timer, "vcpu", "timer");
	XEN_HYPER_MEMBER_OFFSET_INIT(vcpu_sleep_tick, "vcpu", "sleep_tick");
	XEN_HYPER_MEMBER_OFFSET_INIT(vcpu_poll_timer, "vcpu", "poll_timer");
	XEN_HYPER_MEMBER_OFFSET_INIT(vcpu_sched_priv, "vcpu", "sched_priv");
	XEN_HYPER_MEMBER_OFFSET_INIT(vcpu_runstate, "vcpu", "runstate");
	XEN_HYPER_MEMBER_OFFSET_INIT(vcpu_runstate_guest, "vcpu", "runstate_guest");
	XEN_HYPER_MEMBER_OFFSET_INIT(vcpu_vcpu_flags, "vcpu", "vcpu_flags");
	XEN_HYPER_MEMBER_OFFSET_INIT(vcpu_pause_count, "vcpu", "pause_count");
	XEN_HYPER_MEMBER_OFFSET_INIT(vcpu_virq_to_evtchn, "vcpu", "virq_to_evtchn");
	XEN_HYPER_MEMBER_OFFSET_INIT(vcpu_cpu_affinity, "vcpu", "cpu_affinity");
	XEN_HYPER_MEMBER_OFFSET_INIT(vcpu_nmi_addr, "vcpu", "nmi_addr");
	XEN_HYPER_MEMBER_OFFSET_INIT(vcpu_vcpu_dirty_cpumask, "vcpu", "vcpu_dirty_cpumask");
	XEN_HYPER_MEMBER_OFFSET_INIT(vcpu_arch, "vcpu", "arch");
#ifdef IA64
	XEN_HYPER_ASSIGN_OFFSET(vcpu_thread_ksp) =
		MEMBER_OFFSET("vcpu", "arch") + MEMBER_OFFSET("arch_vcpu", "_thread") +
		MEMBER_OFFSET("thread_struct", "ksp");
#endif

	if ((xhvct->vcpu_struct = malloc(XEN_HYPER_SIZE(vcpu))) == NULL) {
		error(FATAL, "cannot malloc vcpu struct space.\n");
	}
	if ((xhvct->vcpu_struct_verify = malloc(XEN_HYPER_SIZE(vcpu))) == NULL) {
		error(FATAL, "cannot malloc vcpu struct space for verification.\n");
	}

	xen_hyper_refresh_vcpu_context_space();
	xhvct->flags |= XEN_HYPER_VCPU_F_INIT;
	xhvct->idle_vcpu = symbol_value("idle_vcpu");
}

/*
 * Do initialization for pcpu of Xen Hyper system here.
 */
#if defined(X86) || defined(X86_64)
void
xen_hyper_x86_pcpu_init(void)
{
	ulong cpu_info;
	ulong init_tss_base, init_tss;
	ulong sp;
	struct xen_hyper_pcpu_context *pcc;
	char *buf, *bp;
	int i, cpuid;
	int flag;

	XEN_HYPER_MEMBER_OFFSET_INIT(cpu_info_guest_cpu_user_regs, "cpu_info", "guest_cpu_user_regs");
	XEN_HYPER_MEMBER_OFFSET_INIT(cpu_info_processor_id, "cpu_info", "processor_id");
	XEN_HYPER_MEMBER_OFFSET_INIT(cpu_info_current_vcpu, "cpu_info", "current_vcpu");
	if ((xhpct->pcpu_struct = malloc(XEN_HYPER_SIZE(cpu_info))) == NULL) {
		error(FATAL, "cannot malloc pcpu struct space.\n");
	}

	/* get physical cpu context */
	xen_hyper_alloc_pcpu_context_space(XEN_HYPER_MAX_CPUS());
	if (symbol_exists("per_cpu__init_tss")) {
		init_tss_base = symbol_value("per_cpu__init_tss");
		flag = TRUE;
	} else {
		init_tss_base = symbol_value("init_tss");
		flag = FALSE;
	}
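	/*
	 * For each online pcpu the loop below reads its TSS, takes the
	 * ring-0 stack pointer from it, and derives the cpu_info address
	 * with XEN_HYPER_GET_CPU_INFO() (on Xen the cpu_info block sits
	 * at the top of the per-cpu hypervisor stack, so it can be
	 * recovered by masking the stack pointer).
	 */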
	buf = GETBUF(XEN_HYPER_SIZE(tss_struct));
	for_cpu_indexes(i, cpuid)
	{
		if (flag)
			init_tss = xen_hyper_per_cpu(init_tss_base, cpuid);
		else
			init_tss = init_tss_base +
				XEN_HYPER_SIZE(tss_struct) * cpuid;
		if (!readmem(init_tss, KVADDR, buf,
		    XEN_HYPER_SIZE(tss_struct), "init_tss", RETURN_ON_ERROR)) {
			error(FATAL, "cannot read init_tss.\n");
		}
		if (machine_type("X86")) {
			sp = ULONG(buf + XEN_HYPER_OFFSET(tss_struct_esp0));
		} else if (machine_type("X86_64")) {
			sp = ULONG(buf + XEN_HYPER_OFFSET(tss_struct_rsp0));
		} else
			sp = 0;
		cpu_info = XEN_HYPER_GET_CPU_INFO(sp);
		if (CRASHDEBUG(1)) {
			fprintf(fp, "sp=%lx, cpu_info=%lx\n", sp, cpu_info);
		}
		if (!(bp = xen_hyper_read_pcpu(cpu_info))) {
			error(FATAL, "cannot read cpu_info.\n");
		}
		pcc = &xhpct->context_array[cpuid];
		xen_hyper_store_pcpu_context(pcc, cpu_info, bp);
		xen_hyper_store_pcpu_context_tss(pcc, init_tss, buf);
	}
	FREEBUF(buf);
}
#elif defined(IA64)
void
xen_hyper_ia64_pcpu_init(void)
{
	struct xen_hyper_pcpu_context *pcc;
	int i, cpuid;

	/* get physical cpu context */
	xen_hyper_alloc_pcpu_context_space(XEN_HYPER_MAX_CPUS());
	for_cpu_indexes(i, cpuid)
	{
		pcc = &xhpct->context_array[cpuid];
		pcc->processor_id = cpuid;
	}
}
#endif

/*
 * Do initialization for some miscellaneous things
 * of Xen Hyper system here.
 */
void
xen_hyper_misc_init(void)
{
	XEN_HYPER_STRUCT_SIZE_INIT(schedule_data, "schedule_data");
	XEN_HYPER_MEMBER_OFFSET_INIT(schedule_data_schedule_lock, "schedule_data", "schedule_lock");
	XEN_HYPER_MEMBER_OFFSET_INIT(schedule_data_curr, "schedule_data", "curr");
	if (MEMBER_EXISTS("schedule_data", "idle"))
		XEN_HYPER_MEMBER_OFFSET_INIT(schedule_data_idle, "schedule_data", "idle");
	XEN_HYPER_MEMBER_OFFSET_INIT(schedule_data_sched_priv, "schedule_data", "sched_priv");
	XEN_HYPER_MEMBER_OFFSET_INIT(schedule_data_s_timer, "schedule_data", "s_timer");
	XEN_HYPER_MEMBER_OFFSET_INIT(schedule_data_tick, "schedule_data", "tick");

	XEN_HYPER_STRUCT_SIZE_INIT(scheduler, "scheduler");
	XEN_HYPER_MEMBER_OFFSET_INIT(scheduler_name, "scheduler", "name");
	XEN_HYPER_MEMBER_OFFSET_INIT(scheduler_opt_name, "scheduler", "opt_name");
	XEN_HYPER_MEMBER_OFFSET_INIT(scheduler_sched_id, "scheduler", "sched_id");
	XEN_HYPER_MEMBER_OFFSET_INIT(scheduler_init, "scheduler", "init");
	XEN_HYPER_MEMBER_OFFSET_INIT(scheduler_tick, "scheduler", "tick");
	XEN_HYPER_MEMBER_OFFSET_INIT(scheduler_init_vcpu, "scheduler", "init_vcpu");
	XEN_HYPER_MEMBER_OFFSET_INIT(scheduler_destroy_domain, "scheduler", "destroy_domain");
	XEN_HYPER_MEMBER_OFFSET_INIT(scheduler_sleep, "scheduler", "sleep");
	XEN_HYPER_MEMBER_OFFSET_INIT(scheduler_wake, "scheduler", "wake");
	XEN_HYPER_MEMBER_OFFSET_INIT(scheduler_set_affinity, "scheduler", "set_affinity");
	XEN_HYPER_MEMBER_OFFSET_INIT(scheduler_do_schedule, "scheduler", "do_schedule");
	XEN_HYPER_MEMBER_OFFSET_INIT(scheduler_adjust, "scheduler", "adjust");
	XEN_HYPER_MEMBER_OFFSET_INIT(scheduler_dump_settings, "scheduler", "dump_settings");
	XEN_HYPER_MEMBER_OFFSET_INIT(scheduler_dump_cpu_state, "scheduler", "dump_cpu_state");

	xen_hyper_schedule_init();
}

/*
 * Do initialization for scheduler of Xen Hyper system here.
 */
#define XEN_HYPER_SCHEDULERS_ARRAY_CNT 10
#define XEN_HYPER_SCHEDULER_NAME 1024

static void
xen_hyper_schedule_init(void)
{
	ulong addr, opt_sched, schedulers, opt_name;
	long scheduler_opt_name;
	long schedulers_buf[XEN_HYPER_SCHEDULERS_ARRAY_CNT];
	struct xen_hyper_sched_context *schc;
	char *buf;
	char opt_name_buf[XEN_HYPER_OPT_SCHED_SIZE];
	int i, cpuid, flag;

	/* get scheduler information */
	if ((xhscht->scheduler_struct =
	    malloc(XEN_HYPER_SIZE(scheduler))) == NULL) {
		error(FATAL, "cannot malloc scheduler struct space.\n");
	}
	buf = GETBUF(XEN_HYPER_SCHEDULER_NAME);
	scheduler_opt_name = XEN_HYPER_OFFSET(scheduler_opt_name);
	if (symbol_exists("ops")) {
		if (!readmem(symbol_value("ops") + scheduler_opt_name, KVADDR,
		    &opt_sched, sizeof(ulong), "ops.opt_name",
		    RETURN_ON_ERROR)) {
			error(FATAL, "cannot read ops.opt_name.\n");
		}
	} else {
		opt_sched = symbol_value("opt_sched");
	}
	if (!readmem(opt_sched, KVADDR, xhscht->opt_sched,
	    XEN_HYPER_OPT_SCHED_SIZE, "opt_sched", RETURN_ON_ERROR)) {
		error(FATAL, "cannot read opt_sched.\n");
	}
	schedulers = symbol_value("schedulers");
	addr = schedulers;
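	/*
	 * The loop below scans the schedulers[] array in chunks of
	 * XEN_HYPER_SCHEDULERS_ARRAY_CNT pointers, comparing each
	 * scheduler's opt_name against the boot-time opt_sched string;
	 * a NULL entry terminates the array, so reaching one before a
	 * match is fatal.
	 */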
	while (xhscht->name == NULL) {
		if (!readmem(addr, KVADDR, schedulers_buf,
		    sizeof(long) * XEN_HYPER_SCHEDULERS_ARRAY_CNT,
		    "schedulers", RETURN_ON_ERROR)) {
			error(FATAL, "cannot read schedulers.\n");
		}
		for (i = 0; i < XEN_HYPER_SCHEDULERS_ARRAY_CNT; i++) {
			if (schedulers_buf[i] == 0) {
				error(FATAL, "schedule data not found.\n");
			}
			if (!readmem(schedulers_buf[i], KVADDR,
			    xhscht->scheduler_struct, XEN_HYPER_SIZE(scheduler),
			    "scheduler", RETURN_ON_ERROR)) {
				error(FATAL, "cannot read scheduler.\n");
			}
			opt_name = ULONG(xhscht->scheduler_struct +
				scheduler_opt_name);
			if (!readmem(opt_name, KVADDR, opt_name_buf,
			    XEN_HYPER_OPT_SCHED_SIZE, "opt_name", RETURN_ON_ERROR)) {
				error(FATAL, "cannot read opt_name.\n");
			}
			if (strncmp(xhscht->opt_sched, opt_name_buf,
			    XEN_HYPER_OPT_SCHED_SIZE))
				continue;
			xhscht->scheduler = schedulers_buf[i];
			xhscht->sched_id = INT(xhscht->scheduler_struct +
				XEN_HYPER_OFFSET(scheduler_sched_id));
			addr = ULONG(xhscht->scheduler_struct +
				XEN_HYPER_OFFSET(scheduler_name));
			if (!readmem(addr, KVADDR, buf, XEN_HYPER_SCHEDULER_NAME,
			    "scheduler_name", RETURN_ON_ERROR)) {
				error(FATAL, "cannot read scheduler_name.\n");
			}
			if (strlen(buf) >= XEN_HYPER_SCHEDULER_NAME) {
				error(FATAL, "scheduler_name is too long.\n");
			}
			if ((xhscht->name = malloc(strlen(buf) + 1)) == NULL) {
				error(FATAL, "cannot malloc scheduler_name space.\n");
			}
			BZERO(xhscht->name, strlen(buf) + 1);
			strncpy(xhscht->name, buf, strlen(buf));
			break;
		}
		addr += sizeof(long) * XEN_HYPER_SCHEDULERS_ARRAY_CNT;
	}
	FREEBUF(buf);

	/* get schedule_data information */
	if ((xhscht->sched_context_array =
	    malloc(sizeof(struct xen_hyper_sched_context) * XEN_HYPER_MAX_CPUS())) == NULL) {
		error(FATAL, "cannot malloc xen_hyper_sched_context struct space.\n");
	}
	BZERO(xhscht->sched_context_array,
		sizeof(struct xen_hyper_sched_context) * XEN_HYPER_MAX_CPUS());
	buf = GETBUF(XEN_HYPER_SIZE(schedule_data));
	if (symbol_exists("per_cpu__schedule_data")) {
		addr = symbol_value("per_cpu__schedule_data");
		flag = TRUE;
	} else {
		addr = symbol_value("schedule_data");
		flag = FALSE;
	}
	for_cpu_indexes(i, cpuid)
	{
		schc = &xhscht->sched_context_array[cpuid];
		if (flag) {
			schc->schedule_data =
				xen_hyper_per_cpu(addr, i);
		} else {
			schc->schedule_data = addr +
				XEN_HYPER_SIZE(schedule_data) * i;
		}
		if (!readmem(schc->schedule_data,
		    KVADDR, buf, XEN_HYPER_SIZE(schedule_data),
		    "schedule_data", RETURN_ON_ERROR)) {
			error(FATAL, "cannot read schedule_data.\n");
		}
		schc->cpu_id = cpuid;
		schc->curr = ULONG(buf + XEN_HYPER_OFFSET(schedule_data_curr));
		if (MEMBER_EXISTS("schedule_data", "idle"))
			schc->idle = ULONG(buf + XEN_HYPER_OFFSET(schedule_data_idle));
		else
			schc->idle = xht->idle_vcpu_array[cpuid];
		schc->sched_priv =
			ULONG(buf + XEN_HYPER_OFFSET(schedule_data_sched_priv));
		if (XEN_HYPER_VALID_MEMBER(schedule_data_tick))
			schc->tick = ULONG(buf + XEN_HYPER_OFFSET(schedule_data_tick));
	}
	FREEBUF(buf);
}
/*
 * This should be called after all initialization processes have finished.
 */
void
xen_hyper_post_init(void)
{
	struct xen_hyper_pcpu_context *pcc;
	int i, cpuid;

	/* set current vcpu to pcpu context */
	for_cpu_indexes(i, cpuid)
	{
		pcc = &xhpct->context_array[cpuid];
		if (!pcc->current_vcpu) {
			pcc->current_vcpu =
				xen_hyper_get_active_vcpu_from_pcpuid(cpuid);
		}
	}

	/* set pcpu last */
	if (!(xhpct->last =
	    xen_hyper_id_to_pcpu_context(XEN_HYPER_CRASHING_CPU()))) {
		xhpct->last = &xhpct->context_array[xht->cpu_idxs[0]];
	}

	/* set vcpu last */
	if (xhpct->last) {
		xhvct->last =
			xen_hyper_vcpu_to_vcpu_context(xhpct->last->current_vcpu);
		/* set crashing vcpu */
		xht->crashing_vcc = xhvct->last;
	}
	if (!xhvct->last) {
		xhvct->last = xhvct->vcpu_context_arrays->context_array;
	}

	/* set domain last */
	if (xhvct->last) {
		xhdt->last =
			xen_hyper_domain_to_domain_context(xhvct->last->domain);
	}
	if (!xhdt->last) {
		xhdt->last = xhdt->context_array;
	}
}

/*
 * Do initialization for dump information here.
 */
void
xen_hyper_dumpinfo_init(void)
{
	Elf32_Nhdr *note;
	char *buf, *bp, *np, *upp;
	char *nccp, *xccp;
	ulong addr;
	long size;
	int i, cpuid, samp_cpuid;

	/*
	 * NOTE kakuma: It is not clear what kind of ELF note format
	 * each version of Xen uses, so we decide by checking whether
	 * particular symbols exist.
	 */
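	/*
	 * Summary of the detection below:
	 *   V1: "note_buf_t" exists
	 *   V2: "crash_note_xen_t" exists
	 *   V3: "crash_xen_core_t" and "crash_note_xen_core_t" exist
	 *   V4: "crash_xen_core_t" exists, "crash_note_xen_core_t" does not
	 */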
	if (STRUCT_EXISTS("note_buf_t"))
		xhdit->note_ver = XEN_HYPER_ELF_NOTE_V1;
	else if (STRUCT_EXISTS("crash_note_xen_t"))
		xhdit->note_ver = XEN_HYPER_ELF_NOTE_V2;
	else if (STRUCT_EXISTS("crash_xen_core_t")) {
		if (STRUCT_EXISTS("crash_note_xen_core_t"))
			xhdit->note_ver = XEN_HYPER_ELF_NOTE_V3;
		else
			xhdit->note_ver = XEN_HYPER_ELF_NOTE_V4;
	} else {
		error(WARNING, "found unsupported elf note format while checking xen dumpinfo.\n");
		return;
	}
	if (!xen_hyper_test_pcpu_id(XEN_HYPER_CRASHING_CPU())) {
		error(WARNING, "crashing_cpu not found.\n");
		return;
	}

	/* allocate a context area */
	size = sizeof(struct xen_hyper_dumpinfo_context) * machdep->get_smp_cpus();
	if ((xhdit->context_array = malloc(size)) == NULL) {
		error(FATAL, "cannot malloc dumpinfo table context space.\n");
	}
	BZERO(xhdit->context_array, size);
	size = sizeof(struct xen_hyper_dumpinfo_context_xen_core) * machdep->get_smp_cpus();
	if ((xhdit->context_xen_core_array = malloc(size)) == NULL) {
		error(FATAL, "cannot malloc dumpinfo table context_xen_core_array space.\n");
	}
	BZERO(xhdit->context_xen_core_array, size);

	if (symbol_exists("per_cpu__crash_notes"))
		addr = symbol_value("per_cpu__crash_notes");
	else
		get_symbol_data("crash_notes", sizeof(ulong), &addr);
	for (i = 0; i < machdep->get_smp_cpus(); i++) {
		ulong addr_notes;

		if (symbol_exists("per_cpu__crash_notes"))
			addr_notes = xen_hyper_per_cpu(addr, i);
		else
			addr_notes = addr + i * STRUCT_SIZE("crash_note_range_t") +
				MEMBER_OFFSET("crash_note_range_t", "start");
		if (xhdit->note_ver == XEN_HYPER_ELF_NOTE_V4) {
			if (!readmem(addr_notes, KVADDR, &(xhdit->context_array[i].note),
			    sizeof(ulong), "crash_notes", RETURN_ON_ERROR)) {
				error(WARNING, "cannot read crash_notes.\n");
				return;
			}
		} else {
			xhdit->context_array[i].note = addr_notes;
		}
	}

	if (xhdit->note_ver == XEN_HYPER_ELF_NOTE_V1) {
		xhdit->note_size = XEN_HYPER_SIZE(note_buf_t);
	} else if (xhdit->note_ver == XEN_HYPER_ELF_NOTE_V4) {
		xhdit->note_size = XEN_HYPER_ELF_NOTE_V4_NOTE_SIZE;
	} else {
		xhdit->note_size = XEN_HYPER_SIZE(crash_note_t);
	}

	/* read a sample note */
	buf = GETBUF(xhdit->note_size);
	if (xhdit->note_ver == XEN_HYPER_ELF_NOTE_V4)
		samp_cpuid = xht->cpu_idxs[0];
	else
		samp_cpuid = XEN_HYPER_CRASHING_CPU();
	xhdit->xen_info_cpu = samp_cpuid;
	if (!xen_hyper_fill_elf_notes(xhdit->context_array[samp_cpuid].note,
	    buf, XEN_HYPER_ELF_NOTE_FILL_T_NOTE)) {
		error(FATAL, "cannot read crash_notes.\n");
	}
	bp = buf;

	/* Get elf format information for each version. */
	switch (xhdit->note_ver) {
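	/*
	 * Note layout reminder: each entry is an Elf32_Nhdr (n_namesz,
	 * n_descsz, n_type) followed by the name, padded to a 4-byte
	 * boundary, then the descriptor. The V1 and V4 cases walk this
	 * layout by hand to compute the offsets and sizes used later.
	 */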
	case XEN_HYPER_ELF_NOTE_V1:
		/* core data */
		note = (Elf32_Nhdr *)bp;
		np = bp + sizeof(Elf32_Nhdr);
		upp = np + note->n_namesz;
		upp = (char *)roundup((ulong)upp, 4);
		xhdit->core_offset = (Elf_Word)((ulong)upp - (ulong)note);
		note = (Elf32_Nhdr *)(upp + note->n_descsz);
		/* cr3 data */
		np = (char *)note + sizeof(Elf32_Nhdr);
		upp = np + note->n_namesz;
		upp = (char *)roundup((ulong)upp, 4);
		upp = upp + note->n_descsz;
		xhdit->core_size = upp - bp;
		break;
	case XEN_HYPER_ELF_NOTE_V2:
		/* core data */
		xhdit->core_offset = XEN_HYPER_OFFSET(crash_note_core_t_desc);
		xhdit->core_size = XEN_HYPER_SIZE(crash_note_core_t);
		/* xen core */
		xhdit->xen_info_offset = XEN_HYPER_OFFSET(crash_note_xen_t_desc);
		xhdit->xen_info_size = XEN_HYPER_SIZE(crash_note_xen_t);
		break;
	case XEN_HYPER_ELF_NOTE_V3:
		/* core data */
		xhdit->core_offset = XEN_HYPER_OFFSET(crash_note_core_t_desc);
		xhdit->core_size = XEN_HYPER_SIZE(crash_note_core_t);
		/* xen core */
		xhdit->xen_core_offset = XEN_HYPER_OFFSET(crash_note_xen_core_t_desc);
		xhdit->xen_core_size = XEN_HYPER_SIZE(crash_note_xen_core_t);
		/* xen info */
		xhdit->xen_info_offset = XEN_HYPER_OFFSET(crash_note_xen_info_t_desc);
		xhdit->xen_info_size = XEN_HYPER_SIZE(crash_note_xen_info_t);
		break;
	case XEN_HYPER_ELF_NOTE_V4:
		/* core data */
		note = (Elf32_Nhdr *)bp;
		np = bp + sizeof(Elf32_Nhdr);
		upp = np + note->n_namesz;
		upp = (char *)roundup((ulong)upp, 4);
		xhdit->core_offset = (Elf_Word)((ulong)upp - (ulong)note);
		upp = upp + note->n_descsz;
		xhdit->core_size = (Elf_Word)((ulong)upp - (ulong)note);
		if (XEN_HYPER_ELF_NOTE_V4_NOTE_SIZE < xhdit->core_size + 32) {
			error(WARNING, "note size assumed by crash is incorrect (core data).\n");
			return;
		}
		/* xen core */
		note = (Elf32_Nhdr *)upp;
		np = (char *)note + sizeof(Elf32_Nhdr);
		upp = np + note->n_namesz;
		upp = (char *)roundup((ulong)upp, 4);
		xhdit->xen_core_offset = (Elf_Word)((ulong)upp - (ulong)note);
		upp = upp + note->n_descsz;
		xhdit->xen_core_size = (Elf_Word)((ulong)upp - (ulong)note);
		if (XEN_HYPER_ELF_NOTE_V4_NOTE_SIZE <
		    xhdit->core_size + xhdit->xen_core_size + 32) {
			error(WARNING, "note size assumed by crash is incorrect (xen core).\n");
			return;
		}
		/* xen info */
		note = (Elf32_Nhdr *)upp;
		np = (char *)note + sizeof(Elf32_Nhdr);
		upp = np + note->n_namesz;
		upp = (char *)roundup((ulong)upp, 4);
		xhdit->xen_info_offset = (Elf_Word)((ulong)upp - (ulong)note);
		upp = upp + note->n_descsz;
		xhdit->xen_info_size = (Elf_Word)((ulong)upp - (ulong)note);
		if (XEN_HYPER_ELF_NOTE_V4_NOTE_SIZE <
		    xhdit->core_size + xhdit->xen_core_size + xhdit->xen_info_size) {
			error(WARNING, "note size assumed by crash is incorrect (xen info).\n");
			return;
		}
		xhdit->note_size = xhdit->core_size + xhdit->xen_core_size + xhdit->xen_info_size;
		break;
	default:
		error(FATAL, "logic error in checking elf note format.\n");
	}

	/* fill xen info context. */
	if (xhdit->note_ver >= XEN_HYPER_ELF_NOTE_V3) {
		if ((xhdit->crash_note_xen_info_ptr =
		    malloc(xhdit->xen_info_size)) == NULL) {
			error(FATAL, "cannot malloc dumpinfo table "
				"crash_note_xen_info_ptr space.\n");
		}
		memcpy(xhdit->crash_note_xen_info_ptr,
			bp + xhdit->core_size + xhdit->xen_core_size,
			xhdit->xen_info_size);
		xhdit->context_xen_info.note =
			xhdit->context_array[samp_cpuid].note +
			xhdit->core_size + xhdit->xen_core_size;
		xhdit->context_xen_info.pcpu_id = samp_cpuid;
		xhdit->context_xen_info.crash_xen_info_ptr =
			xhdit->crash_note_xen_info_ptr + xhdit->xen_info_offset;
	}

	/* allocate note core */
	size = xhdit->core_size * XEN_HYPER_NR_PCPUS();
	if (!(xhdit->crash_note_core_array = malloc(size))) {
		error(FATAL, "cannot malloc crash_note_core_array space.\n");
	}
	nccp = xhdit->crash_note_core_array;
	BZERO(nccp, size);
	xccp = NULL;

	/* allocate xen core */
	if (xhdit->note_ver >= XEN_HYPER_ELF_NOTE_V2) {
		size = xhdit->xen_core_size * XEN_HYPER_NR_PCPUS();
		if (!(xhdit->crash_note_xen_core_array = malloc(size))) {
			error(FATAL, "cannot malloc dumpinfo table "
				"crash_note_xen_core_array space.\n");
		}
		xccp = xhdit->crash_note_xen_core_array;
		BZERO(xccp, size);
	}

	/* fill a context. */
	for_cpu_indexes(i, cpuid)
	{
		/* fill core context. */
		addr = xhdit->context_array[cpuid].note;
		if (!xen_hyper_fill_elf_notes(addr, nccp,
		    XEN_HYPER_ELF_NOTE_FILL_T_CORE)) {
			error(FATAL, "cannot read elf note core.\n");
		}
		xhdit->context_array[cpuid].pcpu_id = cpuid;
		xhdit->context_array[cpuid].ELF_Prstatus_ptr =
			nccp + xhdit->core_offset;
		xhdit->context_array[cpuid].pr_reg_ptr =
			nccp + xhdit->core_offset +
			XEN_HYPER_OFFSET(ELF_Prstatus_pr_reg);

		/* Is there xen core data? */
		if (xhdit->note_ver < XEN_HYPER_ELF_NOTE_V2) {
			nccp += xhdit->core_size;
			continue;
		}
		if (xhdit->note_ver == XEN_HYPER_ELF_NOTE_V2 &&
		    cpuid != samp_cpuid) {
			xccp += xhdit->xen_core_size;
			nccp += xhdit->core_size;
			continue;
		}

		/* fill xen core context (elf note V2 or later). */
		xhdit->context_xen_core_array[cpuid].note =
			xhdit->context_array[cpuid].note +
			xhdit->core_size;
		xhdit->context_xen_core_array[cpuid].pcpu_id = cpuid;
		xhdit->context_xen_core_array[cpuid].crash_xen_core_ptr =
			xccp + xhdit->xen_core_offset;
		if (!xen_hyper_fill_elf_notes(xhdit->context_xen_core_array[cpuid].note,
		    xccp, XEN_HYPER_ELF_NOTE_FILL_T_XEN_CORE)) {
			error(FATAL, "cannot read elf note xen core.\n");
		}
		xccp += xhdit->xen_core_size;
		nccp += xhdit->core_size;
	}
	FREEBUF(buf);
}

/*
 * Get dump information context from physical cpu id.
 */
struct xen_hyper_dumpinfo_context *
xen_hyper_id_to_dumpinfo_context(uint id)
{
	if (!xen_hyper_test_pcpu_id(id))
		return NULL;
	return &xhdit->context_array[id];
}

/*
 * Get dump information context from ELF Note address.
 */
struct xen_hyper_dumpinfo_context *
xen_hyper_note_to_dumpinfo_context(ulong note)
{
	int i;

	for (i = 0; i < XEN_HYPER_MAX_CPUS(); i++) {
		if (note == xhdit->context_array[i].note) {
			return &xhdit->context_array[i];
		}
	}
	return NULL;
}

/*
 * Fill an ELF note buffer here.
 * This assumes that the variable note holds the top address of an
 * area of the specified type.
 */
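/*
 * Usage sketch (mirrors the calls made from xen_hyper_dumpinfo_init()):
 *
 *	char *buf = GETBUF(xhdit->core_size);
 *	if (!xen_hyper_fill_elf_notes(xhdit->context_array[cpuid].note,
 *	    buf, XEN_HYPER_ELF_NOTE_FILL_T_CORE))
 *		error(FATAL, "cannot read elf note core.\n");
 *	FREEBUF(buf);
 */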
char *
xen_hyper_fill_elf_notes(ulong note, char *note_buf, int type)
{
	long size;
	ulong rp = note;

	if (type == XEN_HYPER_ELF_NOTE_FILL_T_NOTE)
		size = xhdit->note_size;
	else if (type == XEN_HYPER_ELF_NOTE_FILL_T_CORE)
		size = xhdit->core_size;
	else if (type == XEN_HYPER_ELF_NOTE_FILL_T_XEN_CORE)
		size = xhdit->xen_core_size;
	else if (type == XEN_HYPER_ELF_NOTE_FILL_T_XEN_CORE_M)
		size = xhdit->core_size + xhdit->xen_core_size;
	else if (type == XEN_HYPER_ELF_NOTE_FILL_T_PRS)
		size = XEN_HYPER_SIZE(ELF_Prstatus);
	else if (type == XEN_HYPER_ELF_NOTE_FILL_T_XEN_REGS)
		size = XEN_HYPER_SIZE(xen_crash_xen_regs_t);
	else
		return NULL;

	if (!readmem(rp, KVADDR, note_buf, size,
	    "note_buf_t or crash_note_t", RETURN_ON_ERROR)) {
		if (type == XEN_HYPER_ELF_NOTE_FILL_T_NOTE)
			error(WARNING, "cannot fill note_buf_t or crash_note_t.\n");
		else if (type == XEN_HYPER_ELF_NOTE_FILL_T_CORE)
			error(WARNING, "cannot fill note core.\n");
		else if (type == XEN_HYPER_ELF_NOTE_FILL_T_XEN_CORE)
			error(WARNING, "cannot fill note xen core.\n");
		else if (type == XEN_HYPER_ELF_NOTE_FILL_T_XEN_CORE_M)
			error(WARNING, "cannot fill note core & xen core.\n");
		else if (type == XEN_HYPER_ELF_NOTE_FILL_T_PRS)
			error(WARNING, "cannot fill ELF_Prstatus.\n");
		else if (type == XEN_HYPER_ELF_NOTE_FILL_T_XEN_REGS)
			error(WARNING, "cannot fill xen_crash_xen_regs_t.\n");
		return NULL;
	}
	return note_buf;
}

/*
 * Get domain status.
 */
ulong
xen_hyper_domain_state(struct xen_hyper_domain_context *dc)
{
	if (ACTIVE()) {
		if (xen_hyper_read_domain_verify(dc->domain) == NULL) {
			return XEN_HYPER_DOMF_ERROR;
		}
	}
	return dc->domain_flags;
}

/*
 * Allocate domain context space.
 */
void
xen_hyper_refresh_domain_context_space(void)
{
	char *domain_struct;
	ulong domain, next, dom_xen, dom_io, idle_vcpu;
	struct xen_hyper_domain_context *dc;
	struct xen_hyper_domain_context *dom0;
	int i;

	if ((xhdt->flags & XEN_HYPER_DOMAIN_F_INIT) && !ACTIVE()) {
		return;
	}

	XEN_HYPER_RUNNING_DOMAINS() = XEN_HYPER_NR_DOMAINS() =
		xen_hyper_get_domains();
	xen_hyper_alloc_domain_context_space(XEN_HYPER_NR_DOMAINS());
	dc = xhdt->context_array;

	/* restore a dom_io context. */
	get_symbol_data("dom_io", sizeof(dom_io), &dom_io);
	if ((domain_struct = xen_hyper_read_domain(dom_io)) == NULL) {
		error(FATAL, "cannot read dom_io.\n");
	}
	xen_hyper_store_domain_context(dc, dom_io, domain_struct);
	xhdt->dom_io = dc;
	dc++;

	/* restore a dom_xen context. */
	get_symbol_data("dom_xen", sizeof(dom_xen), &dom_xen);
	if ((domain_struct = xen_hyper_read_domain(dom_xen)) == NULL) {
		error(FATAL, "cannot read dom_xen.\n");
	}
	xen_hyper_store_domain_context(dc, dom_xen, domain_struct);
	xhdt->dom_xen = dc;
	dc++;

	/* restore the idle domain contexts. */
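	/*
	 * Assumption behind the stride: a domain holds at most
	 * XEN_HYPER_MAX_VIRT_CPUS vcpus, so the idle vcpus are grouped
	 * into idle domains of that size; stepping idle_vcpu_array[] by
	 * that stride visits vcpu 0 of each idle domain.
	 */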
	for (i = 0; i < xht->idle_vcpu_size; i += XEN_HYPER_MAX_VIRT_CPUS) {
		idle_vcpu = xht->idle_vcpu_array[i];
		if (idle_vcpu == 0)
			break;
		if (!readmem(idle_vcpu + MEMBER_OFFSET("vcpu", "domain"),
		    KVADDR, &domain, sizeof(domain), "domain", RETURN_ON_ERROR)) {
			error(FATAL, "cannot read domain member in vcpu.\n");
		}
		if (CRASHDEBUG(1)) {
			fprintf(fp, "idle_vcpu=%lx, domain=%lx\n", idle_vcpu, domain);
		}
		if ((domain_struct = xen_hyper_read_domain(domain)) == NULL) {
			error(FATAL, "cannot read idle domain.\n");
		}
		xen_hyper_store_domain_context(dc, domain, domain_struct);
		if (i == 0)
			xhdt->idle_domain = dc;
		dc++;
	}

	/* restore domain contexts from dom0 symbol. */
	xen_hyper_get_domain_next(XEN_HYPER_DOMAIN_READ_DOM0, &next);
	domain = next;
	dom0 = dc;
	while ((domain_struct =
	    xen_hyper_get_domain_next(XEN_HYPER_DOMAIN_READ_NEXT, &next)) != NULL) {
		xen_hyper_store_domain_context(dc, domain, domain_struct);
		domain = next;
		dc++;
	}
	xhdt->dom0 = dom0;
}

/*
 * Get the number of domains.
 */
int
xen_hyper_get_domains(void)
{
	ulong domain, next_in_list;
	long domain_next_in_list;
	int i, j;

	get_symbol_data("dom0", sizeof(void *), &domain);
	domain_next_in_list = MEMBER_OFFSET("domain", "next_in_list");
	i = 0;
	while (domain != 0) {
		i++;
		next_in_list = domain + domain_next_in_list;
		if (!readmem(next_in_list, KVADDR, &domain, sizeof(void *),
		    "domain.next_in_list", RETURN_ON_ERROR)) {
			error(FATAL, "cannot read domain.next_in_list.\n");
		}
	}
	i += 2;		/* for dom_io, dom_xen */
	/* for idle domains */
	for (j = 0; j < xht->idle_vcpu_size; j += XEN_HYPER_MAX_VIRT_CPUS) {
		if (xht->idle_vcpu_array[j])
			i++;
	}
	return i;
}

/*
 * Get the next domain struct.
 * mod - XEN_HYPER_DOMAIN_READ_DOM0: start from the dom0 symbol
 *     - XEN_HYPER_DOMAIN_READ_INIT: start from xhdt->context_array
 *     - XEN_HYPER_DOMAIN_READ_NEXT: next
 */
char *
xen_hyper_get_domain_next(int mod, ulong *next)
{
	static int idx = 0;
	char *domain_struct;
	struct xen_hyper_domain_context *dc;

	switch (mod) {
	case XEN_HYPER_DOMAIN_READ_DOM0:
		/* Case of search from dom0 symbol. */
		idx = 0;
		if (xhdt->dom0) {
			*next = xhdt->dom0->domain;
		} else {
			get_symbol_data("dom0", sizeof(void *), next);
		}
		return xhdt->domain_struct;
		break;
	case XEN_HYPER_DOMAIN_READ_INIT:
		/* Case of search from context_array. */
		if (xhdt->context_array && xhdt->context_array->domain) {
			idx = 1;	/* this has a next index. */
			*next = xhdt->context_array->domain;
		} else {
			idx = 0;
			*next = 0;
			return NULL;
		}
		return xhdt->domain_struct;
		break;
	case XEN_HYPER_DOMAIN_READ_NEXT:
		break;
	default:
		error(FATAL, "xen_hyper_get_domain_next mod error: %d\n", mod);
		return NULL;
	}

	/* Finished search */
	if (!*next) {
		return NULL;
	}
	domain_struct = NULL;
	/* Is the domain context array valid? */
	if (idx) {
		if ((domain_struct =
		    xen_hyper_read_domain(*next)) == NULL) {
			error(FATAL, "cannot get next domain from domain context array.\n");
		}
		if (idx > XEN_HYPER_NR_DOMAINS()) {
			*next = 0;
		} else {
			dc = xhdt->context_array;
			dc += idx;
			*next = dc->domain;
			idx++;
		}
		return domain_struct;
	}

	/* Search from dom0 symbol. */
	if ((domain_struct =
	    xen_hyper_read_domain(*next)) == NULL) {
		error(FATAL, "cannot get next domain from dom0 symbol.\n");
	}
	*next = ULONG(domain_struct + XEN_HYPER_OFFSET(domain_next_in_list));
	return domain_struct;
}

/*
 * Convert a domain address to a domain id.
 */
domid_t
xen_hyper_domain_to_id(ulong domain)
{
	struct xen_hyper_domain_context *dc;

	/* Is the domain context array valid? */
	if (xhdt->context_array && xhdt->context_array->domain) {
		if ((dc = xen_hyper_domain_to_domain_context(domain)) == NULL) {
			return XEN_HYPER_DOMAIN_ID_INVALID;
		} else {
			return dc->domain_id;
		}
	} else {
		return XEN_HYPER_DOMAIN_ID_INVALID;
	}
}

/*
 * Get domain struct from id.
 */
char *
xen_hyper_id_to_domain_struct(domid_t id)
{
	char *domain_struct;
	struct xen_hyper_domain_context *dc;

	domain_struct = NULL;

	/* Is the domain context array valid? */
	if (xhdt->context_array && xhdt->context_array->domain) {
		if ((dc = xen_hyper_id_to_domain_context(id)) == NULL) {
			return NULL;
		} else {
			if ((domain_struct =
			    xen_hyper_read_domain(dc->domain)) == NULL) {
				error(FATAL, "cannot get domain from domain context array with id.\n");
			}
			return domain_struct;
		}
	} else {
		return NULL;
	}
}

/*
 * Get domain context from domain address.
 */
struct xen_hyper_domain_context *
xen_hyper_domain_to_domain_context(ulong domain)
{
	struct xen_hyper_domain_context *dc;
	int i;

	if (xhdt->context_array == NULL ||
	    xhdt->context_array->domain == 0) {
		return NULL;
	}
	if (!domain) {
		return NULL;
	}
	for (i = 0, dc = xhdt->context_array; i < XEN_HYPER_NR_DOMAINS();
	     i++, dc++) {
		if (domain == dc->domain) {
			return dc;
		}
	}
	return NULL;
}

/*
 * Get domain context from domain id.
 */
struct xen_hyper_domain_context *
xen_hyper_id_to_domain_context(domid_t id)
{
	struct xen_hyper_domain_context *dc;
	int i;

	if (xhdt->context_array == NULL ||
	    xhdt->context_array->domain == 0) {
		return NULL;
	}
	if (id == XEN_HYPER_DOMAIN_ID_INVALID) {
		return NULL;
	}
	for (i = 0, dc = xhdt->context_array; i < XEN_HYPER_NR_DOMAINS();
	     i++, dc++) {
		if (id == dc->domain_id) {
			return dc;
		}
	}
	return NULL;
}

/*
 * Store domain struct contents.
 */
struct xen_hyper_domain_context *
xen_hyper_store_domain_context(struct xen_hyper_domain_context *dc,
	ulong domain, char *dp)
{
	char *vcpup;
	unsigned int max_vcpus;
	unsigned int i;

	dc->domain = domain;
	BCOPY((char *)(dp + XEN_HYPER_OFFSET(domain_domain_id)),
		&dc->domain_id, sizeof(domid_t));
	dc->tot_pages = UINT(dp + XEN_HYPER_OFFSET(domain_tot_pages));
	dc->max_pages = UINT(dp + XEN_HYPER_OFFSET(domain_max_pages));
	dc->xenheap_pages = UINT(dp + XEN_HYPER_OFFSET(domain_xenheap_pages));
	dc->shared_info = ULONG(dp + XEN_HYPER_OFFSET(domain_shared_info));
	dc->sched_priv = ULONG(dp + XEN_HYPER_OFFSET(domain_sched_priv));
	dc->next_in_list = ULONG(dp + XEN_HYPER_OFFSET(domain_next_in_list));
	if (XEN_HYPER_VALID_MEMBER(domain_domain_flags))
		dc->domain_flags = ULONG(dp + XEN_HYPER_OFFSET(domain_domain_flags));
	else if (XEN_HYPER_VALID_MEMBER(domain_is_shut_down)) {
		dc->domain_flags = 0;
		if (XEN_HYPER_VALID_MEMBER(domain_is_hvm) &&
		    *(dp + XEN_HYPER_OFFSET(domain_is_hvm))) {
			dc->domain_flags |= XEN_HYPER_DOMS_HVM;
		}
		if (XEN_HYPER_VALID_MEMBER(domain_guest_type) &&
		    *(dp + XEN_HYPER_OFFSET(domain_guest_type))) {
			/*
			 * For now PVH and HVM are the same for crash,
			 * and 0 is PV.
			 */
			dc->domain_flags |= XEN_HYPER_DOMS_HVM;
		}
		if (*(dp + XEN_HYPER_OFFSET(domain_is_privileged))) {
			dc->domain_flags |= XEN_HYPER_DOMS_privileged;
		}
		if (*(dp + XEN_HYPER_OFFSET(domain_debugger_attached))) {
			dc->domain_flags |= XEN_HYPER_DOMS_debugging;
		}
		if (XEN_HYPER_VALID_MEMBER(domain_is_polling) &&
		    *(dp + XEN_HYPER_OFFSET(domain_is_polling))) {
			dc->domain_flags |= XEN_HYPER_DOMS_polling;
		}
		if (*(dp + XEN_HYPER_OFFSET(domain_is_paused_by_controller))) {
			dc->domain_flags |= XEN_HYPER_DOMS_ctrl_pause;
		}
		if (*(dp + XEN_HYPER_OFFSET(domain_is_dying))) {
			dc->domain_flags |= XEN_HYPER_DOMS_dying;
		}
		if (*(dp + XEN_HYPER_OFFSET(domain_is_shutting_down))) {
			dc->domain_flags |= XEN_HYPER_DOMS_shuttingdown;
		}
		if (*(dp + XEN_HYPER_OFFSET(domain_is_shut_down))) {
			dc->domain_flags |= XEN_HYPER_DOMS_shutdown;
		}
	} else {
		dc->domain_flags = XEN_HYPER_DOMF_ERROR;
	}
	dc->evtchn = ULONG(dp + XEN_HYPER_OFFSET(domain_evtchn));
	if (XEN_HYPER_VALID_MEMBER(domain_max_vcpus)) {
		max_vcpus = UINT(dp + XEN_HYPER_OFFSET(domain_max_vcpus));
	} else if (XEN_HYPER_VALID_SIZE(domain_vcpu)) {
		max_vcpus = XEN_HYPER_SIZE(domain_vcpu) / sizeof(void *);
	} else {
		max_vcpus = XEN_HYPER_MAX_VIRT_CPUS;
	}
	if (!(dc->vcpu = malloc(sizeof(ulong) * max_vcpus))) {
		error(FATAL, "cannot malloc vcpu array (%d VCPUs).",
			max_vcpus);
	}
  1201. if (MEMBER_TYPE("domain", "vcpu") == TYPE_CODE_ARRAY)
  1202. vcpup = dp + XEN_HYPER_OFFSET(domain_vcpu);
  1203. else {
  1204. ulong vcpu_array = ULONG(dp + XEN_HYPER_OFFSET(domain_vcpu));
  1205. if (vcpu_array && max_vcpus) {
  1206. if (!(vcpup =
  1207. malloc(max_vcpus * sizeof(void *)))) {
  1208. error(FATAL, "cannot malloc VCPU array for domain %lx.",
  1209. domain);
  1210. }
  1211. if (!readmem(vcpu_array, KVADDR,
  1212. vcpup, max_vcpus * sizeof(void*),
  1213. "VCPU array", RETURN_ON_ERROR)) {
  1214. error(FATAL, "cannot read VCPU array for domain %lx.",
  1215. domain);
  1216. }
  1217. } else {
  1218. vcpup = NULL;
  1219. }
  1220. }
  1221. if (vcpup) {
  1222. for (i = 0; i < max_vcpus; i++) {
  1223. dc->vcpu[i] = ULONG(vcpup + i*sizeof(void *));
  1224. if (dc->vcpu[i]) XEN_HYPER_NR_VCPUS_IN_DOM(dc)++;
  1225. }
  1226. if (vcpup != dp + XEN_HYPER_OFFSET(domain_vcpu)) {
  1227. free(vcpup);
  1228. }
  1229. }
  1230. return dc;
  1231. }
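
/*
 * Editorial note on the fallback above (not in the original source): on
 * hypervisors whose domain struct lacks the packed domain_flags word,
 * the per-field booleans are folded back into the legacy DOMS_* bits.
 * For example, a dying HVM guest would end up with
 * (XEN_HYPER_DOMS_HVM | XEN_HYPER_DOMS_dying) in dc->domain_flags, so
 * downstream display code can treat both layouts uniformly.
 */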

/*
 * Read domain struct from domain context.
 */
char *
xen_hyper_read_domain_from_context(struct xen_hyper_domain_context *dc)
{
	return xen_hyper_fill_domain_struct(dc->domain, xhdt->domain_struct);
}

/*
 * Read domain struct.
 */
char *
xen_hyper_read_domain(ulong domain)
{
	return xen_hyper_fill_domain_struct(domain, xhdt->domain_struct);
}

/*
 * Read domain struct for verification.
 */
char *
xen_hyper_read_domain_verify(ulong domain)
{
	return xen_hyper_fill_domain_struct(domain, xhdt->domain_struct_verify);
}

/*
 * Fill domain struct.
 */
char *
xen_hyper_fill_domain_struct(ulong domain, char *domain_struct)
{
	if (!readmem(domain, KVADDR, domain_struct,
	    XEN_HYPER_SIZE(domain), "fill_domain_struct",
	    ACTIVE() ? (RETURN_ON_ERROR|QUIET) : RETURN_ON_ERROR)) {
		error(WARNING, "cannot fill domain struct.\n");
		return NULL;
	}
	return domain_struct;
}
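
/*
 * Editorial note: on a live system (ACTIVE()) the target struct can
 * change or vanish mid-session, so readmem() runs with QUIET added to
 * RETURN_ON_ERROR; on a dump file a failed read is unexpected and is
 * reported at normal verbosity. The same pattern recurs in the vcpu
 * and pcpu fill routines below.
 */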

/*
 * Allocate domain context space.
 */
void
xen_hyper_alloc_domain_context_space(int domains)
{
	if (xhdt->context_array == NULL) {
		if (!(xhdt->context_array =
		    malloc(domains * sizeof(struct xen_hyper_domain_context)))) {
			error(FATAL, "cannot malloc context array (%d domains).",
				domains);
		}
		xhdt->context_array_cnt = domains;
	} else if (domains > xhdt->context_array_cnt) {
		struct xen_hyper_domain_context *dc;
		int i;

		/* free per-domain vcpu arrays; the BZERO below drops the pointers */
		for (dc = xhdt->context_array, i = 0;
		     i < xhdt->context_array_cnt; ++dc, ++i) {
			if (dc->vcpu)
				free(dc->vcpu);
		}
		if (!(xhdt->context_array =
		    realloc(xhdt->context_array,
			domains * sizeof(struct xen_hyper_domain_context)))) {
			error(FATAL, "cannot realloc context array (%d domains).",
				domains);
		}
		xhdt->context_array_cnt = domains;
	}
	BZERO(xhdt->context_array,
		domains * sizeof(struct xen_hyper_domain_context));
}
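
/*
 * Editorial note: this allocator only grows; a request for fewer
 * domains than are already allocated reuses the existing storage, and
 * in every case the first "domains" entries are zeroed, presumably for
 * refilling by xen_hyper_store_domain_context().
 */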

/*
 * Get vcpu status.
 */
int
xen_hyper_vcpu_state(struct xen_hyper_vcpu_context *vcc)
{
	if (ACTIVE()) {
		if (xen_hyper_read_vcpu_verify(vcc->vcpu) == NULL) {
			return XEN_HYPER_RUNSTATE_ERROR;
		}
	}
	return vcc->state;
}

/*
 * Refresh vcpu context space.
 */
void
xen_hyper_refresh_vcpu_context_space(void)
{
	struct xen_hyper_domain_context *dc;
	struct xen_hyper_vcpu_context_array *vcca;
	struct xen_hyper_vcpu_context *vcc;
	int i, j;

	if ((xhvct->flags & XEN_HYPER_VCPU_F_INIT) && !ACTIVE()) {
		return;
	}

	xen_hyper_alloc_vcpu_context_arrays_space(XEN_HYPER_NR_DOMAINS());
	for (i = 0, xht->vcpus = 0, dc = xhdt->context_array,
	     vcca = xhvct->vcpu_context_arrays;
	     i < XEN_HYPER_NR_DOMAINS(); i++, dc++, vcca++) {
		dc->vcpu_context_array = vcca;
		xen_hyper_alloc_vcpu_context_space(vcca,
			XEN_HYPER_NR_VCPUS_IN_DOM(dc));
		for (j = 0, vcc = vcca->context_array;
		     j < XEN_HYPER_NR_VCPUS_IN_DOM(dc); j++, vcc++) {
			xen_hyper_read_vcpu(dc->vcpu[j]);
			xen_hyper_store_vcpu_context(vcc, dc->vcpu[j],
				xhvct->vcpu_struct);
		}
		if (dc == xhdt->idle_domain) {
			xhvct->idle_vcpu_context_array = vcca;
		}
		xht->vcpus += vcca->context_array_cnt;
	}
}
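
/*
 * Editorial sketch of the refresh flow: for every cached domain context
 * the matching vcpu context array is (re)allocated, each vcpu struct is
 * read from the dump and stored, the idle domain's array is remembered
 * separately, and xht->vcpus accumulates the machine-wide vcpu count.
 */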

/*
 * Get vcpu context from vcpu address.
 */
struct xen_hyper_vcpu_context *
xen_hyper_vcpu_to_vcpu_context(ulong vcpu)
{
	struct xen_hyper_vcpu_context_array *vcca;
	struct xen_hyper_vcpu_context *vcc;
	int i, j;

	if (!vcpu) {
		return NULL;
	}
	for (i = 0, vcca = xhvct->vcpu_context_arrays;
	     i < xhvct->vcpu_context_arrays_cnt; i++, vcca++) {
		for (j = 0, vcc = vcca->context_array;
		     j < vcca->context_array_cnt; j++, vcc++) {
			if (vcpu == vcc->vcpu) {
				return vcc;
			}
		}
	}
	return NULL;
}

/*
 * Get vcpu context from domain (address or id) and vcpu id.
 */
struct xen_hyper_vcpu_context *
xen_hyper_id_to_vcpu_context(ulong domain, domid_t did, int vcid)
{
	struct xen_hyper_vcpu_context_array *vcca;
	struct xen_hyper_vcpu_context *vcc;
	int i;

	if (vcid == XEN_HYPER_VCPU_ID_INVALID) {
		return NULL;
	}
	/* prefer the domain address; fall back to the domain id. */
	if (!(vcca = xen_hyper_domain_to_vcpu_context_array(domain)) &&
	    !(vcca = xen_hyper_domid_to_vcpu_context_array(did))) {
		return NULL;
	}
	for (i = 0, vcc = vcca->context_array;
	     i < vcca->context_array_cnt; i++, vcc++) {
		if (vcid == vcc->vcpu_id) {
			return vcc;
		}
	}
	return NULL;
}
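
/*
 * Illustrative usage (editorial): with only a domain id at hand, a
 * caller can pass a zero domain address to force the domid fallback,
 * e.g.
 *
 *	vcc = xen_hyper_id_to_vcpu_context(0, did, 0);
 *
 * to fetch vcpu 0 of domain "did".
 */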

/*
 * Get pointer of a vcpu context array from domain address.
 */
struct xen_hyper_vcpu_context_array *
xen_hyper_domain_to_vcpu_context_array(ulong domain)
{
	struct xen_hyper_domain_context *dc;

	if (!(dc = xen_hyper_domain_to_domain_context(domain))) {
		return NULL;
	}
	return dc->vcpu_context_array;
}

/*
 * Get pointer of a vcpu context array from domain id.
 */
struct xen_hyper_vcpu_context_array *
xen_hyper_domid_to_vcpu_context_array(domid_t id)
{
	struct xen_hyper_domain_context *dc;

	if (!(dc = xen_hyper_id_to_domain_context(id))) {
		return NULL;
	}
	return dc->vcpu_context_array;
}

/*
 * Store vcpu struct contents.
 */
struct xen_hyper_vcpu_context *
xen_hyper_store_vcpu_context(struct xen_hyper_vcpu_context *vcc,
       ulong vcpu, char *vcp)
{
	vcc->vcpu = vcpu;
	vcc->vcpu_id = INT(vcp + XEN_HYPER_OFFSET(vcpu_vcpu_id));
	vcc->processor = INT(vcp + XEN_HYPER_OFFSET(vcpu_processor));
	vcc->vcpu_info = ULONG(vcp + XEN_HYPER_OFFSET(vcpu_vcpu_info));
	vcc->domain = ULONG(vcp + XEN_HYPER_OFFSET(vcpu_domain));
	vcc->next_in_list = ULONG(vcp + XEN_HYPER_OFFSET(vcpu_next_in_list));
	if (XEN_HYPER_VALID_MEMBER(vcpu_sleep_tick))
		vcc->sleep_tick = ULONG(vcp + XEN_HYPER_OFFSET(vcpu_sleep_tick));
	vcc->sched_priv = ULONG(vcp + XEN_HYPER_OFFSET(vcpu_sched_priv));
	vcc->state = INT(vcp + XEN_HYPER_OFFSET(vcpu_runstate) +
		XEN_HYPER_OFFSET(vcpu_runstate_info_state));
	vcc->state_entry_time = ULONGLONG(vcp +
		XEN_HYPER_OFFSET(vcpu_runstate) +
		XEN_HYPER_OFFSET(vcpu_runstate_info_state_entry_time));
	vcc->runstate_guest = ULONG(vcp + XEN_HYPER_OFFSET(vcpu_runstate_guest));
	if (XEN_HYPER_VALID_MEMBER(vcpu_vcpu_flags))
		vcc->vcpu_flags = ULONG(vcp + XEN_HYPER_OFFSET(vcpu_vcpu_flags));
	else
		vcc->vcpu_flags = XEN_HYPER_VCPUF_ERROR;
	return vcc;
}
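
/*
 * Editorial note: the runstate fields are nested one struct deep, so
 * their locations are composed from two offsets, e.g.
 *
 *	vcc->state = INT(vcp + XEN_HYPER_OFFSET(vcpu_runstate) +
 *		XEN_HYPER_OFFSET(vcpu_runstate_info_state));
 *
 * i.e. the offset of the runstate member within the vcpu struct plus
 * the offset of the field within vcpu_runstate_info.
 */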

/*
 * Read vcpu struct from vcpu context.
 */
char *
xen_hyper_read_vcpu_from_context(struct xen_hyper_vcpu_context *vcc)
{
	return xen_hyper_fill_vcpu_struct(vcc->vcpu, xhvct->vcpu_struct);
}

/*
 * Read vcpu struct.
 */
char *
xen_hyper_read_vcpu(ulong vcpu)
{
	return xen_hyper_fill_vcpu_struct(vcpu, xhvct->vcpu_struct);
}

/*
 * Read vcpu struct for verification.
 */
char *
xen_hyper_read_vcpu_verify(ulong vcpu)
{
	return xen_hyper_fill_vcpu_struct(vcpu, xhvct->vcpu_struct_verify);
}

/*
 * Fill vcpu struct.
 */
char *
xen_hyper_fill_vcpu_struct(ulong vcpu, char *vcpu_struct)
{
	if (!readmem(vcpu, KVADDR, vcpu_struct,
	    XEN_HYPER_SIZE(vcpu), "fill_vcpu_struct",
	    ACTIVE() ? (RETURN_ON_ERROR|QUIET) : RETURN_ON_ERROR)) {
		error(WARNING, "cannot fill vcpu struct.\n");
		return NULL;
	}
	return vcpu_struct;
}

/*
 * Allocate vcpu context arrays space.
 */
void
xen_hyper_alloc_vcpu_context_arrays_space(int domains)
{
	struct xen_hyper_vcpu_context_array *vcca;

	if (xhvct->vcpu_context_arrays == NULL) {
		if (!(xhvct->vcpu_context_arrays =
		    malloc(domains * sizeof(struct xen_hyper_vcpu_context_array)))) {
			error(FATAL, "cannot malloc context arrays (%d domains).",
				domains);
		}
		BZERO(xhvct->vcpu_context_arrays,
			domains * sizeof(struct xen_hyper_vcpu_context_array));
		xhvct->vcpu_context_arrays_cnt = domains;
	} else if (domains > xhvct->vcpu_context_arrays_cnt) {
		if (!(xhvct->vcpu_context_arrays =
		    realloc(xhvct->vcpu_context_arrays,
			domains * sizeof(struct xen_hyper_vcpu_context_array)))) {
			error(FATAL, "cannot realloc context arrays (%d domains).",
				domains);
		}
		/*
		 * Zero only the newly extended tail, offsetting by the old
		 * count (offsetting by "domains" would run past the end of
		 * the allocation).
		 */
		vcca = xhvct->vcpu_context_arrays + xhvct->vcpu_context_arrays_cnt;
		BZERO(vcca, (domains - xhvct->vcpu_context_arrays_cnt) *
			sizeof(struct xen_hyper_vcpu_context_array));
		xhvct->vcpu_context_arrays_cnt = domains;
	}
}
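
/*
 * Worked example (editorial): growing from 4 to 6 domains reallocates
 * the array to 6 entries and zeroes entries 4 and 5 only; entries 0-3
 * keep their existing context_array pointers intact.
 */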

/*
 * Allocate vcpu context space.
 */
void
xen_hyper_alloc_vcpu_context_space(struct xen_hyper_vcpu_context_array *vcca, int vcpus)
{
	if (!vcpus) {
		if (vcca->context_array != NULL) {
			free(vcca->context_array);
			vcca->context_array = NULL;
		}
		vcca->context_array_cnt = vcpus;
	} else if (vcca->context_array == NULL) {
		if (!(vcca->context_array =
		    malloc(vcpus * sizeof(struct xen_hyper_vcpu_context)))) {
			error(FATAL, "cannot malloc context array (%d vcpus).",
				vcpus);
		}
		vcca->context_array_cnt = vcpus;
	} else if (vcpus > vcca->context_array_cnt) {
		/* element type matches the malloc above: xen_hyper_vcpu_context */
		if (!(vcca->context_array =
		    realloc(vcca->context_array,
			vcpus * sizeof(struct xen_hyper_vcpu_context)))) {
			error(FATAL, "cannot realloc context array (%d vcpus).",
				vcpus);
		}
		vcca->context_array_cnt = vcpus;
	}
	vcca->context_array_valid = vcpus;
	BZERO(vcca->context_array, vcpus * sizeof(struct xen_hyper_vcpu_context));
}
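
/*
 * Editorial note on the three cases above: a vcpu count of zero
 * releases the array, a NULL array is allocated fresh, and an existing
 * array is reallocated only when it must grow; the valid portion is
 * zeroed on every call before it is repopulated.
 */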

/*
 * Get pcpu context from pcpu id.
 */
struct xen_hyper_pcpu_context *
xen_hyper_id_to_pcpu_context(uint id)
{
	if (xhpct->context_array == NULL) {
		return NULL;
	}
	if (!xen_hyper_test_pcpu_id(id)) {
		return NULL;
	}
	return &xhpct->context_array[id];
}

/*
 * Get pcpu context from pcpu address.
 */
struct xen_hyper_pcpu_context *
xen_hyper_pcpu_to_pcpu_context(ulong pcpu)
{
	struct xen_hyper_pcpu_context *pcc;
	int i;
	uint cpuid;

	if (xhpct->context_array == NULL) {
		return NULL;
	}
	if (!pcpu) {
		return NULL;
	}
	for_cpu_indexes(i, cpuid)
	{
		pcc = &xhpct->context_array[cpuid];
		if (pcpu == pcc->pcpu) {
			return pcc;
		}
	}
	return NULL;
}

/*
 * Store pcpu struct contents.
 */
struct xen_hyper_pcpu_context *
xen_hyper_store_pcpu_context(struct xen_hyper_pcpu_context *pcc,
       ulong pcpu, char *pcp)
{
	pcc->pcpu = pcpu;
	pcc->processor_id =
		UINT(pcp + XEN_HYPER_OFFSET(cpu_info_processor_id));
	pcc->guest_cpu_user_regs = (ulong)(pcpu +
		XEN_HYPER_OFFSET(cpu_info_guest_cpu_user_regs));
	pcc->current_vcpu =
		ULONG(pcp + XEN_HYPER_OFFSET(cpu_info_current_vcpu));
	return pcc;
}

/*
 * Store init_tss contents.
 */
struct xen_hyper_pcpu_context *
xen_hyper_store_pcpu_context_tss(struct xen_hyper_pcpu_context *pcc,
       ulong init_tss, char *tss)
{
	int i;
	uint64_t *ist_p;

	pcc->init_tss = init_tss;
	if (machine_type("X86")) {
		pcc->sp.esp0 = ULONG(tss + XEN_HYPER_OFFSET(tss_struct_esp0));
	} else if (machine_type("X86_64")) {
		pcc->sp.rsp0 = ULONG(tss + XEN_HYPER_OFFSET(tss_struct_rsp0));
		ist_p = (uint64_t *)(tss + XEN_HYPER_OFFSET(tss_struct_ist));
		for (i = 0; i < XEN_HYPER_TSS_IST_MAX; i++, ist_p++) {
			pcc->ist[i] = ULONG(ist_p);
		}
	}
	return pcc;
}
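
/*
 * Editorial note: only the stack-related TSS fields are captured here,
 * presumably for later stack analysis: the ring-0 stack pointer (esp0
 * on X86, rsp0 on X86_64) and, on X86_64, all XEN_HYPER_TSS_IST_MAX
 * Interrupt Stack Table entries.
 */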

/*
 * Read pcpu struct.
 */
char *
xen_hyper_read_pcpu(ulong pcpu)
{
	return xen_hyper_fill_pcpu_struct(pcpu, xhpct->pcpu_struct);
}

/*
 * Fill pcpu struct.
 */
char *
xen_hyper_fill_pcpu_struct(ulong pcpu, char *pcpu_struct)
{
	if (!readmem(pcpu, KVADDR, pcpu_struct,
	    XEN_HYPER_SIZE(cpu_info), "fill_pcpu_struct",
	    ACTIVE() ? (RETURN_ON_ERROR|QUIET) : RETURN_ON_ERROR)) {
		error(WARNING, "cannot fill pcpu_struct.\n");
		return NULL;
	}
	return pcpu_struct;
}

/*
 * Allocate pcpu context space.
 */
void
xen_hyper_alloc_pcpu_context_space(int pcpus)
{
	if (xhpct->context_array == NULL) {
		if (!(xhpct->context_array =
		    malloc(pcpus * sizeof(struct xen_hyper_pcpu_context)))) {
			error(FATAL, "cannot malloc context array (%d pcpus).",
				pcpus);
		}
	}
	BZERO(xhpct->context_array, pcpus * sizeof(struct xen_hyper_pcpu_context));
}

/*
 * Fill cpu_data.
 */
char *
xen_hyper_x86_fill_cpu_data(int idx, char *cpuinfo_x86)
{
	ulong cpu_data;

	if (!xen_hyper_test_pcpu_id(idx) || !xht->cpu_data_address)
		return NULL;

	cpu_data = xht->cpu_data_address + XEN_HYPER_SIZE(cpuinfo_x86) * idx;
	if (!readmem(cpu_data, KVADDR, cpuinfo_x86, XEN_HYPER_SIZE(cpuinfo_x86),
	    "cpu_data", RETURN_ON_ERROR)) {
		error(WARNING, "cannot read cpu_data.\n");
		return NULL;
	}
	return cpuinfo_x86;
}

char *
xen_hyper_ia64_fill_cpu_data(int idx, char *cpuinfo_ia64)
{
	ulong cpu_data;

	if (!xen_hyper_test_pcpu_id(idx) || !xht->cpu_data_address)
		return NULL;

	cpu_data = xen_hyper_per_cpu(xht->cpu_data_address, idx);
	if (!readmem(cpu_data, KVADDR, cpuinfo_ia64, XEN_HYPER_SIZE(cpuinfo_ia64),
	    "cpu_data", RETURN_ON_ERROR)) {
		error(WARNING, "cannot read cpu_data.\n");
		return NULL;
	}
	return cpuinfo_ia64;
}
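
/*
 * Editorial note: the two variants above differ only in addressing.
 * The x86 version indexes a flat cpu_data[] array (base address plus
 * idx * struct size), while the ia64 version resolves the per-cpu
 * address through xen_hyper_per_cpu().
 */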

/*
 * Return whether vcpu is crashing.
 */
int
xen_hyper_is_vcpu_crash(struct xen_hyper_vcpu_context *vcc)
{
	if (vcc == xht->crashing_vcc)
		return TRUE;
	return FALSE;
}

/*
 * Test whether cpu for pcpu id exists.
 */
int
xen_hyper_test_pcpu_id(uint pcpu_id)
{
	ulong *cpumask = xht->cpumask;
	uint i, j;

	/* valid ids run from 0 to XEN_HYPER_MAX_CPUS() - 1 */
	if (pcpu_id == XEN_HYPER_PCPU_ID_INVALID ||
	    pcpu_id >= XEN_HYPER_MAX_CPUS()) {
		return FALSE;
	}

	i = pcpu_id / (sizeof(ulong) * 8);
	j = pcpu_id % (sizeof(ulong) * 8);
	cpumask += i;
	if (*cpumask & (1UL << j)) {
		return TRUE;
	} else {
		return FALSE;
	}
}
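
/*
 * Worked example (editorial): with 64-bit longs, pcpu_id 70 gives
 * i = 70 / 64 = 1 and j = 70 % 64 = 6, so the function tests bit 6 of
 * the second cpumask word.
 */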

/*
 * Calculate and return the uptime.
 */
ulonglong
xen_hyper_get_uptime_hyper(void)
{
	ulong jiffies, tmp1, tmp2;
	ulonglong jiffies_64, wrapped;

	if (symbol_exists("jiffies_64")) {
		get_symbol_data("jiffies_64", sizeof(ulonglong), &jiffies_64);
		wrapped = (jiffies_64 & 0xffffffff00000000ULL);
		if (wrapped) {
			wrapped -= 0x100000000ULL;
			jiffies_64 &= 0x00000000ffffffffULL;
			jiffies_64 |= wrapped;
			jiffies_64 += (ulonglong)(300 * machdep->hz);
		} else {
			tmp1 = (ulong)(uint)(-300 * machdep->hz);
			tmp2 = (ulong)jiffies_64;
			jiffies_64 = (ulonglong)(tmp2 - tmp1);
		}
	} else if (symbol_exists("jiffies")) {
		get_symbol_data("jiffies", sizeof(long), &jiffies);
		jiffies_64 = (ulonglong)jiffies;
	} else {
		jiffies_64 = 0;	/* hypervisor does not have uptime */
	}
	return jiffies_64;
}
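
/*
 * Editorial note: Linux initializes jiffies to -300*HZ so the 32-bit
 * counter wraps five minutes after boot. If the high word of
 * jiffies_64 is nonzero the wrap has already happened and the bias is
 * folded back in via "wrapped"; otherwise the tmp1/tmp2 arithmetic
 * subtracts the initial -300*HZ offset directly.
 */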

/*
 * Get cpu information.
 */
void
xen_hyper_get_cpu_info(void)
{
	ulong addr, init_begin, init_end;
	ulong *cpumask;
	uint *cpu_idx;
	int i, j, cpus;

	XEN_HYPER_STRUCT_SIZE_INIT(cpumask_t, "cpumask_t");
	if (symbol_exists("nr_cpu_ids"))
		get_symbol_data("nr_cpu_ids", sizeof(uint), &xht->max_cpus);
	else {
		init_begin = symbol_value("__init_begin");
		init_end = symbol_value("__init_end");
		addr = symbol_value("max_cpus");

		if (addr >= init_begin && addr < init_end)
			xht->max_cpus = XEN_HYPER_SIZE(cpumask_t) * 8;
		else {
			get_symbol_data("max_cpus", sizeof(xht->max_cpus),
				&xht->max_cpus);
			if (XEN_HYPER_SIZE(cpumask_t) * 8 > xht->max_cpus)
				xht->max_cpus = XEN_HYPER_SIZE(cpumask_t) * 8;
		}
	}
	if (xht->cpumask) {
		free(xht->cpumask);
	}
	if ((xht->cpumask = malloc(XEN_HYPER_SIZE(cpumask_t))) == NULL) {
		error(FATAL, "cannot malloc cpumask space.\n");
	}
	addr = symbol_value("cpu_present_map");
	if (!readmem(addr, KVADDR, xht->cpumask,
	    XEN_HYPER_SIZE(cpumask_t), "cpu_present_map", RETURN_ON_ERROR)) {
		error(FATAL, "cannot read cpu_present_map.\n");
	}
	if (xht->cpu_idxs) {
		free(xht->cpu_idxs);
	}
	if ((xht->cpu_idxs = malloc(sizeof(uint) * XEN_HYPER_MAX_CPUS())) == NULL) {
		error(FATAL, "cannot malloc cpu_idxs space.\n");
	}
	memset(xht->cpu_idxs, 0xff, sizeof(uint) * XEN_HYPER_MAX_CPUS());

	for (i = cpus = 0, cpumask = xht->cpumask, cpu_idx = xht->cpu_idxs;
	     i < (XEN_HYPER_SIZE(cpumask_t) / sizeof(ulong)); i++, cpumask++) {
		for (j = 0; j < sizeof(ulong) * 8; j++) {
			if (*cpumask & (1UL << j)) {
				*cpu_idx++ = i * sizeof(ulong) * 8 + j;
				cpus++;
			}
		}
	}
	xht->pcpus = cpus;
}
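
/*
 * Worked example (editorial): a cpu_present_map of 0x13 in the first
 * word yields cpu_idxs = {0, 1, 4} and xht->pcpus = 3; unused cpu_idxs
 * slots keep the 0xffffffff fill from the memset above.
 */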

/*
 * Calculate the number of physical cpus for x86.
 */
int
xen_hyper_x86_get_smp_cpus(void)
{
	if (xht->pcpus) {
		return xht->pcpus;
	}
	xen_hyper_get_cpu_info();
	return xht->pcpus;
}

/*
 * Calculate used memory size for x86.
 */
uint64_t
xen_hyper_x86_memory_size(void)
{
	ulong vaddr;

	if (machdep->memsize) {
		return machdep->memsize;
	}
	vaddr = symbol_value("total_pages");
	if (!readmem(vaddr, KVADDR, &xht->total_pages, sizeof(xht->total_pages),
	    "total_pages", RETURN_ON_ERROR)) {
		error(WARNING, "cannot read total_pages.\n");
	}
	xht->sys_pages = xht->total_pages;
	machdep->memsize = (uint64_t)(xht->sys_pages) * (uint64_t)(machdep->pagesize);
	return machdep->memsize;
}

/*
 * Calculate the number of physical cpus for ia64.
 */
int
xen_hyper_ia64_get_smp_cpus(void)
{
	return xen_hyper_x86_get_smp_cpus();
}

/*
 * Calculate used memory size for ia64.
 */
uint64_t
xen_hyper_ia64_memory_size(void)
{
	return xen_hyper_x86_memory_size();
}

/*
 * Calculate and return the speed of the processor.
 */
ulong
xen_hyper_ia64_processor_speed(void)
{
	ulong mhz, proc_freq;

	if (machdep->mhz)
		return (machdep->mhz);

	mhz = 0;
	if (!xht->cpu_data_address ||
	    !XEN_HYPER_VALID_STRUCT(cpuinfo_ia64) ||
	    XEN_HYPER_INVALID_MEMBER(cpuinfo_ia64_proc_freq))
		return (machdep->mhz = mhz);

	readmem(xen_hyper_per_cpu(xht->cpu_data_address, xht->cpu_idxs[0]) +
		XEN_HYPER_OFFSET(cpuinfo_ia64_proc_freq),
		KVADDR, &proc_freq, sizeof(ulong),
		"cpuinfo_ia64 proc_freq", FAULT_ON_ERROR);
	mhz = proc_freq / 1000000;

	return (machdep->mhz = mhz);
}

/*
 * Print a string padded with spaces to the specified width.
 */
void
xen_hyper_fpr_indent(FILE *fp, int len, char *str1, char *str2, int flag)
{
	char buf[XEN_HYPER_CMD_BUFSIZE];
	int sl, r;
	char *s1, *s2;

	sl = strlen(str1);
	if (sl > len) {
		r = 0;
	} else {
		r = len - sl;
	}

	memset(buf, ' ', sizeof(buf));
	buf[r] = '\0';
	if (flag & XEN_HYPER_PRI_L) {
		s1 = str1;
		s2 = buf;
	} else {
		s1 = buf;
		s2 = str1;
	}
	if (str2) {
		fprintf(fp, "%s%s%s", s1, s2, str2);
	} else {
		fprintf(fp, "%s%s", s1, s2);
	}
	if (flag & XEN_HYPER_PRI_LF) {
		fprintf(fp, "\n");
	}
}
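
/*
 * Illustrative usage (editorial): with len = 10,
 *
 *	xen_hyper_fpr_indent(fp, 10, "PCPU", ":", XEN_HYPER_PRI_L);
 *
 * left-aligns "PCPU" in a 10-column field before printing ":", while
 * omitting XEN_HYPER_PRI_L right-aligns it; XEN_HYPER_PRI_LF appends
 * a newline.
 */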

/*
 * Get the active vcpu address for a pcpu id.
 */
ulong
xen_hyper_get_active_vcpu_from_pcpuid(ulong pcpuid)
{
	struct xen_hyper_pcpu_context *pcc;
	struct xen_hyper_vcpu_context_array *vcca;
	struct xen_hyper_vcpu_context *vcc;
	int i, j;

	if (!xen_hyper_test_pcpu_id(pcpuid))
		return 0;

	pcc = &xhpct->context_array[pcpuid];
	if (pcc->current_vcpu)
		return pcc->current_vcpu;

	/* fall back to scanning for a running vcpu on this processor */
	for (i = 0, vcca = xhvct->vcpu_context_arrays;
	     i < xhvct->vcpu_context_arrays_cnt; i++, vcca++) {
		for (j = 0, vcc = vcca->context_array;
		     j < vcca->context_array_cnt; j++, vcc++) {
			if (vcc->processor == pcpuid &&
			    vcc->state == XEN_HYPER_RUNSTATE_running) {
				return vcc->vcpu;
			}
		}
	}
	return 0;
}

ulong
xen_hyper_pcpu_to_active_vcpu(ulong pcpu)
{
	ulong vcpu;

	/* if pcpu is a vcpu address, return it as is. */
	if (pcpu & (~(PAGESIZE() - 1))) {
		return pcpu;
	}
	if (!(vcpu = XEN_HYPER_CURR_VCPU(pcpu)))
		error(FATAL, "invalid pcpu id\n");
	return vcpu;
}
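
/*
 * Editorial note: the argument is overloaded. Any value with bits set
 * above the page offset (i.e. >= PAGESIZE()) is assumed to already be
 * a kernel virtual vcpu address, while small values are treated as
 * pcpu ids and mapped through XEN_HYPER_CURR_VCPU().
 */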

void
xen_hyper_print_bt_header(FILE *out, ulong vcpu, int newline)
{
	struct xen_hyper_vcpu_context *vcc;

	if (newline)
		fprintf(out, "\n");

	vcc = xen_hyper_vcpu_to_vcpu_context(vcpu);
	if (!vcc)
		error(FATAL, "invalid vcpu\n");

	fprintf(out, "PCPU: %2d VCPU: %lx\n", vcc->processor, vcpu);
}
#endif