// capture_context_mac_test.cc
// Copyright 2014 The Crashpad Authors. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "client/capture_context_mac.h"

#include <mach/mach.h>
#include <stdint.h>

#include <algorithm>

#include "build/build_config.h"
#include "gtest/gtest.h"
#include "util/misc/address_sanitizer.h"
#include "util/misc/implicit_cast.h"
  22. namespace crashpad {
  23. namespace test {
  24. namespace {
  25. // If the context structure has fields that tell whether it’s valid, such as
  26. // magic numbers or size fields, sanity-checks those fields for validity with
  27. // fatal gtest assertions. For other fields, where it’s possible to reason about
  28. // their validity based solely on their contents, sanity-checks via nonfatal
  29. // gtest assertions.
  30. void SanityCheckContext(const NativeCPUContext& context) {
  31. #if defined(ARCH_CPU_X86)
  32. ASSERT_EQ(implicit_cast<thread_state_flavor_t>(context.tsh.flavor),
  33. implicit_cast<thread_state_flavor_t>(x86_THREAD_STATE32));
  34. ASSERT_EQ(implicit_cast<uint32_t>(context.tsh.count),
  35. implicit_cast<uint32_t>(x86_THREAD_STATE32_COUNT));
  36. #elif defined(ARCH_CPU_X86_64)
  37. ASSERT_EQ(implicit_cast<thread_state_flavor_t>(context.tsh.flavor),
  38. implicit_cast<thread_state_flavor_t>(x86_THREAD_STATE64));
  39. ASSERT_EQ(implicit_cast<uint32_t>(context.tsh.count),
  40. implicit_cast<uint32_t>(x86_THREAD_STATE64_COUNT));
  41. #endif
  42. #if defined(ARCH_CPU_X86_FAMILY)
  43. // The segment registers are only capable of storing 16-bit quantities, but
  44. // the context structure provides native integer-width fields for them. Ensure
  45. // that the high bits are all clear.
  46. //
  47. // Many bit positions in the flags register are reserved and will always read
  48. // a known value. Most reserved bits are always 0, but bit 1 is always 1.
  49. // Check that the reserved bits are all set to their expected values. Note
  50. // that the set of reserved bits may be relaxed over time with newer CPUs, and
  51. // that this test may need to be changed to reflect these developments. The
  52. // current set of reserved bits are 1, 3, 5, 15, and 22 and higher. See Intel
  53. // Software Developer’s Manual, Volume 1: Basic Architecture (253665-051),
  54. // 3.4.3 “EFLAGS Register”, and AMD Architecture Programmer’s Manual, Volume
  55. // 2: System Programming (24593-3.24), 3.1.6 “RFLAGS Register”.
  56. #if defined(ARCH_CPU_X86)
  57. EXPECT_EQ(context.uts.ts32.__cs & ~0xffff, 0u);
  58. EXPECT_EQ(context.uts.ts32.__ds & ~0xffff, 0u);
  59. EXPECT_EQ(context.uts.ts32.__es & ~0xffff, 0u);
  60. EXPECT_EQ(context.uts.ts32.__fs & ~0xffff, 0u);
  61. EXPECT_EQ(context.uts.ts32.__gs & ~0xffff, 0u);
  62. EXPECT_EQ(context.uts.ts32.__ss & ~0xffff, 0u);
  63. EXPECT_EQ(context.uts.ts32.__eflags & 0xffc0802a, 2u);
  64. #elif defined(ARCH_CPU_X86_64)
  65. EXPECT_EQ(context.uts.ts64.__cs & ~UINT64_C(0xffff), 0u);
  66. EXPECT_EQ(context.uts.ts64.__fs & ~UINT64_C(0xffff), 0u);
  67. EXPECT_EQ(context.uts.ts64.__gs & ~UINT64_C(0xffff), 0u);
  68. EXPECT_EQ(context.uts.ts64.__rflags & UINT64_C(0xffffffffffc0802a), 2u);
  69. #endif
  70. #endif
  71. }
  72. // A CPU-independent function to return the program counter.
  73. uintptr_t ProgramCounterFromContext(const NativeCPUContext& context) {
  74. #if defined(ARCH_CPU_X86)
  75. return context.uts.ts32.__eip;
  76. #elif defined(ARCH_CPU_X86_64)
  77. return context.uts.ts64.__rip;
  78. #endif
  79. }
  80. // A CPU-independent function to return the stack pointer.
  81. uintptr_t StackPointerFromContext(const NativeCPUContext& context) {
  82. #if defined(ARCH_CPU_X86)
  83. return context.uts.ts32.__esp;
  84. #elif defined(ARCH_CPU_X86_64)
  85. return context.uts.ts64.__rsp;
  86. #endif
  87. }
  88. void TestCaptureContext() {
  89. NativeCPUContext context_1;
  90. CaptureContext(&context_1);
  91. {
  92. SCOPED_TRACE("context_1");
  93. ASSERT_NO_FATAL_FAILURE(SanityCheckContext(context_1));
  94. }
  95. // The program counter reference value is this function’s address. The
  96. // captured program counter should be slightly greater than or equal to the
  97. // reference program counter.
  98. uintptr_t pc = ProgramCounterFromContext(context_1);
  99. #if !defined(ADDRESS_SANITIZER)
  100. // AddressSanitizer can cause enough code bloat that the “nearby” check would
  101. // likely fail.
  102. const uintptr_t kReferencePC =
  103. reinterpret_cast<uintptr_t>(TestCaptureContext);
  104. EXPECT_LT(pc - kReferencePC, 64u);
  105. #endif // !defined(ADDRESS_SANITIZER)
  106. // Declare sp and context_2 here because all local variables need to be
  107. // declared before computing the stack pointer reference value, so that the
  108. // reference value can be the lowest value possible.
  109. uintptr_t sp;
  110. NativeCPUContext context_2;
  111. // The stack pointer reference value is the lowest address of a local variable
  112. // in this function. The captured program counter will be slightly less than
  113. // or equal to the reference stack pointer.
  114. const uintptr_t kReferenceSP =
  115. std::min(std::min(reinterpret_cast<uintptr_t>(&context_1),
  116. reinterpret_cast<uintptr_t>(&context_2)),
  117. std::min(reinterpret_cast<uintptr_t>(&pc),
  118. reinterpret_cast<uintptr_t>(&sp)));
  119. sp = StackPointerFromContext(context_1);
  120. EXPECT_LT(kReferenceSP - sp, 512u);
  121. // Capture the context again, expecting that the stack pointer stays the same
  122. // and the program counter increases. Strictly speaking, there’s no guarantee
  123. // that these conditions will hold, although they do for known compilers even
  124. // under typical optimization.
  125. CaptureContext(&context_2);
  126. {
  127. SCOPED_TRACE("context_2");
  128. ASSERT_NO_FATAL_FAILURE(SanityCheckContext(context_2));
  129. }
  130. EXPECT_EQ(StackPointerFromContext(context_2), sp);
  131. EXPECT_GT(ProgramCounterFromContext(context_2), pc);
  132. }
  133. TEST(CaptureContextMac, CaptureContext) {
  134. ASSERT_NO_FATAL_FAILURE(TestCaptureContext());
  135. }
  136. } // namespace
  137. } // namespace test
  138. } // namespace crashpad