VirtualBox

source: vbox/trunk/src/VBox/Devices/EFI/Firmware/UefiPayloadPkg/UefiPayloadEntry/Ia32/DxeLoadFunc.c@ 105381

Last change on this file since 105381 was 101291, checked in by vboxsync, 19 months ago

EFI/FirmwareNew: Make edk2-stable202308 build on all supported platforms (using gcc at least, msvc not tested yet), bugref:4643

  • Property svn:eol-style set to native
File size: 11.9 KB
Line 
1/** @file
2 Ia32-specific functionality for DxeLoad.
3
4Copyright (c) 2006 - 2020, Intel Corporation. All rights reserved.<BR>
5Copyright (c) 2017, AMD Incorporated. All rights reserved.<BR>
6
7SPDX-License-Identifier: BSD-2-Clause-Patent
8
9**/
10
11#include <PiPei.h>
12#include <Library/BaseLib.h>
13#include <Library/DebugLib.h>
14#include <Library/BaseMemoryLib.h>
15#include <Library/MemoryAllocationLib.h>
16#include <Library/PcdLib.h>
17#include <Library/HobLib.h>
18#include "VirtualMemory.h"
19#include "UefiPayloadEntry.h"
20
//
// Size of the stack allocated for DxeCore (128 KiB).
//
#define STACK_SIZE       0x20000
//
// Number of exception/interrupt vectors installed in the temporary x64 IDT.
//
#define IDT_ENTRY_COUNT  32

//
// Container for the temporary x64 IDT plus a stashed PEI services pointer.
//
typedef struct _X64_IDT_TABLE {
  //
  // Reserved 4 bytes preceding PeiService and IdtTable,
  // since IDT base address should be 8-byte alignment.
  //
  UINT32                   Reserved;
  //
  // EFI_PEI_SERVICES** stored immediately before the IDT so it can still be
  // located after the IDT register is re-written (see HandOffToDxeCore).
  //
  CONST EFI_PEI_SERVICES   **PeiService;
  X64_IDT_GATE_DESCRIPTOR  IdtTable[IDT_ENTRY_COUNT];
} X64_IDT_TABLE;
33
//
// Global Descriptor Table (GDT) loaded before switching to long mode.
// The selector noted on each entry is its byte offset into this table.
//
GLOBAL_REMOVE_IF_UNREFERENCED IA32_GDT  gGdtEntries[] = {
  /* selector { Global Segment Descriptor } */
  /* 0x00 */ {
    { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }
  },                                                            // null descriptor
  /* 0x08 */ {
    { 0xffff, 0, 0, 0x2, 1, 0, 1, 0xf, 0, 0, 1, 1, 0 }
  },                                                            // linear data segment descriptor
  /* 0x10 */ {
    { 0xffff, 0, 0, 0xf, 1, 0, 1, 0xf, 0, 0, 1, 1, 0 }
  },                                                            // linear code segment descriptor
  /* 0x18 */ {
    { 0xffff, 0, 0, 0x3, 1, 0, 1, 0xf, 0, 0, 1, 1, 0 }
  },                                                            // system data segment descriptor
  /* 0x20 */ {
    { 0xffff, 0, 0, 0xa, 1, 0, 1, 0xf, 0, 0, 1, 1, 0 }
  },                                                            // system code segment descriptor
  /* 0x28 */ {
    { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }
  },                                                            // spare segment descriptor
  /* 0x30 */ {
    { 0xffff, 0, 0, 0x2, 1, 0, 1, 0xf, 0, 0, 1, 1, 0 }
  },                                                            // system data segment descriptor
  /* 0x38 */ {
    { 0xffff, 0, 0, 0xa, 1, 0, 1, 0xf, 0, 1, 0, 1, 0 }
  },                                                            // system code segment descriptor
                                                                // NOTE(review): differs from 0x20 in the L/DB bits —
                                                                // presumably the 64-bit code segment referenced by
                                                                // SYS_CODE64_SEL; confirm against UefiPayloadEntry.h
  /* 0x40 */ {
    { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }
  },                                                            // spare segment descriptor
};
67
//
// IA32 Gdt register value: limit is the table size in bytes minus one,
// base is the linear address of gGdtEntries.
//
GLOBAL_REMOVE_IF_UNREFERENCED CONST IA32_DESCRIPTOR  gGdt = {
  sizeof (gGdtEntries) - 1,
  (UINTN)gGdtEntries
};

//
// IDT register value for the temporary x64 IDT. The limit covers
// IDT_ENTRY_COUNT gate descriptors; Base is 0 here and is patched at
// runtime by HandOffToDxeCore once the IDT has been built.
//
GLOBAL_REMOVE_IF_UNREFERENCED IA32_DESCRIPTOR  gLidtDescriptor = {
  sizeof (X64_IDT_GATE_DESCRIPTOR) * IDT_ENTRY_COUNT - 1,
  0
};
80
/**
  Allocates and fills in the Page Directory and Page Table Entries to
  establish a 4G page table.

  PAE paging layout built here: one PDPT page (PageMap) whose entries point
  to one page of Page Directory entries each; every Page Directory entry
  maps a 2 MB page, except pages overlapping the stack (or page 0 when NULL
  detection is on), which are split into 4 KB pages.

  @param[in] StackBase  Stack base address.
  @param[in] StackSize  Stack size.

  @return The address of page table (the PDPT page, suitable for CR3).

**/
UINTN
Create4GPageTablesIa32Pae (
  IN EFI_PHYSICAL_ADDRESS  StackBase,
  IN UINTN                 StackSize
  )
{
  UINT8                           PhysicalAddressBits;
  EFI_PHYSICAL_ADDRESS            PhysicalAddress;
  UINTN                           IndexOfPdpEntries;
  UINTN                           IndexOfPageDirectoryEntries;
  UINT32                          NumberOfPdpEntriesNeeded;
  PAGE_MAP_AND_DIRECTORY_POINTER  *PageMap;
  PAGE_MAP_AND_DIRECTORY_POINTER  *PageDirectoryPointerEntry;
  PAGE_TABLE_ENTRY                *PageDirectoryEntry;
  UINTN                           TotalPagesNum;
  UINTN                           PageAddress;
  UINT64                          AddressEncMask;

  //
  // Make sure AddressEncMask is contained to smallest supported address field
  //
  AddressEncMask = PcdGet64 (PcdPteMemoryEncryptionAddressOrMask) & PAGING_1G_ADDRESS_MASK_64;

  //
  // Map a fixed 4 GB (2^32) identity range.
  //
  PhysicalAddressBits = 32;

  //
  // Calculate the table entries needed: one PDPT entry per 1 GB (2^30).
  //
  NumberOfPdpEntriesNeeded = (UINT32)LShiftU64 (1, (PhysicalAddressBits - 30));

  //
  // One page for the PDPT plus one page of directory entries per PDPT entry.
  //
  TotalPagesNum = NumberOfPdpEntriesNeeded + 1;
  PageAddress   = (UINTN)AllocatePageTableMemory (TotalPagesNum);
  ASSERT (PageAddress != 0);

  PageMap      = (VOID *)PageAddress;
  PageAddress += SIZE_4KB;

  PageDirectoryPointerEntry = PageMap;
  PhysicalAddress           = 0;

  for (IndexOfPdpEntries = 0; IndexOfPdpEntries < NumberOfPdpEntriesNeeded; IndexOfPdpEntries++, PageDirectoryPointerEntry++) {
    //
    // Each Directory Pointer entry points to a page of Page Directory entries.
    // So allocate space for them and fill them in the IndexOfPageDirectoryEntries loop.
    //
    PageDirectoryEntry = (VOID *)PageAddress;
    PageAddress       += SIZE_4KB;

    //
    // Fill in a Page Directory Pointer Entry
    //
    PageDirectoryPointerEntry->Uint64       = (UINT64)(UINTN)PageDirectoryEntry | AddressEncMask;
    PageDirectoryPointerEntry->Bits.Present = 1;

    for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries < 512; IndexOfPageDirectoryEntries++, PageDirectoryEntry++, PhysicalAddress += SIZE_2MB) {
      if (  (IsNullDetectionEnabled () && (PhysicalAddress == 0))
         || (  (PhysicalAddress < StackBase + StackSize)
            && ((PhysicalAddress + SIZE_2MB) > StackBase)))
      {
        //
        // Need to split this 2M page that covers stack range
        // (or page 0 for NULL pointer detection).
        //
        Split2MPageTo4K (PhysicalAddress, (UINT64 *)PageDirectoryEntry, StackBase, StackSize, 0, 0);
      } else {
        //
        // Fill in the Page Directory entries: identity-mapped 2 MB page.
        //
        PageDirectoryEntry->Uint64         = (UINT64)PhysicalAddress | AddressEncMask;
        PageDirectoryEntry->Bits.ReadWrite = 1;
        PageDirectoryEntry->Bits.Present   = 1;
        PageDirectoryEntry->Bits.MustBe1   = 1;
      }
    }
  }

  //
  // Zero out any remaining (unused) PDPT entries so they are not-present.
  //
  for ( ; IndexOfPdpEntries < 512; IndexOfPdpEntries++, PageDirectoryPointerEntry++) {
    ZeroMem (
      PageDirectoryPointerEntry,
      sizeof (PAGE_MAP_AND_DIRECTORY_POINTER)
      );
  }

  //
  // Protect the page table by marking the memory used for page table to be
  // read-only.
  //
  EnablePageTableProtection ((UINTN)PageMap, FALSE);

  return (UINTN)PageMap;
}
181
182/**
183 The function will check if IA32 PAE is supported.
184
185 @retval TRUE IA32 PAE is supported.
186 @retval FALSE IA32 PAE is not supported.
187
188**/
189BOOLEAN
190IsIa32PaeSupport (
191 VOID
192 )
193{
194 UINT32 RegEax;
195 UINT32 RegEdx;
196 BOOLEAN Ia32PaeSupport;
197
198 Ia32PaeSupport = FALSE;
199 AsmCpuid (0x0, &RegEax, NULL, NULL, NULL);
200 if (RegEax >= 0x1) {
201 AsmCpuid (0x1, NULL, NULL, NULL, &RegEdx);
202 if ((RegEdx & BIT6) != 0) {
203 Ia32PaeSupport = TRUE;
204 }
205 }
206
207 return Ia32PaeSupport;
208}
209
210/**
211 The function will check if page table should be setup or not.
212
213 @retval TRUE Page table should be created.
214 @retval FALSE Page table should not be created.
215
216**/
217BOOLEAN
218ToBuildPageTable (
219 VOID
220 )
221{
222 if (!IsIa32PaeSupport ()) {
223 return FALSE;
224 }
225
226 if (IsNullDetectionEnabled ()) {
227 return TRUE;
228 }
229
230 if (PcdGet8 (PcdHeapGuardPropertyMask) != 0) {
231 return TRUE;
232 }
233
234 if (PcdGetBool (PcdCpuStackGuard)) {
235 return TRUE;
236 }
237
238 if (IsEnableNonExecNeeded ()) {
239 return TRUE;
240 }
241
242 return FALSE;
243}
244
/**
  Transfers control to DxeCore.

  This function performs a CPU architecture specific operations to execute
  the entry point of DxeCore with the parameters of HobList: it allocates
  the DxeCore stack, loads a long-mode GDT, builds identity-mapped page
  tables and a 32-entry x64 IDT, then switches to long mode and jumps to
  DxeCoreEntryPoint. It does not return.

  @param DxeCoreEntryPoint  The entry point of DxeCore.
  @param HobList            The start of HobList passed to DxeCore.

**/
VOID
HandOffToDxeCore (
  IN EFI_PHYSICAL_ADDRESS  DxeCoreEntryPoint,
  IN EFI_PEI_HOB_POINTERS  HobList
  )
{
  EFI_PHYSICAL_ADDRESS     BaseOfStack;
  EFI_PHYSICAL_ADDRESS     TopOfStack;
  UINTN                    PageTables;
  X64_IDT_GATE_DESCRIPTOR  *IdtTable;
  UINTN                    SizeOfTemplate;
  VOID                     *TemplateBase;
  EFI_PHYSICAL_ADDRESS     VectorAddress;
  UINT32                   Index;
  X64_IDT_TABLE            *IdtTableForX64;

  //
  // Clear page 0 and mark it as allocated if NULL pointer detection is enabled.
  //
  if (IsNullDetectionEnabled ()) {
    ClearFirst4KPage (HobList.Raw);
    BuildMemoryAllocationHob (0, EFI_PAGES_TO_SIZE (1), EfiBootServicesData);
  }

  BaseOfStack = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePages (EFI_SIZE_TO_PAGES (STACK_SIZE));
  ASSERT (BaseOfStack != 0);

  if (FeaturePcdGet (PcdDxeIplSwitchToLongMode)) {
    //
    // Compute the top of the stack we were allocated, which is used to load X64 dxe core.
    // Pre-allocate 32 bytes which conforms to x64 calling convention.
    //
    // The first four parameters to a function are passed in rcx, rdx, r8 and r9.
    // Any further parameters are pushed on the stack. Furthermore, space (4 * 8 bytes) for the
    // register parameters is reserved on the stack, in case the called function
    // wants to spill them; this is important if the function is variadic.
    //
    TopOfStack = BaseOfStack + EFI_SIZE_TO_PAGES (STACK_SIZE) * EFI_PAGE_SIZE - 32;

    //
    // x64 Calling Conventions requires that the stack must be aligned to 16 bytes
    //
    TopOfStack = (EFI_PHYSICAL_ADDRESS)(UINTN)ALIGN_POINTER (TopOfStack, 16);

    //
    // Load the GDT of Go64. Since the GDT of 32-bit Tiano locates in the BS_DATA
    // memory, it may be corrupted when copying FV to high-end memory
    //
    AsmWriteGdtr (&gGdt);
    //
    // Create page table and save PageMapLevel4 to CR3
    //
    PageTables = CreateIdentityMappingPageTables (BaseOfStack, STACK_SIZE, 0, 0);

    //
    // Paging might be already enabled. To avoid conflict configuration,
    // disable paging first anyway (clear CR0.PG, bit 31).
    //
    AsmWriteCr0 (AsmReadCr0 () & (~BIT31));
    AsmWriteCr3 (PageTables);

    //
    // Update the contents of BSP stack HOB to reflect the real stack info passed to DxeCore.
    //
    UpdateStackHob (BaseOfStack, STACK_SIZE);

    //
    // Get the per-vector interrupt handler template; each IDT entry gets
    // its own fixed-up copy of this template.
    //
    SizeOfTemplate = AsmGetVectorTemplatInfo (&TemplateBase);

    VectorAddress = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePages (EFI_SIZE_TO_PAGES (sizeof (X64_IDT_TABLE) + SizeOfTemplate * IDT_ENTRY_COUNT));
    ASSERT (VectorAddress != 0);

    //
    // Store EFI_PEI_SERVICES** in the 4 bytes immediately preceding IDT to avoid that
    // it may not be gotten correctly after IDT register is re-written.
    //
    IdtTableForX64             = (X64_IDT_TABLE *)(UINTN)VectorAddress;
    IdtTableForX64->PeiService = NULL;

    //
    // The vector stub copies live directly after the X64_IDT_TABLE.
    //
    VectorAddress = (EFI_PHYSICAL_ADDRESS)(UINTN)(IdtTableForX64 + 1);
    IdtTable      = IdtTableForX64->IdtTable;
    for (Index = 0; Index < IDT_ENTRY_COUNT; Index++) {
      //
      // 0x8e: present, DPL 0, interrupt gate.
      //
      IdtTable[Index].Ia32IdtEntry.Bits.GateType   = 0x8e;
      IdtTable[Index].Ia32IdtEntry.Bits.Reserved_0 = 0;
      IdtTable[Index].Ia32IdtEntry.Bits.Selector   = SYS_CODE64_SEL;

      //
      // Point the gate at this vector's template copy (64-bit offset split
      // across the three offset fields).
      //
      IdtTable[Index].Ia32IdtEntry.Bits.OffsetLow  = (UINT16)VectorAddress;
      IdtTable[Index].Ia32IdtEntry.Bits.OffsetHigh = (UINT16)(RShiftU64 (VectorAddress, 16));
      IdtTable[Index].Offset32To63                 = (UINT32)(RShiftU64 (VectorAddress, 32));
      IdtTable[Index].Reserved                     = 0;

      //
      // Copy the handler template and patch it with the vector number.
      //
      CopyMem ((VOID *)(UINTN)VectorAddress, TemplateBase, SizeOfTemplate);
      AsmVectorFixup ((VOID *)(UINTN)VectorAddress, (UINT8)Index);

      VectorAddress += SizeOfTemplate;
    }

    //
    // Patch the IDT descriptor base and load it.
    //
    gLidtDescriptor.Base = (UINTN)IdtTable;

    AsmWriteIdtr (&gLidtDescriptor);

    DEBUG ((
      DEBUG_INFO,
      "%a() Stack Base: 0x%lx, Stack Size: 0x%x\n",
      __func__,
      BaseOfStack,
      STACK_SIZE
      ));

    //
    // Go to Long Mode and transfer control to DxeCore.
    // Interrupts will not get turned on until the CPU AP is loaded.
    // Call x64 drivers passing in single argument, a pointer to the HOBs.
    //
    AsmEnablePaging64 (
      SYS_CODE64_SEL,
      DxeCoreEntryPoint,
      (EFI_PHYSICAL_ADDRESS)(UINTN)(HobList.Raw),
      0,
      TopOfStack
      );
  } else {
    // 32bit UEFI payload could be supported if required later.
    DEBUG ((DEBUG_ERROR, "NOT support 32bit UEFI payload\n"));
    ASSERT (FALSE);
    CpuDeadLoop ();
  }
}
Note: See TracBrowser for help on using the repository browser.

© 2025 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette