#------------------------------------------------------------------------------
#
# Copyright (c) 2009 - 2016, Intel Corporation. All rights reserved.<BR>
# This program and the accompanying materials
# are licensed and made available under the terms and conditions of the BSD License
# which accompanies this distribution. The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php.
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#
# Module Name:
#
#   SmiEntry.S
#
# Abstract:
#
#   Code template of the SMI handler for a particular processor
#
#------------------------------------------------------------------------------

ASM_GLOBAL  ASM_PFX(gcSmiHandlerTemplate)
ASM_GLOBAL  ASM_PFX(gcSmiHandlerSize)
ASM_GLOBAL  ASM_PFX(gSmiCr3)
ASM_GLOBAL  ASM_PFX(gSmiStack)
ASM_GLOBAL  ASM_PFX(gSmbase)
ASM_GLOBAL  ASM_PFX(mXdSupported)
ASM_GLOBAL  ASM_PFX(gSmiHandlerIdtr)

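#
# Note: gSmbase, gSmiStack and gSmiCr3 are data fields embedded in the code
# stream below; the SMM CPU driver patches them before it copies this template
# to each processor's SMI entry point at SMBASE + 0x8000.
#
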
.equ            MSR_IA32_MISC_ENABLE, 0x1A0
.equ            MSR_EFER, 0xc0000080
.equ            MSR_EFER_XD, 0x800

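#
# BIT2 (0x4) is used below to test MSR_IA32_MISC_ENABLE[34], the XD Disable
# control, which appears as bit 2 of the upper dword returned in EDX by rdmsr.
# It is defined here on the assumption that no build-provided definition is
# visible to this file.
#
.equ            BIT2, 0x4
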
#
# Constants relating to PROCESSOR_SMM_DESCRIPTOR
#
.equ            DSC_OFFSET, 0xfb00
.equ            DSC_GDTPTR, 0x30
.equ            DSC_GDTSIZ, 0x38
.equ            DSC_CS, 14
.equ            DSC_DS, 16
.equ            DSC_SS, 18
.equ            DSC_OTHERSEG, 20
#
# Constants relating to CPU State Save Area
#
.equ            SSM_DR6, 0xffd0
.equ            SSM_DR7, 0xffc8

.equ            PROTECT_MODE_CS, 0x08
.equ            PROTECT_MODE_DS, 0x20
.equ            LONG_MODE_CS, 0x38
.equ            TSS_SEGMENT, 0x40
.equ            GDT_SIZE, 0x50

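#
# These selectors index the SMM GDT installed by this module; they must stay
# in sync with that GDT's layout.
#
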
    .text

ASM_PFX(gcSmiHandlerTemplate):

_SmiEntryPoint:
    #
    # The encoding of BX in 16-bit addressing mode is the same as of RDI in 64-
    # bit addressing mode. And that coincidence has been used in the following
    # "64-bit like" 16-bit code. Be aware that once RDI is referenced as a
    # base address register, it is actually BX that is referenced.
    #
    .byte      0xbb                     # mov bx, imm16
    .word      _GdtDesc - _SmiEntryPoint + 0x8000
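    #
    # BX now holds the offset of _GdtDesc from the CS base (SMBASE), because
    # the SMI entry point always sits at SMBASE + 0x8000.
    #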
    #
    # fix GDT descriptor
    #
    .byte      0x2e,0xa1                # mov ax, cs:[offset16]
    .word      DSC_OFFSET + DSC_GDTSIZ
    .byte      0x48                     # dec ax
    .byte      0x2e
    movl       %eax, (%rdi)             # mov cs:[bx], ax
    .byte      0x66,0x2e,0xa1           # mov eax, cs:[offset16]
    .word      DSC_OFFSET + DSC_GDTPTR
    .byte      0x2e
    movw       %ax, 2(%rdi)
    .byte      0x66,0x2e
    lgdt       (%rdi)
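    #
    # The two stores above fill the 6-byte GDT pseudo-descriptor at _GdtDesc:
    # a 16-bit limit (the GDT size from the PROCESSOR_SMM_DESCRIPTOR minus 1)
    # at offset 0 and the 32-bit GDT base at offset 2. The second store is
    # written as movw so it assembles with a 0x66 prefix, which in 16-bit mode
    # turns it into a 32-bit store of EAX. lgdt cs:[bx] then loads it.
    #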
    #
    # Patch ProtectedMode Segment
    #
    .byte      0xb8
    .word      PROTECT_MODE_CS
    .byte      0x2e
    movl       %eax, -2(%rdi)
    #
    # Patch ProtectedMode entry
    #
    .byte      0x66, 0xbf               # mov edi, SMBASE
ASM_PFX(gSmbase): .space 4
    lea        ((ProtectedMode - _SmiEntryPoint) + 0x8000)(%edi), %ax
    .byte      0x2e
    movw       %ax, -6(%rdi)
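    #
    # The far jump below is assembled as the bytes 0x66, 0xea followed by six
    # bytes of space located immediately before _GdtDesc. The stores above
    # patch its operands through negative offsets from BX: cs:[bx - 6] gets
    # the 32-bit address of ProtectedMode (SMBASE + 0x8000 + offset, computed
    # by the lea), and cs:[bx - 2] gets the PROTECT_MODE_CS selector.
    #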
    #
    # Switch into ProtectedMode
    #
    movq       %cr0, %rbx
    .byte      0x66
    andl       $0x9ffafff3, %ebx
    .byte      0x66
    orl        $0x00000023, %ebx

    movq       %rbx, %cr0
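    #
    # The AND mask 0x9ffafff3 clears CD, NW, AM, WP, TS and EM in CR0, and the
    # OR mask 0x23 sets PE, MP and NE, so the patched far jump below switches
    # execution to ProtectedMode in 32-bit protected mode.
    #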
    .byte      0x66, 0xea
    .space     6

_GdtDesc:   .space 6

ProtectedMode:
    movw       $PROTECT_MODE_DS, %ax
    movl       %eax, %ds
    movl       %eax, %es
    movl       %eax, %fs
    movl       %eax, %gs
    movl       %eax, %ss
    .byte      0xbc                     # mov esp, imm32
ASM_PFX(gSmiStack): .space 4
    jmp        ProtFlatMode

ProtFlatMode:
    .byte      0xb8
ASM_PFX(gSmiCr3): .space 4
    movq       %rax, %cr3
    movl       $0x668, %eax             # as cr4.PGE is not set here, refresh cr3
    movq       %rax, %cr4               # in PreModifyMtrrs() to flush TLB.
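    #
    # 0x668 sets CR4.DE, PAE, MCE, OSFXSR and OSXMMEXCPT; PAE must be on
    # before paging is enabled for long mode. The 0xb8 byte above is the
    # opcode of "mov eax, imm32", so gSmiCr3 supplies the page-table base
    # loaded into CR3.
    #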
# Load TSS
    subl       $8, %esp                 # reserve room in stack
    sgdt       (%rsp)
    movl       2(%rsp), %eax            # eax = GDT base
    addl       $8, %esp
    movb       $0x89, %dl
    movb       %dl, (TSS_SEGMENT + 5)(%rax) # clear busy flag
    movl       $TSS_SEGMENT, %eax
    ltr        %ax
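    #
    # Byte 5 of a descriptor is its type/access byte; writing 0x89 marks the
    # TSS descriptor as present and available (not busy) so that ltr does not
    # fault if the busy bit was left set by an earlier load.
    #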

# enable NXE if supported
    .byte      0xb0                     # mov al, imm8
ASM_PFX(mXdSupported): .byte 1
    cmpb       $0, %al
    jz         SkipNxe
#
# Check XD disable bit
#
    movl       $MSR_IA32_MISC_ENABLE, %ecx
    rdmsr
    subl       $4, %esp
    pushq      %rdx                     # save MSR_IA32_MISC_ENABLE[63-32]
    testl      $BIT2, %edx              # MSR_IA32_MISC_ENABLE[34]
    jz         L13
    andw       $0x0FFFB, %dx            # clear XD Disable bit if it is set
    wrmsr
L13:
    movl       $MSR_EFER, %ecx
    rdmsr
    orw        $MSR_EFER_XD, %ax        # enable NXE
    wrmsr
    jmp        NxeDone
SkipNxe:
    subl       $8, %esp
NxeDone:
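    #
    # When XD is supported, the XD Disable control (MSR_IA32_MISC_ENABLE[34],
    # BIT2 of EDX) is cleared if set so that EFER.NXE can be enabled; the
    # original upper dword is kept on the stack and restored just before RSM.
    # Both paths leave 8 bytes on the stack so the frame seen by _SmiHandler
    # is the same.
    #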

    #
    # Switch to LongMode
    #
    pushq      $LONG_MODE_CS            # push cs hardcoded here
    call       Base                     # push return address for retf later
Base:
    addl       $(LongMode - Base), (%rsp) # offset for far retf, seg is the 1st arg

    movl       $MSR_EFER, %ecx
    rdmsr
    orb        $1, %ah                  # enable LME
    wrmsr
    movq       %cr0, %rbx
    orl        $0x080010023, %ebx       # enable paging + WP + NE + MP + PE
    movq       %rbx, %cr0
    retf
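    #
    # The LONG_MODE_CS selector pushed earlier and the adjusted return address
    # form a far-return frame, so once EFER.LME and CR0.PG are set the retf
    # above lands in the 64-bit code segment at LongMode.
    #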
LongMode:                               # long mode (64-bit code) starts here
    movabsq    $ASM_PFX(gSmiHandlerIdtr), %rax
    lidt       (%rax)
    lea        (DSC_OFFSET)(%rdi), %ebx
    movw       DSC_DS(%rbx), %ax
    movl       %eax, %ds
    movw       DSC_OTHERSEG(%rbx), %ax
    movl       %eax, %es
    movl       %eax, %fs
    movl       %eax, %gs
    movw       DSC_SS(%rbx), %ax
    movl       %eax, %ss
#   jmp        _SmiHandler              # instruction is not needed

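    #
    # EDI still holds SMBASE, so EBX points at this CPU's
    # PROCESSOR_SMM_DESCRIPTOR; the data, extra and stack selectors loaded
    # above come from that structure.
    #
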
_SmiHandler:
    movq       8(%rsp), %rbx
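    #
    # 8(%rsp) skips the 8 bytes left by the NXE code above; the value loaded
    # into RBX is what the SMM CPU driver stored at the top of this CPU's SMI
    # stack (the CPU index), and it is forwarded in RCX to each call below.
    #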
    # Save FP registers

    subq       $0x200, %rsp
    .byte      0x48                     # FXSAVE64
    fxsave     (%rsp)

    addq       $-0x20, %rsp
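    #
    # The 0x48 byte is a REX.W prefix that turns the fxsave above into
    # FXSAVE64, filling the 512-byte save area just reserved on the stack.
    # The extra 0x20 bytes provide the shadow space required by the Microsoft
    # x64 calling convention for the calls that follow.
    #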

    movq       %rbx, %rcx
    movabsq    $ASM_PFX(CpuSmmDebugEntry), %rax
    call       *%rax

    movq       %rbx, %rcx
    movabsq    $ASM_PFX(SmiRendezvous), %rax
    call       *%rax

    movq       %rbx, %rcx
    movabsq    $ASM_PFX(CpuSmmDebugExit), %rax
    call       *%rax

    addq       $0x20, %rsp

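    #
    # The handler runs from a copy placed in SMRAM rather than from its link
    # address, so the C entry points are reached through absolute 64-bit
    # addresses in RAX instead of RIP-relative call displacements.
    #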
    #
    # Restore FP registers
    #
    .byte      0x48                     # FXRSTOR64
    fxrstor    (%rsp)

    addq       $0x200, %rsp

    movabsq    $ASM_PFX(mXdSupported), %rax
    movb       (%rax), %al
    cmpb       $0, %al
    jz         L16
    popq       %rdx                     # get saved MSR_IA32_MISC_ENABLE[63-32]
    testl      $BIT2, %edx
    jz         L16
    movl       $MSR_IA32_MISC_ENABLE, %ecx
    rdmsr
    orw        $BIT2, %dx               # set XD Disable bit if it was set before entering SMM
    wrmsr

L16:
    rsm
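    #
    # RSM restores the processor state saved when the SMI was taken, so any
    # bytes still left on the SMM stack by the paths above are irrelevant.
    #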

ASM_PFX(gcSmiHandlerSize):    .word      . - _SmiEntryPoint