source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAllThrdFuncsBltIn.cpp@104255
(last change: r103181, "VMM/IEM: Liveness analysis, part 1. bugref:10372")

/* $Id: IEMAllThrdFuncsBltIn.cpp 103181 2024-02-03 02:13:06Z vboxsync $ */
/** @file
 * IEM - Instruction Decoding and Emulation, Built-in Threaded Functions.
 *
 * This is separate from IEMThreadedFunctions.cpp because it doesn't work
 * with IEM_WITH_OPAQUE_DECODER_STATE defined.
 */

/*
 * Copyright (C) 2011-2023 Oracle and/or its affiliates.
 *
 * This file is part of VirtualBox base platform packages, as
 * available from https://www.virtualbox.org.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, in version 3 of the
 * License.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <https://www.gnu.org/licenses>.
 *
 * SPDX-License-Identifier: GPL-3.0-only
 */


/*********************************************************************************************************************************
*   Header Files                                                                                                                 *
*********************************************************************************************************************************/
#define LOG_GROUP LOG_GROUP_IEM_RE_THREADED
#define VMCPU_INCL_CPUM_GST_CTX
#include <VBox/vmm/iem.h>
#include <VBox/vmm/cpum.h>
#include <VBox/vmm/apic.h>
#include <VBox/vmm/pdm.h>
#include <VBox/vmm/pgm.h>
#include <VBox/vmm/iom.h>
#include <VBox/vmm/em.h>
#include <VBox/vmm/hm.h>
#include <VBox/vmm/nem.h>
#include <VBox/vmm/gim.h>
#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
# include <VBox/vmm/em.h>
# include <VBox/vmm/hm_svm.h>
#endif
#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
# include <VBox/vmm/hmvmxinline.h>
#endif
#include <VBox/vmm/tm.h>
#include <VBox/vmm/dbgf.h>
#include <VBox/vmm/dbgftrace.h>
#include "IEMInternal.h"
#include <VBox/vmm/vmcc.h>
#include <VBox/log.h>
#include <VBox/err.h>
#include <VBox/param.h>
#include <VBox/dis.h>
#include <VBox/disopcode-x86-amd64.h>
#include <iprt/asm-math.h>
#include <iprt/assert.h>
#include <iprt/string.h>
#include <iprt/x86.h>

#include "IEMInline.h"


static VBOXSTRICTRC iemThreadeFuncWorkerObsoleteTb(PVMCPUCC pVCpu)
{
    /* We set fSafeToFree to false because we're being called in the context
       of a TB callback function, which for native TBs means we cannot release
       the executable memory until we've returned all the way back to
       iemTbExec, as that return path goes via the native code generated for
       the TB. */
    iemThreadedTbObsolete(pVCpu, pVCpu->iem.s.pCurTbR3, false /*fSafeToFree*/);
    return VINF_IEM_REEXEC_BREAK;
}


/**
 * Built-in function that does absolutely nothing - for debugging.
 *
 * This can be used for artificially increasing the number of calls generated,
 * or for triggering flushes associated with threaded calls.
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_Nop)
{
    RT_NOREF(pVCpu, uParam0, uParam1, uParam2);
    return VINF_SUCCESS;
}


/**
 * This is also called from iemNativeHlpAsmSafeWrapLogCpuState.
 */
DECLASM(void) iemThreadedFunc_BltIn_LogCpuStateWorker(PVMCPU pVCpu)
{
#ifdef LOG_ENABLED
    PCIEMTB const      pTb     = pVCpu->iem.s.pCurTbR3;
    PCX86FXSTATE const pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
    Log2(("**** LG%c fExec=%x pTb=%p cUsed=%u\n"
          " eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
          " eip=%08x esp=%08x ebp=%08x iopl=%d tr=%04x\n"
          " cs=%04x ss=%04x ds=%04x es=%04x fs=%04x gs=%04x efl=%08x\n"
          " fsw=%04x fcw=%04x ftw=%02x mxcsr=%04x/%04x\n"
          , pTb && (pTb->fFlags & IEMTB_F_TYPE_NATIVE) ? 'n' : 't', pVCpu->iem.s.fExec, pTb, pTb ? pTb->cUsed : 0,
          pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.ebx, pVCpu->cpum.GstCtx.ecx, pVCpu->cpum.GstCtx.edx, pVCpu->cpum.GstCtx.esi, pVCpu->cpum.GstCtx.edi,
          pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL, pVCpu->cpum.GstCtx.tr.Sel,
          pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.ds.Sel, pVCpu->cpum.GstCtx.es.Sel,
          pVCpu->cpum.GstCtx.fs.Sel, pVCpu->cpum.GstCtx.gs.Sel, pVCpu->cpum.GstCtx.eflags.u,
          pFpuCtx->FSW, pFpuCtx->FCW, pFpuCtx->FTW, pFpuCtx->MXCSR, pFpuCtx->MXCSR_MASK ));
#else
    RT_NOREF(pVCpu);
#endif
}


/**
 * Built-in function that logs the current CPU state - for debugging.
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_LogCpuState)
{
    iemThreadedFunc_BltIn_LogCpuStateWorker(pVCpu);
    RT_NOREF(uParam0, uParam1, uParam2);
    return VINF_SUCCESS;
}


/**
 * Built-in function that calls a C-implementation function taking zero
 * arguments.
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_DeferToCImpl0)
{
    PFNIEMCIMPL0 const pfnCImpl = (PFNIEMCIMPL0)(uintptr_t)uParam0;
    uint8_t const      cbInstr  = (uint8_t)uParam1;
    RT_NOREF(uParam2);
    return pfnCImpl(pVCpu, cbInstr);
}
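
/*
 * A minimal standalone sketch (not VBox API) of the calling convention used
 * above: the recompiler stores a C-impl function pointer in the generic
 * 64-bit slot uParam0 and the instruction length in uParam1, and the built-in
 * function recovers both at dispatch time.  All names below are hypothetical.
 */
#if 0
# include <stdint.h>

typedef int (*PFNSKETCHCIMPL0)(void *pvCpu, uint8_t cbInstr);

/* Pack: the pointer and the length each travel in their own uint64_t slot. */
static void sketchPackCImpl0(PFNSKETCHCIMPL0 pfn, uint8_t cbInstr, uint64_t *puParam0, uint64_t *puParam1)
{
    *puParam0 = (uint64_t)(uintptr_t)pfn; /* pointer -> integer via uintptr_t */
    *puParam1 = cbInstr;                  /* zero-extended into the low byte  */
}

/* Unpack and dispatch, mirroring iemThreadedFunc_BltIn_DeferToCImpl0 above. */
static int sketchDispatchCImpl0(void *pvCpu, uint64_t uParam0, uint64_t uParam1)
{
    PFNSKETCHCIMPL0 const pfn     = (PFNSKETCHCIMPL0)(uintptr_t)uParam0;
    uint8_t const         cbInstr = (uint8_t)uParam1;
    return pfn(pvCpu, cbInstr);
}
#endif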


/**
 * Built-in function that checks for pending interrupts that can be delivered,
 * as well as for forced action flags.
 *
 * This triggers after the completion of an instruction, so EIP is already at
 * the next instruction. If an IRQ or important FF is pending, this will return
 * a non-zero status that stops TB execution.
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckIrq)
{
    RT_NOREF(uParam0, uParam1, uParam2);

    /*
     * Check for IRQs and other FFs that need servicing.
     */
    uint64_t fCpu = pVCpu->fLocalForcedActions;
    fCpu &= VMCPU_FF_ALL_MASK & ~(  VMCPU_FF_PGM_SYNC_CR3
                                  | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
                                  | VMCPU_FF_TLB_FLUSH
                                  | VMCPU_FF_UNHALT );
    /** @todo this isn't even close to the NMI and interrupt conditions in EM! */
    if (RT_LIKELY(   (   !fCpu
                      || (   !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
                          && (   !pVCpu->cpum.GstCtx.rflags.Bits.u1IF
                              || CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx)) ) )
                  && !VM_FF_IS_ANY_SET(pVCpu->CTX_SUFF(pVM), VM_FF_ALL_MASK) ))
        return VINF_SUCCESS;

    Log(("%04x:%08RX32: Pending IRQ and/or FF: fCpu=%#RX64 fVm=%#RX32 IF=%d\n",
         pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip, fCpu,
         pVCpu->CTX_SUFF(pVM)->fGlobalForcedActions & VM_FF_ALL_MASK, pVCpu->cpum.GstCtx.rflags.Bits.u1IF));
    STAM_REL_COUNTER_INC(&pVCpu->iem.s.StatCheckIrqBreaks);
    return VINF_IEM_REEXEC_BREAK;
}
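
/*
 * A standalone sketch of the filtering above, assuming force-action flags
 * live in a plain 64-bit mask: strip the flags this check deliberately
 * ignores and break out of the TB only if anything relevant remains.  The
 * bit values are made up; only the masking pattern mirrors the real code.
 */
#if 0
# include <stdint.h>

# define SKETCH_FF_ALL_MASK   UINT64_C(0x00000000ffffffff)
# define SKETCH_FF_TLB_FLUSH  UINT64_C(0x0000000000000100) /* hypothetical bit */
# define SKETCH_FF_UNHALT     UINT64_C(0x0000000000000200) /* hypothetical bit */

static int sketchNeedsIrqBreak(uint64_t fLocalForcedActions)
{
    uint64_t fCpu = fLocalForcedActions;
    fCpu &= SKETCH_FF_ALL_MASK & ~(SKETCH_FF_TLB_FLUSH | SKETCH_FF_UNHALT);
    return fCpu != 0; /* non-zero: stop TB execution so EM can service it */
}
#endif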


/**
 * Built-in function that compares the fExec mask against uParam0.
 *
 * This is used both for IEM_CIMPL_F_MODE and IEM_CIMPL_F_VMEXIT after executing
 * an instruction.
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckMode)
{
    uint32_t const fExpectedExec = (uint32_t)uParam0;
    if ((pVCpu->iem.s.fExec & IEMTB_F_KEY_MASK) == (fExpectedExec & IEMTB_F_KEY_MASK))
        return VINF_SUCCESS;
    LogFlow(("Mode changed at %04x:%08RX64: %#x -> %#x (xor: %#x, xor-key: %#x)\n",
             pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, fExpectedExec,
             pVCpu->iem.s.fExec, fExpectedExec ^ pVCpu->iem.s.fExec, (fExpectedExec ^ pVCpu->iem.s.fExec) & IEMTB_F_KEY_MASK));
    RT_NOREF(uParam1, uParam2);
    STAM_REL_COUNTER_INC(&pVCpu->iem.s.StatCheckModeBreaks);
    return VINF_IEM_REEXEC_BREAK;
}
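
/*
 * A standalone sketch of the mode check above: only the bits covered by the
 * TB lookup key are compared, so execution-state bits outside the key mask
 * may change without invalidating the TB.  The mask value is a hypothetical
 * stand-in for IEMTB_F_KEY_MASK.
 */
#if 0
# include <stdint.h>

# define SKETCH_TB_KEY_MASK UINT32_C(0x0000ffff) /* hypothetical key mask */

static int sketchModeStillMatches(uint32_t fExecNow, uint32_t fExecWhenCompiled)
{
    return (fExecNow & SKETCH_TB_KEY_MASK) == (fExecWhenCompiled & SKETCH_TB_KEY_MASK);
}
#endif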


/**
 * Built-in function that checks for hardware instruction breakpoints.
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckHwInstrBps)
{
    VBOXSTRICTRC rcStrict = DBGFBpCheckInstruction(pVCpu->CTX_SUFF(pVM), pVCpu,
                                                   pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base);
    if (RT_LIKELY(rcStrict == VINF_SUCCESS))
        return VINF_SUCCESS;

    if (rcStrict == VINF_EM_RAW_GUEST_TRAP)
    {
        LogFlow(("Guest HW bp at %04x:%08RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
        rcStrict = iemRaiseDebugException(pVCpu);
        Assert(rcStrict != VINF_SUCCESS);
    }
    else
        LogFlow(("VBoxDbg HW bp at %04x:%08RX64: %Rrc\n",
                 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, VBOXSTRICTRC_VAL(rcStrict) ));
    RT_NOREF(uParam0, uParam1, uParam2);
    return rcStrict;
}


DECL_FORCE_INLINE(RTGCPHYS) iemTbGetRangePhysPageAddr(PCIEMTB pTb, uint8_t idxRange)
{
    Assert(idxRange < RT_MIN(pTb->cRanges, RT_ELEMENTS(pTb->aRanges)));
    uint8_t const idxPage = pTb->aRanges[idxRange].idxPhysPage;
    Assert(idxPage <= RT_ELEMENTS(pTb->aGCPhysPages));
    if (idxPage == 0)
        return pTb->GCPhysPc & ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK;
    Assert(!(pTb->aGCPhysPages[idxPage - 1] & GUEST_PAGE_OFFSET_MASK));
    return pTb->aGCPhysPages[idxPage - 1];
}
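
/*
 * A standalone sketch of the page-index encoding used by the helper above:
 * index 0 means "the page the TB starts on" (derived from GCPhysPc), while
 * index N > 0 selects entry N - 1 of the extra physical-page array.  The
 * structure below is a simplified stand-in for IEMTB, assuming 4 KiB pages.
 */
#if 0
# include <stdint.h>

# define SKETCH_PAGE_OFFSET_MASK UINT64_C(0xfff) /* assumed 4 KiB guest pages */

typedef struct SKETCHTB
{
    uint64_t GCPhysPc;        /* physical address of the first instruction   */
    uint64_t aGCPhysPages[2]; /* page-aligned addresses of subsequent pages  */
} SKETCHTB;

static uint64_t sketchGetRangePhysPageAddr(const SKETCHTB *pTb, uint8_t idxPhysPage)
{
    if (idxPhysPage == 0)
        return pTb->GCPhysPc & ~SKETCH_PAGE_OFFSET_MASK; /* mask down to the page */
    return pTb->aGCPhysPages[idxPhysPage - 1];           /* already page aligned  */
}
#endif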


/**
 * Macro that implements the 16/32-bit CS.LIM check, as this is done by a
 * number of functions.
 */
/** @todo consider 32-bit EIP mid-instruction wrap-around... Difficult to
 *        test, since it would require replacing the default firmware. */
#define BODY_CHECK_CS_LIM(a_cbInstr) do { \
        if (RT_LIKELY((uint32_t)(pVCpu->cpum.GstCtx.eip + (a_cbInstr) - 1U) <= pVCpu->cpum.GstCtx.cs.u32Limit)) \
        { /* likely */ } \
        else \
        { \
            Log7(("EIP out of bounds at %04x:%08RX32 LB %u - CS.LIM=%#RX32\n", \
                  pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip, (a_cbInstr), pVCpu->cpum.GstCtx.cs.u32Limit)); \
            return iemRaiseGeneralProtectionFault0(pVCpu); \
        } \
    } while(0)
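
/*
 * A standalone sketch of the limit check above: the last byte of the
 * instruction, eip + cbInstr - 1, must lie at or below CS.LIM.  The cast to
 * uint32_t reproduces the wrap-around behaviour of the original expression.
 */
#if 0
# include <stdint.h>

static int sketchIsWithinCsLim(uint32_t eip, uint8_t cbInstr, uint32_t uCsLimit)
{
    return (uint32_t)(eip + cbInstr - 1U) <= uCsLimit; /* 0 means raise #GP(0) */
}
#endif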

/**
 * Macro that considers whether we need CS.LIM checking after a branch or
 * crossing over to a new page.
 */
#define BODY_CONSIDER_CS_LIM_CHECKING(a_pTb, a_cbInstr) do { \
        int64_t const offFromLim = (int64_t)pVCpu->cpum.GstCtx.cs.u32Limit - (int64_t)pVCpu->cpum.GstCtx.eip; \
        if (offFromLim >= GUEST_PAGE_SIZE + 16 - (int32_t)(pVCpu->cpum.GstCtx.cs.u64Base & GUEST_PAGE_OFFSET_MASK)) \
        { /* likely */ } \
        else \
        { \
            Log7(("TB need CS.LIM: %p at %04x:%08RX64 LB %u; offFromLim=%#RX64 CS.LIM=%#RX32 CS.BASE=%#RX64 - #%u\n", \
                  (a_pTb), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, (a_cbInstr), offFromLim, \
                  pVCpu->cpum.GstCtx.cs.u32Limit, pVCpu->cpum.GstCtx.cs.u64Base, __LINE__)); \
            RT_NOREF(a_pTb, a_cbInstr); \
            STAM_REL_COUNTER_INC(&pVCpu->iem.s.StatCheckNeedCsLimChecking); \
            return VINF_IEM_REEXEC_BREAK; \
        } \
    } while(0)

/**
 * Macro that implements opcode (re-)checking.
 */
#define BODY_CHECK_OPCODES(a_pTb, a_idxRange, a_offRange, a_cbInstr) do { \
        Assert((a_idxRange) < (a_pTb)->cRanges && (a_pTb)->cRanges <= RT_ELEMENTS((a_pTb)->aRanges)); \
        Assert((a_offRange) < (a_pTb)->aRanges[(a_idxRange)].cbOpcodes); \
        /* We can use pbInstrBuf here as it will be updated when branching (and prior to executing a TB). */ \
        if (RT_LIKELY(memcmp(&pVCpu->iem.s.pbInstrBuf[(a_pTb)->aRanges[(a_idxRange)].offPhysPage + (a_offRange)], \
                             &(a_pTb)->pabOpcodes[ (a_pTb)->aRanges[(a_idxRange)].offOpcodes + (a_offRange)], \
                             (a_pTb)->aRanges[(a_idxRange)].cbOpcodes - (a_offRange)) == 0)) \
        { /* likely */ } \
        else \
        { \
            Log7(("TB obsolete: %p at %04x:%08RX64 LB %u; range %u, off %#x LB %#x + %#x; #%u\n", (a_pTb), \
                  pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, (a_cbInstr), (a_idxRange), \
                  (a_pTb)->aRanges[(a_idxRange)].offOpcodes, (a_pTb)->aRanges[(a_idxRange)].cbOpcodes, (a_offRange), __LINE__)); \
            RT_NOREF(a_cbInstr); \
            return iemThreadeFuncWorkerObsoleteTb(pVCpu); \
        } \
    } while(0)
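
/*
 * A standalone sketch of the opcode revalidation above: the bytes currently
 * mapped at the instruction buffer are compared against the copy recorded
 * when the TB was compiled; any difference means self-modifying or remapped
 * code, and the TB must be thrown away.  Names are illustrative only.
 */
#if 0
# include <stdint.h>
# include <string.h>

static int sketchOpcodesStillValid(const uint8_t *pbCurrent,  /* live guest bytes   */
                                   const uint8_t *pbRecorded, /* TB's saved opcodes */
                                   size_t cbToCheck)
{
    return memcmp(pbCurrent, pbRecorded, cbToCheck) == 0; /* 0: obsolete the TB */
}
#endif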

/**
 * Macro that implements TLB loading and updating pbInstrBuf for an
 * instruction crossing into a new page.
 *
 * This may long jump if we're raising a \#PF, \#GP or similar trouble.
 */
#define BODY_LOAD_TLB_FOR_NEW_PAGE(a_pTb, a_offInstr, a_idxRange, a_cbInstr) do { \
        pVCpu->iem.s.pbInstrBuf       = NULL; \
        pVCpu->iem.s.offCurInstrStart = GUEST_PAGE_SIZE - (a_offInstr); \
        pVCpu->iem.s.offInstrNextByte = GUEST_PAGE_SIZE; \
        iemOpcodeFetchBytesJmp(pVCpu, 0, NULL); \
        \
        RTGCPHYS const GCPhysNewPage = iemTbGetRangePhysPageAddr(a_pTb, a_idxRange); \
        if (RT_LIKELY(   pVCpu->iem.s.GCPhysInstrBuf == GCPhysNewPage \
                      && pVCpu->iem.s.pbInstrBuf)) \
        { /* likely */ } \
        else \
        { \
            Log7(("TB obsolete: %p at %04x:%08RX64 LB %u; crossing at %#x; GCPhys=%RGp expected %RGp, pbInstrBuf=%p - #%u\n", \
                  (a_pTb), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, (a_cbInstr), (a_offInstr), \
                  pVCpu->iem.s.GCPhysInstrBuf, GCPhysNewPage, pVCpu->iem.s.pbInstrBuf, __LINE__)); \
            RT_NOREF(a_cbInstr); \
            return iemThreadeFuncWorkerObsoleteTb(pVCpu); \
        } \
    } while(0)

/**
 * Macro that implements TLB loading and updating pbInstrBuf when branching or
 * when crossing a page on an instruction boundary.
 *
 * This differs from BODY_LOAD_TLB_FOR_NEW_PAGE in that it will first check if
 * it is an inter-page branch and also check the page offset.
 *
 * This may long jump if we're raising a \#PF, \#GP or similar trouble.
 */
#define BODY_LOAD_TLB_AFTER_BRANCH(a_pTb, a_idxRange, a_cbInstr) do { \
        /* Is RIP within the current code page? */ \
        Assert(pVCpu->cpum.GstCtx.cs.u64Base == 0 || !IEM_IS_64BIT_CODE(pVCpu)); \
        uint64_t const uPc = pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base; \
        uint64_t const off = uPc - pVCpu->iem.s.uInstrBufPc; \
        if (off < pVCpu->iem.s.cbInstrBufTotal) \
        { \
            Assert(!(pVCpu->iem.s.GCPhysInstrBuf & GUEST_PAGE_OFFSET_MASK)); \
            Assert(pVCpu->iem.s.pbInstrBuf); \
            RTGCPHYS const GCPhysRangePageWithOffset = iemTbGetRangePhysPageAddr(a_pTb, a_idxRange) \
                                                     | (a_pTb)->aRanges[(a_idxRange)].offPhysPage; \
            if (GCPhysRangePageWithOffset == pVCpu->iem.s.GCPhysInstrBuf + off) \
            { /* we're good */ } \
            /** @todo r=bird: Not sure if we need the TB obsolete complication here. \
             *        If we're preceded by an indirect jump, there is no reason why the TB \
             *        would be 'obsolete' just because this time around the indirect jump ends \
             *        up at the same offset in a different page. This would be real bad for \
             *        indirect trampolines/validators. */ \
            else if ((a_pTb)->aRanges[(a_idxRange)].offPhysPage != off) \
            { \
                Log7(("TB jmp miss: %p at %04x:%08RX64 LB %u; branching/1; GCPhysWithOffset=%RGp expected %RGp, pbInstrBuf=%p - #%u\n", \
                      (a_pTb), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, (a_cbInstr), \
                      pVCpu->iem.s.GCPhysInstrBuf + off, GCPhysRangePageWithOffset, pVCpu->iem.s.pbInstrBuf, __LINE__)); \
                RT_NOREF(a_cbInstr); \
                STAM_REL_COUNTER_INC(&pVCpu->iem.s.StatCheckBranchMisses); \
                return VINF_IEM_REEXEC_BREAK; \
            } \
            else \
            { \
                Log7(("TB obsolete: %p at %04x:%08RX64 LB %u; branching/1; GCPhysWithOffset=%RGp expected %RGp, pbInstrBuf=%p - #%u\n", \
                      (a_pTb), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, (a_cbInstr), \
                      pVCpu->iem.s.GCPhysInstrBuf + off, GCPhysRangePageWithOffset, pVCpu->iem.s.pbInstrBuf, __LINE__)); \
                RT_NOREF(a_cbInstr); \
                return iemThreadeFuncWorkerObsoleteTb(pVCpu); \
            } \
        } \
        else \
        { \
            /* Must translate new RIP. */ \
            pVCpu->iem.s.pbInstrBuf       = NULL; \
            pVCpu->iem.s.offCurInstrStart = 0; \
            pVCpu->iem.s.offInstrNextByte = 0; \
            iemOpcodeFetchBytesJmp(pVCpu, 0, NULL); \
            Assert(!(pVCpu->iem.s.GCPhysInstrBuf & GUEST_PAGE_OFFSET_MASK) || !pVCpu->iem.s.pbInstrBuf); \
            \
            RTGCPHYS const GCPhysRangePageWithOffset = iemTbGetRangePhysPageAddr(a_pTb, a_idxRange) \
                                                     | (a_pTb)->aRanges[(a_idxRange)].offPhysPage; \
            uint64_t const offNew = uPc - pVCpu->iem.s.uInstrBufPc; \
            if (   GCPhysRangePageWithOffset == pVCpu->iem.s.GCPhysInstrBuf + offNew \
                && pVCpu->iem.s.pbInstrBuf) \
            { /* likely */ } \
            else if (   (a_pTb)->aRanges[(a_idxRange)].offPhysPage != offNew \
                     && pVCpu->iem.s.pbInstrBuf) \
            { \
                Log7(("TB jmp miss: %p at %04x:%08RX64 LB %u; branching/2; GCPhysWithOffset=%RGp expected %RGp, pbInstrBuf=%p - #%u\n", \
                      (a_pTb), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, (a_cbInstr), \
                      pVCpu->iem.s.GCPhysInstrBuf + offNew, GCPhysRangePageWithOffset, pVCpu->iem.s.pbInstrBuf, __LINE__)); \
                RT_NOREF(a_cbInstr); \
                STAM_REL_COUNTER_INC(&pVCpu->iem.s.StatCheckBranchMisses); \
                return VINF_IEM_REEXEC_BREAK; \
            } \
            else \
            { \
                Log7(("TB obsolete: %p at %04x:%08RX64 LB %u; branching/2; GCPhysWithOffset=%RGp expected %RGp, pbInstrBuf=%p - #%u\n", \
                      (a_pTb), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, (a_cbInstr), \
                      pVCpu->iem.s.GCPhysInstrBuf + offNew, GCPhysRangePageWithOffset, pVCpu->iem.s.pbInstrBuf, __LINE__)); \
                RT_NOREF(a_cbInstr); \
                return iemThreadeFuncWorkerObsoleteTb(pVCpu); \
            } \
        } \
    } while(0)

/**
 * Macro that implements the PC check after a conditional branch.
 */
#define BODY_CHECK_PC_AFTER_BRANCH(a_pTb, a_idxRange, a_offRange, a_cbInstr) do { \
        /* Is RIP within the current code page? */ \
        Assert(pVCpu->cpum.GstCtx.cs.u64Base == 0 || !IEM_IS_64BIT_CODE(pVCpu)); \
        uint64_t const uPc = pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base; \
        uint64_t const off = uPc - pVCpu->iem.s.uInstrBufPc; \
        Assert(!(pVCpu->iem.s.GCPhysInstrBuf & GUEST_PAGE_OFFSET_MASK)); \
        RTGCPHYS const GCPhysRangePageWithOffset = (  iemTbGetRangePhysPageAddr(a_pTb, a_idxRange) \
                                                    | (a_pTb)->aRanges[(a_idxRange)].offPhysPage) \
                                                 + (a_offRange); \
        if (   GCPhysRangePageWithOffset == pVCpu->iem.s.GCPhysInstrBuf + off \
            && off < /*pVCpu->iem.s.cbInstrBufTotal - ignore flushes and CS.LIM is checked elsewhere*/ X86_PAGE_SIZE) \
        { /* we're good */ } \
        else \
        { \
            Log7(("TB jmp miss: %p at %04x:%08RX64 LB %u; GCPhysWithOffset=%RGp hoped for %RGp, pbInstrBuf=%p - #%u\n", \
                  (a_pTb), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, (a_cbInstr), \
                  pVCpu->iem.s.GCPhysInstrBuf + off, GCPhysRangePageWithOffset, pVCpu->iem.s.pbInstrBuf, __LINE__)); \
            RT_NOREF(a_cbInstr); \
            STAM_REL_COUNTER_INC(&pVCpu->iem.s.StatCheckBranchMisses); \
            return VINF_IEM_REEXEC_BREAK; \
        } \
    } while(0)
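
/*
 * A standalone sketch of the address comparison above: the flat PC is formed
 * as RIP + CS base, its offset into the current instruction buffer is taken,
 * and the branch target only passes if the physical address matches what the
 * TB recorded and the offset stays within one page.  Page size is assumed.
 */
#if 0
# include <stdint.h>

# define SKETCH_PAGE_SIZE UINT64_C(0x1000) /* assumed 4 KiB guest pages */

static int sketchPcAfterBranchOk(uint64_t uRip, uint64_t uCsBase,
                                 uint64_t uInstrBufPc, uint64_t GCPhysInstrBuf,
                                 uint64_t GCPhysExpected)
{
    uint64_t const uPc = uRip + uCsBase;    /* flat (linear) PC               */
    uint64_t const off = uPc - uInstrBufPc; /* offset into instruction buffer */
    return GCPhysExpected == GCPhysInstrBuf + off
        && off < SKETCH_PAGE_SIZE;          /* must stay within the page      */
}
#endif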


/**
 * Built-in function that checks that EIP/IP + uParam0 is within CS.LIM,
 * raising a \#GP(0) if this isn't the case.
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckCsLim)
{
    uint32_t const cbInstr = (uint8_t)uParam0;
    RT_NOREF(uParam1, uParam2);
    BODY_CHECK_CS_LIM(cbInstr);
    return VINF_SUCCESS;
}


/**
 * Built-in function for re-checking opcodes and CS.LIM after an instruction
 * that may have modified them.
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckCsLimAndOpcodes)
{
    PCIEMTB const  pTb      = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr  = (uint8_t)uParam0;
    uint32_t const idxRange = (uint32_t)uParam1;
    uint32_t const offRange = (uint32_t)uParam2;
    BODY_CHECK_CS_LIM(cbInstr);
    BODY_CHECK_OPCODES(pTb, idxRange, offRange, cbInstr);
    return VINF_SUCCESS;
}


/**
 * Built-in function for re-checking opcodes after an instruction that may have
 * modified them.
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckOpcodes)
{
    PCIEMTB const  pTb      = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr  = (uint8_t)uParam0;
    uint32_t const idxRange = (uint32_t)uParam1;
    uint32_t const offRange = (uint32_t)uParam2;
    BODY_CHECK_OPCODES(pTb, idxRange, offRange, cbInstr);
    return VINF_SUCCESS;
}


/**
 * Built-in function for re-checking opcodes and considering the need for
 * CS.LIM checking after an instruction that may have modified them.
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckOpcodesConsiderCsLim)
{
    PCIEMTB const  pTb      = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr  = (uint8_t)uParam0;
    uint32_t const idxRange = (uint32_t)uParam1;
    uint32_t const offRange = (uint32_t)uParam2;
    BODY_CONSIDER_CS_LIM_CHECKING(pTb, cbInstr);
    BODY_CHECK_OPCODES(pTb, idxRange, offRange, cbInstr);
    return VINF_SUCCESS;
}


/*
 * Post-branching checkers.
 */

/**
 * Built-in function for checking CS.LIM, checking the PC and checking opcodes
 * after conditional branching within the same page.
 *
 * @see iemThreadedFunc_BltIn_CheckPcAndOpcodes
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckCsLimAndPcAndOpcodes)
{
    PCIEMTB const  pTb      = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr  = (uint8_t)uParam0;
    uint32_t const idxRange = (uint32_t)uParam1;
    uint32_t const offRange = (uint32_t)uParam2;
    //LogFunc(("idxRange=%u @ %#x LB %#x: offPhysPage=%#x LB %#x\n", idxRange, offRange, cbInstr, pTb->aRanges[idxRange].offPhysPage, pTb->aRanges[idxRange].cbOpcodes));
    BODY_CHECK_CS_LIM(cbInstr);
    BODY_CHECK_PC_AFTER_BRANCH(pTb, idxRange, offRange, cbInstr);
    BODY_CHECK_OPCODES(pTb, idxRange, offRange, cbInstr);
    //LogFunc(("okay\n"));
    return VINF_SUCCESS;
}


/**
 * Built-in function for checking the PC and checking opcodes after conditional
 * branching within the same page.
 *
 * @see iemThreadedFunc_BltIn_CheckCsLimAndPcAndOpcodes
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckPcAndOpcodes)
{
    PCIEMTB const  pTb      = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr  = (uint8_t)uParam0;
    uint32_t const idxRange = (uint32_t)uParam1;
    uint32_t const offRange = (uint32_t)uParam2;
    //LogFunc(("idxRange=%u @ %#x LB %#x: offPhysPage=%#x LB %#x\n", idxRange, offRange, cbInstr, pTb->aRanges[idxRange].offPhysPage, pTb->aRanges[idxRange].cbOpcodes));
    BODY_CHECK_PC_AFTER_BRANCH(pTb, idxRange, offRange, cbInstr);
    BODY_CHECK_OPCODES(pTb, idxRange, offRange, cbInstr);
    //LogFunc(("okay\n"));
    return VINF_SUCCESS;
}


/**
 * Built-in function for checking the PC and checking opcodes and considering
 * the need for CS.LIM checking after conditional branching within the same
 * page.
 *
 * @see iemThreadedFunc_BltIn_CheckCsLimAndPcAndOpcodes
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckPcAndOpcodesConsiderCsLim)
{
    PCIEMTB const  pTb      = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr  = (uint8_t)uParam0;
    uint32_t const idxRange = (uint32_t)uParam1;
    uint32_t const offRange = (uint32_t)uParam2;
    //LogFunc(("idxRange=%u @ %#x LB %#x: offPhysPage=%#x LB %#x\n", idxRange, offRange, cbInstr, pTb->aRanges[idxRange].offPhysPage, pTb->aRanges[idxRange].cbOpcodes));
    BODY_CONSIDER_CS_LIM_CHECKING(pTb, cbInstr);
    BODY_CHECK_PC_AFTER_BRANCH(pTb, idxRange, offRange, cbInstr);
    BODY_CHECK_OPCODES(pTb, idxRange, offRange, cbInstr);
    //LogFunc(("okay\n"));
    return VINF_SUCCESS;
}


/**
 * Built-in function for checking CS.LIM, loading TLB and checking opcodes when
 * transitioning to a different code page.
 *
 * The code page transition can either be natural over onto the next page (with
 * the instruction starting at page offset zero) or by means of branching.
 *
 * @see iemThreadedFunc_BltIn_CheckOpcodesLoadingTlb
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckCsLimAndOpcodesLoadingTlb)
{
    PCIEMTB const  pTb      = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr  = (uint8_t)uParam0;
    uint32_t const idxRange = (uint32_t)uParam1;
    uint32_t const offRange = (uint32_t)uParam2;
    //LogFunc(("idxRange=%u @ %#x LB %#x: offPhysPage=%#x LB %#x\n", idxRange, offRange, cbInstr, pTb->aRanges[idxRange].offPhysPage, pTb->aRanges[idxRange].cbOpcodes));
    BODY_CHECK_CS_LIM(cbInstr);
    BODY_LOAD_TLB_AFTER_BRANCH(pTb, idxRange, cbInstr);
    BODY_CHECK_OPCODES(pTb, idxRange, offRange, cbInstr);
    //LogFunc(("okay\n"));
    return VINF_SUCCESS;
}


/**
 * Built-in function for loading TLB and checking opcodes when transitioning to
 * a different code page.
 *
 * The code page transition can either be natural over onto the next page (with
 * the instruction starting at page offset zero) or by means of branching.
 *
 * @see iemThreadedFunc_BltIn_CheckCsLimAndOpcodesLoadingTlb
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckOpcodesLoadingTlb)
{
    PCIEMTB const  pTb      = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr  = (uint8_t)uParam0;
    uint32_t const idxRange = (uint32_t)uParam1;
    uint32_t const offRange = (uint32_t)uParam2;
    //LogFunc(("idxRange=%u @ %#x LB %#x: offPhysPage=%#x LB %#x\n", idxRange, offRange, cbInstr, pTb->aRanges[idxRange].offPhysPage, pTb->aRanges[idxRange].cbOpcodes));
    BODY_LOAD_TLB_AFTER_BRANCH(pTb, idxRange, cbInstr);
    BODY_CHECK_OPCODES(pTb, idxRange, offRange, cbInstr);
    //LogFunc(("okay\n"));
    return VINF_SUCCESS;
}


/**
 * Built-in function for loading TLB and checking opcodes and considering the
 * need for CS.LIM checking when transitioning to a different code page.
 *
 * The code page transition can either be natural over onto the next page (with
 * the instruction starting at page offset zero) or by means of branching.
 *
 * @see iemThreadedFunc_BltIn_CheckCsLimAndOpcodesLoadingTlb
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckOpcodesLoadingTlbConsiderCsLim)
{
    PCIEMTB const  pTb      = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr  = (uint8_t)uParam0;
    uint32_t const idxRange = (uint32_t)uParam1;
    uint32_t const offRange = (uint32_t)uParam2;
    //LogFunc(("idxRange=%u @ %#x LB %#x: offPhysPage=%#x LB %#x\n", idxRange, offRange, cbInstr, pTb->aRanges[idxRange].offPhysPage, pTb->aRanges[idxRange].cbOpcodes));
    BODY_CONSIDER_CS_LIM_CHECKING(pTb, cbInstr);
    BODY_LOAD_TLB_AFTER_BRANCH(pTb, idxRange, cbInstr);
    BODY_CHECK_OPCODES(pTb, idxRange, offRange, cbInstr);
    //LogFunc(("okay\n"));
    return VINF_SUCCESS;
}


/*
 * Natural page crossing checkers.
 */

/**
 * Built-in function for checking CS.LIM, loading TLB and checking opcodes on
 * both pages when transitioning to a different code page.
 *
 * This is used when the previous instruction requires revalidation of opcode
 * bytes and the current instruction straddles a page boundary with opcode
 * bytes in both the old and new page.
 *
 * @see iemThreadedFunc_BltIn_CheckOpcodesAcrossPageLoadingTlb
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckCsLimAndOpcodesAcrossPageLoadingTlb)
{
    PCIEMTB const  pTb         = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr     = (uint8_t)uParam0;
    uint32_t const cbStartPage = (uint32_t)(uParam0 >> 32);
    uint32_t const idxRange1   = (uint32_t)uParam1;
    uint32_t const offRange1   = (uint32_t)uParam2;
    uint32_t const idxRange2   = idxRange1 + 1;
    BODY_CHECK_CS_LIM(cbInstr);
    BODY_CHECK_OPCODES(pTb, idxRange1, offRange1, cbInstr);
    BODY_LOAD_TLB_FOR_NEW_PAGE(pTb, cbStartPage, idxRange2, cbInstr);
    BODY_CHECK_OPCODES(pTb, idxRange2, 0, cbInstr);
    return VINF_SUCCESS;
}
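
/*
 * A standalone sketch of the uParam0 packing used by the page-crossing
 * functions in this section: the instruction length sits in the low byte and
 * the number of opcode bytes on the first page in bits 32-63, so one 64-bit
 * slot carries both values.  The packing helper itself is hypothetical; only
 * the unpacking mirrors the code above.
 */
#if 0
# include <stdint.h>

static uint64_t sketchPackCrossingParam0(uint8_t cbInstr, uint32_t cbStartPage)
{
    return ((uint64_t)cbStartPage << 32) | cbInstr;
}

static void sketchUnpackCrossingParam0(uint64_t uParam0, uint8_t *pcbInstr, uint32_t *pcbStartPage)
{
    *pcbInstr     = (uint8_t)uParam0;          /* low byte, as in the code above   */
    *pcbStartPage = (uint32_t)(uParam0 >> 32); /* high dword, as in the code above */
}
#endif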


/**
 * Built-in function for loading TLB and checking opcodes on both pages when
 * transitioning to a different code page.
 *
 * This is used when the previous instruction requires revalidation of opcode
 * bytes and the current instruction straddles a page boundary with opcode
 * bytes in both the old and new page.
 *
 * @see iemThreadedFunc_BltIn_CheckCsLimAndOpcodesAcrossPageLoadingTlb
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckOpcodesAcrossPageLoadingTlb)
{
    PCIEMTB const  pTb         = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr     = (uint8_t)uParam0;
    uint32_t const cbStartPage = (uint32_t)(uParam0 >> 32);
    uint32_t const idxRange1   = (uint32_t)uParam1;
    uint32_t const offRange1   = (uint32_t)uParam2;
    uint32_t const idxRange2   = idxRange1 + 1;
    BODY_CHECK_OPCODES(pTb, idxRange1, offRange1, cbInstr);
    BODY_LOAD_TLB_FOR_NEW_PAGE(pTb, cbStartPage, idxRange2, cbInstr);
    BODY_CHECK_OPCODES(pTb, idxRange2, 0, cbInstr);
    return VINF_SUCCESS;
}


/**
 * Built-in function for loading TLB and checking opcodes on both pages and
 * considering the need for CS.LIM checking when transitioning to a different
 * code page.
 *
 * This is used when the previous instruction requires revalidation of opcode
 * bytes and the current instruction straddles a page boundary with opcode
 * bytes in both the old and new page.
 *
 * @see iemThreadedFunc_BltIn_CheckCsLimAndOpcodesAcrossPageLoadingTlb
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckOpcodesAcrossPageLoadingTlbConsiderCsLim)
{
    PCIEMTB const  pTb         = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr     = (uint8_t)uParam0;
    uint32_t const cbStartPage = (uint32_t)(uParam0 >> 32);
    uint32_t const idxRange1   = (uint32_t)uParam1;
    uint32_t const offRange1   = (uint32_t)uParam2;
    uint32_t const idxRange2   = idxRange1 + 1;
    BODY_CONSIDER_CS_LIM_CHECKING(pTb, cbInstr);
    BODY_CHECK_OPCODES(pTb, idxRange1, offRange1, cbInstr);
    BODY_LOAD_TLB_FOR_NEW_PAGE(pTb, cbStartPage, idxRange2, cbInstr);
    BODY_CHECK_OPCODES(pTb, idxRange2, 0, cbInstr);
    return VINF_SUCCESS;
}


/**
 * Built-in function for checking CS.LIM, loading TLB and checking opcodes when
 * advancing naturally to a different code page.
 *
 * Only opcodes on the new page are checked.
 *
 * @see iemThreadedFunc_BltIn_CheckOpcodesOnNextPageLoadingTlb
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckCsLimAndOpcodesOnNextPageLoadingTlb)
{
    PCIEMTB const  pTb         = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr     = (uint8_t)uParam0;
    uint32_t const cbStartPage = (uint32_t)(uParam0 >> 32);
    uint32_t const idxRange1   = (uint32_t)uParam1;
    //uint32_t const offRange1   = (uint32_t)uParam2;
    uint32_t const idxRange2   = idxRange1 + 1;
    BODY_CHECK_CS_LIM(cbInstr);
    BODY_LOAD_TLB_FOR_NEW_PAGE(pTb, cbStartPage, idxRange2, cbInstr);
    BODY_CHECK_OPCODES(pTb, idxRange2, 0, cbInstr);
    RT_NOREF(uParam2);
    return VINF_SUCCESS;
}


/**
 * Built-in function for loading TLB and checking opcodes when advancing
 * naturally to a different code page.
 *
 * Only opcodes on the new page are checked.
 *
 * @see iemThreadedFunc_BltIn_CheckCsLimAndOpcodesOnNextPageLoadingTlb
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckOpcodesOnNextPageLoadingTlb)
{
    PCIEMTB const  pTb         = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr     = (uint8_t)uParam0;
    uint32_t const cbStartPage = (uint32_t)(uParam0 >> 32);
    uint32_t const idxRange1   = (uint32_t)uParam1;
    //uint32_t const offRange1   = (uint32_t)uParam2;
    uint32_t const idxRange2   = idxRange1 + 1;
    BODY_LOAD_TLB_FOR_NEW_PAGE(pTb, cbStartPage, idxRange2, cbInstr);
    BODY_CHECK_OPCODES(pTb, idxRange2, 0, cbInstr);
    RT_NOREF(uParam2);
    return VINF_SUCCESS;
}


/**
 * Built-in function for loading TLB and checking opcodes and considering the
 * need for CS.LIM checking when advancing naturally to a different code page.
 *
 * Only opcodes on the new page are checked.
 *
 * @see iemThreadedFunc_BltIn_CheckCsLimAndOpcodesOnNextPageLoadingTlb
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckOpcodesOnNextPageLoadingTlbConsiderCsLim)
{
    PCIEMTB const  pTb         = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr     = (uint8_t)uParam0;
    uint32_t const cbStartPage = (uint32_t)(uParam0 >> 32);
    uint32_t const idxRange1   = (uint32_t)uParam1;
    //uint32_t const offRange1   = (uint32_t)uParam2;
    uint32_t const idxRange2   = idxRange1 + 1;
    BODY_CONSIDER_CS_LIM_CHECKING(pTb, cbInstr);
    BODY_LOAD_TLB_FOR_NEW_PAGE(pTb, cbStartPage, idxRange2, cbInstr);
    BODY_CHECK_OPCODES(pTb, idxRange2, 0, cbInstr);
    RT_NOREF(uParam2);
    return VINF_SUCCESS;
}


/**
 * Built-in function for checking CS.LIM, loading TLB and checking opcodes when
 * advancing naturally to a different code page with first instr at byte 0.
 *
 * @see iemThreadedFunc_BltIn_CheckOpcodesOnNewPageLoadingTlb
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckCsLimAndOpcodesOnNewPageLoadingTlb)
{
    PCIEMTB const  pTb      = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr  = (uint8_t)uParam0;
    uint32_t const idxRange = (uint32_t)uParam1;
    RT_NOREF(uParam2); //Assert(uParam2 == 0 /*offRange*/);
    BODY_CHECK_CS_LIM(cbInstr);
    BODY_LOAD_TLB_FOR_NEW_PAGE(pTb, 0, idxRange, cbInstr);
    Assert(pVCpu->iem.s.offCurInstrStart == 0);
    BODY_CHECK_OPCODES(pTb, idxRange, 0, cbInstr);
    return VINF_SUCCESS;
}


/**
 * Built-in function for loading TLB and checking opcodes when advancing
 * naturally to a different code page with first instr at byte 0.
 *
 * @see iemThreadedFunc_BltIn_CheckCsLimAndOpcodesOnNewPageLoadingTlb
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckOpcodesOnNewPageLoadingTlb)
{
    PCIEMTB const  pTb      = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr  = (uint8_t)uParam0;
    uint32_t const idxRange = (uint32_t)uParam1;
    RT_NOREF(uParam2); //Assert(uParam2 == 0 /*offRange*/);
    BODY_LOAD_TLB_FOR_NEW_PAGE(pTb, 0, idxRange, cbInstr);
    Assert(pVCpu->iem.s.offCurInstrStart == 0);
    BODY_CHECK_OPCODES(pTb, idxRange, 0, cbInstr);
    return VINF_SUCCESS;
}


/**
 * Built-in function for loading TLB and checking opcodes and considering the
 * need for CS.LIM checking when advancing naturally to a different code page
 * with first instr at byte 0.
 *
 * @see iemThreadedFunc_BltIn_CheckCsLimAndOpcodesOnNewPageLoadingTlb
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckOpcodesOnNewPageLoadingTlbConsiderCsLim)
{
    PCIEMTB const  pTb      = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr  = (uint8_t)uParam0;
    uint32_t const idxRange = (uint32_t)uParam1;
    RT_NOREF(uParam2); //Assert(uParam2 == 0 /*offRange*/);
    BODY_CONSIDER_CS_LIM_CHECKING(pTb, cbInstr);
    BODY_LOAD_TLB_FOR_NEW_PAGE(pTb, 0, idxRange, cbInstr);
    Assert(pVCpu->iem.s.offCurInstrStart == 0);
    BODY_CHECK_OPCODES(pTb, idxRange, 0, cbInstr);
    return VINF_SUCCESS;
}