VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/HMAll.cpp@73299

Last change on this file since 73299 was 73293, checked in by vboxsync, 6 years ago

VMM, SUPDrv: Nested VMX: bugref:9180 Read VMX true control MSRs, dump them. Remove pVM->hm.cpuid, as cpum.ro.HostFeatures has been available for a long time now.
Related cleanups and simplifications.

/* $Id: HMAll.cpp 73293 2018-07-21 15:11:53Z vboxsync $ */
/** @file
 * HM - All contexts.
 */

/*
 * Copyright (C) 2006-2017 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */


/*********************************************************************************************************************************
*   Header Files                                                                                                                 *
*********************************************************************************************************************************/
#define LOG_GROUP LOG_GROUP_HM
#define VMCPU_INCL_CPUM_GST_CTX
#include <VBox/vmm/hm.h>
#include <VBox/vmm/pgm.h>
#include "HMInternal.h"
#include <VBox/vmm/vm.h>
#include <VBox/vmm/hm_vmx.h>
#include <VBox/vmm/hm_svm.h>
#include <VBox/err.h>
#include <VBox/log.h>
#include <iprt/param.h>
#include <iprt/assert.h>
#include <iprt/asm.h>
#include <iprt/string.h>
#include <iprt/thread.h>
#include <iprt/x86.h>
#include <iprt/asm-amd64-x86.h>


/**
 * Checks whether HM (VT-x/AMD-V) is being used by this VM.
 *
 * @retval  true if used.
 * @retval  false if software virtualization (raw-mode) is used.
 * @param   pVM         The cross context VM structure.
 * @sa      HMIsEnabled, HMR3IsEnabled
 * @internal
 */
VMMDECL(bool) HMIsEnabledNotMacro(PVM pVM)
{
    Assert(pVM->bMainExecutionEngine != VM_EXEC_ENGINE_NOT_SET);
    return pVM->fHMEnabled;
}


/**
 * Queues a guest page for invalidation.
 *
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   GCVirt      Page to invalidate.
 */
static void hmQueueInvlPage(PVMCPU pVCpu, RTGCPTR GCVirt)
{
    /* Nothing to do if a TLB flush is already pending. */
    if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_TLB_FLUSH))
        return;
    VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH);
    NOREF(GCVirt);
}


/**
 * Invalidates a guest page.
 *
 * @returns VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   GCVirt      Page to invalidate.
 */
VMM_INT_DECL(int) HMInvalidatePage(PVMCPU pVCpu, RTGCPTR GCVirt)
{
    STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushPageManual);
#ifdef IN_RING0
    return HMR0InvalidatePage(pVCpu, GCVirt);
#else
    hmQueueInvlPage(pVCpu, GCVirt);
    return VINF_SUCCESS;
#endif
}


#ifdef IN_RING0

/**
 * Dummy RTMpOnSpecific handler since RTMpPokeCpu couldn't be used.
 */
static DECLCALLBACK(void) hmFlushHandler(RTCPUID idCpu, void *pvUser1, void *pvUser2)
{
    NOREF(idCpu); NOREF(pvUser1); NOREF(pvUser2);
    return;
}


/**
 * Wrapper for RTMpPokeCpu to deal with VERR_NOT_SUPPORTED.
 */
static void hmR0PokeCpu(PVMCPU pVCpu, RTCPUID idHostCpu)
{
    uint32_t cWorldSwitchExits = ASMAtomicUoReadU32(&pVCpu->hm.s.cWorldSwitchExits);

    STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatPoke, x);
    int rc = RTMpPokeCpu(idHostCpu);
    STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatPoke, x);

    /* Not implemented on some platforms (Darwin, Linux kernel < 2.6.19); fall
       back to a less efficient implementation (broadcast). */
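    /* Even with a dummy handler, RTMpOnSpecific still has to run the callback on
       the target CPU, so that CPU gets interrupted and forced out of guest
       context just as a successful poke would have done, only less efficiently. */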
    if (rc == VERR_NOT_SUPPORTED)
    {
        STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatSpinPoke, z);
        /* synchronous. */
        RTMpOnSpecific(idHostCpu, hmFlushHandler, 0, 0);
        STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatSpinPoke, z);
    }
    else
    {
        if (rc == VINF_SUCCESS)
            STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatSpinPoke, z);
        else
            STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatSpinPokeFailed, z);

/** @todo If more than one CPU is going to be poked, we could optimize this
 *        operation by poking them first and waiting afterwards. That would
 *        require recording who to poke and their current cWorldSwitchExits
 *        values, which is not suitable for the stack... So, pVCpu->hm.s.something
 *        then. */
        /* Spin until the VCPU has switched back (poking is async). */
        while (   ASMAtomicUoReadBool(&pVCpu->hm.s.fCheckedTLBFlush)
               && cWorldSwitchExits == ASMAtomicUoReadU32(&pVCpu->hm.s.cWorldSwitchExits))
            ASMNopPause();

        if (rc == VINF_SUCCESS)
            STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatSpinPoke, z);
        else
            STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatSpinPokeFailed, z);
    }
}

#endif /* IN_RING0 */
#ifndef IN_RC

/**
 * Flushes the guest TLB.
 *
 * @returns VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure.
 */
VMM_INT_DECL(int) HMFlushTLB(PVMCPU pVCpu)
{
    LogFlow(("HMFlushTLB\n"));

    VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH);
    STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlbManual);
    return VINF_SUCCESS;
}

/**
 * Poke an EMT so it can perform the appropriate TLB shootdowns.
 *
 * @param   pVCpu               The cross context virtual CPU structure of the
 *                              EMT to poke.
 * @param   fAccountFlushStat   Whether to account the call to
 *                              StatTlbShootdownFlush or StatTlbShootdown.
 */
static void hmPokeCpuForTlbFlush(PVMCPU pVCpu, bool fAccountFlushStat)
{
    if (ASMAtomicUoReadBool(&pVCpu->hm.s.fCheckedTLBFlush))
    {
        if (fAccountFlushStat)
            STAM_COUNTER_INC(&pVCpu->hm.s.StatTlbShootdownFlush);
        else
            STAM_COUNTER_INC(&pVCpu->hm.s.StatTlbShootdown);
#ifdef IN_RING0
        RTCPUID idHostCpu = pVCpu->hm.s.idEnteredCpu;
        if (idHostCpu != NIL_RTCPUID)
            hmR0PokeCpu(pVCpu, idHostCpu);
#else
        VMR3NotifyCpuFFU(pVCpu->pUVCpu, VMNOTIFYFF_FLAGS_POKE);
#endif
    }
    else
        STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushPageManual);
}


/**
 * Invalidates a guest page on all VCPUs.
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   GCVirt      Page to invalidate.
 */
VMM_INT_DECL(int) HMInvalidatePageOnAllVCpus(PVM pVM, RTGCPTR GCVirt)
{
    /*
     * The VT-x/AMD-V code flushes the TLB each time a VCPU migrates to a different
     * host CPU, see hmR0VmxFlushTaggedTlbBoth() and hmR0SvmFlushTaggedTlb().
     *
     * This is why we do not worry about thread preemption here and simply
     * execute HMInvalidatePage(), assuming it might be the 'right' CPU.
     */
    VMCPUID idCurCpu = VMMGetCpuId(pVM);
    STAM_COUNTER_INC(&pVM->aCpus[idCurCpu].hm.s.StatFlushPage);

    for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
    {
        PVMCPU pVCpu = &pVM->aCpus[idCpu];

        /* Nothing to do if a TLB flush is already pending; the VCPU should
           have already been poked if it were active. */
        if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_TLB_FLUSH))
            continue;

        if (pVCpu->idCpu == idCurCpu)
            HMInvalidatePage(pVCpu, GCVirt);
        else
        {
            hmQueueInvlPage(pVCpu, GCVirt);
            hmPokeCpuForTlbFlush(pVCpu, false /* fAccountFlushStat */);
        }
    }

    return VINF_SUCCESS;
}


/**
 * Flush the TLBs of all VCPUs.
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 */
VMM_INT_DECL(int) HMFlushTLBOnAllVCpus(PVM pVM)
{
    if (pVM->cCpus == 1)
        return HMFlushTLB(&pVM->aCpus[0]);

    VMCPUID idThisCpu = VMMGetCpuId(pVM);

    STAM_COUNTER_INC(&pVM->aCpus[idThisCpu].hm.s.StatFlushTlb);

    for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
    {
        PVMCPU pVCpu = &pVM->aCpus[idCpu];

        /* Nothing to do if a TLB flush is already pending; the VCPU should
           have already been poked if it were active. */
        if (!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_TLB_FLUSH))
        {
            VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH);
            if (idThisCpu != idCpu)
                hmPokeCpuForTlbFlush(pVCpu, true /* fAccountFlushStat */);
        }
    }

    return VINF_SUCCESS;
}


/**
 * Invalidates a guest page by physical address.
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   GCPhys      Page to invalidate.
 *
 * @remarks Assumes the current instruction references this physical page
 *          through a virtual address!
 */
VMM_INT_DECL(int) HMInvalidatePhysPage(PVM pVM, RTGCPHYS GCPhys)
{
    if (!HMIsNestedPagingActive(pVM))
        return VINF_SUCCESS;

    /*
     * AMD-V: Doesn't support invalidation with guest physical addresses.
     *
     * VT-x: Doesn't support invalidation with guest physical addresses.
     * The INVVPID instruction takes only a linear address, while INVEPT only
     * flushes by EPT and not individual addresses.
     *
     * We update the force flag and flush before the next VM-entry, see @bugref{6568}.
     */
    RT_NOREF(GCPhys);
    /** @todo Remove or figure out a way to update the Phys STAT counter. */
    /* STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlbInvlpgPhys); */
    return HMFlushTLBOnAllVCpus(pVM);
}


/**
 * Checks if nested paging is enabled.
 *
 * @returns true if nested paging is active, false otherwise.
 * @param   pVM         The cross context VM structure.
 *
 * @remarks Works before hmR3InitFinalizeR0.
 */
VMM_INT_DECL(bool) HMIsNestedPagingActive(PVM pVM)
{
    return HMIsEnabled(pVM) && pVM->hm.s.fNestedPaging;
}


/**
 * Checks if both nested paging and unrestricted guest execution are enabled.
 *
 * The "(almost) complete guest execution in hardware" distinction only applies
 * to VT-x (unrestricted guest execution); AMD-V always qualifies.
 *
 * @returns true if we have both enabled, otherwise false.
 * @param   pVM         The cross context VM structure.
 *
 * @remarks Works before hmR3InitFinalizeR0.
 */
VMM_INT_DECL(bool) HMAreNestedPagingAndFullGuestExecEnabled(PVM pVM)
{
    return HMIsEnabled(pVM)
        && pVM->hm.s.fNestedPaging
        && (   pVM->hm.s.vmx.fUnrestrictedGuest
            || pVM->hm.s.svm.fSupported);
}


/**
 * Checks if this VM is using HM and is long-mode capable.
 *
 * Use VMR3IsLongModeAllowed() instead of this, when possible.
 *
 * @returns true if long mode is allowed, false otherwise.
 * @param   pVM         The cross context VM structure.
 * @sa      VMR3IsLongModeAllowed, NEMHCIsLongModeAllowed
 */
VMM_INT_DECL(bool) HMIsLongModeAllowed(PVM pVM)
{
    return HMIsEnabled(pVM) && pVM->hm.s.fAllow64BitGuests;
}


/**
 * Checks if MSR bitmaps are available. It is assumed that when they are
 * available they will be used as well.
 *
 * @returns true if MSR bitmaps are available, false otherwise.
 * @param   pVM         The cross context VM structure.
 */
VMM_INT_DECL(bool) HMAreMsrBitmapsAvailable(PVM pVM)
{
    if (HMIsEnabled(pVM))
    {
        if (pVM->hm.s.svm.fSupported)
            return true;

        if (   pVM->hm.s.vmx.fSupported
            && (pVM->hm.s.vmx.Msrs.ProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS))
        {
            return true;
        }
    }
    return false;
}


/**
 * Checks if AMD-V is active.
 *
 * @returns true if AMD-V is active.
 * @param   pVM         The cross context VM structure.
 *
 * @remarks Works before hmR3InitFinalizeR0.
 */
VMM_INT_DECL(bool) HMIsSvmActive(PVM pVM)
{
    return pVM->hm.s.svm.fSupported && HMIsEnabled(pVM);
}


/**
 * Checks if VT-x is active.
 *
 * @returns true if VT-x is active.
 * @param   pVM         The cross context VM structure.
 *
 * @remarks Works before hmR3InitFinalizeR0.
 */
VMM_INT_DECL(bool) HMIsVmxActive(PVM pVM)
{
    return pVM->hm.s.vmx.fSupported && HMIsEnabled(pVM);
}

#endif /* !IN_RC */

/**
 * Checks if an interrupt event is currently pending.
 *
 * @returns Interrupt event pending state.
 * @param   pVM         The cross context VM structure.
 */
VMM_INT_DECL(bool) HMHasPendingIrq(PVM pVM)
{
    PVMCPU pVCpu = VMMGetCpu(pVM);
    return !!pVCpu->hm.s.Event.fPending;
}


/**
 * Return the PAE PDPE entries.
 *
 * @returns Pointer to the PAE PDPE array.
 * @param   pVCpu       The cross context virtual CPU structure.
 */
VMM_INT_DECL(PX86PDPE) HMGetPaePdpes(PVMCPU pVCpu)
{
    return &pVCpu->hm.s.aPdpes[0];
}


/**
 * Checks if the current AMD CPU is subject to erratum 170 "In SVM mode,
 * incorrect code bytes may be fetched after a world-switch".
 *
 * @param   pu32Family      Where to store the CPU family (can be NULL).
 * @param   pu32Model       Where to store the CPU model (can be NULL).
 * @param   pu32Stepping    Where to store the CPU stepping (can be NULL).
 * @returns true if the erratum applies, false otherwise.
 */
VMM_INT_DECL(int) HMAmdIsSubjectToErratum170(uint32_t *pu32Family, uint32_t *pu32Model, uint32_t *pu32Stepping)
{
    /*
     * Erratum 170 which requires a forced TLB flush for each world switch:
     * See AMD spec. "Revision Guide for AMD NPT Family 0Fh Processors".
     *
     * All BH-G1/2 and DH-G1/2 models include a fix:
     * Athlon X2:   0x6b    1/2
     *              0x68    1/2
     * Athlon 64:   0x7f    1
     *              0x6f    2
     * Sempron:     0x7f    1/2
     *              0x6f    2
     *              0x6c    2
     *              0x7c    2
     * Turion 64:   0x68    2
     */
    uint32_t u32Dummy;
    uint32_t u32Version, u32Family, u32Model, u32Stepping, u32BaseFamily;
    ASMCpuId(1, &u32Version, &u32Dummy, &u32Dummy, &u32Dummy);
    u32BaseFamily = (u32Version >> 8) & 0xf;
    u32Family     = u32BaseFamily + (u32BaseFamily == 0xf ? ((u32Version >> 20) & 0x7f) : 0);
    u32Model      = ((u32Version >> 4) & 0xf);
    u32Model      = u32Model | ((u32BaseFamily == 0xf ? (u32Version >> 16) & 0x0f : 0) << 4);
    u32Stepping   = u32Version & 0xf;
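    /* Worked example (hypothetical raw value, for illustration only): with
       CPUID.01H EAX = 0x00060FB2 the fields decode to base family 0xf, extended
       model 0x6, base model 0xb and stepping 2, giving family 0xf, model 0x6b,
       stepping 2. That matches the fixed Athlon X2 entry (0x6b, stepping 1/2)
       in the table above, so the check below reports the erratum as not applying. */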

    bool fErratumApplies = false;
    if (   u32Family == 0xf
        && !((u32Model == 0x68 || u32Model == 0x6b || u32Model == 0x7f) && u32Stepping >= 1)
        && !((u32Model == 0x6f || u32Model == 0x6c || u32Model == 0x7c) && u32Stepping >= 2))
    {
        fErratumApplies = true;
    }

    if (pu32Family)
        *pu32Family   = u32Family;
    if (pu32Model)
        *pu32Model    = u32Model;
    if (pu32Stepping)
        *pu32Stepping = u32Stepping;

    return fErratumApplies;
}


/**
 * Sets or clears the single instruction flag.
 *
 * When set, HM will try its best to return to ring-3 after executing a single
 * instruction. This can be used for debugging. See also
 * EMR3HmSingleInstruction.
 *
 * @returns The old flag state.
 * @param   pVM         The cross context VM structure.
 * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
 * @param   fEnable     The new flag state.
 */
VMM_INT_DECL(bool) HMSetSingleInstruction(PVM pVM, PVMCPU pVCpu, bool fEnable)
{
    VMCPU_ASSERT_EMT(pVCpu);
    bool fOld = pVCpu->hm.s.fSingleInstruction;
    pVCpu->hm.s.fSingleInstruction = fEnable;
    pVCpu->hm.s.fUseDebugLoop = fEnable || pVM->hm.s.fUseDebugLoop;
    return fOld;
}


/**
 * Notifies HM that the GIM provider wants to trap \#UD.
 *
 * @param   pVCpu       The cross context virtual CPU structure.
 */
VMM_INT_DECL(void) HMTrapXcptUDForGIMEnable(PVMCPU pVCpu)
{
    pVCpu->hm.s.fGIMTrapXcptUD = true;
    if (pVCpu->CTX_SUFF(pVM)->hm.s.vmx.fSupported)
        ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_VMX_GUEST_XCPT_INTERCEPTS);
    else
        ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_SVM_GUEST_XCPT_INTERCEPTS);
}


/**
 * Notifies HM that the GIM provider no longer wants to trap \#UD.
 *
 * @param   pVCpu       The cross context virtual CPU structure.
 */
VMM_INT_DECL(void) HMTrapXcptUDForGIMDisable(PVMCPU pVCpu)
{
    pVCpu->hm.s.fGIMTrapXcptUD = false;
    if (pVCpu->CTX_SUFF(pVM)->hm.s.vmx.fSupported)
        ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_VMX_GUEST_XCPT_INTERCEPTS);
    else
        ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_SVM_GUEST_XCPT_INTERCEPTS);
}


/**
 * VMX nested-guest VM-exit handler.
 *
 * @param   pVCpu               The cross context virtual CPU structure.
 * @param   uBasicExitReason    The basic exit reason.
 */
VMM_INT_DECL(void) HMNstGstVmxVmExit(PVMCPU pVCpu, uint16_t uBasicExitReason)
{
    RT_NOREF2(pVCpu, uBasicExitReason);
}


#ifndef IN_RC
/**
 * Notification callback which is called whenever there is a chance that a CR3
 * value might have changed.
 *
 * This is called by PGM.
 *
 * @param   pVM             The cross context VM structure.
 * @param   pVCpu           The cross context virtual CPU structure.
 * @param   enmShadowMode   New shadow paging mode.
 * @param   enmGuestMode    New guest paging mode.
 */
VMM_INT_DECL(void) HMHCPagingModeChanged(PVM pVM, PVMCPU pVCpu, PGMMODE enmShadowMode, PGMMODE enmGuestMode)
{
# ifdef IN_RING3
    /* Ignore page mode changes during state loading. */
    if (VMR3GetState(pVM) == VMSTATE_LOADING)
        return;
# endif

    pVCpu->hm.s.enmShadowMode = enmShadowMode;

    /*
     * If the guest left protected mode VMX execution, we'll have to be
     * extra careful if/when the guest switches back to protected mode.
     */
    if (enmGuestMode == PGMMODE_REAL)
        pVCpu->hm.s.vmx.fWasInRealMode = true;

# ifdef IN_RING0
    /*
     * We need to tickle SVM and VT-x state updates.
     *
     * Note! We could probably reduce this depending on what exactly changed.
     */
    if (VM_IS_HM_ENABLED(pVM))
    {
        CPUM_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_EFER); /* No recursion! */
        uint64_t fChanged = HM_CHANGED_GUEST_CR0 | HM_CHANGED_GUEST_CR3 | HM_CHANGED_GUEST_CR4 | HM_CHANGED_GUEST_EFER_MSR;
        if (pVM->hm.s.svm.fSupported)
            fChanged |= HM_CHANGED_SVM_GUEST_XCPT_INTERCEPTS;
        else
            fChanged |= HM_CHANGED_VMX_GUEST_XCPT_INTERCEPTS | HM_CHANGED_VMX_ENTRY_CTLS | HM_CHANGED_VMX_EXIT_CTLS;
        ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, fChanged);
    }
# endif

    Log4(("HMHCPagingModeChanged: Guest paging mode '%s', shadow paging mode '%s'\n", PGMGetModeName(enmGuestMode),
          PGMGetModeName(enmShadowMode)));
}
#endif /* !IN_RC */