VirtualBox

source: kStuff/trunk/kLdr/kLdrModLX.c@ 75

Last change on this file since 75 was 58, checked in by bird, 11 years ago

Mach-O: Carve segments from the mach-o segments instead of the sections inside them. This works better for non-object files.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 87.2 KB
Line 
1/* $Id: kLdrModLX.c 58 2013-10-12 20:18:21Z bird $ */
2/** @file
3 * kLdr - The Module Interpreter for the Linear eXecutable (LX) Format.
4 */
5
6/*
7 * Copyright (c) 2006-2007 Knut St. Osmundsen <bird-kStuff-spamix@anduin.net>
8 *
9 * Permission is hereby granted, free of charge, to any person
10 * obtaining a copy of this software and associated documentation
11 * files (the "Software"), to deal in the Software without
12 * restriction, including without limitation the rights to use,
13 * copy, modify, merge, publish, distribute, sublicense, and/or sell
14 * copies of the Software, and to permit persons to whom the
15 * Software is furnished to do so, subject to the following
16 * conditions:
17 *
18 * The above copyright notice and this permission notice shall be
19 * included in all copies or substantial portions of the Software.
20 *
21 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
22 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
23 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
24 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
25 * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
26 * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
27 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
28 * OTHER DEALINGS IN THE SOFTWARE.
29 */
30
31/*******************************************************************************
32* Header Files *
33*******************************************************************************/
34#include <k/kLdr.h>
35#include "kLdrInternal.h"
36#include <k/kLdrFmts/lx.h>
37
38
39/*******************************************************************************
40* Defined Constants And Macros *
41*******************************************************************************/
/** @def KLDRMODLX_STRICT
 * Define KLDRMODLX_STRICT to enable strict checks in KLDRMODLX. */
#define KLDRMODLX_STRICT 1

/** @def KLDRMODLX_ASSERT
 * Assert that an expression is true when KLDRMODLX_STRICT is defined.
 */
#ifdef KLDRMODLX_STRICT
# define KLDRMODLX_ASSERT(expr) kHlpAssert(expr)
#else
# define KLDRMODLX_ASSERT(expr) do {} while (0)
#endif
54
55
56/*******************************************************************************
57* Structures and Typedefs *
58*******************************************************************************/
/**
 * Instance data for the LX module interpreter.
 *
 * Allocated as one block: KLDRMODLX, then the KLDRMOD (with its segment
 * array and filename), then the loader section (see kldrModLXDoCreate).
 */
typedef struct KLDRMODLX
{
    /** Pointer to the module. (Follows the section table.) */
    PKLDRMOD pMod;
    /** Pointer to the user mapping. */
    const void *pvMapping;
    /** The size of the mapped LX image. */
    KSIZE cbMapped;
    /** Reserved flags. */
    KU32 f32Reserved;

    /** The offset of the LX header. */
    KLDRFOFF offHdr;
    /** Copy of the LX header. */
    struct e32_exe Hdr;

    /** Pointer to the loader section.
     * Allocated together with this structure. */
    const KU8 *pbLoaderSection;
    /** Pointer to the last byte in the loader section. */
    const KU8 *pbLoaderSectionLast;
    /** Pointer to the object table in the loader section. */
    const struct o32_obj *paObjs;
    /** Pointer to the object page map table in the loader section. */
    const struct o32_map *paPageMappings;
    /** Pointer to the resource table in the loader section. */
    const struct rsrc32 *paRsrcs;
    /** Pointer to the resident name table in the loader section. */
    const KU8 *pbResNameTab;
    /** Pointer to the entry table in the loader section. */
    const KU8 *pbEntryTab;

    /** Pointer to the non-resident name table. (Lazily loaded, heap
     * allocated; freed by kldrModLXDestroy.) */
    KU8 *pbNonResNameTab;
    /** Pointer to the last byte in the non-resident name table. */
    const KU8 *pbNonResNameTabLast;

    /** Pointer to the fixup section. (Lazily loaded by
     * kldrModLXDoLoadFixupSection; freed by kldrModLXDestroy.) */
    KU8 *pbFixupSection;
    /** Pointer to the last byte in the fixup section.
     * NOTE(review): kldrModLXDoLoadFixupSection actually sets this to the
     * byte FOLLOWING the section (pbFixupSection + e32_fixupsize) — confirm
     * which is intended before relying on the bound. */
    const KU8 *pbFixupSectionLast;
    /** Pointer to the fixup page table within pvFixupSection. */
    const KU32 *paoffPageFixups;
    /** Pointer to the fixup record table within pvFixupSection. */
    const KU8 *pbFixupRecs;
    /** Pointer to the import module name table within pvFixupSection. */
    const KU8 *pbImportMods;
    /** Pointer to the import procedure name table within pvFixupSection. */
    const KU8 *pbImportProcs;
} KLDRMODLX, *PKLDRMODLX;
112
113
114/*******************************************************************************
115* Internal Functions *
116*******************************************************************************/
117static int kldrModLXHasDbgInfo(PKLDRMOD pMod, const void *pvBits);
118static int kldrModLXRelocateBits(PKLDRMOD pMod, void *pvBits, KLDRADDR NewBaseAddress, KLDRADDR OldBaseAddress,
119 PFNKLDRMODGETIMPORT pfnGetImport, void *pvUser);
120static int kldrModLXDoCreate(PKRDR pRdr, KLDRFOFF offNewHdr, PKLDRMODLX *ppModLX);
121static const KU8 *kldrModLXDoNameTableLookupByOrdinal(const KU8 *pbNameTable, KI32 cbNameTable, KU32 iOrdinal);
122static int kldrModLXDoNameLookup(PKLDRMODLX pModLX, const char *pchSymbol, KU32 cchSymbol, KU32 *piSymbol);
123static const KU8 *kldrModLXDoNameTableLookupByName(const KU8 *pbNameTable, KI32 cbNameTable,
124 const char *pchSymbol, KSIZE cchSymbol);
125static int kldrModLXDoLoadBits(PKLDRMODLX pModLX, void *pvBits);
126static int kldrModLXDoIterDataUnpacking(KU8 *pbDst, const KU8 *pbSrc, int cbSrc);
127static int kldrModLXDoIterData2Unpacking(KU8 *pbDst, const KU8 *pbSrc, int cbSrc);
128static void kLdrModLXMemCopyW(KU8 *pbDst, const KU8 *pbSrc, int cb);
129static int kldrModLXDoProtect(PKLDRMODLX pModLX, void *pvBits, unsigned fUnprotectOrProtect);
130static int kldrModLXDoCallDLL(PKLDRMODLX pModLX, unsigned uOp, KUPTR uHandle);
131static int kldrModLXDoForwarderQuery(PKLDRMODLX pModLX, const struct e32_entry *pEntry,
132 PFNKLDRMODGETIMPORT pfnGetForwarder, void *pvUser, PKLDRADDR puValue, KU32 *pfKind);
133static int kldrModLXDoLoadFixupSection(PKLDRMODLX pModLX);
134static KI32 kldrModLXDoCall(KUPTR uEntrypoint, KUPTR uHandle, KU32 uOp, void *pvReserved);
135static int kldrModLXDoReloc(KU8 *pbPage, int off, KLDRADDR PageAddress, const struct r32_rlc *prlc,
136 int iSelector, KLDRADDR uValue, KU32 fKind);
137
138
139/**
140 * Create a loader module instance interpreting the executable image found
141 * in the specified file provider instance.
142 *
143 * @returns 0 on success and *ppMod pointing to a module instance.
144 * On failure, a non-zero OS specific error code is returned.
145 * @param pOps Pointer to the registered method table.
146 * @param pRdr The file provider instance to use.
147 * @param fFlags Flags, MBZ.
148 * @param enmCpuArch The desired CPU architecture. KCPUARCH_UNKNOWN means
149 * anything goes, but with a preference for the current
150 * host architecture.
151 * @param offNewHdr The offset of the new header in MZ files. -1 if not found.
152 * @param ppMod Where to store the module instance pointer.
153 */
154static int kldrModLXCreate(PCKLDRMODOPS pOps, PKRDR pRdr, KU32 fFlags, KCPUARCH enmCpuArch, KLDRFOFF offNewHdr, PPKLDRMOD ppMod)
155{
156 PKLDRMODLX pModLX;
157 int rc;
158
159 /*
160 * Create the instance data and do a minimal header validation.
161 */
162 rc = kldrModLXDoCreate(pRdr, offNewHdr, &pModLX);
163 if (!rc)
164 {
165 /*
166 * Match up against the requested CPU architecture.
167 */
168 if ( enmCpuArch == KCPUARCH_UNKNOWN
169 || pModLX->pMod->enmArch == enmCpuArch)
170 {
171 pModLX->pMod->pOps = pOps;
172 pModLX->pMod->u32Magic = KLDRMOD_MAGIC;
173 *ppMod = pModLX->pMod;
174 return 0;
175 }
176 rc = KLDR_ERR_CPU_ARCH_MISMATCH;
177 }
178 kHlpFree(pModLX);
179 return rc;
180}
181
182
/**
 * Separate function for reading and creating the LX module instance to
 * simplify cleanup on failure.
 *
 * @returns 0 on success, kLdr or OS specific status code on failure.
 * @param   pRdr        The file provider to read the image from.
 * @param   offNewHdr   The file offset of the LX header (from the MZ stub);
 *                      negative when there is no stub.
 * @param   ppModLX     Where to store the instance.  NULL on early failure;
 *                      after allocation it holds the partially constructed
 *                      instance so the caller can free it on failure too.
 */
static int kldrModLXDoCreate(PKRDR pRdr, KLDRFOFF offNewHdr, PKLDRMODLX *ppModLX)
{
    struct e32_exe Hdr;
    PKLDRMODLX pModLX;
    PKLDRMOD pMod;
    KSIZE cb;
    KSIZE cchFilename;
    KU32 off, offEnd;
    KU32 i;
    int rc;
    int fCanOptimizeMapping;
    KU32 NextRVA;
    *ppModLX = NULL;

    /*
     * Read the signature and file header.
     */
    rc = kRdrRead(pRdr, &Hdr, sizeof(Hdr), offNewHdr > 0 ? offNewHdr : 0);
    if (rc)
        return rc;
    /* "LX" signature check. */
    if (    Hdr.e32_magic[0] != E32MAGIC1
        ||  Hdr.e32_magic[1] != E32MAGIC2)
        return KLDR_ERR_UNKNOWN_FORMAT;

    /* We're not interested in anything but x86 images. */
    if (    Hdr.e32_level != E32LEVEL
        ||  Hdr.e32_border != E32LEBO
        ||  Hdr.e32_worder != E32LEWO
        ||  Hdr.e32_cpu < E32CPU286
        ||  Hdr.e32_cpu > E32CPU486
        ||  Hdr.e32_pagesize != OBJPAGELEN
        )
        return KLDR_ERR_LX_BAD_HEADER;

    /* Some rough sanity checks: no table may point past the end of the file.
       NOTE(review): offNewHdr may be -1 ("not found") and is used directly in
       the subtractions below - confirm the KLDRFOFF/KU32 conversions behave
       for that case. */
    offEnd = kRdrSize(pRdr) >= (KLDRFOFF)~(KU32)16 ? ~(KU32)16 : (KU32)kRdrSize(pRdr);
    if (    Hdr.e32_itermap > offEnd
        ||  Hdr.e32_datapage > offEnd
        ||  Hdr.e32_nrestab > offEnd
        ||  Hdr.e32_nrestab + Hdr.e32_cbnrestab > offEnd
        ||  Hdr.e32_ldrsize > offEnd - offNewHdr - sizeof(Hdr)
        ||  Hdr.e32_fixupsize > offEnd - offNewHdr - sizeof(Hdr)
        ||  Hdr.e32_fixupsize + Hdr.e32_ldrsize > offEnd - offNewHdr - sizeof(Hdr))
        return KLDR_ERR_LX_BAD_HEADER;

    /* Verify the loader section: every table must lie within
       [e32_objtab, e32_objtab + e32_ldrsize). */
    offEnd = Hdr.e32_objtab + Hdr.e32_ldrsize;
    if (Hdr.e32_objtab < sizeof(Hdr))
        return KLDR_ERR_LX_BAD_LOADER_SECTION;
    off = Hdr.e32_objtab + sizeof(struct o32_obj) * Hdr.e32_objcnt;
    if (off > offEnd)
        return KLDR_ERR_LX_BAD_LOADER_SECTION;
    if (    Hdr.e32_objmap
        &&  (Hdr.e32_objmap < off || Hdr.e32_objmap > offEnd))
        return KLDR_ERR_LX_BAD_LOADER_SECTION;
    if (    Hdr.e32_rsrccnt
        &&  (   Hdr.e32_rsrctab < off
             || Hdr.e32_rsrctab > offEnd
             || Hdr.e32_rsrctab + sizeof(struct rsrc32) * Hdr.e32_rsrccnt > offEnd))
        return KLDR_ERR_LX_BAD_LOADER_SECTION;
    if (    Hdr.e32_restab
        &&  (Hdr.e32_restab < off || Hdr.e32_restab > offEnd - 2))
        return KLDR_ERR_LX_BAD_LOADER_SECTION;
    if (    Hdr.e32_enttab
        &&  (Hdr.e32_enttab < off || Hdr.e32_enttab >= offEnd))
        return KLDR_ERR_LX_BAD_LOADER_SECTION;
    if (    Hdr.e32_dircnt
        &&  (Hdr.e32_dirtab < off || Hdr.e32_dirtab > offEnd - 2))
        return KLDR_ERR_LX_BAD_LOADER_SECTION;

    /* Verify the fixup section, which follows the loader section. */
    off = offEnd;
    offEnd = off + Hdr.e32_fixupsize;
    if (    Hdr.e32_fpagetab
        &&  (Hdr.e32_fpagetab < off || Hdr.e32_fpagetab > offEnd))
    {
        /*
         * wlink mixes the fixup section and the loader section.
         * Rebase the fixup section on the fixup page table and shrink the
         * loader section accordingly (this modifies our local Hdr copy,
         * which is what gets stored in the instance below).
         */
        off = Hdr.e32_fpagetab;
        offEnd = off + Hdr.e32_fixupsize;
        Hdr.e32_ldrsize = off - Hdr.e32_objtab;
    }
    if (    Hdr.e32_frectab
        &&  (Hdr.e32_frectab < off || Hdr.e32_frectab > offEnd))
        return KLDR_ERR_LX_BAD_FIXUP_SECTION;
    if (    Hdr.e32_impmod
        &&  (Hdr.e32_impmod < off || Hdr.e32_impmod > offEnd || Hdr.e32_impmod + Hdr.e32_impmodcnt > offEnd))
        return KLDR_ERR_LX_BAD_FIXUP_SECTION;
    if (    Hdr.e32_impproc
        &&  (Hdr.e32_impproc < off || Hdr.e32_impproc > offEnd))
        return KLDR_ERR_LX_BAD_FIXUP_SECTION;

    /*
     * Calc the instance size, allocate and initialize it.
     * Layout: KLDRMODLX | KLDRMOD + segments | filename | loader section.
     * NOTE(review): the loader section is 16-aligned below while this budget
     * only 8-aligns the preceding pieces; the slack appears to come from the
     * extra aSegments entry (objcnt + 1) - confirm K_ALIGN_P can't overrun.
     */
    cchFilename = kHlpStrLen(kRdrName(pRdr));
    cb = K_ALIGN_Z(sizeof(KLDRMODLX), 8)
       + K_ALIGN_Z(K_OFFSETOF(KLDRMOD, aSegments[Hdr.e32_objcnt + 1]), 8)
       + K_ALIGN_Z(cchFilename + 1, 8)
       + Hdr.e32_ldrsize + 2; /* +2 for two extra zeros. */
    pModLX = (PKLDRMODLX)kHlpAlloc(cb);
    if (!pModLX)
        return KERR_NO_MEMORY;
    *ppModLX = pModLX;

    /* KLDRMOD */
    pMod = (PKLDRMOD)((KU8 *)pModLX + K_ALIGN_Z(sizeof(KLDRMODLX), 8));
    pMod->pvData = pModLX;
    pMod->pRdr = pRdr;
    pMod->pOps = NULL;      /* set upon success. */
    pMod->cSegments = Hdr.e32_objcnt;
    pMod->cchFilename = cchFilename;
    pMod->pszFilename = (char *)K_ALIGN_P(&pMod->aSegments[pMod->cSegments], 8);
    kHlpMemCopy((char *)pMod->pszFilename, kRdrName(pRdr), cchFilename + 1);
    pMod->pszName = NULL;   /* finalized further down */
    pMod->cchName = 0;
    pMod->fFlags = 0;
    /* e32_cpu was range-checked above, so exactly one case hits. */
    switch (Hdr.e32_cpu)
    {
        case E32CPU286:
            pMod->enmCpu = KCPU_I80286;
            pMod->enmArch = KCPUARCH_X86_16;
            break;
        case E32CPU386:
            pMod->enmCpu = KCPU_I386;
            pMod->enmArch = KCPUARCH_X86_32;
            break;
        case E32CPU486:
            pMod->enmCpu = KCPU_I486;
            pMod->enmArch = KCPUARCH_X86_32;
            break;
    }
    pMod->enmEndian = KLDRENDIAN_LITTLE;
    pMod->enmFmt = KLDRFMT_LX;
    /* Map the LX module-type flags onto the generic kLdr module types. */
    switch (Hdr.e32_mflags & E32MODMASK)
    {
        case E32MODEXE:
            pMod->enmType = !(Hdr.e32_mflags & E32NOINTFIX)
                ? KLDRTYPE_EXECUTABLE_RELOCATABLE
                : KLDRTYPE_EXECUTABLE_FIXED;
            break;

        case E32MODDLL:
        case E32PROTDLL:
        case E32MODPROTDLL:
            pMod->enmType = !(Hdr.e32_mflags & E32SYSDLL)
                ? KLDRTYPE_SHARED_LIBRARY_RELOCATABLE
                : KLDRTYPE_SHARED_LIBRARY_FIXED;
            break;

        case E32MODPDEV:
        case E32MODVDEV:
            pMod->enmType = KLDRTYPE_SHARED_LIBRARY_RELOCATABLE;
            break;
    }
    pMod->u32Magic = 0;     /* set upon success. */

    /* KLDRMODLX */
    pModLX->pMod = pMod;
    pModLX->pvMapping = 0;
    pModLX->cbMapped = 0;
    pModLX->f32Reserved = 0;

    pModLX->offHdr = offNewHdr >= 0 ? offNewHdr : 0;
    kHlpMemCopy(&pModLX->Hdr, &Hdr, sizeof(Hdr));

    pModLX->pbLoaderSection = K_ALIGN_P(pMod->pszFilename + pMod->cchFilename + 1, 16);
    pModLX->pbLoaderSectionLast = pModLX->pbLoaderSection + pModLX->Hdr.e32_ldrsize - 1;
    pModLX->paObjs = NULL;
    pModLX->paPageMappings = NULL;
    pModLX->paRsrcs = NULL;
    pModLX->pbResNameTab = NULL;
    pModLX->pbEntryTab = NULL;

    pModLX->pbNonResNameTab = NULL;
    pModLX->pbNonResNameTabLast = NULL;

    pModLX->pbFixupSection = NULL;
    pModLX->pbFixupSectionLast = NULL;
    pModLX->paoffPageFixups = NULL;
    pModLX->pbFixupRecs = NULL;
    pModLX->pbImportMods = NULL;
    pModLX->pbImportProcs = NULL;

    /*
     * Read the loader data.
     */
    rc = kRdrRead(pRdr, (void *)pModLX->pbLoaderSection, pModLX->Hdr.e32_ldrsize, pModLX->Hdr.e32_objtab + pModLX->offHdr);
    if (rc)
        return rc;
    /* The two extra zero bytes budgeted above: they terminate the last
       pascal-string + ordinal pair so name table parsing can't run off. */
    ((KU8 *)pModLX->pbLoaderSectionLast)[1] = 0;
    ((KU8 *)pModLX->pbLoaderSectionLast)[2] = 0;
    /* Resolve the table pointers inside the loader section (all offsets in
       the header are relative to the file, hence the e32_objtab bias). */
    if (pModLX->Hdr.e32_objcnt)
        pModLX->paObjs = (const struct o32_obj *)pModLX->pbLoaderSection;
    if (pModLX->Hdr.e32_objmap)
        pModLX->paPageMappings = (const struct o32_map *)(pModLX->pbLoaderSection + pModLX->Hdr.e32_objmap - pModLX->Hdr.e32_objtab);
    if (pModLX->Hdr.e32_rsrccnt)
        pModLX->paRsrcs = (const struct rsrc32 *)(pModLX->pbLoaderSection + pModLX->Hdr.e32_rsrctab - pModLX->Hdr.e32_objtab);
    if (pModLX->Hdr.e32_restab)
        pModLX->pbResNameTab = pModLX->pbLoaderSection + pModLX->Hdr.e32_restab - pModLX->Hdr.e32_objtab;
    if (pModLX->Hdr.e32_enttab)
        pModLX->pbEntryTab = pModLX->pbLoaderSection + pModLX->Hdr.e32_enttab - pModLX->Hdr.e32_objtab;

    /*
     * Get the soname from the resident name table.
     * Very convenient that it's the 0 ordinal, because then we get a
     * free string terminator.
     * (The table entry consists of a pascal string followed by a 16-bit ordinal.)
     */
    if (pModLX->pbResNameTab)
        pMod->pszName = (const char *)kldrModLXDoNameTableLookupByOrdinal(pModLX->pbResNameTab,
                                                                          pModLX->pbLoaderSectionLast - pModLX->pbResNameTab + 1,
                                                                          0);
    if (!pMod->pszName)
        return KLDR_ERR_LX_NO_SONAME;
    /* Consume the pascal length prefix, leaving pszName on the chars. */
    pMod->cchName = *(const KU8 *)pMod->pszName++;
    if (pMod->cchName != kHlpStrLen(pMod->pszName))
        return KLDR_ERR_LX_BAD_SONAME;

    /*
     * Quick validation of the object table.
     */
    cb = 0;
    for (i = 0; i < pMod->cSegments; i++)
    {
        /* Objects must be page aligned and not wrap around. */
        if (pModLX->paObjs[i].o32_base & (OBJPAGELEN - 1))
            return KLDR_ERR_LX_BAD_OBJECT_TABLE;
        if (pModLX->paObjs[i].o32_base + pModLX->paObjs[i].o32_size <= pModLX->paObjs[i].o32_base)
            return KLDR_ERR_LX_BAD_OBJECT_TABLE;
        if (pModLX->paObjs[i].o32_mapsize > (pModLX->paObjs[i].o32_size + (OBJPAGELEN - 1)))
            return KLDR_ERR_LX_BAD_OBJECT_TABLE;
        /* The page map range referenced by the object must be inside the
           loader section we just read. */
        if (    pModLX->paObjs[i].o32_mapsize
            &&  (   (KU8 *)&pModLX->paPageMappings[pModLX->paObjs[i].o32_pagemap] > pModLX->pbLoaderSectionLast
                 || (KU8 *)&pModLX->paPageMappings[pModLX->paObjs[i].o32_pagemap + pModLX->paObjs[i].o32_mapsize]
                     > pModLX->pbLoaderSectionLast))
            return KLDR_ERR_LX_BAD_OBJECT_TABLE;
        /* Non-resource objects must be in ascending, non-overlapping order. */
        if (i > 0 && !(pModLX->paObjs[i].o32_flags & OBJRSRC))
        {
            if (pModLX->paObjs[i].o32_base <= pModLX->paObjs[i - 1].o32_base)
                return KLDR_ERR_LX_BAD_OBJECT_TABLE;
            if (pModLX->paObjs[i].o32_base < pModLX->paObjs[i - 1].o32_base + pModLX->paObjs[i - 1].o32_mapsize)
                return KLDR_ERR_LX_BAD_OBJECT_TABLE;
        }
    }

    /*
     * Check if we can optimize the mapping by using a different
     * object alignment. The linker typically uses 64KB alignment,
     * we can easily get away with page alignment in most cases.
     */
    fCanOptimizeMapping = !(Hdr.e32_mflags & (E32NOINTFIX | E32SYSDLL));
    NextRVA = 0;

    /*
     * Setup the KLDRMOD segment array.
     */
    for (i = 0; i < pMod->cSegments; i++)
    {
        /* unused */
        pMod->aSegments[i].pvUser = NULL;
        pMod->aSegments[i].MapAddress = 0;
        pMod->aSegments[i].pchName = NULL;
        pMod->aSegments[i].cchName = 0;
        pMod->aSegments[i].offFile = -1;
        pMod->aSegments[i].cbFile = -1;
        pMod->aSegments[i].SelFlat = 0;
        pMod->aSegments[i].Sel16bit = 0;

        /* flags
           NOTE(review): each assignment below overwrites fFlags rather than
           OR-ing into it, so only the last matching o32 flag survives -
           confirm whether '=' should be '|='. */
        pMod->aSegments[i].fFlags = 0;
        if (pModLX->paObjs[i].o32_flags & OBJBIGDEF)
            pMod->aSegments[i].fFlags = KLDRSEG_FLAG_16BIT;
        if (pModLX->paObjs[i].o32_flags & OBJALIAS16)
            pMod->aSegments[i].fFlags = KLDRSEG_FLAG_OS2_ALIAS16;
        if (pModLX->paObjs[i].o32_flags & OBJCONFORM)
            pMod->aSegments[i].fFlags = KLDRSEG_FLAG_OS2_CONFORM;
        if (pModLX->paObjs[i].o32_flags & OBJIOPL)
            pMod->aSegments[i].fFlags = KLDRSEG_FLAG_OS2_IOPL;

        /* size and addresses */
        pMod->aSegments[i].Alignment = OBJPAGELEN;
        pMod->aSegments[i].cb = pModLX->paObjs[i].o32_size;
        pMod->aSegments[i].LinkAddress = pModLX->paObjs[i].o32_base;
        pMod->aSegments[i].RVA = NextRVA;
        /* Either pack the segment to page granularity or keep the original
           distance to the next object's link address. */
        if (    fCanOptimizeMapping
            ||  i + 1 >= pMod->cSegments
            ||  (pModLX->paObjs[i].o32_flags & OBJRSRC)
            ||  (pModLX->paObjs[i + 1].o32_flags & OBJRSRC))
            pMod->aSegments[i].cbMapped = K_ALIGN_Z(pModLX->paObjs[i].o32_size, OBJPAGELEN);
        else
            pMod->aSegments[i].cbMapped = pModLX->paObjs[i + 1].o32_base - pModLX->paObjs[i].o32_base;
        NextRVA += pMod->aSegments[i].cbMapped;

        /* protection */
        switch (    pModLX->paObjs[i].o32_flags
                &   (OBJSHARED | OBJREAD | OBJWRITE | OBJEXEC))
        {
            case 0:
            case OBJSHARED:
                pMod->aSegments[i].enmProt = KPROT_NOACCESS;
                break;
            case OBJREAD:
            case OBJREAD | OBJSHARED:
                pMod->aSegments[i].enmProt = KPROT_READONLY;
                break;
            case OBJWRITE:
            case OBJWRITE | OBJREAD:
                pMod->aSegments[i].enmProt = KPROT_WRITECOPY;
                break;
            case OBJWRITE | OBJSHARED:
            case OBJWRITE | OBJSHARED | OBJREAD:
                pMod->aSegments[i].enmProt = KPROT_READWRITE;
                break;
            case OBJEXEC:
            case OBJEXEC | OBJSHARED:
                pMod->aSegments[i].enmProt = KPROT_EXECUTE;
                break;
            case OBJEXEC | OBJREAD:
            case OBJEXEC | OBJREAD | OBJSHARED:
                pMod->aSegments[i].enmProt = KPROT_EXECUTE_READ;
                break;
            case OBJEXEC | OBJWRITE:
            case OBJEXEC | OBJWRITE | OBJREAD:
                pMod->aSegments[i].enmProt = KPROT_EXECUTE_WRITECOPY;
                break;
            case OBJEXEC | OBJWRITE | OBJSHARED:
            case OBJEXEC | OBJWRITE | OBJSHARED | OBJREAD:
                pMod->aSegments[i].enmProt = KPROT_EXECUTE_READWRITE;
                break;
        }
        /* A pure resource object with no access bits gets read-only. */
        if ((pModLX->paObjs[i].o32_flags & (OBJREAD | OBJWRITE | OBJEXEC | OBJRSRC)) == OBJRSRC)
            pMod->aSegments[i].enmProt = KPROT_READONLY;
        /*pMod->aSegments[i].f16bit = !(pModLX->paObjs[i].o32_flags & OBJBIGDEF)
        pMod->aSegments[i].fIOPL = !(pModLX->paObjs[i].o32_flags & OBJIOPL)
        pMod->aSegments[i].fConforming = !(pModLX->paObjs[i].o32_flags & OBJCONFORM) */
    }

    /* set the mapping size */
    pModLX->cbMapped = NextRVA;

    /*
     * We're done.
     */
    *ppModLX = pModLX;
    return 0;
}
534
535
536/** @copydoc KLDRMODOPS::pfnDestroy */
537static int kldrModLXDestroy(PKLDRMOD pMod)
538{
539 PKLDRMODLX pModLX = (PKLDRMODLX)pMod->pvData;
540 int rc = 0;
541 KLDRMODLX_ASSERT(!pModLX->pvMapping);
542
543 if (pMod->pRdr)
544 {
545 rc = kRdrClose(pMod->pRdr);
546 pMod->pRdr = NULL;
547 }
548 if (pModLX->pbNonResNameTab)
549 {
550 kHlpFree(pModLX->pbNonResNameTab);
551 pModLX->pbNonResNameTab = NULL;
552 }
553 if (pModLX->pbFixupSection)
554 {
555 kHlpFree(pModLX->pbFixupSection);
556 pModLX->pbFixupSection = NULL;
557 }
558 pMod->u32Magic = 0;
559 pMod->pOps = NULL;
560 kHlpFree(pModLX);
561 return rc;
562}
563
564
565/**
566 * Resolved base address aliases.
567 *
568 * @param pModLX The interpreter module instance
569 * @param pBaseAddress The base address, IN & OUT.
570 */
571static void kldrModLXResolveBaseAddress(PKLDRMODLX pModLX, PKLDRADDR pBaseAddress)
572{
573 if (*pBaseAddress == KLDRMOD_BASEADDRESS_MAP)
574 *pBaseAddress = pModLX->pMod->aSegments[0].MapAddress;
575 else if (*pBaseAddress == KLDRMOD_BASEADDRESS_LINK)
576 *pBaseAddress = pModLX->pMod->aSegments[0].LinkAddress;
577}
578
579
/** @copydoc kLdrModQuerySymbol
 * Resolves an export by ordinal (iSymbol) or name (pchSymbol/cchSymbol) by
 * walking the LX entry table bundles.  pvBits and pszVersion are unused by
 * the LX interpreter. */
static int kldrModLXQuerySymbol(PKLDRMOD pMod, const void *pvBits, KLDRADDR BaseAddress, KU32 iSymbol,
                                const char *pchSymbol, KSIZE cchSymbol, const char *pszVersion,
                                PFNKLDRMODGETIMPORT pfnGetForwarder, void *pvUser, PKLDRADDR puValue, KU32 *pfKind)
{
    PKLDRMODLX pModLX = (PKLDRMODLX)pMod->pvData;
    KU32 iOrdinal;
    int rc;
    const struct b32_bundle *pBundle;


    /*
     * Give up at once if there is no entry table.
     */
    if (!pModLX->Hdr.e32_enttab)
        return KLDR_ERR_SYMBOL_NOT_FOUND;

    /*
     * Translate the symbol name into an ordinal.
     * (A name lookup overrides any ordinal passed in iSymbol.)
     */
    if (pchSymbol)
    {
        rc = kldrModLXDoNameLookup(pModLX, pchSymbol, cchSymbol, &iSymbol);
        if (rc)
            return rc;
    }

    /*
     * Iterate the entry table.
     * (The entry table is made up of bundles of similar exports.)
     */
    iOrdinal = 1;
    pBundle = (const struct b32_bundle *)pModLX->pbEntryTab;
    while (pBundle->b32_cnt && iOrdinal <= iSymbol)
    {
        /* Per-entry sizes in bytes, indexed by bundle type
           (EMPTY, ENTRY16, GATE16, ENTRY32, ENTRYFWD). */
        static const KSIZE s_cbEntry[] = { 0, 3, 5, 5, 7 };

        /*
         * Check for a hit first.
         */
        iOrdinal += pBundle->b32_cnt;
        if (iSymbol < iOrdinal)
        {
            KU32 offObject;
            /* Index the variable-sized entry array that follows the bundle
               header to reach the entry for the requested ordinal. */
            const struct e32_entry *pEntry = (const struct e32_entry *)((KUPTR)(pBundle + 1)
                                                                       + (iSymbol - (iOrdinal - pBundle->b32_cnt))
                                                                       * s_cbEntry[pBundle->b32_type]);

            /*
             * Calculate the return address.
             */
            kldrModLXResolveBaseAddress(pModLX, &BaseAddress);
            switch (pBundle->b32_type)
            {
                /* empty bundles are place holders for unused ordinal ranges. */
                case EMPTY:
                    return KLDR_ERR_SYMBOL_NOT_FOUND;

                /* e32_flags + a 16-bit offset. */
                case ENTRY16:
                    offObject = pEntry->e32_variant.e32_offset.offset16;
                    if (pfKind)
                        *pfKind = KLDRSYMKIND_16BIT | KLDRSYMKIND_NO_TYPE;
                    break;

                /* e32_flags + a 16-bit offset + a 16-bit callgate selector. */
                case GATE16:
                    offObject = pEntry->e32_variant.e32_callgate.offset;
                    if (pfKind)
                        *pfKind = KLDRSYMKIND_16BIT | KLDRSYMKIND_CODE;
                    break;

                /* e32_flags + a 32-bit offset. */
                case ENTRY32:
                    offObject = pEntry->e32_variant.e32_offset.offset32;
                    if (pfKind)
                        *pfKind = KLDRSYMKIND_32BIT;
                    break;

                /* e32_flags + 16-bit import module ordinal + a 32-bit procname or ordinal. */
                case ENTRYFWD:
                    return kldrModLXDoForwarderQuery(pModLX, pEntry, pfnGetForwarder, pvUser, puValue, pfKind);

                default:
                    /* anyone actually using TYPEINFO will end up here. */
                    KLDRMODLX_ASSERT(!"Bad bundle type");
                    return KLDR_ERR_LX_BAD_BUNDLE;
            }

            /*
             * Validate the object number and calc the return address.
             * (b32_obj is 1-based; segment array is 0-based.)
             */
            if (    pBundle->b32_obj <= 0
                ||  pBundle->b32_obj > pMod->cSegments)
                return KLDR_ERR_LX_BAD_BUNDLE;
            if (puValue)
                *puValue = BaseAddress
                         + offObject
                         + pMod->aSegments[pBundle->b32_obj - 1].RVA;
            return 0;
        }

        /*
         * Skip the bundle.
         * (An EMPTY bundle has no b32_obj field, hence the 2-byte advance.)
         */
        if (pBundle->b32_type > ENTRYFWD)
        {
            KLDRMODLX_ASSERT(!"Bad type"); /** @todo figure out TYPEINFO. */
            return KLDR_ERR_LX_BAD_BUNDLE;
        }
        if (pBundle->b32_type == 0)
            pBundle = (const struct b32_bundle *)((const KU8 *)pBundle + 2);
        else
            pBundle = (const struct b32_bundle *)((const KU8 *)(pBundle + 1) + s_cbEntry[pBundle->b32_type] * pBundle->b32_cnt);
    }

    return KLDR_ERR_SYMBOL_NOT_FOUND;
}
698
699
700/**
701 * Do name lookup.
702 *
703 * @returns See kLdrModQuerySymbol.
704 * @param pModLX The module to lookup the symbol in.
705 * @param pchSymbol The symbol to lookup.
706 * @param cchSymbol The symbol name length.
707 * @param piSymbol Where to store the symbol ordinal.
708 */
709static int kldrModLXDoNameLookup(PKLDRMODLX pModLX, const char *pchSymbol, KU32 cchSymbol, KU32 *piSymbol)
710{
711
712 /*
713 * First do a hash table lookup.
714 */
715 /** @todo hash name table for speed. */
716
717 /*
718 * Search the name tables.
719 */
720 const KU8 *pbName = kldrModLXDoNameTableLookupByName(pModLX->pbResNameTab,
721 pModLX->pbLoaderSectionLast - pModLX->pbResNameTab + 1,
722 pchSymbol, cchSymbol);
723 if (!pbName)
724 {
725 if (!pModLX->pbNonResNameTab)
726 {
727 /* lazy load it */
728 /** @todo non-resident name table. */
729 }
730 if (pModLX->pbNonResNameTab)
731 pbName = kldrModLXDoNameTableLookupByName(pModLX->pbResNameTab,
732 pModLX->pbNonResNameTabLast - pModLX->pbResNameTab + 1,
733 pchSymbol, cchSymbol);
734 }
735 if (!pbName)
736 return KLDR_ERR_SYMBOL_NOT_FOUND;
737
738 *piSymbol = *(const KU16 *)(pbName + 1 + *pbName);
739 return 0;
740}
741
742
#if 0
/**
 * Hash a symbol using the algorithm from sdbm.
 *
 * The following is the documentation of the original sdbm functions:
 *
 * This algorithm was created for sdbm (a public-domain reimplementation of
 * ndbm) database library. it was found to do well in scrambling bits,
 * causing better distribution of the keys and fewer splits. it also happens
 * to be a good general hashing function with good distribution. the actual
 * function is hash(i) = hash(i - 1) * 65599 + str[i]; what is included below
 * is the faster version used in gawk. [there is even a faster, duff-device
 * version] the magic constant 65599 was picked out of thin air while
 * experimenting with different constants, and turns out to be a prime.
 * this is one of the algorithms used in berkeley db (see sleepycat) and
 * elsewhere.
 *
 * @returns The 32-bit hash of the first cchSymbol characters of pchSymbol,
 *          stopping early at a NUL byte.
 */
static KU32 kldrModLXDoHash(const char *pchSymbol, KU8 cchSymbol)
{
    KU32 hash = 0;
    int ch;

    while (     cchSymbol-- > 0
           &&   (ch = *(unsigned const char *)pchSymbol++))
        /* hash * 65599 + ch, expressed with shifts. */
        hash = ch + (hash << 6) + (hash << 16) - hash;

    return hash;
}
#endif
772
773
774/**
775 * Lookup a name table entry by name.
776 *
777 * @returns Pointer to the name table entry if found.
778 * @returns NULL if not found.
779 * @param pbNameTable Pointer to the name table that should be searched.
780 * @param cbNameTable The size of the name table.
781 * @param pchSymbol The name of the symbol we're looking for.
782 * @param cchSymbol The length of the symbol name.
783 */
784static const KU8 *kldrModLXDoNameTableLookupByName(const KU8 *pbNameTable, KI32 cbNameTable,
785 const char *pchSymbol, KSIZE cchSymbol)
786{
787 /*
788 * Determin the namelength up front so we can skip anything which doesn't matches the length.
789 */
790 KU8 cbSymbol8Bit = (KU8)cchSymbol;
791 if (cbSymbol8Bit != cchSymbol)
792 return NULL; /* too long. */
793
794 /*
795 * Walk the name table.
796 */
797 while (*pbNameTable != 0 && cbNameTable > 0)
798 {
799 const KU8 cbName = *pbNameTable;
800
801 cbNameTable -= cbName + 1 + 2;
802 if (cbNameTable < 0)
803 break;
804
805 if ( cbName == cbSymbol8Bit
806 && !kHlpMemComp(pbNameTable + 1, pchSymbol, cbName))
807 return pbNameTable;
808
809 /* next entry */
810 pbNameTable += cbName + 1 + 2;
811 }
812
813 return NULL;
814}
815
816
817/**
818 * Deal with a forwarder entry.
819 *
820 * @returns See kLdrModQuerySymbol.
821 * @param pModLX The PE module interpreter instance.
822 * @param pEntry The forwarder entry.
823 * @param pfnGetForwarder The callback for resolving forwarder symbols. (optional)
824 * @param pvUser The user argument for the callback.
825 * @param puValue Where to put the value. (optional)
826 * @param pfKind Where to put the symbol kind. (optional)
827 */
828static int kldrModLXDoForwarderQuery(PKLDRMODLX pModLX, const struct e32_entry *pEntry,
829 PFNKLDRMODGETIMPORT pfnGetForwarder, void *pvUser, PKLDRADDR puValue, KU32 *pfKind)
830{
831 int rc;
832 KU32 iSymbol;
833 const char *pchSymbol;
834 KU8 cchSymbol;
835
836 if (!pfnGetForwarder)
837 return KLDR_ERR_FORWARDER_SYMBOL;
838
839 /*
840 * Validate the entry import module ordinal.
841 */
842 if ( !pEntry->e32_variant.e32_fwd.modord
843 || pEntry->e32_variant.e32_fwd.modord > pModLX->Hdr.e32_impmodcnt)
844 return KLDR_ERR_LX_BAD_FORWARDER;
845
846 /*
847 * Figure out the parameters.
848 */
849 if (pEntry->e32_flags & FWD_ORDINAL)
850 {
851 iSymbol = pEntry->e32_variant.e32_fwd.value;
852 pchSymbol = NULL; /* no symbol name. */
853 cchSymbol = 0;
854 }
855 else
856 {
857 const KU8 *pbName;
858
859 /* load the fixup section if necessary. */
860 if (!pModLX->pbImportProcs)
861 {
862 rc = kldrModLXDoLoadFixupSection(pModLX);
863 if (rc)
864 return rc;
865 }
866
867 /* Make name pointer. */
868 pbName = pModLX->pbImportProcs + pEntry->e32_variant.e32_fwd.value;
869 if ( pbName >= pModLX->pbFixupSectionLast
870 || pbName < pModLX->pbFixupSection
871 || !*pbName)
872 return KLDR_ERR_LX_BAD_FORWARDER;
873
874
875 /* check for '#' name. */
876 if (pbName[1] == '#')
877 {
878 KU8 cbLeft = *pbName;
879 const KU8 *pb = pbName + 1;
880 unsigned uBase;
881
882 /* base detection */
883 uBase = 10;
884 if ( cbLeft > 1
885 && pb[1] == '0'
886 && (pb[2] == 'x' || pb[2] == 'X'))
887 {
888 uBase = 16;
889 pb += 2;
890 cbLeft -= 2;
891 }
892
893 /* ascii to integer */
894 iSymbol = 0;
895 while (cbLeft-- > 0)
896 {
897 /* convert char to digit. */
898 unsigned uDigit = *pb++;
899 if (uDigit >= '0' && uDigit <= '9')
900 uDigit -= '0';
901 else if (uDigit >= 'a' && uDigit <= 'z')
902 uDigit -= 'a' + 10;
903 else if (uDigit >= 'A' && uDigit <= 'Z')
904 uDigit -= 'A' + 10;
905 else if (!uDigit)
906 break;
907 else
908 return KLDR_ERR_LX_BAD_FORWARDER;
909 if (uDigit >= uBase)
910 return KLDR_ERR_LX_BAD_FORWARDER;
911
912 /* insert the digit */
913 iSymbol *= uBase;
914 iSymbol += uDigit;
915 }
916 if (!iSymbol)
917 return KLDR_ERR_LX_BAD_FORWARDER;
918
919 pchSymbol = NULL; /* no symbol name. */
920 cchSymbol = 0;
921 }
922 else
923 {
924 pchSymbol = (char *)pbName + 1;
925 cchSymbol = *pbName;
926 iSymbol = NIL_KLDRMOD_SYM_ORDINAL;
927 }
928 }
929
930 /*
931 * Resolve the forwarder.
932 */
933 rc = pfnGetForwarder(pModLX->pMod, pEntry->e32_variant.e32_fwd.modord - 1, iSymbol, pchSymbol, cchSymbol, NULL, puValue, pfKind, pvUser);
934 if (!rc && pfKind)
935 *pfKind |= KLDRSYMKIND_FORWARDER;
936 return rc;
937}
938
939
/**
 * Loads the fixup section from the executable image.
 *
 * The fixup section isn't loaded until it's accessed. It's also freed by kLdrModDone().
 *
 * @returns 0 on success, non-zero kLdr or native status code on failure.
 * @param   pModLX          The LX module interpreter instance.
 */
static int kldrModLXDoLoadFixupSection(PKLDRMODLX pModLX)
{
    int rc;
    KU32 off;
    void *pv;

    pv = kHlpAlloc(pModLX->Hdr.e32_fixupsize);
    if (!pv)
        return KERR_NO_MEMORY;

    /* File offset of the fixup section relative to the LX header; presumably
       the loader section (starting at e32_objtab, of size e32_ldrsize) is
       immediately followed by the fixup section - TODO confirm vs LX spec. */
    off = pModLX->Hdr.e32_objtab + pModLX->Hdr.e32_ldrsize;
    rc = kRdrRead(pModLX->pMod->pRdr, pv, pModLX->Hdr.e32_fixupsize,
                  off + pModLX->offHdr);
    if (!rc)
    {
        pModLX->pbFixupSection = pv;
        pModLX->pbFixupSectionLast = pModLX->pbFixupSection + pModLX->Hdr.e32_fixupsize;
        /* Resolve the sub-tables inside the section.  The header offsets are
           relative to the LX header, hence the '- off' adjustments below.
           Zero offsets mean the table is absent. */
        KLDRMODLX_ASSERT(!pModLX->paoffPageFixups);
        if (pModLX->Hdr.e32_fpagetab)
            pModLX->paoffPageFixups = (const KU32 *)(pModLX->pbFixupSection + pModLX->Hdr.e32_fpagetab - off);
        KLDRMODLX_ASSERT(!pModLX->pbFixupRecs);
        if (pModLX->Hdr.e32_frectab)
            pModLX->pbFixupRecs = pModLX->pbFixupSection + pModLX->Hdr.e32_frectab - off;
        KLDRMODLX_ASSERT(!pModLX->pbImportMods);
        if (pModLX->Hdr.e32_impmod)
            pModLX->pbImportMods = pModLX->pbFixupSection + pModLX->Hdr.e32_impmod - off;
        KLDRMODLX_ASSERT(!pModLX->pbImportProcs);
        if (pModLX->Hdr.e32_impproc)
            pModLX->pbImportProcs = pModLX->pbFixupSection + pModLX->Hdr.e32_impproc - off;
    }
    else
        kHlpFree(pv);
    return rc;
}
982
983
984/** @copydoc kLdrModEnumSymbols */
985static int kldrModLXEnumSymbols(PKLDRMOD pMod, const void *pvBits, KLDRADDR BaseAddress,
986 KU32 fFlags, PFNKLDRMODENUMSYMS pfnCallback, void *pvUser)
987{
988 PKLDRMODLX pModLX = (PKLDRMODLX)pMod->pvData;
989 const struct b32_bundle *pBundle;
990 KU32 iOrdinal;
991 int rc = 0;
992
993 kldrModLXResolveBaseAddress(pModLX, &BaseAddress);
994
995 /*
996 * Enumerate the entry table.
997 * (The entry table is made up of bundles of similar exports.)
998 */
999 iOrdinal = 1;
1000 pBundle = (const struct b32_bundle *)pModLX->pbEntryTab;
1001 while (pBundle->b32_cnt && iOrdinal)
1002 {
1003 static const KSIZE s_cbEntry[] = { 0, 3, 5, 5, 7 };
1004
1005 /*
1006 * Enum the entries in the bundle.
1007 */
1008 if (pBundle->b32_type != EMPTY)
1009 {
1010 const struct e32_entry *pEntry;
1011 KSIZE cbEntry;
1012 KLDRADDR BundleRVA;
1013 unsigned cLeft;
1014
1015
1016 /* Validate the bundle. */
1017 switch (pBundle->b32_type)
1018 {
1019 case ENTRY16:
1020 case GATE16:
1021 case ENTRY32:
1022 if ( pBundle->b32_obj <= 0
1023 || pBundle->b32_obj > pMod->cSegments)
1024 return KLDR_ERR_LX_BAD_BUNDLE;
1025 BundleRVA = pMod->aSegments[pBundle->b32_obj - 1].RVA;
1026 break;
1027
1028 case ENTRYFWD:
1029 BundleRVA = 0;
1030 break;
1031
1032 default:
1033 /* anyone actually using TYPEINFO will end up here. */
1034 KLDRMODLX_ASSERT(!"Bad bundle type");
1035 return KLDR_ERR_LX_BAD_BUNDLE;
1036 }
1037
1038 /* iterate the bundle entries. */
1039 cbEntry = s_cbEntry[pBundle->b32_type];
1040 pEntry = (const struct e32_entry *)(pBundle + 1);
1041 cLeft = pBundle->b32_cnt;
1042 while (cLeft-- > 0)
1043 {
1044 KLDRADDR uValue;
1045 KU32 fKind;
1046 int fFoundName;
1047 const KU8 *pbName;
1048
1049 /*
1050 * Calc the symbol value and kind.
1051 */
1052 switch (pBundle->b32_type)
1053 {
1054 /* e32_flags + a 16-bit offset. */
1055 case ENTRY16:
1056 uValue = BaseAddress + BundleRVA + pEntry->e32_variant.e32_offset.offset16;
1057 fKind = KLDRSYMKIND_16BIT | KLDRSYMKIND_NO_TYPE;
1058 break;
1059
1060 /* e32_flags + a 16-bit offset + a 16-bit callgate selector. */
1061 case GATE16:
1062 uValue = BaseAddress + BundleRVA + pEntry->e32_variant.e32_callgate.offset;
1063 fKind = KLDRSYMKIND_16BIT | KLDRSYMKIND_CODE;
1064 break;
1065
1066 /* e32_flags + a 32-bit offset. */
1067 case ENTRY32:
1068 uValue = BaseAddress + BundleRVA + pEntry->e32_variant.e32_offset.offset32;
1069 fKind = KLDRSYMKIND_32BIT;
1070 break;
1071
1072 /* e32_flags + 16-bit import module ordinal + a 32-bit procname or ordinal. */
1073 case ENTRYFWD:
1074 uValue = 0; /** @todo implement enumeration of forwarders properly. */
1075 fKind = KLDRSYMKIND_FORWARDER;
1076 break;
1077
1078 default: /* shut up gcc. */
1079 uValue = 0;
1080 fKind = KLDRSYMKIND_NO_BIT | KLDRSYMKIND_NO_TYPE;
1081 break;
1082 }
1083
1084 /*
1085 * Any symbol names?
1086 */
1087 fFoundName = 0;
1088
1089 /* resident name table. */
1090 pbName = pModLX->pbResNameTab;
1091 if (pbName)
1092 {
1093 do
1094 {
1095 pbName = kldrModLXDoNameTableLookupByOrdinal(pbName, pModLX->pbLoaderSectionLast - pbName + 1, iOrdinal);
1096 if (!pbName)
1097 break;
1098 fFoundName = 1;
1099 rc = pfnCallback(pMod, iOrdinal, (const char *)pbName + 1, *pbName, NULL, uValue, fKind, pvUser);
1100 if (rc)
1101 return rc;
1102
1103 /* skip to the next entry */
1104 pbName += 1 + *pbName + 2;
1105 } while (pbName < pModLX->pbLoaderSectionLast);
1106 }
1107
1108 /* resident name table. */
1109 pbName = pModLX->pbNonResNameTab;
1110 /** @todo lazy load the non-resident name table. */
1111 if (pbName)
1112 {
1113 do
1114 {
1115 pbName = kldrModLXDoNameTableLookupByOrdinal(pbName, pModLX->pbNonResNameTabLast - pbName + 1, iOrdinal);
1116 if (!pbName)
1117 break;
1118 fFoundName = 1;
1119 rc = pfnCallback(pMod, iOrdinal, (const char *)pbName + 1, *pbName, NULL, uValue, fKind, pvUser);
1120 if (rc)
1121 return rc;
1122
1123 /* skip to the next entry */
1124 pbName += 1 + *pbName + 2;
1125 } while (pbName < pModLX->pbLoaderSectionLast);
1126 }
1127
1128 /*
1129 * If no names, call once with the ordinal only.
1130 */
1131 if (!fFoundName)
1132 {
1133 rc = pfnCallback(pMod, iOrdinal, NULL, 0, NULL, uValue, fKind, pvUser);
1134 if (rc)
1135 return rc;
1136 }
1137
1138 /* next */
1139 iOrdinal++;
1140 pEntry = (const struct e32_entry *)((KUPTR)pEntry + cbEntry);
1141 }
1142 }
1143
1144 /*
1145 * The next bundle.
1146 */
1147 if (pBundle->b32_type > ENTRYFWD)
1148 {
1149 KLDRMODLX_ASSERT(!"Bad type"); /** @todo figure out TYPEINFO. */
1150 return KLDR_ERR_LX_BAD_BUNDLE;
1151 }
1152 if (pBundle->b32_type == 0)
1153 pBundle = (const struct b32_bundle *)((const KU8 *)pBundle + 2);
1154 else
1155 pBundle = (const struct b32_bundle *)((const KU8 *)(pBundle + 1) + s_cbEntry[pBundle->b32_type] * pBundle->b32_cnt);
1156 }
1157
1158 return 0;
1159}
1160
1161
1162/**
1163 * Lookup a name table entry by ordinal.
1164 *
1165 * @returns Pointer to the name table entry if found.
1166 * @returns NULL if not found.
1167 * @param pbNameTable Pointer to the name table that should be searched.
1168 * @param cbNameTable The size of the name table.
1169 * @param iOrdinal The ordinal to search for.
1170 */
1171static const KU8 *kldrModLXDoNameTableLookupByOrdinal(const KU8 *pbNameTable, KI32 cbNameTable, KU32 iOrdinal)
1172{
1173 while (*pbNameTable != 0 && cbNameTable > 0)
1174 {
1175 const KU8 cbName = *pbNameTable;
1176 KU32 iName;
1177
1178 cbNameTable -= cbName + 1 + 2;
1179 if (cbNameTable < 0)
1180 break;
1181
1182 iName = *(pbNameTable + cbName + 1)
1183 | ((unsigned)*(pbNameTable + cbName + 2) << 8);
1184 if (iName == iOrdinal)
1185 return pbNameTable;
1186
1187 /* next entry */
1188 pbNameTable += cbName + 1 + 2;
1189 }
1190
1191 return NULL;
1192}
1193
1194
/** @copydoc kLdrModGetImport */
static int kldrModLXGetImport(PKLDRMOD pMod, const void *pvBits, KU32 iImport, char *pszName, KSIZE cchName)
{
    PKLDRMODLX pModLX = (PKLDRMODLX)pMod->pvData;
    const KU8 *pb;
    int rc;

    /*
     * Validate the import ordinal against the header's import module count.
     */
    if (iImport >= pModLX->Hdr.e32_impmodcnt)
        return KLDR_ERR_IMPORT_ORDINAL_OUT_OF_BOUNDS;

    /*
     * Lazy loading the fixup section (the import module table lives inside it).
     */
    if (!pModLX->pbImportMods)
    {
        rc = kldrModLXDoLoadFixupSection(pModLX);
        if (rc)
            return rc;
    }

    /*
     * Iterate the module import table until we reach the requested import ordinal.
     * Each entry is a length-prefixed (pascal style) string, so skip *pb + 1 bytes.
     */
    pb = pModLX->pbImportMods;
    while (iImport-- > 0)
        pb += *pb + 1;

    /*
     * Copy out the result; the output is always zero terminated.  A too small
     * buffer yields a truncated copy and KERR_BUFFER_OVERFLOW.
     */
    if (*pb < cchName)
    {
        kHlpMemCopy(pszName, pb + 1, *pb);
        pszName[*pb] = '\0';
        rc = 0;
    }
    else
    {
        kHlpMemCopy(pszName, pb + 1, cchName);
        if (cchName)
            pszName[cchName - 1] = '\0';
        rc = KERR_BUFFER_OVERFLOW;
    }

    return rc;
}
1244
1245
1246/** @copydoc kLdrModNumberOfImports */
1247static KI32 kldrModLXNumberOfImports(PKLDRMOD pMod, const void *pvBits)
1248{
1249 PKLDRMODLX pModLX = (PKLDRMODLX)pMod->pvData;
1250 return pModLX->Hdr.e32_impmodcnt;
1251}
1252
1253
/** @copydoc kLdrModGetStackInfo */
static int kldrModLXGetStackInfo(PKLDRMOD pMod, const void *pvBits, KLDRADDR BaseAddress, PKLDRSTACKINFO pStackInfo)
{
    PKLDRMODLX pModLX = (PKLDRMODLX)pMod->pvData;
    const KU32 i = pModLX->Hdr.e32_stackobj; /* 1-based index of the stack object. */

    /* Only report addresses when the header values are sane: valid object
       index, non-zero size, and [esp - stacksize, esp] inside the object's
       link-address range.  (e32_esp is the initial top of stack.) */
    if (    i
        &&  i <= pMod->cSegments
        &&  pModLX->Hdr.e32_esp <= pMod->aSegments[i - 1].LinkAddress + pMod->aSegments[i - 1].cb
        &&  pModLX->Hdr.e32_stacksize
        &&  pModLX->Hdr.e32_esp - pModLX->Hdr.e32_stacksize >= pMod->aSegments[i - 1].LinkAddress)
    {

        kldrModLXResolveBaseAddress(pModLX, &BaseAddress);
        pStackInfo->LinkAddress = pModLX->Hdr.e32_esp - pModLX->Hdr.e32_stacksize;
        /* Rebase the stack bottom from link address to the actual mapping. */
        pStackInfo->Address = BaseAddress
                            + pMod->aSegments[i - 1].RVA
                            + pModLX->Hdr.e32_esp - pModLX->Hdr.e32_stacksize - pMod->aSegments[i - 1].LinkAddress;
    }
    else
    {
        pStackInfo->Address = NIL_KLDRADDR;
        pStackInfo->LinkAddress = NIL_KLDRADDR;
    }
    pStackInfo->cbStack = pModLX->Hdr.e32_stacksize;
    pStackInfo->cbStackThread = 0;

    return 0;
}
1283
1284
1285/** @copydoc kLdrModQueryMainEntrypoint */
1286static int kldrModLXQueryMainEntrypoint(PKLDRMOD pMod, const void *pvBits, KLDRADDR BaseAddress, PKLDRADDR pMainEPAddress)
1287{
1288 PKLDRMODLX pModLX = (PKLDRMODLX)pMod->pvData;
1289
1290 /*
1291 * Convert the address from the header.
1292 */
1293 kldrModLXResolveBaseAddress(pModLX, &BaseAddress);
1294 *pMainEPAddress = pModLX->Hdr.e32_startobj
1295 && pModLX->Hdr.e32_startobj <= pMod->cSegments
1296 && pModLX->Hdr.e32_eip < pMod->aSegments[pModLX->Hdr.e32_startobj - 1].cb
1297 ? BaseAddress + pMod->aSegments[pModLX->Hdr.e32_startobj - 1].RVA + pModLX->Hdr.e32_eip
1298 : NIL_KLDRADDR;
1299 return 0;
1300}
1301
1302
1303/** @copydoc kLdrModEnumDbgInfo */
1304static int kldrModLXEnumDbgInfo(PKLDRMOD pMod, const void *pvBits, PFNKLDRENUMDBG pfnCallback, void *pvUser)
1305{
1306 /*PKLDRMODLX pModLX = (PKLDRMODLX)pMod->pvData;*/
1307
1308 /*
1309 * Quit immediately if no debug info.
1310 */
1311 if (kldrModLXHasDbgInfo(pMod, pvBits))
1312 return 0;
1313#if 0
1314 /*
1315 * Read the debug info and look for familiar magics and structures.
1316 */
1317 /** @todo */
1318#endif
1319
1320 return 0;
1321}
1322
1323
1324/** @copydoc kLdrModHasDbgInfo */
1325static int kldrModLXHasDbgInfo(PKLDRMOD pMod, const void *pvBits)
1326{
1327 PKLDRMODLX pModLX = (PKLDRMODLX)pMod->pvData;
1328
1329 /*
1330 * Don't curretnly bother with linkers which doesn't advertise it in the header.
1331 */
1332 if ( !pModLX->Hdr.e32_debuginfo
1333 || !pModLX->Hdr.e32_debuglen)
1334 return KLDR_ERR_NO_DEBUG_INFO;
1335 return 0;
1336}
1337
1338
1339/** @copydoc kLdrModMap */
1340static int kldrModLXMap(PKLDRMOD pMod)
1341{
1342 PKLDRMODLX pModLX = (PKLDRMODLX)pMod->pvData;
1343 unsigned fFixed;
1344 void *pvBase;
1345 int rc;
1346
1347 /*
1348 * Already mapped?
1349 */
1350 if (pModLX->pvMapping)
1351 return KLDR_ERR_ALREADY_MAPPED;
1352
1353 /*
1354 * Allocate memory for it.
1355 */
1356 /* fixed image? */
1357 fFixed = pMod->enmType == KLDRTYPE_EXECUTABLE_FIXED
1358 || pMod->enmType == KLDRTYPE_SHARED_LIBRARY_FIXED;
1359 if (!fFixed)
1360 pvBase = NULL;
1361 else
1362 {
1363 pvBase = (void *)(KUPTR)pMod->aSegments[0].LinkAddress;
1364 if ((KUPTR)pvBase != pMod->aSegments[0].LinkAddress)
1365 return KLDR_ERR_ADDRESS_OVERFLOW;
1366 }
1367 rc = kHlpPageAlloc(&pvBase, pModLX->cbMapped, KPROT_EXECUTE_READWRITE, fFixed);
1368 if (rc)
1369 return rc;
1370
1371 /*
1372 * Load the bits, apply page protection, and update the segment table.
1373 */
1374 rc = kldrModLXDoLoadBits(pModLX, pvBase);
1375 if (!rc)
1376 rc = kldrModLXDoProtect(pModLX, pvBase, 0 /* protect */);
1377 if (!rc)
1378 {
1379 KU32 i;
1380 for (i = 0; i < pMod->cSegments; i++)
1381 {
1382 if (pMod->aSegments[i].RVA != NIL_KLDRADDR)
1383 pMod->aSegments[i].MapAddress = (KUPTR)pvBase + (KUPTR)pMod->aSegments[i].RVA;
1384 }
1385 pModLX->pvMapping = pvBase;
1386 }
1387 else
1388 kHlpPageFree(pvBase, pModLX->cbMapped);
1389 return rc;
1390}
1391
1392
1393/**
1394 * Loads the LX pages into the specified memory mapping.
1395 *
1396 * @returns 0 on success.
1397 * @returns non-zero kLdr or OS status code on failure.
1398 *
1399 * @param pModLX The LX module interpreter instance.
1400 * @param pvBits Where to load the bits.
1401 */
1402static int kldrModLXDoLoadBits(PKLDRMODLX pModLX, void *pvBits)
1403{
1404 const PKRDR pRdr = pModLX->pMod->pRdr;
1405 KU8 *pbTmpPage = NULL;
1406 int rc = 0;
1407 KU32 i;
1408
1409 /*
1410 * Iterate the segments.
1411 */
1412 for (i = 0; i < pModLX->Hdr.e32_objcnt; i++)
1413 {
1414 const struct o32_obj * const pObj = &pModLX->paObjs[i];
1415 const KU32 cPages = pModLX->pMod->aSegments[i].cbMapped / OBJPAGELEN;
1416 KU32 iPage;
1417 KU8 *pbPage = (KU8 *)pvBits + (KUPTR)pModLX->pMod->aSegments[i].RVA;
1418
1419 /*
1420 * Iterate the page map pages.
1421 */
1422 for (iPage = 0; !rc && iPage < pObj->o32_mapsize; iPage++, pbPage += OBJPAGELEN)
1423 {
1424 const struct o32_map *pMap = &pModLX->paPageMappings[iPage + pObj->o32_pagemap - 1];
1425 switch (pMap->o32_pageflags)
1426 {
1427 case VALID:
1428 if (pMap->o32_pagesize == OBJPAGELEN)
1429 rc = kRdrRead(pRdr, pbPage, OBJPAGELEN,
1430 pModLX->Hdr.e32_datapage + (pMap->o32_pagedataoffset << pModLX->Hdr.e32_pageshift));
1431 else if (pMap->o32_pagesize < OBJPAGELEN)
1432 {
1433 rc = kRdrRead(pRdr, pbPage, pMap->o32_pagesize,
1434 pModLX->Hdr.e32_datapage + (pMap->o32_pagedataoffset << pModLX->Hdr.e32_pageshift));
1435 kHlpMemSet(pbPage + pMap->o32_pagesize, 0, OBJPAGELEN - pMap->o32_pagesize);
1436 }
1437 else
1438 rc = KLDR_ERR_LX_BAD_PAGE_MAP;
1439 break;
1440
1441 case ITERDATA:
1442 case ITERDATA2:
1443 /* make sure we've got a temp page .*/
1444 if (!pbTmpPage)
1445 {
1446 pbTmpPage = kHlpAlloc(OBJPAGELEN + 256);
1447 if (!pbTmpPage)
1448 break;
1449 }
1450 /* validate the size. */
1451 if (pMap->o32_pagesize > OBJPAGELEN + 252)
1452 {
1453 rc = KLDR_ERR_LX_BAD_PAGE_MAP;
1454 break;
1455 }
1456
1457 /* read it and ensure 4 extra zero bytes. */
1458 rc = kRdrRead(pRdr, pbTmpPage, pMap->o32_pagesize,
1459 pModLX->Hdr.e32_datapage + (pMap->o32_pagedataoffset << pModLX->Hdr.e32_pageshift));
1460 if (rc)
1461 break;
1462 kHlpMemSet(pbTmpPage + pMap->o32_pagesize, 0, 4);
1463
1464 /* unpack it into the image page. */
1465 if (pMap->o32_pageflags == ITERDATA2)
1466 rc = kldrModLXDoIterData2Unpacking(pbPage, pbTmpPage, pMap->o32_pagesize);
1467 else
1468 rc = kldrModLXDoIterDataUnpacking(pbPage, pbTmpPage, pMap->o32_pagesize);
1469 break;
1470
1471 case INVALID: /* we're probably not dealing correctly with INVALID pages... */
1472 case ZEROED:
1473 kHlpMemSet(pbPage, 0, OBJPAGELEN);
1474 break;
1475
1476 case RANGE:
1477 KLDRMODLX_ASSERT(!"RANGE");
1478 default:
1479 rc = KLDR_ERR_LX_BAD_PAGE_MAP;
1480 break;
1481 }
1482 }
1483 if (rc)
1484 break;
1485
1486 /*
1487 * Zero the remaining pages.
1488 */
1489 if (iPage < cPages)
1490 kHlpMemSet(pbPage, 0, (cPages - iPage) * OBJPAGELEN);
1491 }
1492
1493 if (pbTmpPage)
1494 kHlpFree(pbTmpPage);
1495 return rc;
1496}
1497
1498
/**
 * Unpacks iterdata (aka EXEPACK).
 *
 * @returns 0 on success, non-zero kLdr status code on failure.
 * @param   pbDst   Where to put the uncompressed data. (Assumes OBJPAGELEN size.)
 * @param   pbSrc   The compressed source data.
 * @param   cbSrc   The file size of the compressed data. The source buffer
 *                  contains 4 additional zero bytes.
 */
static int kldrModLXDoIterDataUnpacking(KU8 *pbDst, const KU8 *pbSrc, int cbSrc)
{
    const struct LX_Iter *pIter = (const struct LX_Iter *)pbSrc;
    int cbDst = OBJPAGELEN;

    /* Validate size of data. */
    if (cbSrc >= OBJPAGELEN - 2)
        return KLDR_ERR_LX_BAD_ITERDATA;

    /*
     * Expand the page.  Each record is a repeat count (LX_nIter), a byte
     * count (LX_nBytes) and LX_nBytes of data to replicate LX_nIter times.
     * A zero repeat count terminates the stream.
     */
    while (cbSrc > 0 && pIter->LX_nIter)
    {
        if (pIter->LX_nBytes == 1)
        {
            /*
             * Special case - one databyte, expanded with a memset.
             */
            cbDst -= pIter->LX_nIter;
            if (cbDst < 0)
                return KLDR_ERR_LX_BAD_ITERDATA;

            /* -4 slack: the caller guarantees 4 trailing zero bytes. */
            cbSrc -= 4 + 1;
            if (cbSrc < -4)
                return KLDR_ERR_LX_BAD_ITERDATA;

            kHlpMemSet(pbDst, pIter->LX_Iterdata, pIter->LX_nIter);
            pbDst += pIter->LX_nIter;
            pIter++;
        }
        else
        {
            /*
             * General.
             */
            int i;

            cbDst -= pIter->LX_nIter * pIter->LX_nBytes;
            if (cbDst < 0)
                return KLDR_ERR_LX_BAD_ITERDATA;

            cbSrc -= 4 + pIter->LX_nBytes;
            if (cbSrc < -4)
                return KLDR_ERR_LX_BAD_ITERDATA;

            for (i = pIter->LX_nIter; i > 0; i--, pbDst += pIter->LX_nBytes)
                kHlpMemCopy(pbDst, &pIter->LX_Iterdata, pIter->LX_nBytes);
            /* Advance past the 4 byte record header plus the inline data. */
            pIter = (struct LX_Iter *)((char*)pIter + 4 + pIter->LX_nBytes);
        }
    }

    /*
     * Zero remainder of the page.
     */
    if (cbDst > 0)
        kHlpMemSet(pbDst, 0, cbDst);

    return 0;
}
1568
1569
/**
 * Unpacks iterdata2 (aka EXEPACK2).
 *
 * @returns 0 on success, non-zero kLdr status code on failure.
 * @param   pbDst   Where to put the uncompressed data. (Assumes OBJPAGELEN size.)
 * @param   pbSrc   The compressed source data.
 * @param   cbSrc   The file size of the compressed data. The source buffer
 *                  contains 4 additional zero bytes.
 */
static int kldrModLXDoIterData2Unpacking(KU8 *pbDst, const KU8 *pbSrc, int cbSrc)
{
    int cbDst = OBJPAGELEN;

    while (cbSrc > 0)
    {
        /*
         * Bit 0 and 1 is the encoding type.
         */
        switch (*pbSrc & 0x03)
        {
            /*
             *
             *  0  1  2  3  4  5  6  7
             *  type  |              |
             *  ----------------------
             *        cb         <cb bytes of data>
             *
             * Bits 2-7 is, if not zero, the length of an uncompressed run
             * starting at the following byte.
             *
             *  0  1  2  3  4  5  6  7    8  9  10 11 12 13 14 15  16 17 18 19 20 21 22 23
             *  type  |              |    |                     |  |                     |
             *  ----------------------    -----------------------  -----------------------
             *        zero                cb                       char to multiply
             *
             * If the bits are zero, the following two bytes describe a 1 byte iteration
             * run. First byte is count, second is the byte to copy. A count of zero
             * means end of data, and we simply stop. In that case the rest of the data
             * should be zero.
             */
            case 0:
            {
                if (*pbSrc)
                {
                    /* Literal run: copy cb bytes verbatim. */
                    const int cb = *pbSrc >> 2;
                    cbDst -= cb;
                    if (cbDst < 0)
                        return KLDR_ERR_LX_BAD_ITERDATA2;
                    cbSrc -= cb + 1;
                    if (cbSrc < 0)
                        return KLDR_ERR_LX_BAD_ITERDATA2;
                    kHlpMemCopy(pbDst, ++pbSrc, cb);
                    pbDst += cb;
                    pbSrc += cb;
                }
                else if (cbSrc < 2)
                    return KLDR_ERR_LX_BAD_ITERDATA2;
                else
                {
                    /* Single byte repeated cb times; cb == 0 ends the stream. */
                    const int cb = pbSrc[1];
                    if (!cb)
                        goto l_endloop;
                    cbDst -= cb;
                    if (cbDst < 0)
                        return KLDR_ERR_LX_BAD_ITERDATA2;
                    cbSrc -= 3;
                    if (cbSrc < 0)
                        return KLDR_ERR_LX_BAD_ITERDATA2;
                    kHlpMemSet(pbDst, pbSrc[2], cb);
                    pbDst += cb;
                    pbSrc += 3;
                }
                break;
            }


            /*
             *  0  1  2  3  4  5  6  7    8  9  10 11 12 13 14 15
             *  type  |  |  |     |  |    |                     |
             *  ----  -------  -------------------------
             *        cb1      cb2 - 3    offset            <cb1 bytes of data>
             *
             * Two bytes laid out as described above, followed by cb1 bytes of data to be copied.
             * The cb2(+3) and offset describe an amount of data to be copied from the expanded
             * data relative to the current position. The data copied as you would expect it to be.
             */
            case 1:
            {
                cbSrc -= 2;
                if (cbSrc < 0)
                    return KLDR_ERR_LX_BAD_ITERDATA2;
                else
                {
                    /* off: backreference distance; cb1: literal bytes; cb2: match length. */
                    const unsigned off = ((unsigned)pbSrc[1] << 1) | (*pbSrc >> 7);
                    const int cb1 = (*pbSrc >> 2) & 3;
                    const int cb2 = ((*pbSrc >> 4) & 7) + 3;

                    pbSrc += 2;
                    cbSrc -= cb1;
                    if (cbSrc < 0)
                        return KLDR_ERR_LX_BAD_ITERDATA2;
                    cbDst -= cb1;
                    if (cbDst < 0)
                        return KLDR_ERR_LX_BAD_ITERDATA2;
                    kHlpMemCopy(pbDst, pbSrc, cb1);
                    pbDst += cb1;
                    pbSrc += cb1;

                    /* The backreference must stay within the already expanded output. */
                    if (off > OBJPAGELEN - (unsigned)cbDst)
                        return KLDR_ERR_LX_BAD_ITERDATA2;
                    cbDst -= cb2;
                    if (cbDst < 0)
                        return KLDR_ERR_LX_BAD_ITERDATA2;
                    kHlpMemMove(pbDst, pbDst - off, cb2);
                    pbDst += cb2;
                }
                break;
            }


            /*
             *  0  1  2  3  4  5  6  7    8  9  10 11 12 13 14 15
             *  type  |  |  |                                   |
             *  ----  ----  ------------------------------------
             *        cb-3  offset
             *
             * Two bytes laid out as described above.
             * The cb(+3) and offset describe an amount of data to be copied from the expanded
             * data relative to the current position.
             *
             * If offset == 1 the data is not copied as expected, but in the memcpyw manner.
             */
            case 2:
            {
                cbSrc -= 2;
                if (cbSrc < 0)
                    return KLDR_ERR_LX_BAD_ITERDATA2;
                else
                {
                    const unsigned off = ((unsigned)pbSrc[1] << 4) | (*pbSrc >> 4);
                    const int cb = ((*pbSrc >> 2) & 3) + 3;

                    pbSrc += 2;
                    if (off > OBJPAGELEN - (unsigned)cbDst)
                        return KLDR_ERR_LX_BAD_ITERDATA2;
                    cbDst -= cb;
                    if (cbDst < 0)
                        return KLDR_ERR_LX_BAD_ITERDATA2;
                    /* Overlapping copy with word-replication semantics. */
                    kLdrModLXMemCopyW(pbDst, pbDst - off, cb);
                    pbDst += cb;
                }
                break;
            }


            /*
             *  0  1  2  3  4  5  6  7    8  9  10 11 12 13 14 15  16 17 18 19 20 21 22 23
             *  type  |        |  |              |  |                                   |
             *  ----------  ----------------  ----------------------------------
             *        cb1         cb2               offset                <cb1 bytes of data>
             *
             * Three bytes laid out as described above, followed by cb1 bytes of data to be copied.
             * The cb2 and offset describe an amount of data to be copied from the expanded
             * data relative to the current position.
             *
             * If offset == 1 the data is not copied as expected, but in the memcpyw manner.
             */
            case 3:
            {
                cbSrc -= 3;
                if (cbSrc < 0)
                    return KLDR_ERR_LX_BAD_ITERDATA2;
                else
                {
                    const int cb1 = (*pbSrc >> 2) & 0xf;
                    const int cb2 = ((pbSrc[1] & 0xf) << 2) | (*pbSrc >> 6);
                    const unsigned off = ((unsigned)pbSrc[2] << 4) | (pbSrc[1] >> 4);

                    pbSrc += 3;
                    cbSrc -= cb1;
                    if (cbSrc < 0)
                        return KLDR_ERR_LX_BAD_ITERDATA2;
                    cbDst -= cb1;
                    if (cbDst < 0)
                        return KLDR_ERR_LX_BAD_ITERDATA2;
                    kHlpMemCopy(pbDst, pbSrc, cb1);
                    pbDst += cb1;
                    pbSrc += cb1;

                    if (off > OBJPAGELEN - (unsigned)cbDst)
                        return KLDR_ERR_LX_BAD_ITERDATA2;
                    cbDst -= cb2;
                    if (cbDst < 0)
                        return KLDR_ERR_LX_BAD_ITERDATA2;
                    kLdrModLXMemCopyW(pbDst, pbDst - off, cb2);
                    pbDst += cb2;
                }
                break;
            }
        } /* type switch. */
    } /* unpack loop */

l_endloop:


    /*
     * Zero remainder of the page.
     */
    if (cbDst > 0)
        kHlpMemSet(pbDst, 0, cbDst);

    return 0;
}
1783
1784
/**
 * Special memcpy employed by the iterdata2 algorithm.
 *
 * Emulate a 16-bit memcpy (copying 16-bit at a time) and the effects this
 * has if src is very close to the destination.
 *
 * @param   pbDst   Destination pointer.
 * @param   pbSrc   Source pointer. Will always be <= pbDst.
 * @param   cb      Amount of data to be copied.
 * @remark  This assumes that unaligned word and dword access is fine.
 */
static void kLdrModLXMemCopyW(KU8 *pbDst, const KU8 *pbSrc, int cb)
{
    switch (pbDst - pbSrc)
    {
        case 0:
        case 1:
        case 2:
        case 3:
            /* 16-bit copy (unaligned).  For distances < 4 the reads overlap
               freshly written words, deliberately replicating the source
               pattern forward (the effect iterdata2 backreferences rely on). */
            if (cb & 1)
                *pbDst++ = *pbSrc++;
            for (cb >>= 1; cb > 0; cb--, pbDst += 2, pbSrc += 2)
                *(KU16 *)pbDst = *(const KU16 *)pbSrc;
            break;

        default:
            /* 32-bit copy (unaligned).  Dword reads may still overlap data
               written by earlier iterations; that replication is intentional. */
            if (cb & 1)
                *pbDst++ = *pbSrc++;
            if (cb & 2)
            {
                *(KU16 *)pbDst = *(const KU16 *)pbSrc;
                pbDst += 2;
                pbSrc += 2;
            }
            for (cb >>= 2; cb > 0; cb--, pbDst += 4, pbSrc += 4)
                *(KU32 *)pbDst = *(const KU32 *)pbSrc;
            break;
    }
}
1826
1827
1828/**
1829 * Unprotects or protects the specified image mapping.
1830 *
1831 * @returns 0 on success.
1832 * @returns non-zero kLdr or OS status code on failure.
1833 *
1834 * @param pModLX The LX module interpreter instance.
1835 * @param pvBits The mapping to protect.
1836 * @param UnprotectOrProtect If 1 unprotect (i.e. make all writable), otherwise
1837 * protect according to the object table.
1838 */
1839static int kldrModLXDoProtect(PKLDRMODLX pModLX, void *pvBits, unsigned fUnprotectOrProtect)
1840{
1841 KU32 i;
1842 PKLDRMOD pMod = pModLX->pMod;
1843
1844 /*
1845 * Change object protection.
1846 */
1847 for (i = 0; i < pMod->cSegments; i++)
1848 {
1849 int rc;
1850 void *pv;
1851 KPROT enmProt;
1852
1853 /* calc new protection. */
1854 enmProt = pMod->aSegments[i].enmProt;
1855 if (fUnprotectOrProtect)
1856 {
1857 switch (enmProt)
1858 {
1859 case KPROT_NOACCESS:
1860 case KPROT_READONLY:
1861 case KPROT_READWRITE:
1862 case KPROT_WRITECOPY:
1863 enmProt = KPROT_READWRITE;
1864 break;
1865 case KPROT_EXECUTE:
1866 case KPROT_EXECUTE_READ:
1867 case KPROT_EXECUTE_READWRITE:
1868 case KPROT_EXECUTE_WRITECOPY:
1869 enmProt = KPROT_EXECUTE_READWRITE;
1870 break;
1871 default:
1872 KLDRMODLX_ASSERT(!"bad enmProt");
1873 return -1;
1874 }
1875 }
1876 else
1877 {
1878 /* copy on write -> normal write. */
1879 if (enmProt == KPROT_EXECUTE_WRITECOPY)
1880 enmProt = KPROT_EXECUTE_READWRITE;
1881 else if (enmProt == KPROT_WRITECOPY)
1882 enmProt = KPROT_READWRITE;
1883 }
1884
1885
1886 /* calc the address and set page protection. */
1887 pv = (KU8 *)pvBits + pMod->aSegments[i].RVA;
1888
1889 rc = kHlpPageProtect(pv, pMod->aSegments[i].cbMapped, enmProt);
1890 if (rc)
1891 break;
1892
1893 /** @todo the gap page should be marked NOACCESS! */
1894 }
1895
1896 return 0;
1897}
1898
1899
1900/** @copydoc kLdrModUnmap */
1901static int kldrModLXUnmap(PKLDRMOD pMod)
1902{
1903 PKLDRMODLX pModLX = (PKLDRMODLX)pMod->pvData;
1904 KU32 i;
1905 int rc;
1906
1907 /*
1908 * Mapped?
1909 */
1910 if (!pModLX->pvMapping)
1911 return KLDR_ERR_NOT_MAPPED;
1912
1913 /*
1914 * Free the mapping and update the segments.
1915 */
1916 rc = kHlpPageFree((void *)pModLX->pvMapping, pModLX->cbMapped);
1917 KLDRMODLX_ASSERT(!rc);
1918 pModLX->pvMapping = NULL;
1919
1920 for (i = 0; i < pMod->cSegments; i++)
1921 pMod->aSegments[i].MapAddress = 0;
1922
1923 return rc;
1924}
1925
1926
1927/** @copydoc kLdrModAllocTLS */
1928static int kldrModLXAllocTLS(PKLDRMOD pMod)
1929{
1930 PKLDRMODLX pModLX = (PKLDRMODLX)pMod->pvData;
1931
1932 /* no tls, just do the error checking. */
1933 if (!pModLX->pvMapping)
1934 return KLDR_ERR_NOT_MAPPED;
1935 return 0;
1936}
1937
1938
1939/** @copydoc kLdrModFreeTLS */
1940static void kldrModLXFreeTLS(PKLDRMOD pMod)
1941{
1942 /* no tls. */
1943}
1944
1945
1946/** @copydoc kLdrModReload */
1947static int kldrModLXReload(PKLDRMOD pMod)
1948{
1949 PKLDRMODLX pModLX = (PKLDRMODLX)pMod->pvData;
1950 int rc, rc2;
1951
1952 /*
1953 * Mapped?
1954 */
1955 if (!pModLX->pvMapping)
1956 return KLDR_ERR_NOT_MAPPED;
1957
1958 /*
1959 * Before doing anything we'll have to make all pages writable.
1960 */
1961 rc = kldrModLXDoProtect(pModLX, (void *)pModLX->pvMapping, 1 /* unprotect */);
1962 if (rc)
1963 return rc;
1964
1965 /*
1966 * Load the bits again.
1967 */
1968 rc = kldrModLXDoLoadBits(pModLX, (void *)pModLX->pvMapping);
1969
1970 /*
1971 * Restore protection.
1972 */
1973 rc2 = kldrModLXDoProtect(pModLX, (void *)pModLX->pvMapping, 0 /* protect */);
1974 if (!rc && rc2)
1975 rc = rc2;
1976 return rc;
1977}
1978
1979
1980/** @copydoc kLdrModFixupMapping */
1981static int kldrModLXFixupMapping(PKLDRMOD pMod, PFNKLDRMODGETIMPORT pfnGetImport, void *pvUser)
1982{
1983 PKLDRMODLX pModLX = (PKLDRMODLX)pMod->pvData;
1984 int rc, rc2;
1985
1986 /*
1987 * Mapped?
1988 */
1989 if (!pModLX->pvMapping)
1990 return KLDR_ERR_NOT_MAPPED;
1991
1992 /*
1993 * Before doing anything we'll have to make all pages writable.
1994 */
1995 rc = kldrModLXDoProtect(pModLX, (void *)pModLX->pvMapping, 1 /* unprotect */);
1996 if (rc)
1997 return rc;
1998
1999 /*
2000 * Apply fixups and resolve imports.
2001 */
2002 rc = kldrModLXRelocateBits(pMod, (void *)pModLX->pvMapping, (KUPTR)pModLX->pvMapping,
2003 pMod->aSegments[0].LinkAddress, pfnGetImport, pvUser);
2004
2005 /*
2006 * Restore protection.
2007 */
2008 rc2 = kldrModLXDoProtect(pModLX, (void *)pModLX->pvMapping, 0 /* protect */);
2009 if (!rc && rc2)
2010 rc = rc2;
2011 return rc;
2012}
2013
2014
2015/** @copydoc kLdrModCallInit */
2016static int kldrModLXCallInit(PKLDRMOD pMod, KUPTR uHandle)
2017{
2018 PKLDRMODLX pModLX = (PKLDRMODLX)pMod->pvData;
2019 int rc;
2020
2021 /*
2022 * Mapped?
2023 */
2024 if (!pModLX->pvMapping)
2025 return KLDR_ERR_NOT_MAPPED;
2026
2027 /*
2028 * Do TLS callbacks first and then call the init/term function if it's a DLL.
2029 */
2030 if ((pModLX->Hdr.e32_mflags & E32MODMASK) == E32MODDLL)
2031 rc = kldrModLXDoCallDLL(pModLX, 0 /* attach */, uHandle);
2032 else
2033 rc = 0;
2034 return rc;
2035}
2036
2037
/**
 * Call the DLL entrypoint.
 *
 * @returns 0 on success.
 * @returns KLDR_ERR_MODULE_INIT_FAILED or KLDR_ERR_THREAD_ATTACH_FAILED on failure.
 * @param   pModLX          The LX module interpreter instance.
 * @param   uOp             The operation (DLL_*); 0 = attach, 1 = detach.
 * @param   uHandle         The module handle to present.
 */
static int kldrModLXDoCallDLL(PKLDRMODLX pModLX, unsigned uOp, KUPTR uHandle)
{
    int rc;

    /*
     * If no entrypoint there isn't anything to be done.
     */
    if (    !pModLX->Hdr.e32_startobj
        ||  pModLX->Hdr.e32_startobj > pModLX->Hdr.e32_objcnt)
        return 0;

    /*
     * Invoke the entrypoint and convert the boolean result to a kLdr status code.
     * The entrypoint address is: mapping base + start object RVA + EIP.
     */
    rc = kldrModLXDoCall((KUPTR)pModLX->pvMapping
                         + (KUPTR)pModLX->pMod->aSegments[pModLX->Hdr.e32_startobj - 1].RVA
                         + pModLX->Hdr.e32_eip,
                         uHandle, uOp, NULL);
    if (rc)
        rc = 0;                             /* non-zero return means success. */
    else if (uOp == 0 /* attach */)
        rc = KLDR_ERR_MODULE_INIT_FAILED;   /* zero on attach means init failed. */
    else /* detach: ignore failures */
        rc = 0;
    return rc;
}
2073
2074
/**
 * Do a 3 parameter callback.
 *
 * @returns 32-bit callback return.
 * @param   uEntrypoint     The address of the function to be called.
 * @param   uHandle         The first argument, the module handle.
 * @param   uOp             The second argument, the reason we're calling.
 * @param   pvReserved      The third argument, reserved argument. (figure this one out)
 */
static KI32 kldrModLXDoCall(KUPTR uEntrypoint, KUPTR uHandle, KU32 uOp, void *pvReserved)
{
#if defined(__X86__) || defined(__i386__) || defined(_M_IX86)
    KI32 rc;
/** @todo try/except */

    /*
     * Paranoia.
     */
# ifdef __GNUC__
    /* Push the three arguments right-to-left (reserved=0, op, handle), save
       the pre-push stack pointer via lea, call, and restore ESP afterwards
       so it doesn't matter whether the callee pops its own arguments. */
    __asm__ __volatile__(
        "pushl %2\n\t"
        "pushl %1\n\t"
        "pushl %0\n\t"
        "lea 12(%%esp), %2\n\t"
        "call *%3\n\t"
        "movl %2, %%esp\n\t"
        : "=a" (rc)
        : "d" (uOp),
          "S" (0),
          "c" (uEntrypoint),
          "0" (uHandle));
# elif defined(_MSC_VER)
    /* Same scheme: EDI holds the pre-push ESP so the stack is restored
       regardless of the callee's calling convention. */
    __asm {
        mov eax, [uHandle]
        mov edx, [uOp]
        mov ecx, 0
        mov ebx, [uEntrypoint]
        push edi
        mov edi, esp
        push ecx
        push edx
        push eax
        call ebx
        mov esp, edi
        pop edi
        mov [rc], eax
    }
# else
#  error "port me!"
# endif
    return rc;

#else
    /* LX images contain 32-bit x86 code; cannot invoke it on other CPUs. */
    return KCPU_ERR_ARCH_CPU_NOT_COMPATIBLE;
#endif
}
2131
2132
2133/** @copydoc kLdrModCallTerm */
2134static int kldrModLXCallTerm(PKLDRMOD pMod, KUPTR uHandle)
2135{
2136 PKLDRMODLX pModLX = (PKLDRMODLX)pMod->pvData;
2137
2138 /*
2139 * Mapped?
2140 */
2141 if (!pModLX->pvMapping)
2142 return KLDR_ERR_NOT_MAPPED;
2143
2144 /*
2145 * Do the call.
2146 */
2147 if ((pModLX->Hdr.e32_mflags & E32MODMASK) == E32MODDLL)
2148 kldrModLXDoCallDLL(pModLX, 1 /* detach */, uHandle);
2149
2150 return 0;
2151}
2152
2153
/** @copydoc kLdrModCallThread */
static int kldrModLXCallThread(PKLDRMOD pMod, KUPTR uHandle, unsigned fAttachingOrDetaching)
{
    /* LX modules have no thread attach/detach callout, so this is a no-op. */
    return 0;
}
2160
2161
2162/** @copydoc kLdrModSize */
2163static KLDRADDR kldrModLXSize(PKLDRMOD pMod)
2164{
2165 PKLDRMODLX pModLX = (PKLDRMODLX)pMod->pvData;
2166 return pModLX->cbMapped;
2167}
2168
2169
2170/** @copydoc kLdrModGetBits */
2171static int kldrModLXGetBits(PKLDRMOD pMod, void *pvBits, KLDRADDR BaseAddress, PFNKLDRMODGETIMPORT pfnGetImport, void *pvUser)
2172{
2173 PKLDRMODLX pModLX = (PKLDRMODLX)pMod->pvData;
2174 int rc;
2175
2176 /*
2177 * Load the image bits.
2178 */
2179 rc = kldrModLXDoLoadBits(pModLX, pvBits);
2180 if (rc)
2181 return rc;
2182
2183 /*
2184 * Perform relocations.
2185 */
2186 return kldrModLXRelocateBits(pMod, pvBits, BaseAddress, pMod->aSegments[0].LinkAddress, pfnGetImport, pvUser);
2187
2188}
2189
2190
2191/** @copydoc kLdrModRelocateBits */
2192static int kldrModLXRelocateBits(PKLDRMOD pMod, void *pvBits, KLDRADDR NewBaseAddress, KLDRADDR OldBaseAddress,
2193 PFNKLDRMODGETIMPORT pfnGetImport, void *pvUser)
2194{
2195 PKLDRMODLX pModLX = (PKLDRMODLX)pMod->pvData;
2196 KU32 iSeg;
2197 int rc;
2198
2199 /*
2200 * Do we need to to *anything*?
2201 */
2202 if ( NewBaseAddress == OldBaseAddress
2203 && NewBaseAddress == pModLX->paObjs[0].o32_base
2204 && !pModLX->Hdr.e32_impmodcnt)
2205 return 0;
2206
2207 /*
2208 * Load the fixup section.
2209 */
2210 if (!pModLX->pbFixupSection)
2211 {
2212 rc = kldrModLXDoLoadFixupSection(pModLX);
2213 if (rc)
2214 return rc;
2215 }
2216
2217 /*
2218 * Iterate the segments.
2219 */
2220 for (iSeg = 0; iSeg < pModLX->Hdr.e32_objcnt; iSeg++)
2221 {
2222 const struct o32_obj * const pObj = &pModLX->paObjs[iSeg];
2223 KLDRADDR PageAddress = NewBaseAddress + pModLX->pMod->aSegments[iSeg].RVA;
2224 KU32 iPage;
2225 KU8 *pbPage = (KU8 *)pvBits + (KUPTR)pModLX->pMod->aSegments[iSeg].RVA;
2226
2227 /*
2228 * Iterate the page map pages.
2229 */
2230 for (iPage = 0, rc = 0; !rc && iPage < pObj->o32_mapsize; iPage++, pbPage += OBJPAGELEN, PageAddress += OBJPAGELEN)
2231 {
2232 const KU8 * const pbFixupRecEnd = pModLX->pbFixupRecs + pModLX->paoffPageFixups[iPage + pObj->o32_pagemap];
2233 const KU8 *pb = pModLX->pbFixupRecs + pModLX->paoffPageFixups[iPage + pObj->o32_pagemap - 1];
2234 KLDRADDR uValue;
2235 int iSelector;
2236 KU32 fKind;
2237
2238 /* sanity */
2239 if (pbFixupRecEnd < pb)
2240 return KLDR_ERR_BAD_FIXUP;
2241 if (pbFixupRecEnd - 1 > pModLX->pbFixupSectionLast)
2242 return KLDR_ERR_BAD_FIXUP;
2243 if (pb < pModLX->pbFixupSection)
2244 return KLDR_ERR_BAD_FIXUP;
2245
2246 /*
2247 * Iterate the fixup record.
2248 */
2249 while (pb < pbFixupRecEnd)
2250 {
2251 union _rel
2252 {
2253 const KU8 * pb;
2254 const struct r32_rlc *prlc;
2255 } u;
2256
2257 u.pb = pb;
2258 pb += 3 + (u.prlc->nr_stype & NRCHAIN ? 0 : 1); /* place pch at the 4th member. */
2259
2260 /*
2261 * Figure out the target.
2262 */
2263 switch (u.prlc->nr_flags & NRRTYP)
2264 {
2265 /*
2266 * Internal fixup.
2267 */
2268 case NRRINT:
2269 {
2270 KU16 iTrgObject;
2271 KU32 offTrgObject;
2272
2273 /* the object */
2274 if (u.prlc->nr_flags & NR16OBJMOD)
2275 {
2276 iTrgObject = *(const KU16 *)pb;
2277 pb += 2;
2278 }
2279 else
2280 iTrgObject = *pb++;
2281 iTrgObject--;
2282 if (iTrgObject >= pModLX->Hdr.e32_objcnt)
2283 return KLDR_ERR_BAD_FIXUP;
2284
2285 /* the target */
2286 if ((u.prlc->nr_stype & NRSRCMASK) != NRSSEG)
2287 {
2288 if (u.prlc->nr_flags & NR32BITOFF)
2289 {
2290 offTrgObject = *(const KU32 *)pb;
2291 pb += 4;
2292 }
2293 else
2294 {
2295 offTrgObject = *(const KU16 *)pb;
2296 pb += 2;
2297 }
2298
2299 /* calculate the symbol info. */
2300 uValue = offTrgObject + NewBaseAddress + pMod->aSegments[iTrgObject].RVA;
2301 }
2302 else
2303 uValue = NewBaseAddress + pMod->aSegments[iTrgObject].RVA;
2304 if ( (u.prlc->nr_stype & NRALIAS)
2305 || (pMod->aSegments[iTrgObject].fFlags & KLDRSEG_FLAG_16BIT))
2306 iSelector = pMod->aSegments[iTrgObject].Sel16bit;
2307 else
2308 iSelector = pMod->aSegments[iTrgObject].SelFlat;
2309 fKind = 0;
2310 break;
2311 }
2312
2313 /*
2314 * Import by symbol ordinal.
2315 */
2316 case NRRORD:
2317 {
2318 KU16 iModule;
2319 KU32 iSymbol;
2320
2321 /* the module ordinal */
2322 if (u.prlc->nr_flags & NR16OBJMOD)
2323 {
2324 iModule = *(const KU16 *)pb;
2325 pb += 2;
2326 }
2327 else
2328 iModule = *pb++;
2329 iModule--;
2330 if (iModule >= pModLX->Hdr.e32_impmodcnt)
2331 return KLDR_ERR_BAD_FIXUP;
2332#if 1
2333 if (u.prlc->nr_flags & NRICHAIN)
2334 return KLDR_ERR_BAD_FIXUP;
2335#endif
2336
2337 /* . */
2338 if (u.prlc->nr_flags & NR32BITOFF)
2339 {
2340 iSymbol = *(const KU32 *)pb;
2341 pb += 4;
2342 }
2343 else if (!(u.prlc->nr_flags & NR8BITORD))
2344 {
2345 iSymbol = *(const KU16 *)pb;
2346 pb += 2;
2347 }
2348 else
2349 iSymbol = *pb++;
2350
2351 /* resolve it. */
2352 rc = pfnGetImport(pMod, iModule, iSymbol, NULL, 0, NULL, &uValue, &fKind, pvUser);
2353 if (rc)
2354 return rc;
2355 iSelector = -1;
2356 break;
2357 }
2358
2359 /*
2360 * Import by symbol name.
2361 */
2362 case NRRNAM:
2363 {
2364 KU32 iModule;
2365 KU16 offSymbol;
2366 const KU8 *pbSymbol;
2367
2368 /* the module ordinal */
2369 if (u.prlc->nr_flags & NR16OBJMOD)
2370 {
2371 iModule = *(const KU16 *)pb;
2372 pb += 2;
2373 }
2374 else
2375 iModule = *pb++;
2376 iModule--;
2377 if (iModule >= pModLX->Hdr.e32_impmodcnt)
2378 return KLDR_ERR_BAD_FIXUP;
2379#if 1
2380 if (u.prlc->nr_flags & NRICHAIN)
2381 return KLDR_ERR_BAD_FIXUP;
2382#endif
2383
2384 /* . */
2385 if (u.prlc->nr_flags & NR32BITOFF)
2386 {
2387 offSymbol = *(const KU32 *)pb;
2388 pb += 4;
2389 }
2390 else if (!(u.prlc->nr_flags & NR8BITORD))
2391 {
2392 offSymbol = *(const KU16 *)pb;
2393 pb += 2;
2394 }
2395 else
2396 offSymbol = *pb++;
2397 pbSymbol = pModLX->pbImportProcs + offSymbol;
2398 if ( pbSymbol < pModLX->pbImportProcs
2399 || pbSymbol > pModLX->pbFixupSectionLast)
2400 return KLDR_ERR_BAD_FIXUP;
2401
2402 /* resolve it. */
2403 rc = pfnGetImport(pMod, iModule, NIL_KLDRMOD_SYM_ORDINAL, (const char *)pbSymbol + 1, *pbSymbol, NULL,
2404 &uValue, &fKind, pvUser);
2405 if (rc)
2406 return rc;
2407 iSelector = -1;
2408 break;
2409 }
2410
2411 case NRRENT:
2412 KLDRMODLX_ASSERT(!"NRRENT");
2413 default:
2414 iSelector = -1;
2415 break;
2416 }
2417
2418 /* addend */
2419 if (u.prlc->nr_flags & NRADD)
2420 {
2421 if (u.prlc->nr_flags & NR32BITADD)
2422 {
2423 uValue += *(const KU32 *)pb;
2424 pb += 4;
2425 }
2426 else
2427 {
2428 uValue += *(const KU16 *)pb;
2429 pb += 2;
2430 }
2431 }
2432
2433
2434 /*
2435 * Deal with the 'source' (i.e. the place that should be modified - very logical).
2436 */
2437 if (!(u.prlc->nr_stype & NRCHAIN))
2438 {
2439 int off = u.prlc->r32_soff;
2440
2441 /* common / simple */
2442 if ( (u.prlc->nr_stype & NRSRCMASK) == NROFF32
2443 && off >= 0
2444 && off <= OBJPAGELEN - 4)
2445 *(KU32 *)&pbPage[off] = uValue;
2446 else if ( (u.prlc->nr_stype & NRSRCMASK) == NRSOFF32
2447 && off >= 0
2448 && off <= OBJPAGELEN - 4)
2449 *(KU32 *)&pbPage[off] = uValue - (PageAddress + off + 4);
2450 else
2451 {
2452 /* generic */
2453 rc = kldrModLXDoReloc(pbPage, off, PageAddress, u.prlc, iSelector, uValue, fKind);
2454 if (rc)
2455 return rc;
2456 }
2457 }
2458 else if (!(u.prlc->nr_flags & NRICHAIN))
2459 {
2460 const KI16 *poffSrc = (const KI16 *)pb;
2461 KU8 c = u.pb[2];
2462
2463 /* common / simple */
2464 if ((u.prlc->nr_stype & NRSRCMASK) == NROFF32)
2465 {
2466 while (c-- > 0)
2467 {
2468 int off = *poffSrc++;
2469 if (off >= 0 && off <= OBJPAGELEN - 4)
2470 *(KU32 *)&pbPage[off] = uValue;
2471 else
2472 {
2473 rc = kldrModLXDoReloc(pbPage, off, PageAddress, u.prlc, iSelector, uValue, fKind);
2474 if (rc)
2475 return rc;
2476 }
2477 }
2478 }
2479 else if ((u.prlc->nr_stype & NRSRCMASK) == NRSOFF32)
2480 {
2481 while (c-- > 0)
2482 {
2483 int off = *poffSrc++;
2484 if (off >= 0 && off <= OBJPAGELEN - 4)
2485 *(KU32 *)&pbPage[off] = uValue - (PageAddress + off + 4);
2486 else
2487 {
2488 rc = kldrModLXDoReloc(pbPage, off, PageAddress, u.prlc, iSelector, uValue, fKind);
2489 if (rc)
2490 return rc;
2491 }
2492 }
2493 }
2494 else
2495 {
2496 while (c-- > 0)
2497 {
2498 rc = kldrModLXDoReloc(pbPage, *poffSrc++, PageAddress, u.prlc, iSelector, uValue, fKind);
2499 if (rc)
2500 return rc;
2501 }
2502 }
2503 pb = (const KU8 *)poffSrc;
2504 }
2505 else
2506 {
2507 /* This is a pain because it will require virgin pages on a relocation. */
2508 KLDRMODLX_ASSERT(!"NRICHAIN");
2509 return KLDR_ERR_LX_NRICHAIN_NOT_SUPPORTED;
2510 }
2511 }
2512 }
2513 }
2514
2515 return 0;
2516}
2517
2518
2519/**
2520 * Applies the relocation to one 'source' in a page.
2521 *
2522 * This takes care of the more esotic case while the common cases
2523 * are dealt with seperately.
2524 *
2525 * @returns 0 on success, non-zero kLdr status code on failure.
2526 * @param pbPage The page in which to apply the fixup.
2527 * @param off Page relative offset of where to apply the offset.
2528 * @param uValue The target value.
2529 * @param fKind The target kind.
2530 */
2531static int kldrModLXDoReloc(KU8 *pbPage, int off, KLDRADDR PageAddress, const struct r32_rlc *prlc,
2532 int iSelector, KLDRADDR uValue, KU32 fKind)
2533{
2534#pragma pack(1) /* just to be sure */
2535 union
2536 {
2537 KU8 ab[6];
2538 KU32 off32;
2539 KU16 off16;
2540 KU8 off8;
2541 struct
2542 {
2543 KU16 off;
2544 KU16 Sel;
2545 } Far16;
2546 struct
2547 {
2548 KU32 off;
2549 KU16 Sel;
2550 } Far32;
2551 } uData;
2552#pragma pack()
2553 const KU8 *pbSrc;
2554 KU8 *pbDst;
2555 KU8 cb;
2556
2557 /*
2558 * Compose the fixup data.
2559 */
2560 switch (prlc->nr_stype & NRSRCMASK)
2561 {
2562 case NRSBYT:
2563 uData.off8 = (KU8)uValue;
2564 cb = 1;
2565 break;
2566 case NRSSEG:
2567 if (iSelector == -1)
2568 {
2569 /* fixme */
2570 }
2571 uData.off16 = iSelector;
2572 cb = 2;
2573 break;
2574 case NRSPTR:
2575 if (iSelector == -1)
2576 {
2577 /* fixme */
2578 }
2579 uData.Far16.off = (KU16)uValue;
2580 uData.Far16.Sel = iSelector;
2581 cb = 4;
2582 break;
2583 case NRSOFF:
2584 uData.off16 = (KU16)uValue;
2585 cb = 2;
2586 break;
2587 case NRPTR48:
2588 if (iSelector == -1)
2589 {
2590 /* fixme */
2591 }
2592 uData.Far32.off = (KU32)uValue;
2593 uData.Far32.Sel = iSelector;
2594 cb = 6;
2595 break;
2596 case NROFF32:
2597 uData.off32 = (KU32)uValue;
2598 cb = 4;
2599 break;
2600 case NRSOFF32:
2601 uData.off32 = (KU32)uValue - (PageAddress + off + 4);
2602 cb = 4;
2603 break;
2604 default:
2605 return KLDR_ERR_LX_BAD_FIXUP_SECTION; /** @todo fix error, add more checks! */
2606 }
2607
2608 /*
2609 * Apply it. This is sloooow...
2610 */
2611 pbSrc = &uData.ab[0];
2612 pbDst = pbPage + off;
2613 while (cb-- > 0)
2614 {
2615 if (off > OBJPAGELEN)
2616 break;
2617 if (off >= 0)
2618 *pbDst = *pbSrc;
2619 pbSrc++;
2620 pbDst++;
2621 }
2622
2623 return 0;
2624}
2625
2626
/**
 * The LX module interpreter method table.
 *
 * Wires the kldrModLX* implementations above into the generic kLdr module
 * interpreter interface. NULL entries are operations this interpreter does
 * not provide (optional or not yet implemented - see the inline notes).
 * The slot order must match the KLDRMODOPS structure declaration.
 */
KLDRMODOPS g_kLdrModLXOps =
{
    "LX",                           /* format name. */
    NULL,                           /* next pointer - NOTE(review): presumably linked in at registration; confirm against KLDRMODOPS. */
    kldrModLXCreate,
    kldrModLXDestroy,
    kldrModLXQuerySymbol,
    kldrModLXEnumSymbols,
    kldrModLXGetImport,
    kldrModLXNumberOfImports,
    NULL /* can execute one is optional */,
    kldrModLXGetStackInfo,
    kldrModLXQueryMainEntrypoint,
    NULL /* pfnQueryImageUuid */,
    NULL /* fixme */,
    NULL /* fixme */,
    kldrModLXEnumDbgInfo,
    kldrModLXHasDbgInfo,
    kldrModLXMap,
    kldrModLXUnmap,
    kldrModLXAllocTLS,
    kldrModLXFreeTLS,
    kldrModLXReload,
    kldrModLXFixupMapping,
    kldrModLXCallInit,
    kldrModLXCallTerm,
    kldrModLXCallThread,
    kldrModLXSize,
    kldrModLXGetBits,
    kldrModLXRelocateBits,
    NULL /* fixme: pfnMostlyDone */,
    42 /* the end */                /* end-of-structure marker. */
};
2663
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette