1 | # -*- coding: utf-8 -*-
|
---|
2 | # $Id: storagecfg.py 93115 2022-01-01 11:31:46Z vboxsync $
|
---|
3 |
|
---|
4 | """
|
---|
5 | VirtualBox Validation Kit - Storage test configuration API.
|
---|
6 | """
|
---|
7 |
|
---|
8 | __copyright__ = \
|
---|
9 | """
|
---|
10 | Copyright (C) 2016-2022 Oracle Corporation
|
---|
11 |
|
---|
12 | This file is part of VirtualBox Open Source Edition (OSE), as
|
---|
13 | available from http://www.virtualbox.org. This file is free software;
|
---|
14 | you can redistribute it and/or modify it under the terms of the GNU
|
---|
15 | General Public License (GPL) as published by the Free Software
|
---|
16 | Foundation, in version 2 as it comes in the "COPYING" file of the
|
---|
17 | VirtualBox OSE distribution. VirtualBox OSE is distributed in the
|
---|
18 | hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
|
---|
19 |
|
---|
20 | The contents of this file may alternatively be used under the terms
|
---|
21 | of the Common Development and Distribution License Version 1.0
|
---|
22 | (CDDL) only, as it comes in the "COPYING.CDDL" file of the
|
---|
23 | VirtualBox OSE distribution, in which case the provisions of the
|
---|
24 | CDDL are applicable instead of those of the GPL.
|
---|
25 |
|
---|
26 | You may elect to license modified versions of this file under the
|
---|
27 | terms and conditions of either the GPL or the CDDL or both.
|
---|
28 | """
|
---|
29 | __version__ = "$Revision: 93115 $"
|
---|
30 |
|
---|
31 | # Standard Python imports.
|
---|
32 | import os;
|
---|
33 | import re;
|
---|
34 |
|
---|
35 |
|
---|
class StorageDisk(object):
    """
    Represents a single disk that is available for testing, tracking its
    device path, whether it is currently claimed by a test, and whether it
    is backed by RAM rather than real storage.
    """

    def __init__(self, sPath, fRamDisk = False):
        self.sPath    = sPath;
        self.fUsed    = False;
        self.fRamDisk = fRamDisk;

    def getPath(self):
        """
        Return the disk path.
        """
        return self.sPath;

    def isUsed(self):
        """
        Returns whether the disk is currently in use.
        """
        return self.fUsed;

    def isRamDisk(self):
        """
        Returns whether the disk object has a RAM backing.
        """
        return self.fRamDisk;

    def setUsed(self, fUsed):
        """
        Sets the used flag for the disk, returning False when an attempt is
        made to claim a disk that is already in use and True otherwise.
        """
        # Claiming an already claimed disk is the only failure mode;
        # releasing (fUsed == False) always succeeds.
        if fUsed and self.fUsed:
            return False;

        self.fUsed = fUsed;
        return True;
|
---|
class StorageConfigOs(object):
    """
    Base class for a single hosts OS storage configuration.
    """

    def _getDisksMatchingRegExpWithPath(self, sPath, sRegExp):
        """
        Returns a list of StorageDisk objects for the entries of sPath whose
        basename matches the given regular expression and which still exist.
        """
        oRegExp = re.compile(sRegExp);
        return [StorageDisk(sPath + '/' + sFile)
                for sFile in os.listdir(sPath)
                if oRegExp.match(os.path.basename(sFile)) and os.path.exists(sPath + '/' + sFile)];
|
---|
class StorageConfigOsSolaris(StorageConfigOs):
    """
    Class implementing the Solaris specifics for a storage configuration,
    using ZFS pools/volumes and ramdiskadm for RAM backed disks.
    """

    def __init__(self):
        StorageConfigOs.__init__(self);
        self.idxRamDisk = 0;  # Monotonic index used to generate unique ramdisk names.

    def _getActivePoolsStartingWith(self, oExec, sPoolIdStart):
        """
        Returns a list of pools starting with the given ID or None on failure.
        """
        lstPools = None;
        fRc, sOutput, _ = oExec.execBinary('zpool', ('list', '-H'));
        if fRc:
            lstPools = [];
            asPools = sOutput.splitlines();
            for sPool in asPools:
                if sPool.startswith(sPoolIdStart):
                    # 'zpool list -H' emits tab separated columns; the pool
                    # name is the first one.
                    asItems = sPool.split('\t');
                    lstPools.append(asItems[0]);
        return lstPools;

    def _getActiveVolumesInPoolStartingWith(self, oExec, sPool, sVolumeIdStart):
        """
        Returns a list of active volumes for the given pool starting with the given
        identifier or None on failure.
        """
        lstVolumes = None;
        fRc, sOutput, _ = oExec.execBinary('zfs', ('list', '-H'));
        if fRc:
            lstVolumes = [];
            asVolumes = sOutput.splitlines();
            for sVolume in asVolumes:
                if sVolume.startswith(sPool + '/' + sVolumeIdStart):
                    # 'zfs list -H' emits tab separated columns; the dataset
                    # name is the first one.
                    asItems = sVolume.split('\t');
                    lstVolumes.append(asItems[0]);
        return lstVolumes;

    def getDisksMatchingRegExp(self, sRegExp):
        """
        Returns a list of disks matching the regular expression.
        """
        return self._getDisksMatchingRegExpWithPath('/dev/dsk', sRegExp);

    def getMntBase(self):
        """
        Returns the mountpoint base for the host.
        """
        return '/pools';

    def createStoragePool(self, oExec, sPool, asDisks, sRaidLvl):
        """
        Creates a new storage pool with the given disks and the given RAID level.
        Returns True on success.
        """
        # Only raid5 (or unspecified) with more than one disk maps to a
        # raidz vdev; everything else becomes a plain striped pool.
        sZPoolRaid = None;
        if len(asDisks) > 1 and (sRaidLvl == 'raid5' or sRaidLvl is None):
            sZPoolRaid = 'raidz';

        fRc = True;
        if sZPoolRaid is not None:
            fRc = oExec.execBinaryNoStdOut('zpool', ('create', '-f', sPool, sZPoolRaid,) + tuple(asDisks));
        else:
            fRc = oExec.execBinaryNoStdOut('zpool', ('create', '-f', sPool,) + tuple(asDisks));

        return fRc;

    def createVolume(self, oExec, sPool, sVol, sMountPoint, cbVol = None):
        """
        Creates and mounts a filesystem at the given mountpoint using the
        given pool and volume IDs.  cbVol, if given, is the volume size in
        bytes.  Returns True on success.
        """
        fRc = True;
        if cbVol is not None:
            # Stringify the size explicitly, matching the Linux implementation;
            # the executor expects string arguments.
            fRc = oExec.execBinaryNoStdOut('zfs', ('create', '-o', 'mountpoint='+sMountPoint, '-V', str(cbVol),
                                                   sPool + '/' + sVol));
        else:
            fRc = oExec.execBinaryNoStdOut('zfs', ('create', '-o', 'mountpoint='+sMountPoint, sPool + '/' + sVol));

        # @todo Add proper parameters to set proper owner:group ownership, the testcase broke in r133060 for Solaris
        #       because creating directories is now done using the python mkdir API instead of calling 'sudo mkdir...'.
        #       No one noticed though because testboxstor1 went out of action before...
        #       Will get fixed as soon as I'm back home.
        if fRc:
            fRc = oExec.execBinaryNoStdOut('chmod', ('777', sMountPoint));

        return fRc;

    def destroyVolume(self, oExec, sPool, sVol):
        """
        Destroys the given volume.
        """
        fRc = oExec.execBinaryNoStdOut('zfs', ('destroy', sPool + '/' + sVol));
        return fRc;

    def destroyPool(self, oExec, sPool):
        """
        Destroys the given storage pool.
        """
        fRc = oExec.execBinaryNoStdOut('zpool', ('destroy', sPool));
        return fRc;

    def cleanupPoolsAndVolumes(self, oExec, sPoolIdStart, sVolIdStart):
        """
        Cleans up any pools and volumes starting with the name in the given
        parameters.  Returns False if enumeration or any destroy step failed.
        """
        fRc = True;
        lstPools = self._getActivePoolsStartingWith(oExec, sPoolIdStart);
        if lstPools is not None:
            for sPool in lstPools:
                lstVolumes = self._getActiveVolumesInPoolStartingWith(oExec, sPool, sVolIdStart);
                if lstVolumes is not None:
                    # Destroy all the volumes first
                    for sVolume in lstVolumes:
                        fRc2 = oExec.execBinaryNoStdOut('zfs', ('destroy', sVolume));
                        if not fRc2:
                            fRc = fRc2;

                    # Destroy the pool
                    fRc2 = self.destroyPool(oExec, sPool);
                    if not fRc2:
                        fRc = fRc2;
                else:
                    fRc = False;
        else:
            fRc = False;

        return fRc;

    def createRamDisk(self, oExec, cbRamDisk):
        """
        Creates a RAM backed disk with the given size, returning a
        StorageDisk object on success and None on failure.
        """
        oDisk = None;
        sRamDiskName = 'ramdisk%u' % (self.idxRamDisk,);
        fRc, _ , _ = oExec.execBinary('ramdiskadm', ('-a', sRamDiskName, str(cbRamDisk)));
        if fRc:
            self.idxRamDisk += 1;
            oDisk = StorageDisk('/dev/ramdisk/%s' % (sRamDiskName, ), True);

        return oDisk;

    def destroyRamDisk(self, oExec, oDisk):
        """
        Destroys the given ramdisk object.
        """
        sRamDiskName = os.path.basename(oDisk.getPath());
        return oExec.execBinaryNoStdOut('ramdiskadm', ('-d', sRamDiskName));
|
---|
class StorageConfigOsLinux(StorageConfigOs):
    """
    Class implementing the Linux specifics for a storage configuration,
    using mdadm/LVM for pools, sfdisk partitioning for single-disk pools,
    and zram for RAM backed disks.
    """

    def __init__(self):
        StorageConfigOs.__init__(self);
        self.dSimplePools = { }; # Simple storage pools which don't use lvm (just one partition)
        self.dMounts = { }; # Pool/Volume to mountpoint mapping.

    def _getDmRaidLevelFromLvl(self, sRaidLvl):
        """
        Converts our raid level indicators to something mdadm can understand.
        """
        if sRaidLvl is None or sRaidLvl == 'raid0':
            return 'stripe';
        if sRaidLvl == 'raid5':
            return '5';
        if sRaidLvl == 'raid1':
            return 'mirror';
        return 'stripe';

    def getDisksMatchingRegExp(self, sRegExp):
        """
        Returns a list of disks matching the regular expression.
        """
        return self._getDisksMatchingRegExpWithPath('/dev/', sRegExp);

    def getMntBase(self):
        """
        Returns the mountpoint base for the host.
        """
        return '/mnt';

    def createStoragePool(self, oExec, sPool, asDisks, sRaidLvl):
        """
        Creates a new storage pool with the given disks and the given RAID level.
        Returns True on success.
        """
        fRc = True;
        if len(asDisks) == 1 and sRaidLvl is None:
            # Doesn't require LVM, put into the simple pools dictionary so we can
            # use it when creating a volume later.
            self.dSimplePools[sPool] = asDisks[0];
        else:
            # If a RAID is required use dm-raid first to create one.
            # NOTE(review): /dev/md0 is hard-coded, so only one mdadm based
            # pool can exist at a time — confirm callers never create two.
            asLvmPvDisks = asDisks;
            fRc = oExec.execBinaryNoStdOut('mdadm', ('--create', '/dev/md0', '--assume-clean',
                                                     '--level=' + self._getDmRaidLevelFromLvl(sRaidLvl),
                                                     '--raid-devices=' + str(len(asDisks))) + tuple(asDisks));
            if fRc:
                # /dev/md0 is the only block device to use for our volume group.
                asLvmPvDisks = [ '/dev/md0' ];

            # Create a physical volume on every disk first.
            for sLvmPvDisk in asLvmPvDisks:
                fRc = oExec.execBinaryNoStdOut('pvcreate', (sLvmPvDisk, ));
                if not fRc:
                    break;

            if fRc:
                # Create volume group with all physical volumes included
                fRc = oExec.execBinaryNoStdOut('vgcreate', (sPool, ) + tuple(asLvmPvDisks));
        return fRc;

    def createVolume(self, oExec, sPool, sVol, sMountPoint, cbVol = None):
        """
        Creates and mounts a filesystem at the given mountpoint using the
        given pool and volume IDs.  cbVol, if given, is the volume size in
        bytes.  Returns True on success.
        """
        fRc = True;
        sBlkDev = None;
        if sPool in self.dSimplePools:
            sDiskPath = self.dSimplePools.get(sPool);
            if sDiskPath.find('zram') != -1:
                # zram devices can't be partitioned; use the device directly.
                sBlkDev = sDiskPath;
            else:
                # Create a partition with the requested size
                sFdiskScript = ';\n'; # Single partition filling everything
                if cbVol is not None:
                    sFdiskScript = ',' + str(cbVol // 512) + '\n'; # Get number of sectors
                fRc = oExec.execBinaryNoStdOut('sfdisk', ('--no-reread', '--wipe', 'always', '-q', '-f', sDiskPath), \
                                               sFdiskScript);
                if fRc:
                    # NVMe namespaces use a 'p' separator before the partition number.
                    if sDiskPath.find('nvme') != -1:
                        sBlkDev = sDiskPath + 'p1';
                    else:
                        sBlkDev = sDiskPath + '1';
        else:
            if cbVol is None:
                fRc = oExec.execBinaryNoStdOut('lvcreate', ('-l', '100%FREE', '-n', sVol, sPool));
            else:
                fRc = oExec.execBinaryNoStdOut('lvcreate', ('-L', str(cbVol), '-n', sVol, sPool));
            if fRc:
                # Fixed: the original concatenated '/dev/mapper' without the
                # trailing slash, yielding e.g. '/dev/mapperpool0-vol0'.
                # Device mapper nodes live under /dev/mapper/.
                sBlkDev = '/dev/mapper/' + sPool + '-' + sVol;

        if fRc is True and sBlkDev is not None:
            # Create a filesystem and mount it
            fRc = oExec.execBinaryNoStdOut('mkfs.ext4', ('-F', '-F', sBlkDev,));
            fRc = fRc and oExec.mkDir(sMountPoint);
            fRc = fRc and oExec.execBinaryNoStdOut('mount', (sBlkDev, sMountPoint));
            if fRc:
                self.dMounts[sPool + '/' + sVol] = sMountPoint;
        return fRc;

    def destroyVolume(self, oExec, sPool, sVol):
        """
        Destroys the given volume by unmounting it, removing the mountpoint
        and wiping the partition table / removing the logical volume.
        """
        # Unmount first
        sMountPoint = self.dMounts[sPool + '/' + sVol];
        fRc = oExec.execBinaryNoStdOut('umount', (sMountPoint,));
        self.dMounts.pop(sPool + '/' + sVol);
        oExec.rmDir(sMountPoint);
        if sPool in self.dSimplePools:
            # Wipe partition table
            sDiskPath = self.dSimplePools.get(sPool);
            if sDiskPath.find('zram') == -1:
                fRc = oExec.execBinaryNoStdOut('sfdisk', ('--no-reread', '--wipe', 'always', '-q', '-f', '--delete', \
                                               sDiskPath));
        else:
            fRc = oExec.execBinaryNoStdOut('lvremove', (sPool + '/' + sVol,));
        return fRc;

    def destroyPool(self, oExec, sPool):
        """
        Destroys the given storage pool.
        """
        fRc = True;
        if sPool in self.dSimplePools:
            # Nothing was created on the host for simple pools.
            self.dSimplePools.pop(sPool);
        else:
            fRc = oExec.execBinaryNoStdOut('vgremove', (sPool,));
        return fRc;

    def cleanupPoolsAndVolumes(self, oExec, sPoolIdStart, sVolIdStart):
        """
        Cleans up any pools and volumes starting with the name in the given
        parameters.
        """
        # @todo: Needs implementation, for LVM based configs a similar approach can be used
        #        as for Solaris.
        _ = oExec;
        _ = sPoolIdStart;
        _ = sVolIdStart;
        return True;

    def createRamDisk(self, oExec, cbRamDisk):
        """
        Creates a RAM backed disk with the given size, returning a
        StorageDisk object on success and None on failure.
        """
        # Make sure the ZRAM module is loaded.
        oDisk = None;
        fRc = oExec.execBinaryNoStdOut('modprobe', ('zram',));
        if fRc:
            # zramctl prints the allocated device node on stdout.
            fRc, sOut, _ = oExec.execBinary('zramctl', ('--raw', '-f', '-s', str(cbRamDisk)));
            if fRc:
                oDisk = StorageDisk(sOut.rstrip(), True);

        return oDisk;

    def destroyRamDisk(self, oExec, oDisk):
        """
        Destroys the given ramdisk object.
        """
        return oExec.execBinaryNoStdOut('zramctl', ('-r', oDisk.getPath()));
|
---|
414 |
|
---|
## @name Host disk config types.
## @{
g_ksDiskCfgStatic = 'StaticDir'; # Use a static directory on the host (no pool/volume management).
g_ksDiskCfgRegExp = 'RegExp';    # Select host disks via a regular expression.
g_ksDiskCfgList   = 'DiskList';  # Use an explicit list of host disk paths.
## @}
|
---|
421 |
|
---|
class DiskCfg(object):
    """
    Host disk configuration, pairing the target OS with one of the
    g_ksDiskCfg* configuration types and the associated disk description
    (directory path, regular expression or disk list depending on the type).
    """

    def __init__(self, sTargetOs, sCfgType, oDisks):
        self.sTargetOs = sTargetOs;
        self.sCfgType  = sCfgType;
        self.oDisks    = oDisks;

    def getTargetOs(self):
        """ Returns the target OS identifier. """
        return self.sTargetOs;

    def getCfgType(self):
        """ Returns the raw configuration type string. """
        return self.sCfgType;

    def isCfgStaticDir(self):
        """ Checks for the static directory configuration type. """
        return self.sCfgType == g_ksDiskCfgStatic;

    def isCfgRegExp(self):
        """ Checks for the regular expression configuration type. """
        return self.sCfgType == g_ksDiskCfgRegExp;

    def isCfgList(self):
        """ Checks for the disk list configuration type. """
        return self.sCfgType == g_ksDiskCfgList;

    def getDisks(self):
        """ Returns the disk description object (path, regexp or list). """
        return self.oDisks;
|
---|
class StorageCfg(object):
    """
    Storage configuration helper class taking care of the different host OS.

    Wraps a StorageConfigOs* backend selected by the DiskCfg target OS and
    tracks created pools (self.dPools) and volumes (self.dVols) so they can
    be torn down again in cleanup().
    """

    def __init__(self, oExec, oDiskCfg):
        self.oExec = oExec;
        self.lstDisks = [ ]; # List of disks present in the system.
        self.dPools = { }; # Dictionary of storage pools.
        self.dVols = { }; # Dictionary of volumes.
        self.iPoolId = 0; # Next pool sequence number ('pool<N>').
        self.iVolId = 0; # Next volume sequence number ('vol<N>').
        self.oDiskCfg = oDiskCfg;

        fRc = True;
        oStorOs = None;
        if oDiskCfg.getTargetOs() == 'solaris':
            oStorOs = StorageConfigOsSolaris();
        elif oDiskCfg.getTargetOs() == 'linux':
            oStorOs = StorageConfigOsLinux(); # pylint: disable=redefined-variable-type
        elif not oDiskCfg.isCfgStaticDir():
            # For unknown hosts only allow a static testing directory we don't care about setting up
            fRc = False;

        # NOTE(review): when fRc is False self.oStorOs is never assigned, so
        # later method calls would raise AttributeError — confirm callers
        # never use the object after a failed setup.
        if fRc:
            self.oStorOs = oStorOs;
            if oDiskCfg.isCfgRegExp():
                self.lstDisks = oStorOs.getDisksMatchingRegExp(oDiskCfg.getDisks());
            elif oDiskCfg.isCfgList():
                # Assume a list of disks and add.
                for sDisk in oDiskCfg.getDisks():
                    self.lstDisks.append(StorageDisk(sDisk));
            elif oDiskCfg.isCfgStaticDir():
                if not os.path.exists(oDiskCfg.getDisks()):
                    self.oExec.mkDir(oDiskCfg.getDisks(), 0o700);

    def __del__(self):
        # Best-effort teardown when the object is garbage collected.
        self.cleanup();
        self.oDiskCfg = None;

    def cleanup(self):
        """
        Cleans up any created storage configs (volumes first, then pools)
        and resets the id counters.  No-op for static directory configs.
        """

        if not self.oDiskCfg.isCfgStaticDir():
            # Destroy all volumes first.
            for sMountPoint in list(self.dVols.keys()): # pylint: disable=consider-iterating-dictionary
                self.destroyVolume(sMountPoint);

            # Destroy all pools.
            for sPool in list(self.dPools.keys()): # pylint: disable=consider-iterating-dictionary
                self.destroyStoragePool(sPool);

            self.dVols.clear();
            self.dPools.clear();
            self.iPoolId = 0;
            self.iVolId = 0;

    def getRawDisk(self):
        """
        Returns a raw disk device from the list of free devices for use,
        marking it used, or None when all disks are claimed.
        """

        for oDisk in self.lstDisks:
            if oDisk.isUsed() is False:
                oDisk.setUsed(True);
                return oDisk.getPath();

        return None;

    def getUnusedDiskCount(self):
        """
        Returns the number of unused disks.
        """

        cDisksUnused = 0;
        for oDisk in self.lstDisks:
            if not oDisk.isUsed():
                cDisksUnused += 1;

        return cDisksUnused;

    def createStoragePool(self, cDisks = 0, sRaidLvl = None,
                          cbPool = None, fRamDisk = False):
        """
        Create a new storage pool from cDisks unused disks (0 = all unused)
        with the given RAID level, or from a RAM disk of cbPool bytes when
        fRamDisk is set.  Returns (success, pool id) — for static directory
        configs the pool id is always 'StaticDummy'.
        """
        lstDisks = [ ];
        fRc = True;
        sPool = None;

        if not self.oDiskCfg.isCfgStaticDir():
            if fRamDisk:
                oDisk = self.oStorOs.createRamDisk(self.oExec, cbPool);
                if oDisk is not None:
                    lstDisks.append(oDisk);
                    cDisks = 1;
            else:
                if cDisks == 0:
                    cDisks = self.getUnusedDiskCount();

                # Claim unused disks until the request is satisfied.
                for oDisk in self.lstDisks:
                    if not oDisk.isUsed():
                        oDisk.setUsed(True);
                        lstDisks.append(oDisk);
                        if len(lstDisks) == cDisks:
                            break;

            # Enough drives to satisfy the request?
            if len(lstDisks) == cDisks:
                # Create a list of all device paths
                lstDiskPaths = [ ];
                for oDisk in lstDisks:
                    lstDiskPaths.append(oDisk.getPath());

                # Find a name for the pool
                sPool = 'pool' + str(self.iPoolId);
                self.iPoolId += 1;

                fRc = self.oStorOs.createStoragePool(self.oExec, sPool, lstDiskPaths, sRaidLvl);
                if fRc:
                    self.dPools[sPool] = lstDisks;
                else:
                    # Reuse the id on failure so pool names stay contiguous.
                    self.iPoolId -= 1;
            else:
                fRc = False;

            # Cleanup in case of error.
            if not fRc:
                for oDisk in lstDisks:
                    oDisk.setUsed(False);
                    if oDisk.isRamDisk():
                        self.oStorOs.destroyRamDisk(self.oExec, oDisk);
        else:
            sPool = 'StaticDummy';

        return fRc, sPool;

    def destroyStoragePool(self, sPool):
        """
        Destroys the storage pool with the given ID, releasing (and for RAM
        disks destroying) its member disks.  Returns True on success.
        """

        fRc = True;

        if not self.oDiskCfg.isCfgStaticDir():
            lstDisks = self.dPools.get(sPool);
            if lstDisks is not None:
                fRc = self.oStorOs.destroyPool(self.oExec, sPool);
                if fRc:
                    # Mark disks as unused
                    self.dPools.pop(sPool);
                    for oDisk in lstDisks:
                        oDisk.setUsed(False);
                        if oDisk.isRamDisk():
                            self.oStorOs.destroyRamDisk(self.oExec, oDisk);
            else:
                fRc = False;

        return fRc;

    def createVolume(self, sPool, cbVol = None):
        """
        Creates a new volume from the given pool returning (success,
        mountpoint).  For static directory configs the configured directory
        is returned as the mountpoint.
        """

        fRc = True;
        sMountPoint = None;
        if not self.oDiskCfg.isCfgStaticDir():
            if sPool in self.dPools:
                sVol = 'vol' + str(self.iVolId);
                sMountPoint = self.oStorOs.getMntBase() + '/' + sVol;
                self.iVolId += 1;
                fRc = self.oStorOs.createVolume(self.oExec, sPool, sVol, sMountPoint, cbVol);
                if fRc:
                    self.dVols[sMountPoint] = (sVol, sPool);
                else:
                    # Reuse the id on failure so volume names stay contiguous.
                    self.iVolId -= 1;
            else:
                fRc = False;
        else:
            sMountPoint = self.oDiskCfg.getDisks();

        return fRc, sMountPoint;

    def destroyVolume(self, sMountPoint):
        """
        Destroy the volume at the given mount point.

        NOTE(review): an unknown mount point makes dVols.get() return None
        and the tuple unpacking raise TypeError rather than returning False
        — confirm callers only pass mount points obtained from createVolume.
        """

        fRc = True;
        if not self.oDiskCfg.isCfgStaticDir():
            sVol, sPool = self.dVols.get(sMountPoint);
            if sVol is not None:
                fRc = self.oStorOs.destroyVolume(self.oExec, sPool, sVol);
                if fRc:
                    self.dVols.pop(sMountPoint);
            else:
                fRc = False;

        return fRc;

    def mkDirOnVolume(self, sMountPoint, sDir, fMode = 0o700):
        """
        Creates a new directory on the volume pointed to by the given mount point.
        """
        return self.oExec.mkDir(sMountPoint + '/' + sDir, fMode);

    def cleanupLeftovers(self):
        """
        Tries to cleanup any leftover pools and volumes from a failed previous run.
        For static directory configs this removes every entry inside the directory.
        """
        if not self.oDiskCfg.isCfgStaticDir():
            return self.oStorOs.cleanupPoolsAndVolumes(self.oExec, 'pool', 'vol');

        fRc = True;
        if os.path.exists(self.oDiskCfg.getDisks()):
            for sEntry in os.listdir(self.oDiskCfg.getDisks()):
                fRc = fRc and self.oExec.rmTree(os.path.join(self.oDiskCfg.getDisks(), sEntry));

        return fRc;
|
---|