On 2/3/21 4:21 PM, John Paul Adrian Glaubitz wrote:
> On 2/3/21 2:52 PM, John Paul Adrian Glaubitz wrote:
>> I quickly hacked that together now, but it's not really working yet as
>> some size calculations are incorrect:
>
> Works:
In case someone is interested, attaching the preliminary patch.

Adrian

--
 .''`.  John Paul Adrian Glaubitz
: :' :  Debian Developer - glaub...@debian.org
`. `'   Freie Universitaet Berlin - glaub...@physik.fu-berlin.de
  `-    GPG: 62FF 8A75 84E0 2956 9546 0006 7426 3B37 F5B5 F913
diff --git a/newfs_hfs.tproj/makehfs.c b/newfs_hfs.tproj/makehfs.c
index 7315dbc..e4abc56 100644
--- a/newfs_hfs.tproj/makehfs.c
+++ b/newfs_hfs.tproj/makehfs.c
@@ -84,7 +84,7 @@ extern Boolean _CFStringGetFileSystemRepresentation(CFStringRef string, UInt8 *b
 #include "hfs_endian.h"
 #include "newfs_hfs.h"
 
-
+#include "readme.h"
 
 #define HFS_BOOT_DATA	"/usr/share/misc/hfsbootdata"
 
@@ -105,15 +105,23 @@ struct filefork {
 
 struct filefork	gDTDBFork, gSystemFork, gReadMeFork;
 
+static void WriteMDB __P((const DriveInfo *driveInfo, HFS_MDB *mdbp));
+static void InitMDB __P((hfsparams_t *defaults, UInt32 driveBlocks, HFS_MDB *mdbp));
+
 static void WriteVH __P((const DriveInfo *driveInfo, HFSPlusVolumeHeader *hp));
 static void InitVH __P((hfsparams_t *defaults, UInt64 sectors,
 		HFSPlusVolumeHeader *header));
 
+static void WriteBitmap __P((const DriveInfo *dip, UInt32 startingSector,
+		UInt32 alBlksUsed, UInt8 *buffer));
+
 static void AllocateExtent(UInt8 *buffer, UInt32 startBlock, UInt32 blockCount);
 static void WriteExtentsFile __P((const DriveInfo *dip, UInt64 startingSector,
         const hfsparams_t *dp, HFSExtentDescriptor *bbextp, void *buffer,
         UInt32 *bytesUsed, UInt32 *mapNodes));
+static void InitExtentsRoot __P((UInt16 btNodeSize, HFSExtentDescriptor *bbextp,
+		void *buffer));
 
 static void WriteAttributesFile(const DriveInfo *driveInfo, UInt64 startingSector,
         const hfsparams_t *dp, HFSExtentDescriptor *bbextp, void *buffer,
@@ -126,7 +134,18 @@ static int WriteJournalInfo(const DriveInfo *driveInfo, UInt64 startingSector,
                             const hfsparams_t *dp, HFSPlusVolumeHeader *header,
                             void *buffer);
 static void InitCatalogRoot_HFSPlus __P((const hfsparams_t *dp, const HFSPlusVolumeHeader *header, void * buffer));
-
+static void InitCatalogRoot_HFS __P((const hfsparams_t *dp, void * buffer));
+static void InitFirstCatalogLeaf __P((const hfsparams_t *dp, void * buffer,
+		int wrapper));
+static void InitSecondCatalogLeaf __P((const hfsparams_t *dp, void *buffer));
+
+static void WriteDesktopDB(const hfsparams_t *dp, const DriveInfo *driveInfo,
+		UInt32 startingSector, void *buffer, UInt32 *mapNodes);
+
+static void WriteSystemFile __P((const DriveInfo *dip, UInt32 startingSector,
+		UInt32 *filesize));
+static void WriteReadMeFile __P((const DriveInfo *dip, UInt32 startingSector,
+		UInt32 *filesize));
 
 static void WriteMapNodes __P((const DriveInfo *driveInfo, UInt64 diskStart,
 	UInt32 firstMapNode, UInt32 mapNodes, UInt16 btNodeSize, void *buffer));
 static void WriteBuffer __P((const DriveInfo *driveInfo, UInt64 startingSector,
@@ -142,6 +161,8 @@ static UInt32 GetDefaultEncoding();
 
 static UInt32 UTCToLocal __P((UInt32 utcTime));
 
+static UInt32 DivideAndRoundUp __P((UInt32 numerator, UInt32 denominator));
+
 static int ConvertUTF8toUnicode __P((const UInt8* source, size_t bufsize,
 	UniChar* unibuf, UInt16 *charcount));
 
@@ -203,6 +224,146 @@ dowipefs(int fd)
 }
 #endif
 
+/*
+ * make_hfs
+ *
+ * This routine writes an initial HFS volume structure onto a volume.
+ * It is assumed that the disk has already been formatted and verified.
+ *
+ * For information on the HFS volume format see "Data Organization on Volumes"
+ * in "Inside Macintosh: Files" (p. 2-52).
+ *
+ */
+int
+make_hfs(const DriveInfo *driveInfo,
+	 hfsparams_t *defaults,
+	 UInt32 *plusSectors,
+	 UInt32 *plusOffset)
+{
+	UInt32 sector;
+	UInt32 diskBlocksUsed;
+	UInt32 mapNodes;
+	UInt32 sectorsPerBlock;
+	void *nodeBuffer = NULL;
+	HFS_MDB *mdbp = NULL;
+	UInt32 bytesUsed;
+
+	*plusSectors = 0;
+	*plusOffset = 0;
+
+	/* assume sectorSize <= blockSize */
+	sectorsPerBlock = defaults->blockSize / driveInfo->sectorSize;
+
+
+	/*--- CREATE A MASTER DIRECTORY BLOCK: */
+
+	mdbp = (HFS_MDB*)malloc((size_t)kBytesPerSector);
+	nodeBuffer = malloc(8192);  /* max bitmap bytes is 8192 bytes */
+	if (nodeBuffer == NULL || mdbp == NULL)
+		err(1, NULL);
+
+	defaults->encodingHint = getencodinghint(defaults->volumeName);
+
+	/* MDB Initialized in native byte order */
+	InitMDB(defaults, driveInfo->totalSectors, mdbp);
+
+
+	/*--- ZERO OUT BEGINNING OF DISK (bitmap and b-trees): */
+
+	diskBlocksUsed = (mdbp->drAlBlSt + 1) +
+		(mdbp->drXTFlSize + mdbp->drCTFlSize) / kBytesPerSector;
+	if (defaults->flags & kMakeHFSWrapper) {
+		diskBlocksUsed += MAX(kDTDB_Size, mdbp->drAlBlkSiz) / kBytesPerSector;
+		diskBlocksUsed += MAX(sizeof(hfswrap_readme), mdbp->drAlBlkSiz) / kBytesPerSector;
+		diskBlocksUsed += MAX(24 * 1024, mdbp->drAlBlkSiz) / kBytesPerSector;
+	}
+	WriteBuffer(driveInfo, 0, diskBlocksUsed * kBytesPerSector, NULL);
+	/* also clear out last 8 sectors (4K) */
+	WriteBuffer(driveInfo, driveInfo->totalSectors - 8, 4 * 1024, NULL);
+
+	/* If this is a wrapper, add boot files... */
+	if (defaults->flags & kMakeHFSWrapper) {
+
+		sector = mdbp->drAlBlSt +
+			 mdbp->drXTFlSize/kBytesPerSector +
+			 mdbp->drCTFlSize/kBytesPerSector;
+
+		WriteDesktopDB(defaults, driveInfo, sector, nodeBuffer, &mapNodes);
+
+		if (mapNodes > 0)
+			WriteMapNodes(driveInfo, (sector + 1), 1, mapNodes, kHFSNodeSize, nodeBuffer);
+
+		gDTDBFork.logicalSize = MAX(kDTDB_Size, mdbp->drAlBlkSiz);
+		gDTDBFork.startBlock = (sector - mdbp->drAlBlSt) / sectorsPerBlock;
+		gDTDBFork.blockCount = BYTESTOBLKS(gDTDBFork.logicalSize, mdbp->drAlBlkSiz);
+		gDTDBFork.physicalSize = gDTDBFork.blockCount * mdbp->drAlBlkSiz;
+
+		sector += gDTDBFork.physicalSize / kBytesPerSector;
+
+		WriteReadMeFile(driveInfo, sector, &gReadMeFork.logicalSize);
+		gReadMeFork.startBlock = gDTDBFork.startBlock + gDTDBFork.blockCount;
+		gReadMeFork.blockCount = BYTESTOBLKS(gReadMeFork.logicalSize, mdbp->drAlBlkSiz);
+		gReadMeFork.physicalSize = gReadMeFork.blockCount * mdbp->drAlBlkSiz;
+
+		sector += gReadMeFork.physicalSize / kBytesPerSector;
+
+		WriteSystemFile(driveInfo, sector, &gSystemFork.logicalSize);
+		gSystemFork.startBlock = gReadMeFork.startBlock + gReadMeFork.blockCount;
+		gSystemFork.blockCount = BYTESTOBLKS(gSystemFork.logicalSize, mdbp->drAlBlkSiz);
+		gSystemFork.physicalSize = gSystemFork.blockCount * mdbp->drAlBlkSiz;
+
+		mdbp->drFreeBks -= gDTDBFork.blockCount + gReadMeFork.blockCount + gSystemFork.blockCount;
+		mdbp->drEmbedExtent.startBlock = mdbp->drNmAlBlks - (UInt16)mdbp->drFreeBks;
+		mdbp->drEmbedExtent.blockCount = (UInt16)mdbp->drFreeBks;
+		mdbp->drFreeBks = 0;
+	}
+
+
+	/*--- WRITE ALLOCATION BITMAP TO DISK: */
+
+	WriteBitmap(driveInfo, mdbp->drVBMSt, mdbp->drNmAlBlks - (UInt16)mdbp->drFreeBks, nodeBuffer);
+
+
+	/*--- WRITE FILE EXTENTS B*-TREE TO DISK: */
+
+	sector = mdbp->drAlBlSt;	/* reset */
+	WriteExtentsFile(driveInfo, sector, defaults, &mdbp->drEmbedExtent, nodeBuffer, &bytesUsed, &mapNodes);
+
+	if (mapNodes > 0)
+		WriteMapNodes(driveInfo, (sector + bytesUsed/kBytesPerSector),
+			bytesUsed/kHFSNodeSize, mapNodes, kHFSNodeSize,
+			nodeBuffer);
+
+	sector += (mdbp->drXTFlSize/kBytesPerSector);
+
+
+	/*--- WRITE CATALOG B*-TREE TO DISK: */
+
+	WriteCatalogFile(driveInfo, sector, defaults, NULL, nodeBuffer, &bytesUsed, &mapNodes);
+
+	if (mapNodes > 0)
+		WriteMapNodes(driveInfo, (sector + bytesUsed/kBytesPerSector),
+			bytesUsed/kHFSNodeSize, mapNodes, kHFSNodeSize, nodeBuffer);
+
+
+	/*--- WRITE MASTER DIRECTORY BLOCK TO DISK: */
+
+	*plusSectors = mdbp->drEmbedExtent.blockCount *
+		(mdbp->drAlBlkSiz / driveInfo->sectorSize);
+	*plusOffset = mdbp->drAlBlSt + mdbp->drEmbedExtent.startBlock *
+		(mdbp->drAlBlkSiz / driveInfo->sectorSize);
+
+	/* write mdb last in case we fail along the way */
+
+	/* Writes both copies of the MDB */
+	WriteMDB(driveInfo, mdbp);
+	/* MDB is now big-endian */
+
+	free(nodeBuffer);
+	free(mdbp);
+
+	return (0);
+}
+
 /*
  * make_hfsplus
  *
@@ -384,6 +545,150 @@ make_hfsplus(const DriveInfo *driveInfo, hfsparams_t *defaults)
 	return (0);
 }
 
+
+/*
+ * WriteMDB
+ *
+ * Writes the Master Directory Block (MDB) to disk.
+ *
+ * The MDB is byte-swapped if necessary to big endian. Since this
+ * is always the last operation, there's no point in unswapping it.
+ */
+static void
+WriteMDB (const DriveInfo *driveInfo, HFS_MDB *mdbp)
+{
+	SWAP_HFSMDB (mdbp);
+
+	WriteBuffer(driveInfo, kMDBStart, kBytesPerSector, mdbp);
+	WriteBuffer(driveInfo, driveInfo->totalSectors - 2, kBytesPerSector, mdbp);
+}
+
+
+/*
+ * InitMDB
+ *
+ * Initialize a Master Directory Block (MDB) record.
+ *
+ * If the alignment parameter is non-zero, it indicates the alignment
+ * (in 512 byte sectors) that should be used for allocation blocks.
+ * For example, if alignment is 8, then allocation blocks will begin
+ * on a 4K boundary relative to the start of the partition.
+ *
+ */
+static void
+InitMDB(hfsparams_t *defaults, UInt32 driveBlocks, HFS_MDB *mdbp)
+{
+	UInt32 alBlkSize;
+	UInt16 numAlBlks;
+	UInt32 timeStamp;
+	UInt16 bitmapBlocks;
+	UInt32 alignment;
+	VolumeUUID newVolumeUUID;
+	VolumeUUID* finderInfoUUIDPtr;
+
+	alignment = defaults->hfsAlignment;
+	bzero(mdbp, kBytesPerSector);
+
+	alBlkSize = defaults->blockSize;
+
+	/* calculate the number of sectors needed for bitmap (rounded up) */
+	if (defaults->flags & kMakeMaxHFSBitmap)
+		bitmapBlocks = kHFSMaxAllocationBlks / kBitsPerSector;
+	else
+		bitmapBlocks = ((driveBlocks / (alBlkSize >> kLog2SectorSize)) +
+				kBitsPerSector-1) / kBitsPerSector;
+
+	mdbp->drAlBlSt = kVolBitMapStart + bitmapBlocks;  /* in sectors (disk blocks) */
+
+	/* If requested, round up block start to a multiple of "alignment" blocks */
+	if (alignment != 0)
+		mdbp->drAlBlSt = ((mdbp->drAlBlSt + alignment - 1) / alignment) * alignment;
+
+	/* Now find out how many whole allocation blocks remain... */
+	numAlBlks = (driveBlocks - mdbp->drAlBlSt - kTailBlocks) /
+		    (alBlkSize >> kLog2SectorSize);
+
+	timeStamp = UTCToLocal(defaults->createDate);
+
+	mdbp->drSigWord = kHFSSigWord;
+	mdbp->drCrDate = timeStamp;
+	mdbp->drLsMod = timeStamp;
+	mdbp->drAtrb = kHFSVolumeUnmountedMask;
+	mdbp->drVBMSt = kVolBitMapStart;
+	mdbp->drNmAlBlks = numAlBlks;
+	mdbp->drAlBlkSiz = alBlkSize;
+	mdbp->drClpSiz = defaults->dataClumpSize;
+	mdbp->drNxtCNID = defaults->nextFreeFileID;
+	mdbp->drFreeBks = numAlBlks;
+
+	/*
+	 * Map UTF-8 input into a Mac encoding.
+	 * On conversion errors "untitled" is used as a fallback.
+	 */
+#if !LINUX
+	{
+		UniChar unibuf[kHFSMaxVolumeNameChars];
+		CFStringRef cfstr;
+		CFIndex maxchars;
+		Boolean cfOK;
+
+		cfstr = CFStringCreateWithCString(kCFAllocatorDefault, (char *)defaults->volumeName, kCFStringEncodingUTF8);
+
+		/* Find out what Mac encoding to use: */
+		maxchars = MIN(sizeof(unibuf)/sizeof(UniChar), CFStringGetLength(cfstr));
+		CFStringGetCharacters(cfstr, CFRangeMake(0, maxchars), unibuf);
+		cfOK = CFStringGetPascalString(cfstr, mdbp->drVN, sizeof(mdbp->drVN), defaults->encodingHint);
+		CFRelease(cfstr);
+
+		if (!cfOK) {
+			mdbp->drVN[0] = strlen(kDefaultVolumeNameStr);
+			bcopy(kDefaultVolumeNameStr, &mdbp->drVN[1], mdbp->drVN[0]);
+			defaults->encodingHint = 0;
+			warnx("invalid HFS name: \"%s\", using \"%s\" instead",
+			      defaults->volumeName, kDefaultVolumeNameStr);
+		}
+		/* defaults->volumeName is used later for the root dir key */
+		bcopy(&mdbp->drVN[1], defaults->volumeName, mdbp->drVN[0]);
+		defaults->volumeName[mdbp->drVN[0]] = '\0';
+	}
+#endif
+	/* Save the encoding hint in the Finder Info (field 4). */
+	mdbp->drFndrInfo[4] = SET_HFS_TEXT_ENCODING(defaults->encodingHint);
+
+	mdbp->drWrCnt = kWriteSeqNum;
+
+	mdbp->drXTFlSize = mdbp->drXTClpSiz = defaults->extentsClumpSize;
+	mdbp->drXTExtRec[0].startBlock = 0;
+	mdbp->drXTExtRec[0].blockCount = mdbp->drXTFlSize / alBlkSize;
+	mdbp->drFreeBks -= mdbp->drXTExtRec[0].blockCount;
+
+	mdbp->drCTFlSize = mdbp->drCTClpSiz = defaults->catalogClumpSize;
+	mdbp->drCTExtRec[0].startBlock = mdbp->drXTExtRec[0].startBlock +
+					 mdbp->drXTExtRec[0].blockCount;
+	mdbp->drCTExtRec[0].blockCount = mdbp->drCTFlSize / alBlkSize;
+	mdbp->drFreeBks -= mdbp->drCTExtRec[0].blockCount;
+
+	if (defaults->flags & kMakeHFSWrapper) {
+		mdbp->drFilCnt = mdbp->drNmFls = kWapperFileCount;
+		mdbp->drNxtCNID += kWapperFileCount;
+
+		/* set blessed system folder to be root folder (2) */
+		mdbp->drFndrInfo[0] = kHFSRootFolderID;
+
+		mdbp->drEmbedSigWord = kHFSPlusSigWord;
+
+		/* software lock it and tag as having "bad" blocks */
+		mdbp->drAtrb |= kHFSVolumeSparedBlocksMask;
+		mdbp->drAtrb |= kHFSVolumeSoftwareLockMask;
+	}
+	/* Generate and write UUID for the HFS disk */
+	GenerateVolumeUUID(&newVolumeUUID);
+	finderInfoUUIDPtr = (VolumeUUID *)(&mdbp->drFndrInfo[6]);
+	finderInfoUUIDPtr->v.high = OSSwapHostLongToBig(newVolumeUUID.v.high);
+	finderInfoUUIDPtr->v.low = OSSwapHostLongToBig(newVolumeUUID.v.low);
+}
+
+
 /*
  * WriteVH
  *
@@ -579,6 +884,41 @@ InitVH(hfsparams_t *defaults, UInt64 sectors, HFSPlusVolumeHeader *hp)
 }
 
 
+/*
+ * WriteBitmap
+ *
+ * This routine initializes the Allocation Bitmap. Allocation blocks
+ * that are in use have their corresponding bit set.
+ *
+ * It assumes that initially there are no gaps between allocated blocks.
+ *
+ * It also assumes the buffer is big enough to hold all the bits
+ * (i.e., it's at least (alBlksUsed/8) bytes in size).
+ */
+static void
+WriteBitmap(const DriveInfo *driveInfo, UInt32 startingSector,
+	    UInt32 alBlksUsed, UInt8 *buffer)
+{
+	UInt32 bytes, bits, bytesUsed;
+
+	bytes = alBlksUsed >> 3;
+	bits = alBlksUsed & 0x0007;
+
+	(void)memset(buffer, 0xFF, bytes);
+
+	if (bits) {
+		*(UInt8 *)(buffer + bytes) = (0xFF00 >> bits) & 0xFF;
+		++bytes;
+	}
+
+	bytesUsed = ROUNDUP(bytes, driveInfo->sectorSize);
+
+	if (bytesUsed > bytes)
+		bzero(buffer + bytes, bytesUsed - bytes);
+
+	WriteBuffer(driveInfo, startingSector, bytesUsed, buffer);
+}
+
+
 /*
  * AllocateExtent
  *
@@ -634,6 +974,7 @@ WriteExtentsFile(const DriveInfo *driveInfo, UInt64 startingSector,
 	UInt32 nodeSize;
 	UInt32 temp;
 	SInt16 offset;
+	int wrapper = (dp->flags & kMakeHFSWrapper);
 
 	*mapNodes = 0;
 	fileSize = dp->extentsClumpSize;
@@ -663,8 +1004,22 @@ WriteExtentsFile(const DriveInfo *driveInfo, UInt64 startingSector,
 	bthp->freeNodes = SWAP_BE32 (SWAP_BE32 (bthp->totalNodes) - 1);  /* header */
 	bthp->clumpSize = SWAP_BE32 (fileSize);
 
-	bthp->attributes |= SWAP_BE32 (kBTBigKeysMask);
-	bthp->maxKeyLength = SWAP_BE16 (kHFSPlusExtentKeyMaximumLength);
+	if (dp->flags & kMakeStandardHFS) {
+		bthp->maxKeyLength = SWAP_BE16 (kHFSExtentKeyMaximumLength);
+
+		/* wrapper has a bad-block extent record */
+		if (wrapper) {
+			bthp->treeDepth = SWAP_BE16 (SWAP_BE16 (bthp->treeDepth) + 1);
+			bthp->leafRecords = SWAP_BE32 (SWAP_BE32 (bthp->leafRecords) + 1);
+			bthp->rootNode = SWAP_BE32 (1);
+			bthp->firstLeafNode = SWAP_BE32 (1);
+			bthp->lastLeafNode = SWAP_BE32 (1);
+			bthp->freeNodes = SWAP_BE32 (SWAP_BE32 (bthp->freeNodes) - 1);
+		}
+	} else {
+		bthp->attributes |= SWAP_BE32 (kBTBigKeysMask);
+		bthp->maxKeyLength = SWAP_BE16 (kHFSPlusExtentKeyMaximumLength);
+	}
 
 	offset += sizeof(BTHeaderRec);
 
 	SETOFFSET(buffer, nodeSize, offset, 2);
@@ -708,12 +1063,55 @@ WriteExtentsFile(const DriveInfo *driveInfo, UInt64 startingSector,
 	offset += nodeBitsInHeader/8;
 
 	SETOFFSET(buffer, nodeSize, offset, 4);
-
+
+	if (wrapper) {
+		InitExtentsRoot(nodeSize, bbextp, (buffer + nodeSize));
+	}
+
 	*bytesUsed = (SWAP_BE32 (bthp->totalNodes) - SWAP_BE32 (bthp->freeNodes) - *mapNodes) * nodeSize;
 
 	WriteBuffer(driveInfo, startingSector, *bytesUsed, buffer);
 }
 
+static void
+InitExtentsRoot(UInt16 btNodeSize, HFSExtentDescriptor *bbextp, void *buffer)
+{
+	BTNodeDescriptor *ndp;
+	HFSExtentKey *ekp;
+	HFSExtentRecord *edp;
+	SInt16 offset;
+
+	bzero(buffer, btNodeSize);
+
+	/*
+	 * All nodes have a node descriptor...
+	 */
+	ndp = (BTNodeDescriptor *)buffer;
+	ndp->kind = kBTLeafNode;
+	ndp->numRecords = SWAP_BE16 (1);
+	ndp->height = 1;
+	offset = sizeof(BTNodeDescriptor);
+
+	SETOFFSET(buffer, btNodeSize, offset, 1);
+
+	/*
+	 * First and only record is bad block extents...
+	 */
+	ekp = (HFSExtentKey *)((UInt8 *) buffer + offset);
+	ekp->keyLength = kHFSExtentKeyMaximumLength;
+	// ekp->forkType = 0;
+	ekp->fileID = SWAP_BE32 (kHFSBadBlockFileID);
+	// ekp->startBlock = 0;
+	offset += sizeof(HFSExtentKey);
+
+	edp = (HFSExtentRecord *)((UInt8 *)buffer + offset);
+	edp[0]->startBlock = SWAP_BE16 (bbextp->startBlock);
+	edp[0]->blockCount = SWAP_BE16 (bbextp->blockCount);
+	offset += sizeof(HFSExtentRecord);
+
+	SETOFFSET(buffer, btNodeSize, offset, 2);
+}
+
 /*
  * WriteAttributesFile
  *
@@ -936,6 +1334,7 @@ WriteCatalogFile(const DriveInfo *driveInfo, UInt64 startingSector,
 	UInt32 nodeSize;
 	UInt32 temp;
 	SInt16 offset;
+	int wrapper = (dp->flags & kMakeHFSWrapper);
 
 	*mapNodes = 0;
 	fileSize = dp->catalogClumpSize;
@@ -965,14 +1364,25 @@ WriteCatalogFile(const DriveInfo *driveInfo, UInt64 startingSector,
 	bthp->freeNodes = SWAP_BE32 (SWAP_BE32 (bthp->totalNodes) - 2);  /* header and root */
 	bthp->clumpSize = SWAP_BE32 (fileSize);
 
+	if (dp->flags & kMakeStandardHFS) {
+		bthp->maxKeyLength = SWAP_BE16 (kHFSCatalogKeyMaximumLength);
+
+		if (dp->flags & kMakeHFSWrapper) {
+			bthp->treeDepth = SWAP_BE16 (SWAP_BE16 (bthp->treeDepth) + 1);
+			bthp->leafRecords = SWAP_BE32 (SWAP_BE32 (bthp->leafRecords) + kWapperFileCount);
+			bthp->firstLeafNode = SWAP_BE32 (SWAP_BE32 (bthp->rootNode) + 1);
+			bthp->lastLeafNode = SWAP_BE32 (SWAP_BE32 (bthp->firstLeafNode) + 1);
+			bthp->freeNodes = SWAP_BE32 (SWAP_BE32 (bthp->freeNodes) - 2);  /* tree now split with 2 leaf nodes */
+		}
+	} else /* HFS+ */ {
+		bthp->attributes |= SWAP_BE32 (kBTVariableIndexKeysMask + kBTBigKeysMask);
+		bthp->maxKeyLength = SWAP_BE16 (kHFSPlusCatalogKeyMaximumLength);
+		if (dp->flags & kMakeCaseSensitive)
+			bthp->keyCompareType = kHFSBinaryCompare;
+		else
+			bthp->keyCompareType = kHFSCaseFolding;
+	}
-
-	bthp->attributes |= SWAP_BE32 (kBTVariableIndexKeysMask + kBTBigKeysMask);
-	bthp->maxKeyLength = SWAP_BE16 (kHFSPlusCatalogKeyMaximumLength);
-	if (dp->flags & kMakeCaseSensitive)
-		bthp->keyCompareType = kHFSBinaryCompare;
-	else
-		bthp->keyCompareType = kHFSCaseFolding;
-
 	offset += sizeof(BTHeaderRec);
 
 	SETOFFSET(buffer, nodeSize, offset, 2);
@@ -1015,7 +1425,17 @@ WriteCatalogFile(const DriveInfo *driveInfo, UInt64 startingSector,
 
 	SETOFFSET(buffer, nodeSize, offset, 4);
 
-	InitCatalogRoot_HFSPlus(dp, header, buffer + nodeSize);
+	if ((dp->flags & kMakeStandardHFS) == 0) {
+		InitCatalogRoot_HFSPlus(dp, header, buffer + nodeSize);
+
+	} else if (wrapper) {
+		InitCatalogRoot_HFS (dp, buffer + (1 * nodeSize));
+		InitFirstCatalogLeaf (dp, buffer + (2 * nodeSize), TRUE);
+		InitSecondCatalogLeaf(dp, buffer + (3 * nodeSize));
+
+	} else /* plain HFS */ {
+		InitFirstCatalogLeaf(dp, buffer + nodeSize, FALSE);
+	}
 
 	*bytesUsed = (SWAP_BE32 (bthp->totalNodes) - SWAP_BE32 (bthp->freeNodes) - *mapNodes) * nodeSize;
 
@@ -1235,6 +1655,445 @@ InitCatalogRoot_HFSPlus(const hfsparams_t *dp, const HFSPlusVolumeHeader *header
 	}
 }
 
+
+static void
+InitFirstCatalogLeaf(const hfsparams_t *dp, void * buffer, int wrapper)
+{
+	BTNodeDescriptor *ndp;
+	HFSCatalogKey *ckp;
+	HFSCatalogKey *tkp;
+	HFSCatalogFolder *cdp;
+	HFSCatalogFile *cfp;
+	HFSCatalogThread *ctp;
+	UInt16 nodeSize;
+	SInt16 offset;
+	UInt32 timeStamp;
+
+	nodeSize = dp->catalogNodeSize;
+	timeStamp = UTCToLocal(dp->createDate);
+	bzero(buffer, nodeSize);
+
+	/*
+	 * All nodes have a node descriptor...
+	 */
+	ndp = (BTNodeDescriptor *)buffer;
+	ndp->kind = kBTLeafNode;
+	ndp->numRecords = SWAP_BE16 (2);
+	ndp->height = 1;
+	offset = sizeof(BTNodeDescriptor);
+
+	SETOFFSET(buffer, nodeSize, offset, 1);
+
+	/*
+	 * First record is always the root directory...
+	 */
+	ckp = (HFSCatalogKey *)((UInt8 *)buffer + offset);
+	ckp->nodeName[0] = strlen((char *)dp->volumeName);
+	bcopy(dp->volumeName, &ckp->nodeName[1], ckp->nodeName[0]);
+	ckp->keyLength = 1 + 4 + ((ckp->nodeName[0] + 2) & 0xFE);  /* pad to word */
+	ckp->parentID = SWAP_BE32 (kHFSRootParentID);
+	offset += ckp->keyLength + 1;
+
+	cdp = (HFSCatalogFolder *)((UInt8 *)buffer + offset);
+	cdp->recordType = SWAP_BE16 (kHFSFolderRecord);
+	if (wrapper)
+		cdp->valence = SWAP_BE16 (SWAP_BE16 (cdp->valence) + kWapperFileCount);
+	cdp->folderID = SWAP_BE32 (kHFSRootFolderID);
+	cdp->createDate = SWAP_BE32 (timeStamp);
+	cdp->modifyDate = SWAP_BE32 (timeStamp);
+	offset += sizeof(HFSCatalogFolder);
+
+	SETOFFSET(buffer, nodeSize, offset, 2);
+
+	/*
+	 * Second record is always the root directory thread...
+	 */
+	tkp = (HFSCatalogKey *)((UInt8 *)buffer + offset);
+	tkp->keyLength = kHFSCatalogKeyMinimumLength;
+	tkp->parentID = SWAP_BE32 (kHFSRootFolderID);
+	// tkp->nodeName[0] = 0;
+	offset += tkp->keyLength + 2;
+
+	ctp = (HFSCatalogThread *)((UInt8 *)buffer + offset);
+	ctp->recordType = SWAP_BE16 (kHFSFolderThreadRecord);
+	ctp->parentID = SWAP_BE32 (kHFSRootParentID);
+	bcopy(ckp->nodeName, ctp->nodeName, ckp->nodeName[0]+1);
+	offset += sizeof(HFSCatalogThread);
+
+	SETOFFSET(buffer, nodeSize, offset, 3);
+
+	/*
+	 * For Wrapper volumes there are more file records...
+	 */
+	if (wrapper) {
+		ndp->fLink = SWAP_BE32 (3);
+		ndp->numRecords = SWAP_BE16 (SWAP_BE16 (ndp->numRecords) + 2);
+
+		/*
+		 * Add "Desktop DB" file...
+		 */
+		ckp = (HFSCatalogKey *)((UInt8 *)buffer + offset);
+		ckp->keyLength = 1 + 4 + ((kDTDB_Chars + 2) & 0xFE);  /* pad to word */
+		ckp->parentID = SWAP_BE32 (kHFSRootFolderID);
+		ckp->nodeName[0] = kDTDB_Chars;
+		bcopy(kDTDB_Name, &ckp->nodeName[1], kDTDB_Chars);
+		offset += ckp->keyLength + 1;
+
+		cfp = (HFSCatalogFile *)((UInt8 *)buffer + offset);
+		cfp->recordType = SWAP_BE16 (kHFSFileRecord);
+		cfp->userInfo.fdType = SWAP_BE32 (kDTDB_Type);
+		cfp->userInfo.fdCreator = SWAP_BE32 (kDTDB_Creator);
+		cfp->userInfo.fdFlags = SWAP_BE16 (kIsInvisible);
+		cfp->fileID = SWAP_BE32 (kDTDB_FileID);
+		cfp->createDate = SWAP_BE32 (timeStamp);
+		cfp->modifyDate = SWAP_BE32 (timeStamp);
+		cfp->dataExtents[0].startBlock = SWAP_BE16 (gDTDBFork.startBlock);
+		cfp->dataExtents[0].blockCount = SWAP_BE16 (gDTDBFork.blockCount);
+		cfp->dataPhysicalSize = SWAP_BE32 (gDTDBFork.physicalSize);
+		cfp->dataLogicalSize = SWAP_BE32 (gDTDBFork.logicalSize);
+		offset += sizeof(HFSCatalogFile);
+
+		SETOFFSET(buffer, nodeSize, offset, 4);
+
+		/*
+		 * Add empty "Desktop DF" file...
+		 */
+		ckp = (HFSCatalogKey *)((UInt8 *)buffer + offset);
+		ckp->keyLength = 1 + 4 + ((kDTDF_Chars + 2) & 0xFE);  /* pad to word */
+		ckp->parentID = SWAP_BE32 (kHFSRootFolderID);
+		ckp->nodeName[0] = kDTDF_Chars;
+		bcopy(kDTDF_Name, &ckp->nodeName[1], kDTDF_Chars);
+		offset += ckp->keyLength + 1;
+
+		cfp = (HFSCatalogFile *)((UInt8 *)buffer + offset);
+		cfp->recordType = SWAP_BE16 (kHFSFileRecord);
+		cfp->userInfo.fdType = SWAP_BE32 (kDTDF_Type);
+		cfp->userInfo.fdCreator = SWAP_BE32 (kDTDF_Creator);
+		cfp->userInfo.fdFlags = SWAP_BE16 (kIsInvisible);
+		cfp->fileID = SWAP_BE32 (kDTDF_FileID);
+		cfp->createDate = SWAP_BE32 (timeStamp);
+		cfp->modifyDate = SWAP_BE32 (timeStamp);
+		offset += sizeof(HFSCatalogFile);
+
+		SETOFFSET(buffer, nodeSize, offset, 5);
+	}
+}
+
+
+static void
+InitSecondCatalogLeaf(const hfsparams_t *dp, void * buffer)
+{
+	BTNodeDescriptor *ndp;
+	HFSCatalogKey *ckp;
+	HFSCatalogFile *cfp;
+	UInt16 nodeSize;
+	SInt16 offset;
+	UInt32 timeStamp;
+
+	nodeSize = dp->catalogNodeSize;
+	timeStamp = UTCToLocal(dp->createDate);
+	bzero(buffer, nodeSize);
+
+	/*
+	 * All nodes have a node descriptor...
+	 */
+	ndp = (BTNodeDescriptor *)buffer;
+	ndp->bLink = SWAP_BE32 (2);
+	ndp->kind = kBTLeafNode;
+	ndp->numRecords = SWAP_BE16 (3);
+	ndp->height = 1;
+	offset = sizeof(BTNodeDescriptor);
+
+	SETOFFSET(buffer, nodeSize, offset, 1);
+
+	/*
+	 * Add "Finder" file...
+	 */
+	ckp = (HFSCatalogKey *)((UInt8 *)buffer + offset);
+	ckp->keyLength = 1 + 4 + ((kFinder_Chars + 2) & 0xFE);  /* pad to word */
+	ckp->parentID = SWAP_BE32 (kHFSRootFolderID);
+	ckp->nodeName[0] = kFinder_Chars;
+	bcopy(kFinder_Name, &ckp->nodeName[1], kFinder_Chars);
+	offset += ckp->keyLength + 1;
+
+	cfp = (HFSCatalogFile *)((UInt8 *)buffer + offset);
+	cfp->recordType = SWAP_BE16 (kHFSFileRecord);
+	cfp->userInfo.fdType = SWAP_BE32 (kFinder_Type);
+	cfp->userInfo.fdCreator = SWAP_BE32 (kFinder_Creator);
+	cfp->userInfo.fdFlags = SWAP_BE16 (kIsInvisible + kNameLocked + kHasBeenInited);
+	cfp->fileID = SWAP_BE32 (kFinder_FileID);
+	cfp->createDate = SWAP_BE32 (timeStamp);
+	cfp->modifyDate = SWAP_BE32 (timeStamp);
+	offset += sizeof(HFSCatalogFile);
+
+	SETOFFSET(buffer, nodeSize, offset, 2);
+
+	/*
+	 * Add "ReadMe" file...
+	 */
+	ckp = (HFSCatalogKey *)((UInt8 *)buffer + offset);
+	ckp->keyLength = 1 + 4 + ((kReadMe_Chars + 2) & 0xFE);  /* pad to word */
+	ckp->parentID = SWAP_BE32 (kHFSRootFolderID);
+	ckp->nodeName[0] = kReadMe_Chars;
+	bcopy(kReadMe_Name, &ckp->nodeName[1], kReadMe_Chars);
+	offset += ckp->keyLength + 1;
+
+	cfp = (HFSCatalogFile *)((UInt8 *)buffer + offset);
+	cfp->recordType = SWAP_BE16 (kHFSFileRecord);
+	cfp->userInfo.fdType = SWAP_BE32 (kReadMe_Type);
+	cfp->userInfo.fdCreator = SWAP_BE32 (kReadMe_Creator);
+	cfp->fileID = SWAP_BE32 (kReadMe_FileID);
+	cfp->createDate = SWAP_BE32 (timeStamp);
+	cfp->modifyDate = SWAP_BE32 (timeStamp);
+	cfp->dataExtents[0].startBlock = SWAP_BE16 (gReadMeFork.startBlock);
+	cfp->dataExtents[0].blockCount = SWAP_BE16 (gReadMeFork.blockCount);
+	cfp->dataPhysicalSize = SWAP_BE32 (gReadMeFork.physicalSize);
+	cfp->dataLogicalSize = SWAP_BE32 (gReadMeFork.logicalSize);
+	offset += sizeof(HFSCatalogFile);
+
+	SETOFFSET(buffer, nodeSize, offset, 3);
+
+	/*
+	 * Add "System" file...
+	 */
+	ckp = (HFSCatalogKey *)((UInt8 *)buffer + offset);
+	ckp->keyLength = 1 + 4 + ((kSystem_Chars + 2) & 0xFE);  /* pad to word */
+	ckp->parentID = SWAP_BE32 (kHFSRootFolderID);
+	ckp->nodeName[0] = kSystem_Chars;
+	bcopy(kSystem_Name, &ckp->nodeName[1], kSystem_Chars);
+	offset += ckp->keyLength + 1;
+
+	cfp = (HFSCatalogFile *)((UInt8 *)buffer + offset);
+	cfp->recordType = SWAP_BE16 (kHFSFileRecord);
+	cfp->userInfo.fdType = SWAP_BE32 (kSystem_Type);
+	cfp->userInfo.fdCreator = SWAP_BE32 (kSystem_Creator);
+	cfp->userInfo.fdFlags = SWAP_BE16 (kIsInvisible + kNameLocked + kHasBeenInited);
+	cfp->fileID = SWAP_BE32 (kSystem_FileID);
+	cfp->createDate = SWAP_BE32 (timeStamp);
+	cfp->modifyDate = SWAP_BE32 (timeStamp);
+	cfp->rsrcExtents[0].startBlock = SWAP_BE16 (gSystemFork.startBlock);
+	cfp->rsrcExtents[0].blockCount = SWAP_BE16 (gSystemFork.blockCount);
+	cfp->rsrcPhysicalSize = SWAP_BE32 (gSystemFork.physicalSize);
+	cfp->rsrcLogicalSize = SWAP_BE32 (gSystemFork.logicalSize);
+	offset += sizeof(HFSCatalogFile);
+
+	SETOFFSET(buffer, nodeSize, offset, 4);
+}
+
+
+static void
+InitCatalogRoot_HFS(const hfsparams_t *dp, void * buffer)
+{
+	BTNodeDescriptor *ndp;
+	HFSCatalogKey *ckp;
+	UInt32 *prp;  /* pointer record */
+	UInt16 nodeSize;
+	SInt16 offset;
+
+	nodeSize = dp->catalogNodeSize;
+	bzero(buffer, nodeSize);
+
+	/*
+	 * All nodes have a node descriptor...
+	 */
+	ndp = (BTNodeDescriptor *)buffer;
+	ndp->kind = kBTIndexNode;
+	ndp->numRecords = SWAP_BE16 (2);
+	ndp->height = 2;
+	offset = sizeof(BTNodeDescriptor);
+
+	SETOFFSET(buffer, nodeSize, offset, 1);
+
+	/*
+	 * Add root directory index...
+	 */
+	ckp = (HFSCatalogKey *)((UInt8 *)buffer + offset);
+	ckp->keyLength = kHFSCatalogKeyMaximumLength;
+	ckp->parentID = SWAP_BE32 (kHFSRootParentID);
+	ckp->nodeName[0] = strlen((char *)dp->volumeName);
+	bcopy(dp->volumeName, &ckp->nodeName[1], ckp->nodeName[0]);
+	offset += ckp->keyLength + 1;
+
+	prp = (UInt32 *)((UInt8 *)buffer + offset);
+	*prp = SWAP_BE32 (2);  /* point to first leaf node */
+	offset += sizeof(UInt32);
+
+	SETOFFSET(buffer, nodeSize, offset, 2);
+
+	/*
+	 * Add finder file index...
+	 */
+	ckp = (HFSCatalogKey *)((UInt8 *)buffer + offset);
+	ckp->keyLength = kHFSCatalogKeyMaximumLength;
+	ckp->parentID = SWAP_BE32 (kHFSRootFolderID);
+	ckp->nodeName[0] = kFinder_Chars;
+	bcopy(kFinder_Name, &ckp->nodeName[1], kFinder_Chars);
+	offset += ckp->keyLength + 1;
+
+	prp = (UInt32 *)((UInt8 *)buffer + offset);
+	*prp = SWAP_BE32 (3);  /* point to last leaf node */
+	offset += sizeof(UInt32);
+
+	SETOFFSET(buffer, nodeSize, offset, 3);
+}
+
+
+static void
+WriteDesktopDB(const hfsparams_t *dp, const DriveInfo *driveInfo,
+	UInt32 startingSector, void *buffer, UInt32 *mapNodes)
+{
+	BTNodeDescriptor *ndp;
+	BTHeaderRec *bthp;
+	UInt8 *bmp;
+	UInt32 nodeBitsInHeader;
+	UInt32 fileSize;
+	UInt32 nodeSize;
+	UInt32 temp;
+	SInt16 offset;
+	UInt8 *keyDiscP;
+
+	*mapNodes = 0;
+	fileSize = gDTDBFork.logicalSize;
+	nodeSize = kHFSNodeSize;
+
+	bzero(buffer, nodeSize);
+
+	/* FILL IN THE NODE DESCRIPTOR: */
+	ndp = (BTNodeDescriptor *)buffer;
+	ndp->kind = kBTHeaderNode;
+	ndp->numRecords = SWAP_BE16 (3);
+	offset = sizeof(BTNodeDescriptor);
+
+	SETOFFSET(buffer, nodeSize, offset, 1);
+
+	/* FILL IN THE HEADER RECORD: */
+	bthp = (BTHeaderRec *)((UInt8 *)buffer + offset);
+	// bthp->treeDepth = 0;
+	// bthp->rootNode = 0;
+	// bthp->firstLeafNode = 0;
+	// bthp->lastLeafNode = 0;
+	// bthp->leafRecords = 0;
+	bthp->nodeSize = SWAP_BE16 (nodeSize);
+	bthp->maxKeyLength = SWAP_BE16 (37);
+	bthp->totalNodes = SWAP_BE32 (fileSize / nodeSize);
+	bthp->freeNodes = SWAP_BE32 (SWAP_BE32 (bthp->totalNodes) - 1);  /* header */
+	bthp->clumpSize = SWAP_BE32 (fileSize);
+	bthp->btreeType = 0xFF;
+	offset += sizeof(BTHeaderRec);
+
+	SETOFFSET(buffer, nodeSize, offset, 2);
+
+	keyDiscP = (UInt8 *)((UInt8 *)buffer + offset);
+	*keyDiscP++ = 2;           /* length of descriptor */
+	*keyDiscP++ = KD_USEPROC;  /* always uses a compare proc */
+	*keyDiscP++ = 1;           /* just one of them */
+	offset += kBTreeHeaderUserBytes;
+
+	SETOFFSET(buffer, nodeSize, offset, 3);
+
+	/* FIGURE OUT HOW MANY MAP NODES (IF ANY): */
+	nodeBitsInHeader = 8 * (nodeSize
+				- sizeof(BTNodeDescriptor)
+				- sizeof(BTHeaderRec)
+				- kBTreeHeaderUserBytes
+				- (4 * sizeof(SInt16)) );
+
+	if (SWAP_BE32 (bthp->totalNodes) > nodeBitsInHeader) {
+		UInt32 nodeBitsInMapNode;
+
+		ndp->fLink = SWAP_BE32 (SWAP_BE32 (bthp->lastLeafNode) + 1);
+		nodeBitsInMapNode = 8 * (nodeSize
+					- sizeof(BTNodeDescriptor)
+					- (2 * sizeof(SInt16))
+					- 2 );
+		*mapNodes = (SWAP_BE32 (bthp->totalNodes) - nodeBitsInHeader +
+			     (nodeBitsInMapNode - 1)) / nodeBitsInMapNode;
+		bthp->freeNodes = SWAP_BE32 (SWAP_BE32 (bthp->freeNodes) - *mapNodes);
+	}
+
+	/*
+	 * FILL IN THE MAP RECORD, MARKING NODES THAT ARE IN USE.
+	 * Note - worst case (32MB alloc blk) will have only 18 nodes in use.
+	 */
+	bmp = ((UInt8 *)buffer + offset);
+	temp = SWAP_BE32 (bthp->totalNodes) - SWAP_BE32 (bthp->freeNodes);
+
+	/* Working a byte at a time is endian safe */
+	while (temp >= 8) { *bmp = 0xFF; temp -= 8; bmp++; }
+	*bmp = ~(0xFF >> temp);
+	offset += nodeBitsInHeader/8;
+
+	SETOFFSET(buffer, nodeSize, offset, 4);
+
+	WriteBuffer(driveInfo, startingSector, kHFSNodeSize, buffer);
+}
+
+
+static void
+WriteSystemFile(const DriveInfo *dip, UInt32 startingSector, UInt32 *filesize)
+{
+	int fd;
+	ssize_t datasize, writesize;
+	UInt8 *buf;
+	struct stat stbuf;
+
+	if (stat(HFS_BOOT_DATA, &stbuf) < 0)
+		err(1, "stat %s", HFS_BOOT_DATA);
+
+	datasize = stbuf.st_size;
+	writesize = ROUNDUP(datasize, dip->sectorSize);
+
+	if (datasize > (64 * 1024))
+		errx(1, "hfsbootdata file too big.");
+
+	if ((buf = malloc(writesize)) == NULL)
+		err(1, NULL);
+
+	if ((fd = open(HFS_BOOT_DATA, O_RDONLY, 0)) < 0)
+		err(1, "open %s", HFS_BOOT_DATA);
+
+	if (read(fd, buf, datasize) != datasize) {
+		if (errno)
+			err(1, "read %s", HFS_BOOT_DATA);
+		else
+			errx(1, "problems reading %s", HFS_BOOT_DATA);
+	}
+
+	if (writesize > datasize)
+		bzero(buf + datasize, writesize - datasize);
+
+	WriteBuffer(dip, startingSector, writesize, buf);
+
+	close(fd);
+
+	free(buf);
+
+	*filesize = datasize;
+}
+
+
+static void
+WriteReadMeFile(const DriveInfo *dip, UInt32 startingSector, UInt32 *filesize)
+{
+	ssize_t datasize, writesize;
+	UInt8 *buf;
+
+	datasize = sizeof(hfswrap_readme);
+	writesize = ROUNDUP(datasize, dip->sectorSize);
+
+	if ((buf = malloc(writesize)) == NULL)
+		err(1, NULL);
+
+	bcopy(hfswrap_readme, buf, datasize);
+	if (writesize > datasize)
+		bzero(buf + datasize, writesize - datasize);
+
+	WriteBuffer(dip, startingSector, writesize, buf);
+
+	*filesize = datasize;
+}
+
+
 /*
  * WriteMapNodes
  *
@@ -1457,6 +2316,20 @@ static UInt32 UTCToLocal(UInt32 utcTime)
 	return (localTime);
 }
 
+
+static UInt32
+DivideAndRoundUp(UInt32 numerator, UInt32 denominator)
+{
+	UInt32 quotient;
+
+	quotient = numerator / denominator;
+	if (quotient * denominator != numerator)
+		quotient++;
+
+	return quotient;
+}
+
+
 #if !LINUX
 #define __kCFUserEncodingFileName ("/.CFUserTextEncoding")
diff --git a/newfs_hfs.tproj/newfs_hfs.c b/newfs_hfs.tproj/newfs_hfs.c
index 88eb1b3..34ac386 100644
--- a/newfs_hfs.tproj/newfs_hfs.c
+++ b/newfs_hfs.tproj/newfs_hfs.c
@@ -82,10 +82,12 @@ static void getclumpopts __P((char* optlist));
 static gid_t a_gid __P((char *));
 static uid_t a_uid __P((char *));
 static mode_t a_mask __P((char *));
-static int hfs_newfs __P((char *device));
+static int hfs_newfs __P((char *device, int forceHFS));
 static void validate_hfsplus_block_size __P((UInt64 sectorCount, UInt32 sectorSize));
 static void hfsplus_params __P((const DriveInfo* dip, hfsparams_t *defaults));
+static void hfs_params __P((const DriveInfo* dip, hfsparams_t *defaults));
 static UInt32 clumpsizecalc __P((UInt32 clumpblocks));
+static UInt32 CalcBTreeClumpSize __P((UInt32 blockSize, UInt32 nodeSize, UInt32 driveBlocks, int catalog));
 static UInt32 CalcHFSPlusBTreeClumpSize __P((UInt32 blockSize, UInt32 nodeSize, UInt64 sectors, int fileID));
 static void usage __P((void));
 static int get_high_bit (u_int64_t bitstring);
@@ -103,6 +105,7 @@ UInt32	gNextCNID = kHFSFirstUserCatalogNodeID;
 time_t  createtime;
 
 int	gNoCreate = FALSE;
+int	gWrapper = FALSE;
 int	gUserCatNodeSize = FALSE;
 int	gCaseSensitive = FALSE;
 int	gUserAttrSize = FALSE;
@@ -179,6 +182,7 @@ main(argc, argv)
 	extern char *optarg;
 	extern int optind;
 	int ch;
+	int forceHFS;
 #if !LINUX
 	char *cp, *special;
 	struct statfs *mp;
@@ -190,8 +194,9 @@ main(argc, argv)
 	else
 		progname = *argv;
 
+	forceHFS = FALSE;
 
-	while ((ch = getopt(argc, argv, "G:J:D:M:N:PU:hsb:c:i:n:v:")) != EOF)
+	while ((ch = getopt(argc, argv, "G:J:D:M:N:PU:hswb:c:i:n:v:")) != EOF)
 		switch (ch) {
 		case 'G':
 			gGroupID = a_gid(optarg);
@@ -255,6 +260,10 @@ main(argc, argv)
 			getclumpopts(optarg);
 			break;
 
+		case 'h':
+			forceHFS = TRUE;
+			break;
+
 		case 'i':
 			gNextCNID = atoi(optarg);
 			/*
@@ -286,6 +295,10 @@ main(argc, argv)
 #endif
 			break;
 
+		case 'w':
+			gWrapper = TRUE;
+			break;
+
 		case '?':
 		default:
 			usage();
@@ -338,7 +351,25 @@ main(argc, argv)
 		}
 #endif
 	}
-	if (hfs_newfs(blkdevice) < 0) {
+
+	if (forceHFS && gJournaled) {
+		fprintf(stderr, "-h -J: incompatible options specified\n");
+		usage();
+	}
+	if (gCaseSensitive && (forceHFS || gWrapper)) {
+		fprintf(stderr, "-s: incompatible options specified\n");
+		usage();
+	}
+	if (gWrapper && forceHFS) {
+		fprintf(stderr, "-h -w: incompatible options specified\n");
+		usage();
+	}
+	if (!gWrapper && hfsgrowblks) {
+		fprintf(stderr, "g clump option requires -w option\n");
+		exit(1);
+	}
+
+	if (hfs_newfs(blkdevice, forceHFS) < 0) {
 #if LINUX
 		err(1, "cannot create filesystem on %s", blkdevice);
 #else
@@ -657,7 +688,7 @@ static void validate_hfsplus_block_size(UInt64 sectorCount, UInt32 sectorSize)
 
 
 static int
-hfs_newfs(char *device)
+hfs_newfs(char *device, int forceHFS)
 {
 	struct stat stbuf;
 	DriveInfo dip = { 0 };
@@ -776,35 +807,58 @@ hfs_newfs(char *device)
 	 * HFS Plus allocation block size. This will also calculate a default allocation
 	 * block size if none (or zero) was specified.
 	 */
-	validate_hfsplus_block_size(dip.totalSectors, dip.sectorSize);
+	if (!forceHFS)
+		validate_hfsplus_block_size(dip.totalSectors, dip.sectorSize);
+
+	/* Make an HFS disk */
+	if (forceHFS || gWrapper) {
+		hfs_params(&dip, &defaults);
+		if (gNoCreate == 0) {
+			UInt32 totalSectors, sectorOffset;
+
+			retval = make_hfs(&dip, &defaults, &totalSectors, &sectorOffset);
+			if (retval)
+				fatal("%s: %s", device, strerror(errno));
+
+			if (gWrapper) {
+				dip.totalSectors = totalSectors;
+				dip.sectorOffset = sectorOffset;
+			} else {
+				printf("Initialized %s as a %ld MB HFS volume\n",
+				       device, (long)(dip.totalSectors/2048));
+			}
+		}
+	}
 
 	/* Make an HFS Plus disk */
+	if (gWrapper || !forceHFS) {
 
-	if ((dip.totalSectors * dip.sectorSize ) < kMinHFSPlusVolumeSize)
-		fatal("%s: partition is too small (minimum is %d KB)", device, kMinHFSPlusVolumeSize/1024);
-
-	hfsplus_params(&dip, &defaults);
-	if (gNoCreate == 0) {
-		retval = make_hfsplus(&dip, &defaults);
-		if (retval == 0) {
-			printf("Initialized %s as a ", device);
-			if (dip.totalSectors > 2048ULL*1024*1024)
-				printf("%ld TB",
-				       (long)((dip.totalSectors + (1024ULL*1024*1024))/(2048ULL*1024*1024)));
-			else if (dip.totalSectors > 2048*1024)
-				printf("%ld GB",
-				       (long)((dip.totalSectors + (1024*1024))/(2048*1024)));
-			else if (dip.totalSectors > 2048)
-				printf("%ld MB",
-				       (long)((dip.totalSectors + 1024)/2048));
-			else
-				printf("%ld KB",
-				       (long)((dip.totalSectors + 1)/2));
-			if (gJournaled)
-				printf(" HFS Plus volume with a %uk journal\n",
-				       (u_int32_t)defaults.journalSize/1024);
-			else
-				printf(" HFS Plus volume\n");
+		if ((dip.totalSectors * dip.sectorSize ) < kMinHFSPlusVolumeSize)
+			fatal("%s: partition is too small (minimum is %d KB)", device, kMinHFSPlusVolumeSize/1024);
+
+		hfsplus_params(&dip, &defaults);
+		if (gNoCreate == 0) {
+			retval = make_hfsplus(&dip, &defaults);
+			if (retval == 0) {
+				printf("Initialized %s as a ", device);
+				if (dip.totalSectors > 2048ULL*1024*1024)
+					printf("%ld TB",
+					       (long)((dip.totalSectors + (1024ULL*1024*1024))/(2048ULL*1024*1024)));
+				else if (dip.totalSectors > 2048*1024)
+					printf("%ld GB",
+					       (long)((dip.totalSectors + (1024*1024))/(2048*1024)));
+				else if (dip.totalSectors > 2048)
+					printf("%ld MB",
+					       (long)((dip.totalSectors + 1024)/2048));
+				else
+					printf("%ld KB",
+					       (long)((dip.totalSectors + 1)/2));
+				if (gJournaled)
+					printf(" HFS Plus volume with a %uk journal\n",
+					       (u_int32_t)defaults.journalSize/1024);
+				else
+					printf(" HFS Plus volume\n");
+			}
 		}
 	}
 
@@ -1081,6 +1135,89 @@ static void hfsplus_params (const DriveInfo* dip, hfsparams_t *defaults)
 }
 
 
+static void hfs_params(const DriveInfo* dip, hfsparams_t *defaults)
+{
+	UInt64 sectorCount = dip->totalSectors;
+	UInt32 sectorSize = dip->sectorSize;
+	UInt32 alBlkSize;
+	UInt32 vSectorCount;
+	UInt32 defaultBlockSize;
+
+	defaults->flags = kMakeStandardHFS;
+	defaults->nextFreeFileID = gNextCNID;
+	defaults->createDate = createtime + MAC_GMT_FACTOR;  /* Mac OS GMT time */
+	defaults->catalogNodeSize = kHFSNodeSize;
+	defaults->extentsNodeSize = kHFSNodeSize;
+	defaults->attributesNodeSize = 0;
+	defaults->attributesClumpSize = 0;
+
+	strncpy((char *)defaults->volumeName, gVolumeName, sizeof(defaults->volumeName) - 1);
+	defaults->volumeName[sizeof(defaults->volumeName) - 1] = '\0';
+
+	/* Compute the default allocation block size */
+	if (gWrapper && hfsgrowblks) {
+		defaults->flags |= kMakeMaxHFSBitmap;
+		vSectorCount = ((UInt64)hfsgrowblks * 512) / sectorSize;
+		defaultBlockSize = sectorSize * ((vSectorCount >> 16) + 1);
+	} else
+		defaultBlockSize = sectorSize * ((sectorCount >> 16) + 1);
+
+	if (gWrapper) {
+		defaults->flags |= kMakeHFSWrapper;
+
+		/* round alBlkSize up to multiple of HFS Plus blockSize */
+		alBlkSize = ((defaultBlockSize + gBlockSize - 1) / gBlockSize) * gBlockSize;
+
+		if (gBlockSize > 4096)
+			defaults->hfsAlignment = 4096 / sectorSize;  /* Align to 4K boundary */
+		else
+			defaults->hfsAlignment = gBlockSize / sectorSize;  /* Align to blockSize boundary */
+	} else {
+		/* If allocation block size is undefined or invalid, calculate it... */
+		alBlkSize = gBlockSize;
+		defaults->hfsAlignment = 0;
+	}
+
+	if ( alBlkSize == 0 || (alBlkSize & 0x1FF) != 0 || alBlkSize < defaultBlockSize)
+		alBlkSize = defaultBlockSize;
+
+	defaults->blockSize = alBlkSize;
+
+	defaults->dataClumpSize = alBlkSize * 4;
+	defaults->rsrcClumpSize = alBlkSize * 4;
+	if ( gWrapper || defaults->dataClumpSize > 0x100000 )
+		defaults->dataClumpSize = alBlkSize;
+
+	if (gWrapper) {
+		if (alBlkSize == kHFSNodeSize) {
+			defaults->extentsClumpSize = (2 * kHFSNodeSize);  /* header + root/leaf */
+			defaults->catalogClumpSize = (4 * kHFSNodeSize);  /* header + root + 2 leaves */
+		} else {
+			defaults->extentsClumpSize = alBlkSize;
+			defaults->catalogClumpSize = alBlkSize;
+		}
+	} else {
+		defaults->catalogClumpSize = CalcBTreeClumpSize(alBlkSize, sectorSize, sectorCount, TRUE);
+		defaults->extentsClumpSize = CalcBTreeClumpSize(alBlkSize, sectorSize, sectorCount, FALSE);
+	}
+
+	if (gNoCreate) {
+		printf("%lld sectors at %ld bytes per sector\n", dip->physTotalSectors, dip->physSectorSize);
"HFS Wrapper" : "HFS"); + printf("\tvolume name: \"%s\"\n", gVolumeName); + printf("\tblock-size: %ld\n", defaults->blockSize); + printf("\ttotal blocks: %lld\n", sectorCount / (alBlkSize / sectorSize) ); + printf("\tfirst free catalog node id: %ld\n", defaults->nextFreeFileID); + printf("\tinitial catalog file size: %ld\n", defaults->catalogClumpSize); + printf("\tinitial extents file size: %ld\n", defaults->extentsClumpSize); + printf("\tfile clump size: %ld\n", defaults->dataClumpSize); + /* hfsgrowblks is in terms of 512-byte sectors */ + if (hfsgrowblks) + printf("\twrapper growable from %lld to %ld sectors\n", dip->physTotalSectors, hfsgrowblks * kBytesPerSector / dip->physSectorSize); + } +} + + static UInt32 clumpsizecalc(UInt32 clumpblocks) { @@ -1095,6 +1232,97 @@ clumpsizecalc(UInt32 clumpblocks) } +/* + * CalcBTreeClumpSize + * + * This routine calculates the file clump size for both the catalog and + * extents overflow files. In general, this is 1/128 the size of the + * volume up to a maximum of 6 MB. For really large HFS volumes it will + * be just 1 allocation block. + */ +static UInt32 +CalcBTreeClumpSize(UInt32 blockSize, UInt32 nodeSize, UInt32 driveBlocks, int catalog) +{ + UInt32 clumpSectors; + UInt32 maximumClumpSectors; + UInt32 sectorsPerBlock = blockSize >> kLog2SectorSize; + UInt32 sectorsPerNode = nodeSize >> kLog2SectorSize; + UInt32 nodeBitsInHeader; + UInt32 limitClumpSectors; + + if (catalog) + limitClumpSectors = 6 * 1024 * 1024 / 512; /* overall limit of 6MB */ + else + limitClumpSectors = 4 * 1024 * 1024 / 512; /* overall limit of 4MB */ + /* + * For small node sizes (eg., HFS, or default HFS Plus extents), then the clump size will + * be as big as the header's map record can handle. (That is, as big as possible, without + * requiring a map node.) + * + * But for a 32K node size, this works out to nearly 8GB. We need to restrict it further. + * To avoid arithmetic overflow, we'll calculate things in terms of 512-byte sectors. + */ + nodeBitsInHeader = 8 * (nodeSize - sizeof(BTNodeDescriptor) + - sizeof(BTHeaderRec) + - kBTreeHeaderUserBytes + - (4 * sizeof(SInt16))); + maximumClumpSectors = nodeBitsInHeader * sectorsPerNode; + + if ( maximumClumpSectors > limitClumpSectors ) + maximumClumpSectors = limitClumpSectors; + + /* + * For very large HFS volumes, the allocation block size might be larger than the arbitrary limit + * we set above. Since we have to allocate at least one allocation block, then use that as the + * clump size. + * + * Otherwise, we want to use about 1/128 of the volume, again subject to the above limit. + * To avoid arithmetic overflow, we continue to work with sectors. + * + * But for very small volumes (less than 64K), we'll just use 4 allocation blocks. And that + * will typically be 2KB. 
+	 */
+	if ( sectorsPerBlock >= maximumClumpSectors )
+	{
+		clumpSectors = sectorsPerBlock;  /* for really large volumes just use one allocation block (HFS only) */
+	}
+	else
+	{
+		/*
+		 * For large volumes, the default is 1/128 of the volume size, up to the maximumClumpSize
+		 */
+		if ( driveBlocks > 128 )
+		{
+			clumpSectors = (driveBlocks / 128);  /* the default is 1/128 of the volume size */
+
+			if (clumpSectors > maximumClumpSectors)
+				clumpSectors = maximumClumpSectors;
+		}
+		else
+		{
+			clumpSectors = sectorsPerBlock * 4;  /* for really small volumes (ie < 64K) */
+		}
+	}
+
+	/*
+	 * And we need to round up to something that is a multiple of both the node size
+	 * and the allocation block size so that it will occupy a whole number of
+	 * allocation blocks, and so a whole number of nodes will fit.
+	 *
+	 * For HFS, the node size is always 512, and the allocation block size is always
+	 * a multiple of 512.  For HFS Plus, both the node size and allocation block size
+	 * are powers of 2.  So, it suffices to round up to whichever value is larger
+	 * (since the larger value is always a multiple of the smaller value).
+	 */
+	if ( sectorsPerNode > sectorsPerBlock )
+		clumpSectors = (clumpSectors / sectorsPerNode) * sectorsPerNode;    /* truncate to nearest node */
+	else
+		clumpSectors = (clumpSectors / sectorsPerBlock) * sectorsPerBlock;  /* truncate to nearest node and allocation block */
+
+	/* Finally, convert the clump size to bytes. */
+	return clumpSectors << kLog2SectorSize;
+}
+
+
 #define CLUMP_ENTRIES	15
 
 short clumptbl[CLUMP_ENTRIES * 3] = {
@@ -1247,8 +1475,10 @@ void usage()
 	fprintf(stderr, "usage: %s [-N [partition-size]] [hfsplus-options] special-device\n", progname);
 	fprintf(stderr, "  options:\n");
+	fprintf(stderr, "\t-h create an HFS format filesystem (HFS Plus is the default)\n");
 	fprintf(stderr, "\t-N do not create file system, just print out parameters\n");
 	fprintf(stderr, "\t-s use case-sensitive filenames (default is case-insensitive)\n");
+	fprintf(stderr, "\t-w add an HFS wrapper (i.e. Native Mac OS 9 bootable)\n");
 	fprintf(stderr, "  where hfsplus-options are:\n");
 	fprintf(stderr, "\t-J [journal-size] make this HFS+ volume journaled\n");