On Sat, 18 Oct 2025 at 09:34, <[email protected]> wrote:
>
> This is an automated email from the ASF dual-hosted git repository.
>
> pkarwasz pushed a commit to branch feat/fuzzing
> in repository https://gitbox.apache.org/repos/asf/commons-compress.git
>
> commit 02236cbefcea032a545b37b336739023b4feb991
> Author: Piotr P. Karwasz <[email protected]>
> AuthorDate: Wed Sep 24 14:47:11 2025 +0200
>
>     feat: Add generator for archive tests
> ---
>  pom.xml                                            |   6 +
>  .../archivers/dump/DumpArchiveConstants.java       |   2 +-
>  .../compress/archivers/dump/DumpArchiveEntry.java  |   2 +-
>  .../compress/archivers/TestArchiveGenerator.java   | 581 +++++++++++++++++++++
>  .../archivers/fuzzing/AbstractArjHeader.java       |  91 ++++
>  .../archivers/fuzzing/AbstractCpioHeader.java      |  75 +++
>  .../archivers/fuzzing/AbstractTarHeader.java       | 107 ++++
>  .../archivers/fuzzing/AbstractWritable.java        |  40 ++
>  .../compress/archivers/fuzzing/ArHeader.java       |  73 +++
>  .../compress/archivers/fuzzing/ArjLocalHeader.java |  49 ++
>  .../compress/archivers/fuzzing/ArjMainHeader.java  |  49 ++
>  .../archivers/fuzzing/CpioBinaryHeader.java        |  57 ++
>  .../archivers/fuzzing/CpioNewAsciiHeader.java      |  67 +++
>  .../archivers/fuzzing/CpioOldAsciiHeader.java      |  56 ++
>  .../archivers/fuzzing/DumpDirectoryEntry.java      |  90 ++++
>  .../archivers/fuzzing/DumpLocalHeader.java         |  88 ++++
>  .../archivers/fuzzing/DumpSummaryHeader.java       | 127 +++++
>  .../compress/archivers/fuzzing/GnuTarHeader.java   |  37 ++
>  .../compress/archivers/fuzzing/PosixTarHeader.java |  38 ++
>  .../fuzzing/ZipCentralDirectoryHeader.java         | 149 ++++++
>  .../fuzzing/ZipEndOfCentralDirectory.java          | 148 ++++++
>  .../compress/archivers/fuzzing/ZipLocalHeader.java |  66 +++
>  .../compress/archivers/fuzzing/package-info.java   |  19 +
>  23 files changed, 2015 insertions(+), 2 deletions(-)
>
> diff --git a/pom.xml b/pom.xml
> index 36e41c0e1..b3e0df136 100644
> --- a/pom.xml
> +++ b/pom.xml
> @@ -215,6 +215,12 @@ Brotli, Zstandard and ar, cpio, jar, tar, zip, dump, 7z, arj.
>        <artifactId>junit-pioneer</artifactId>
>        <scope>test</scope>
>      </dependency>
> +    <dependency>
> +      <groupId>com.code-intelligence</groupId>
> +      <artifactId>jazzer-api</artifactId>
> +      <version>0.25.1</version>
> +      <scope>test</scope>
> +    </dependency>
>    </dependencies>
>    <scm>
>      <connection>scm:git:https://gitbox.apache.org/repos/asf/commons-compress.git</connection>
> diff --git a/src/main/java/org/apache/commons/compress/archivers/dump/DumpArchiveConstants.java b/src/main/java/org/apache/commons/compress/archivers/dump/DumpArchiveConstants.java
> index 8ffd11597..7677dd967 100644
> --- a/src/main/java/org/apache/commons/compress/archivers/dump/DumpArchiveConstants.java
> +++ b/src/main/java/org/apache/commons/compress/archivers/dump/DumpArchiveConstants.java
> @@ -122,7 +122,7 @@ public static SEGMENT_TYPE find(final int code) {
>              return null;
>          }
>
> -        final int code;
> +        public final int code;

-1

This exposes what is currently an internal implementation detail.
There are other ways to make the value available to the fuzzing test
code: for example, a test class in the same package could expose it
through a getter.

We should not modify the public API to satisfy test requirements
unless absolutely necessary.
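
For illustration, a rough sketch of what I mean (the class and method
names below are placeholders, nothing that exists in the commit):

package org.apache.commons.compress.archivers.dump;

/**
 * Test-only accessor kept in the same package as the dump classes, so the
 * fuzzing generators can read the package-private segment code without the
 * enum field becoming public.
 */
public final class DumpTestAccess {

    private DumpTestAccess() {
        // static helper only
    }

    /** Returns the raw on-disk code of a segment type. */
    public static int codeOf(final DumpArchiveConstants.SEGMENT_TYPE type) {
        return type.code; // package-private field, visible within this package
    }
}

Something along these lines should work as-is for SEGMENT_TYPE, whose
code field is currently package-private. For DumpArchiveEntry.TYPE the
field is private, so the same approach would need the field to become
package-private, or the enum could expose a package-private getter
instead of a public field.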

>
>          SEGMENT_TYPE(final int code) {
>              this.code = code;
> diff --git a/src/main/java/org/apache/commons/compress/archivers/dump/DumpArchiveEntry.java b/src/main/java/org/apache/commons/compress/archivers/dump/DumpArchiveEntry.java
> index 83e469f44..63476344f 100644
> --- a/src/main/java/org/apache/commons/compress/archivers/dump/DumpArchiveEntry.java
> +++ b/src/main/java/org/apache/commons/compress/archivers/dump/DumpArchiveEntry.java
> @@ -377,7 +377,7 @@ public static TYPE find(final int code) {
>              return type;
>          }
>
> -        private final int code;
> +        public final int code;
>
>          TYPE(final int code) {
>              this.code = code;
> diff --git 
> a/src/test/java/org/apache/commons/compress/archivers/TestArchiveGenerator.java
>  
> b/src/test/java/org/apache/commons/compress/archivers/TestArchiveGenerator.java
> new file mode 100644
> index 000000000..cf369bf11
> --- /dev/null
> +++ 
> b/src/test/java/org/apache/commons/compress/archivers/TestArchiveGenerator.java
> @@ -0,0 +1,581 @@
> +package org.apache.commons.compress.archivers;
> +
> +import static java.nio.charset.StandardCharsets.US_ASCII;
> +
> +import java.io.IOException;
> +import java.io.OutputStream;
> +import java.io.PrintWriter;
> +import java.nio.ByteBuffer;
> +import java.nio.ByteOrder;
> +import java.nio.channels.WritableByteChannel;
> +import java.nio.file.Files;
> +import java.nio.file.Path;
> +import java.nio.file.Paths;
> +import java.nio.file.StandardOpenOption;
> +import 
> org.apache.commons.compress.archivers.dump.DumpArchiveConstants.SEGMENT_TYPE;
> +import org.apache.commons.compress.archivers.dump.DumpArchiveEntry.TYPE;
> +import org.apache.commons.compress.archivers.fuzzing.ArHeader;
> +import org.apache.commons.compress.archivers.fuzzing.ArjLocalHeader;
> +import org.apache.commons.compress.archivers.fuzzing.ArjMainHeader;
> +import org.apache.commons.compress.archivers.fuzzing.CpioBinaryHeader;
> +import org.apache.commons.compress.archivers.fuzzing.CpioNewAsciiHeader;
> +import org.apache.commons.compress.archivers.fuzzing.CpioOldAsciiHeader;
> +import org.apache.commons.compress.archivers.fuzzing.DumpDirectoryEntry;
> +import org.apache.commons.compress.archivers.fuzzing.DumpLocalHeader;
> +import org.apache.commons.compress.archivers.fuzzing.DumpSummaryHeader;
> +import org.apache.commons.compress.archivers.fuzzing.PosixTarHeader;
> +import 
> org.apache.commons.compress.archivers.fuzzing.ZipCentralDirectoryHeader;
> +import 
> org.apache.commons.compress.archivers.fuzzing.ZipEndOfCentralDirectory;
> +import org.apache.commons.compress.archivers.fuzzing.ZipLocalHeader;
> +import org.apache.commons.io.IOUtils;
> +import org.apache.commons.lang3.StringUtils;
> +
> +/**
> + * Utility to generate test archives with specific properties.
> + * <p>
> + * Run from the command line, it takes one argument: the output directory.
> + * </p>
> + * <p>
> + * The generated files are checked into the src/test/resources/invalid 
> directory.
> + * </p>
> + */
> +public final class TestArchiveGenerator {
> +
> +    private static final int TIMESTAMP = 0;
> +    private static final int OWNER_ID = 0;
> +    private static final int GROUP_ID = 0;
> +
> +    @SuppressWarnings("OctalInteger")
> +    private static final int FILE_MODE = 0100644;
> +
> +    // Maximum size for a Java array: AR, CPIO and TAR support longer names
> +    private static final int SOFT_ARRAY_MAX_SIZE = Integer.MAX_VALUE - 8;
> +    private static final int ARJ_MAX_SIZE = 2568; // ARJ header - fixed 
> fields
> +
> +    public static void main(final String[] args) throws IOException {
> +        if (args.length != 1) {
> +            System.err.println("Expected one argument: output directory");
> +            System.exit(1);
> +        }
> +        final Path path = Paths.get(args[0]);
> +        if (!Files.isDirectory(path)) {
> +            System.err.println("Not a directory: " + path);
> +            System.exit(1);
> +        }
> +        // Long name examples
> +        final Path longNamePath = path.resolve("long-name");
> +        Files.createDirectories(longNamePath);
> +        generateLongFileNames(longNamePath);
> +    }
> +
> +    public static void generateLongFileNames(final Path path) throws 
> IOException {
> +        if (!Files.isDirectory(path)) {
> +            throw new IOException("Not a directory: " + path);
> +        }
> +        Files.createDirectories(path);
> +        // AR
> +        arInvalidBsdLongName(path);
> +        arInvalidGnuLongName(path);
> +        arValidBsdLongName(path);
> +        arValidGnuLongName(path);
> +        // ARJ
> +        arjLongName(path);
> +        // CPIO
> +        cpioOldAsciiTruncatedLongNames(path);
> +        cpioNewAsciiTruncatedLongNames(path);
> +        cpioBinaryValidLongNames(path);
> +        cpioOldAsciiValidLongNames(path);
> +        cpioNewAsciiValidLongNames(path);
> +        // DUMP
> +        dumpValidLongName(path);
> +        dumpReversedLongName(path);
> +        // TAR
> +        tarPaxInvalidLongNames(path);
> +        tarGnuInvalidLongNames(path);
> +        tarPaxValidLongNames(path);
> +        tarGnuValidLongNames(path);
> +        // ZIP
> +        zipValidLongName(path);
> +    }
> +
> +    /**
> +     * Generates a truncated AR archive with a very long BSD name.
> +     * <p>
> +     * The name has a declared length of {@link #SOFT_ARRAY_MAX_SIZE}, which 
> is the largest
> +     * name a Java array can hold.
> +     * </p>
> +     * <p>
> +     * The AR archive specification allows for even longer names.
> +     * </p>
> +     * @param path The output directory
> +     */
> +    private static void arInvalidBsdLongName(final Path path) throws 
> IOException {
> +        final Path file = path.resolve("bsd-fail.ar");
> +        try (final PrintWriter out = new 
> PrintWriter(Files.newBufferedWriter(file))) {
> +            writeArHeader(out);
> +            final ArHeader header = new ArHeader(
> +                    "#1/" + SOFT_ARRAY_MAX_SIZE, TIMESTAMP, OWNER_ID, 
> GROUP_ID, FILE_MODE, SOFT_ARRAY_MAX_SIZE);
> +            header.writeTo(out);
> +        }
> +    }
> +
> +    /**
> +     * Generates a valid AR archive with a very long BSD name.
> +     * <p>
> +     * The name has a length of {@link Short#MAX_VALUE}.
> +     * </p>
> +     * @param path The output directory
> +     */
> +    private static void arValidBsdLongName(final Path path) throws 
> IOException {
> +        final Path file = path.resolve("bsd-short-max-value.ar");
> +        try (final PrintWriter out = new 
> PrintWriter(Files.newBufferedWriter(file))) {
> +            writeArHeader(out);
> +            final ArHeader header =
> +                    new ArHeader("#1/" + Short.MAX_VALUE, TIMESTAMP, 
> OWNER_ID, GROUP_ID, FILE_MODE, Short.MAX_VALUE);
> +            header.writeTo(out);
> +            out.write(StringUtils.repeat('a', Short.MAX_VALUE));
> +        }
> +    }
> +
> +    /**
> +     * Generates a truncated AR archive with a very long GNU name.
> +     * <p>
> +     * The name has a declared length of {@link #SOFT_ARRAY_MAX_SIZE}, which 
> is the largest
> +     * name a Java array can hold.
> +     * </p>
> +     * <p>
> +     * The AR archive specification allows for even longer names.
> +     * </p>
> +     * @param path The output directory
> +     */
> +    private static void arInvalidGnuLongName(final Path path) throws 
> IOException {
> +        final Path file = path.resolve("gnu-fail.ar");
> +        try (final PrintWriter out = new 
> PrintWriter(Files.newBufferedWriter(file))) {
> +            writeArHeader(out);
> +            final ArHeader header = new ArHeader("//", TIMESTAMP, OWNER_ID, 
> GROUP_ID, FILE_MODE, SOFT_ARRAY_MAX_SIZE);
> +            header.writeTo(out);
> +        }
> +    }
> +
> +    /**
> +     * Generates a valid AR archive with a very long GNU name.
> +     * <p>
> +     * The name has a length of {@link Short#MAX_VALUE}.
> +     * </p>
> +     * @param path The output directory
> +     */
> +    private static void arValidGnuLongName(final Path path) throws 
> IOException {
> +        final Path file = path.resolve("gnu-short-max-value.ar");
> +        try (final PrintWriter out = new 
> PrintWriter(Files.newBufferedWriter(file))) {
> +            writeArHeader(out);
> +            // GNU long name table with one entry and a new line
> +            final ArHeader header1 = new ArHeader("//", TIMESTAMP, OWNER_ID, 
> GROUP_ID, FILE_MODE, Short.MAX_VALUE + 1);
> +            header1.writeTo(out);
> +            out.write(StringUtils.repeat('a', Short.MAX_VALUE));
> +            // End with a new line
> +            out.write('\n');
> +            // Add a file to make the archive valid
> +            final ArHeader header = new ArHeader("/0", TIMESTAMP, OWNER_ID, 
> GROUP_ID, FILE_MODE, 0);
> +            header.writeTo(out);
> +        }
> +    }
> +
> +    /**
> +     * Generates an ARJ archive with a very long file name.
> +     * <p>
> +     * The name in ARJ must be contained in 2600 bytes of the header, and 32 
> bytes are used by
> +     * compulsory fields and null terminator, so the maximum length is 2568 
> bytes.
> +     * </p>
> +     * @param path The output directory
> +     */
> +    private static void arjLongName(final Path path) throws IOException {
> +        final Path file = path.resolve("long-name.arj");
> +        try (final OutputStream out = Files.newOutputStream(file)) {
> +            ByteBuffer buffer = 
> ByteBuffer.allocate(IOUtils.DEFAULT_BUFFER_SIZE);
> +            final String longName = StringUtils.repeat('a', ARJ_MAX_SIZE);
> +            ArjMainHeader mainHeader = new ArjMainHeader(US_ASCII, 
> "long-name.arj", "");
> +            mainHeader.writeTo(buffer);
> +            ArjLocalHeader localHeader = new ArjLocalHeader(US_ASCII, 
> longName, "");
> +            localHeader.writeTo(buffer);
> +            buffer.flip();
> +            out.write(buffer.array(), 0, buffer.limit());
> +            byte[] trailer = {(byte) 0x60, (byte) 0xEA, 0x00, 0x00}; // ARJ 
> file trailer
> +            out.write(trailer);
> +        }
> +    }
> +
> +    /**
> +     * Generates CPIO binary archives with a very long file name.
> +     * <p>
> +     * The name has a length of {@link Short#MAX_VALUE} - 1.
> +     * </p>
> +     * @param path The output directory
> +     */
> +    private static void cpioBinaryValidLongNames(final Path path) throws 
> IOException {
> +        final String longName = StringUtils.repeat('a', Short.MAX_VALUE - 1);
> +        CpioBinaryHeader header = new CpioBinaryHeader(US_ASCII, longName, 
> 0);
> +        final ByteBuffer buffer = ByteBuffer.allocate(2 * Short.MAX_VALUE);
> +        try (OutputStream out = 
> Files.newOutputStream(path.resolve("bin-big-endian.cpio"))) {
> +            header.writeTo(buffer, ByteOrder.BIG_ENDIAN);
> +            buffer.flip();
> +            out.write(buffer.array(), 0, buffer.limit());
> +        }
> +        try (OutputStream out = 
> Files.newOutputStream(path.resolve("bin-little-endian.cpio"))) {
> +            header.writeTo(buffer, ByteOrder.LITTLE_ENDIAN);
> +            buffer.flip();
> +            out.write(buffer.array(), 0, buffer.limit());
> +        }
> +    }
> +
> +    /**
> +     * Generates CPIO old ASCII archives with a very long file name.
> +     * <p>
> +     * The name has a length of {@link Short#MAX_VALUE}.
> +     * </p>
> +     * @param path The output directory
> +     */
> +    private static void cpioOldAsciiValidLongNames(final Path path) throws 
> IOException {
> +        final String longName = StringUtils.repeat('a', Short.MAX_VALUE);
> +        try (PrintWriter out = new 
> PrintWriter(Files.newBufferedWriter(path.resolve("odc.cpio")))) {
> +            CpioOldAsciiHeader header = new CpioOldAsciiHeader(US_ASCII, 
> longName, 0);
> +            header.writeTo(out);
> +        }
> +    }
> +
> +    /**
> +     * Generates a truncated CPIO old ASCII archive with a very long file 
> name.
> +     * <p>
> +     * The name has a length of {@code 0777776}, which is the largest
> +     * name that can be represented in the name size field of the header.
> +     * </p>
> +     * @param path The output directory
> +     */
> +    private static void cpioOldAsciiTruncatedLongNames(final Path path) 
> throws IOException {
> +        try (PrintWriter out = new 
> PrintWriter(Files.newBufferedWriter(path.resolve("odc-fail.cpio")))) {
> +            @SuppressWarnings("OctalInteger")
> +            CpioOldAsciiHeader header = new CpioOldAsciiHeader(US_ASCII, "", 
> 0777776, 0);
> +            header.writeTo(out);
> +        }
> +    }
> +
> +    /**
> +     * Generates CPIO new ASCII and CRC archives with a very long file name.
> +     * <p>
> +     * The name has a length of {@link Short#MAX_VALUE}.
> +     * </p>
> +     * @param path The output directory
> +     */
> +    private static void cpioNewAsciiValidLongNames(final Path path) throws 
> IOException {
> +        final String longName = StringUtils.repeat('a', Short.MAX_VALUE);
> +        CpioNewAsciiHeader header = new CpioNewAsciiHeader(US_ASCII, 
> longName, 0);
> +        try (PrintWriter out = new 
> PrintWriter(Files.newBufferedWriter(path.resolve("newc.cpio")))) {
> +            header.writeTo(out, false);
> +        }
> +        try (PrintWriter out = new 
> PrintWriter(Files.newBufferedWriter(path.resolve("crc.cpio")))) {
> +            header.writeTo(out, true);
> +        }
> +    }
> +
> +    /**
> +     * Generates a truncated CPIO new ASCII archive with a very long file 
> name.
> +     * <p>
> +     * The name has a length of {@code SOFT_MAX_ARRAY_SIZE}, which is the 
> largest
> +     * name that can be theoretically represented in Java.
> +     * </p>
> +     * <p>
> +     * The CPIO archive specification allows for even longer names.
> +     * </p>
> +     * @param path The output directory
> +     */
> +    private static void cpioNewAsciiTruncatedLongNames(final Path path) 
> throws IOException {
> +        CpioNewAsciiHeader header = new CpioNewAsciiHeader(US_ASCII, "", 
> SOFT_ARRAY_MAX_SIZE, 0);
> +        try (PrintWriter out = new 
> PrintWriter(Files.newBufferedWriter(path.resolve("newc-fail.cpio")))) {
> +            header.writeTo(out, false);
> +        }
> +        try (PrintWriter out = new 
> PrintWriter(Files.newBufferedWriter(path.resolve("crc-fail.cpio")))) {
> +            header.writeTo(out, true);
> +        }
> +    }
> +
> +    /**
> +     * Generates a TAR archive with a very long name using the PAX format.
> +     * <p>
> +     * The name has a length of {@link Short#MAX_VALUE}.
> +     * </p>
> +     * @param path The output directory
> +     */
> +    private static void tarPaxValidLongNames(final Path path) throws 
> IOException {
> +        final byte[] paxEntryContent = createPaxKeyValue("path", 
> StringUtils.repeat('a', Short.MAX_VALUE))
> +                .getBytes(US_ASCII);
> +        ByteBuffer buffer = ByteBuffer.allocate(512);
> +        PosixTarHeader paxHeader = new PosixTarHeader("PaxHeader/long", 
> paxEntryContent.length, 0, (byte) 'x', "");
> +        PosixTarHeader fileHeader = new PosixTarHeader("a", 0, 0, (byte) 
> '0', "");
> +        try (OutputStream out = 
> Files.newOutputStream(path.resolve("pax.tar"))) {
> +            paxHeader.writeTo(buffer);
> +            buffer.flip();
> +            out.write(buffer.array(), 0, buffer.limit());
> +            out.write(paxEntryContent);
> +            padTo512Bytes(paxEntryContent.length, out);
> +            buffer.clear();
> +            fileHeader.writeTo(buffer);
> +            buffer.flip();
> +            out.write(buffer.array(), 0, buffer.limit());
> +            writeUstarTrailer(out);
> +        }
> +    }
> +
> +    /**
> +     * Generates a truncated TAR archive with a very long name using the PAX 
> format.
> +     * <p>
> +     * The name has a declared length of {@link #SOFT_ARRAY_MAX_SIZE}, which 
> is the largest
> +     * name a Java array can hold.
> +     * </p>
> +     * <p>
> +     * The TAR archive specification allows for even longer names.
> +     * </p>
> +     * @param path The output directory
> +     */
> +    private static void tarPaxInvalidLongNames(final Path path) throws 
> IOException {
> +        // The size of a pax entry for a file with a name of 
> SOFT_ARRAY_MAX_SIZE
> +        final long paxEntrySize =
> +                String.valueOf(SOFT_ARRAY_MAX_SIZE).length() + " 
> path=".length() + SOFT_ARRAY_MAX_SIZE + "\n".length();
> +        ByteBuffer buffer = ByteBuffer.allocate(512);
> +        PosixTarHeader paxHeader = new PosixTarHeader("PaxHeader/long", 
> paxEntrySize, 0, (byte) 'x', "");
> +        try (WritableByteChannel out = Files.newByteChannel(
> +                path.resolve("pax-fail.tar"),
> +                StandardOpenOption.CREATE,
> +                StandardOpenOption.WRITE,
> +                StandardOpenOption.TRUNCATE_EXISTING)) {
> +            paxHeader.writeTo(buffer);
> +            buffer.flip();
> +            out.write(buffer);
> +        }
> +    }
> +
> +    /**
> +     * Generates a TAR archive with a very long name using the old GNU 
> format.
> +     * <p>
> +     * The name has a length of {@link Short#MAX_VALUE}.
> +     * </p>
> +     * @param path The output directory
> +     */
> +    private static void tarGnuValidLongNames(final Path path) throws 
> IOException {
> +        final byte[] gnuEntryContent = StringUtils.repeat('a', 
> Short.MAX_VALUE).getBytes(US_ASCII);
> +        ByteBuffer buffer = ByteBuffer.allocate(512);
> +        PosixTarHeader gnuHeader = new PosixTarHeader("././@LongLink", 
> gnuEntryContent.length, 0, (byte) 'L', "");
> +        PosixTarHeader fileHeader = new PosixTarHeader("a", 0, 0, (byte) 
> '0', "");
> +        try (OutputStream out = 
> Files.newOutputStream(path.resolve("gnu.tar"))) {
> +            gnuHeader.writeTo(buffer);
> +            buffer.flip();
> +            out.write(buffer.array(), 0, buffer.limit());
> +            out.write(gnuEntryContent);
> +            padTo512Bytes(gnuEntryContent.length, out);
> +            buffer.clear();
> +            fileHeader.writeTo(buffer);
> +            buffer.flip();
> +            out.write(buffer.array(), 0, buffer.limit());
> +            writeUstarTrailer(out);
> +        }
> +    }
> +
> +    /**
> +     * Generates a truncated TAR archive with a very long name using the old 
> GNU format.
> +     * <p>
> +     * The name has a declared length of {@link #SOFT_ARRAY_MAX_SIZE}, which 
> is the largest
> +     * name a Java array can hold.
> +     * </p>
> +     * <p>
> +     * The TAR archive specification allows for even longer names.
> +     * </p>
> +     * @param path The output directory
> +     */
> +    private static void tarGnuInvalidLongNames(final Path path) throws 
> IOException {
> +        ByteBuffer buffer = ByteBuffer.allocate(512);
> +        PosixTarHeader gnuHeader = new PosixTarHeader("././@LongLink", 
> SOFT_ARRAY_MAX_SIZE, 0, (byte) 'L', "");
> +        try (WritableByteChannel out = Files.newByteChannel(
> +                path.resolve("gnu-fail.tar"),
> +                StandardOpenOption.CREATE,
> +                StandardOpenOption.WRITE,
> +                StandardOpenOption.TRUNCATE_EXISTING)) {
> +            gnuHeader.writeTo(buffer);
> +            buffer.flip();
> +            out.write(buffer);
> +        }
> +    }
> +
> +    /**
> +     * Generates a Dump archive with a very long name.
> +     * <p>
> +     * The name has a length of {@link Short#MAX_VALUE} - 1, which is the 
> longest
> +     * name that can be represented in a DumpDirectoryEntry.
> +     * </p>
> +     * @param path The output directory
> +     */
> +    private static void dumpValidLongName(final Path path) throws 
> IOException {
> +        final String longName = StringUtils.repeat('a', 255);
> +        try (OutputStream out = 
> Files.newOutputStream(path.resolve("long-name.dump"))) {
> +            ByteBuffer buffer = ByteBuffer.allocate(64 * 1024);
> +            // Archive summary
> +            DumpSummaryHeader summary = new DumpSummaryHeader(1);
> +            summary.writeTo(buffer);
> +            writeByteBuffer(buffer, out);
> +            // Ignored records
> +            DumpLocalHeader header = new DumpLocalHeader(SEGMENT_TYPE.CLRI, 
> TYPE.FILE, 1, 0, 0);
> +            header.writeTo(buffer);
> +            writeByteBuffer(buffer, out);
> +            header = new DumpLocalHeader(SEGMENT_TYPE.BITS, TYPE.FILE, 1, 0, 
> 0);
> +            header.writeTo(buffer);
> +            writeByteBuffer(buffer, out);
> +            // 128 directory entries with a single file of very long name
> +            //
> +            // The first directory is the root directory with an empty name.
> +            // The total path length for the file will be 127 * 256 + 255 = 
> Short.MAX_VALUE
> +            final int rootInode = 2;
> +            for (int i = rootInode; i < 128 + rootInode; i++) {
> +                writeSingleFileDumpDirectory(i, longName, out);
> +            }
> +            // Empty file
> +            header = new DumpLocalHeader(SEGMENT_TYPE.INODE, TYPE.FILE, 1, 
> 128 + rootInode, 0);
> +            header.writeTo(buffer);
> +            writeByteBuffer(buffer, out);
> +            // End of dump
> +            header = new DumpLocalHeader(SEGMENT_TYPE.END, TYPE.FILE, 1, 0, 
> 0);
> +            header.writeTo(buffer);
> +            writeByteBuffer(buffer, out);
> +        }
> +    }
> +
> +    /**
> +     * Generates a Dump archive with a very long name, but with the 
> directories in reverse order.
> +     * <p>
> +     * The name has a length of {@link Short#MAX_VALUE} - 1, which is the 
> longest
> +     * name that can be represented in a DumpDirectoryEntry.
> +     * </p>
> +     * @param path The output directory
> +     */
> +    private static void dumpReversedLongName(final Path path) throws 
> IOException {
> +        final String longName = StringUtils.repeat('a', 255);
> +        try (OutputStream out = 
> Files.newOutputStream(path.resolve("long-name-reversed.dump"))) {
> +            ByteBuffer buffer = ByteBuffer.allocate(64 * 1024);
> +            // Archive summary
> +            DumpSummaryHeader summary = new DumpSummaryHeader(1);
> +            summary.writeTo(buffer);
> +            writeByteBuffer(buffer, out);
> +            // Ignored records
> +            DumpLocalHeader header = new DumpLocalHeader(SEGMENT_TYPE.CLRI, 
> TYPE.FILE, 1, 0, 0);
> +            header.writeTo(buffer);
> +            writeByteBuffer(buffer, out);
> +            header = new DumpLocalHeader(SEGMENT_TYPE.BITS, TYPE.FILE, 1, 0, 
> 0);
> +            header.writeTo(buffer);
> +            writeByteBuffer(buffer, out);
> +            // Empty file
> +            final int rootInode = 2;
> +            header = new DumpLocalHeader(SEGMENT_TYPE.INODE, TYPE.FILE, 1, 
> 128 + rootInode, 0);
> +            header.writeTo(buffer);
> +            writeByteBuffer(buffer, out);
> +            // 128 directory entries with a single file of very long name
> +            //
> +            // The first directory is the root directory with an empty name.
> +            // The total path length for the file will be 127 * 256 + 255 = 
> Short.MAX_VALUE
> +            for (int i = 127 + rootInode; i >= rootInode; i--) {
> +                writeSingleFileDumpDirectory(i, longName, out);
> +            }
> +            // End of dump
> +            header = new DumpLocalHeader(SEGMENT_TYPE.END, TYPE.FILE, 1, 0, 
> 0);
> +            header.writeTo(buffer);
> +            writeByteBuffer(buffer, out);
> +        }
> +    }
> +
> +    /**
> +     * Generates a ZIP archive with a very long name.
> +     * <p>
> +     * The name has a length of {@link Short#MAX_VALUE}, which is the longest
> +     * name that can be represented in a ZIP local file header.
> +     * </p>
> +     * @param path The output directory
> +     */
> +    private static void zipValidLongName(final Path path) throws IOException 
> {
> +        try (OutputStream out = 
> Files.newOutputStream(path.resolve("long-name.zip"))) {
> +            ByteBuffer buffer = ByteBuffer.allocate(64 * 1024);
> +            // File entry
> +            String fileName = StringUtils.repeat('a', Short.MAX_VALUE);
> +            ZipLocalHeader header = new ZipLocalHeader(US_ASCII, fileName, 
> 0, 0);
> +            header.writeTo(buffer);
> +            final int offsetCentralDirectory = buffer.position();
> +            writeByteBuffer(buffer, out);
> +            // Central directory entry
> +            ZipCentralDirectoryHeader centralHeader = new 
> ZipCentralDirectoryHeader(US_ASCII, fileName, 0);
> +            centralHeader.writeTo(buffer);
> +            final int sizeCentralDirectory = buffer.position();
> +            writeByteBuffer(buffer, out);
> +            // End of central directory
> +            ZipEndOfCentralDirectory end =
> +                    new ZipEndOfCentralDirectory(1, sizeCentralDirectory, 
> offsetCentralDirectory);
> +            end.writeTo(buffer);
> +            writeByteBuffer(buffer, out);
> +        }
> +    }
> +
> +    private static void writeSingleFileDumpDirectory(int inode, String 
> fileName, OutputStream out) throws IOException {
> +        final DumpDirectoryEntry dotEntry = new DumpDirectoryEntry(inode, 
> ".");
> +        final DumpDirectoryEntry dotDotEntry = new DumpDirectoryEntry(inode 
> > 2 ? inode - 1 : inode, "..");
> +        final DumpDirectoryEntry entry = new DumpDirectoryEntry(inode + 1, 
> fileName);
> +        int totalLength = dotEntry.recordLength() + 
> dotDotEntry.recordLength() + entry.recordLength();
> +        final DumpLocalHeader header = new 
> DumpLocalHeader(SEGMENT_TYPE.INODE, TYPE.DIRECTORY, 1, inode, totalLength);
> +        final ByteBuffer buffer = ByteBuffer.allocate(64 * 1024);
> +        header.writeTo(buffer);
> +        writeByteBuffer(buffer, out);
> +        dotEntry.writeTo(buffer);
> +        writeByteBuffer(buffer, out);
> +        dotDotEntry.writeTo(buffer);
> +        writeByteBuffer(buffer, out);
> +        entry.writeTo(buffer);
> +        writeByteBuffer(buffer, out);
> +        while (totalLength % 1024 != 0) {
> +            out.write(0);
> +            totalLength++;
> +        }
> +    }
> +
> +    private static void writeByteBuffer(final ByteBuffer buffer, final 
> OutputStream out) throws IOException {
> +        buffer.flip();
> +        out.write(buffer.array(), 0, buffer.limit());
> +        buffer.clear();
> +    }
> +
> +    private static void writeArHeader(final PrintWriter out) {
> +        out.print("!<arch>\n");
> +    }
> +
> +    private static void writeUstarTrailer(final OutputStream out) throws 
> IOException {
> +        int offset = 0;
> +        // 1024 bytes of zero
> +        while (offset < 1024) {
> +            out.write(0);
> +            offset++;
> +        }
> +    }
> +
> +    private static String createPaxKeyValue(final String key, final String 
> value) {
> +        final String entry = ' ' + key + "=" + value + "\n";
> +        // Guess length: length of length + space + entry
> +        int length = String.valueOf(entry.length()).length() + 
> entry.length();
> +        // Recompute if number of digits changes
> +        length = String.valueOf(length).length() + entry.length();
> +        // Return the value
> +        return length + entry;
> +    }
> +
> +    private static void padTo512Bytes(final int offset, final OutputStream 
> out) throws IOException {
> +        int count = offset;
> +        while (count % 512 != 0) {
> +            out.write(0);
> +            count++;
> +        }
> +    }
> +
> +    private TestArchiveGenerator() {
> +        // hide constructor
> +    }
> +}
> diff --git 
> a/src/test/java/org/apache/commons/compress/archivers/fuzzing/AbstractArjHeader.java
>  
> b/src/test/java/org/apache/commons/compress/archivers/fuzzing/AbstractArjHeader.java
> new file mode 100644
> index 000000000..40b8563ed
> --- /dev/null
> +++ 
> b/src/test/java/org/apache/commons/compress/archivers/fuzzing/AbstractArjHeader.java
> @@ -0,0 +1,91 @@
> +/*
> + * Licensed to the Apache Software Foundation (ASF) under one
> + * or more contributor license agreements.  See the NOTICE file
> + * distributed with this work for additional information
> + * regarding copyright ownership.  The ASF licenses this file
> + * to you under the Apache License, Version 2.0 (the
> + * "License"); you may not use this file except in compliance
> + * with the License.  You may obtain a copy of the License at
> + *
> + *   https://www.apache.org/licenses/LICENSE-2.0
> + *
> + * Unless required by applicable law or agreed to in writing,
> + * software distributed under the License is distributed on an
> + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
> + * KIND, either express or implied.  See the License for the
> + * specific language governing permissions and limitations
> + * under the License.
> + */
> +package org.apache.commons.compress.archivers.fuzzing;
> +
> +import java.io.IOException;
> +import java.nio.ByteBuffer;
> +import java.nio.ByteOrder;
> +import java.nio.charset.Charset;
> +import java.util.zip.CRC32;
> +
> +public abstract class AbstractArjHeader {
> +    private static final short MAGIC = (short) 0xEA60; // ARJ file magic 
> number
> +
> +    private final byte archiverVersion = 1;
> +    private final byte minVersionToExtract = 1;
> +    private final byte hostOS = 0;
> +    private final byte arjFlags = 0;
> +    private final byte method;
> +    private final byte fileType;
> +    private final byte reserved1 = 0;
> +    private final byte[] fileName;
> +    private final byte[] comment;
> +
> +    public AbstractArjHeader(Charset charset, byte method, byte fileType, 
> String fileName, String comment) {
> +        this.method = method;
> +        this.fileType = fileType;
> +        this.fileName = fileName.getBytes(charset);
> +        this.comment = comment.getBytes(charset);
> +    }
> +
> +    protected abstract int extraLength();
> +
> +    public byte getBasicHeaderLength() {
> +        return (byte) (0x1E + extraLength());
> +    }
> +
> +    public short getHeaderLength() {
> +        return (short) (0x1E + extraLength() + fileName.length + 1 + 
> comment.length + 1);
> +    }
> +
> +    public void writeTo(ByteBuffer output) throws IOException {
> +        if (output.remaining() < getHeaderLength() + 5) {
> +            throw new IOException("Not enough space in output buffer");
> +        }
> +        output.order(ByteOrder.LITTLE_ENDIAN);
> +        final int startPosition = output.position();
> +        writeBasicHeader(output);
> +        output.put(fileName);
> +        output.put((byte) 0); // null terminator for file name
> +        output.put(comment);
> +        output.put((byte) 0); // null terminator for comment
> +        // Calculate and write the checksum
> +        ByteBuffer checksumBuffer = output.duplicate();
> +        checksumBuffer.flip();
> +        checksumBuffer.position(startPosition + 4); // Skip magic and header 
> length
> +        CRC32 crc32 = new CRC32();
> +        crc32.update(checksumBuffer);
> +        output.putInt((int) crc32.getValue());
> +        // Extended header length
> +        output.putShort((short) 0);
> +    }
> +
> +    protected void writeBasicHeader(ByteBuffer output) throws IOException {
> +        output.putShort(MAGIC);
> +        output.putShort(getHeaderLength());
> +        output.put(getBasicHeaderLength());
> +        output.put(archiverVersion);
> +        output.put(minVersionToExtract);
> +        output.put(hostOS);
> +        output.put(arjFlags);
> +        output.put(method);
> +        output.put(fileType);
> +        output.put(reserved1);
> +    }
> +}
> diff --git 
> a/src/test/java/org/apache/commons/compress/archivers/fuzzing/AbstractCpioHeader.java
>  
> b/src/test/java/org/apache/commons/compress/archivers/fuzzing/AbstractCpioHeader.java
> new file mode 100644
> index 000000000..293c3ae66
> --- /dev/null
> +++ 
> b/src/test/java/org/apache/commons/compress/archivers/fuzzing/AbstractCpioHeader.java
> @@ -0,0 +1,75 @@
> +/*
> + * Licensed to the Apache Software Foundation (ASF) under one
> + * or more contributor license agreements.  See the NOTICE file
> + * distributed with this work for additional information
> + * regarding copyright ownership.  The ASF licenses this file
> + * to you under the Apache License, Version 2.0 (the
> + * "License"); you may not use this file except in compliance
> + * with the License.  You may obtain a copy of the License at
> + *
> + *   https://www.apache.org/licenses/LICENSE-2.0
> + *
> + * Unless required by applicable law or agreed to in writing,
> + * software distributed under the License is distributed on an
> + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
> + * KIND, either express or implied.  See the License for the
> + * specific language governing permissions and limitations
> + * under the License.
> + */
> +package org.apache.commons.compress.archivers.fuzzing;
> +
> +import java.io.IOException;
> +import java.io.Writer;
> +import java.nio.charset.Charset;
> +
> +public abstract class AbstractCpioHeader {
> +
> +    @SuppressWarnings("OctalInteger")
> +    final short magic = 070707; // CPIO binary magic number
> +    final short dev = 0; // Device number
> +    final short ino = 0; // Inode number
> +    @SuppressWarnings("OctalInteger")
> +    final short mode = (short) 0100644; // Regular file with 0644 permissions
> +    final short uid = 0; // User ID
> +    final short gid = 0; // Group ID
> +    final short nlink = 1; // Number of links
> +    final short rdev = 0; // Device number (if special file)
> +    final int mtime = 0; // Modification time
> +    final int fileSize; // Size of the file in bytes
> +
> +    final String fileName;
> +    final long fileNameSize;
> +    final byte[] fileNameBytes;
> +
> +    public AbstractCpioHeader(Charset charset, String fileName, int 
> fileSize) {
> +        this(charset, fileName, fileName.length(), fileSize);
> +    }
> +
> +    public AbstractCpioHeader(Charset charset, String fileName, long 
> fileNameSize, int fileSize) {
> +        this.fileName = fileName;
> +        this.fileNameBytes = fileName.getBytes(charset);
> +        this.fileNameSize = fileNameSize;
> +        this.fileSize = fileSize;
> +    }
> +
> +    long getNameSize() {
> +        return fileNameSize + 1; // +1 for null terminator
> +    }
> +
> +    /** Write file name + NUL via a Writer (PrintWriter) */
> +    void writeNameWithNull(Writer out) throws IOException {
> +        out.write(fileName);
> +        out.write('\0');
> +    }
> +
> +    /** Write padding bytes (as NULs) using a Writer. */
> +    void pad(Writer out, long count) throws IOException {
> +        for (long i = 0; i < count; i++) out.write('\0');
> +    }
> +
> +    /** 4-byte padding after newc/crc header+name or data. */
> +    static long pad4(long len) { return (4 - (len & 3)) & 3; }
> +
> +    /** even (2) padding for bin/odc after name or data. */
> +    static long pad2(long len) { return len & 1; }
> +}
> diff --git 
> a/src/test/java/org/apache/commons/compress/archivers/fuzzing/AbstractTarHeader.java
>  
> b/src/test/java/org/apache/commons/compress/archivers/fuzzing/AbstractTarHeader.java
> new file mode 100644
> index 000000000..d27b4d312
> --- /dev/null
> +++ 
> b/src/test/java/org/apache/commons/compress/archivers/fuzzing/AbstractTarHeader.java
> @@ -0,0 +1,107 @@
> +/*
> + * Licensed to the Apache Software Foundation (ASF) under one
> + * or more contributor license agreements.  See the NOTICE file
> + * distributed with this work for additional information
> + * regarding copyright ownership.  The ASF licenses this file
> + * to you under the Apache License, Version 2.0 (the
> + * "License"); you may not use this file except in compliance
> + * with the License.  You may obtain a copy of the License at
> + *
> + *   https://www.apache.org/licenses/LICENSE-2.0
> + *
> + * Unless required by applicable law or agreed to in writing,
> + * software distributed under the License is distributed on an
> + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
> + * KIND, either express or implied.  See the License for the
> + * specific language governing permissions and limitations
> + * under the License.
> + */
> +package org.apache.commons.compress.archivers.fuzzing;
> +
> +import static java.nio.charset.StandardCharsets.US_ASCII;
> +
> +import java.nio.ByteBuffer;
> +
> +public abstract class AbstractTarHeader {
> +
> +    public static final String GNU_MAGIC_AND_VERSION = "ustar  \0";
> +    public static final String USTAR_MAGIC_AND_VERSION = "ustar\00000";
> +
> +    private final String fileName;
> +
> +    @SuppressWarnings("OctalInteger")
> +    private final int fileMode = 0100644; // file mode for regular files
> +
> +    private final int ownerId = 0; // default owner ID
> +    private final int groupId = 0; // default group ID
> +    private final long fileSize; // size of the file in bytes
> +    private final long lastModifiedTime; // last modification time in 
> seconds since epoch
> +    private final byte linkIndicator;
> +    private final String linkName;
> +    private final String magicAndVersion;
> +    private final String ownerName = "owner"; // default owner name
> +    private final String groupName = "group"; // default group name
> +    private final int deviceMajorNumber = 0; // default device major number
> +    private final int deviceMinorNumber = 0; // default device minor number
> +
> +    public AbstractTarHeader(
> +            String fileName,
> +            long fileSize,
> +            long lastModifiedTime,
> +            byte linkIndicator,
> +            String linkName,
> +            String magicAndVersion) {
> +        this.fileName = fileName;
> +        this.fileSize = fileSize;
> +        this.lastModifiedTime = lastModifiedTime;
> +        this.linkIndicator = linkIndicator;
> +        this.linkName = linkName;
> +        this.magicAndVersion = magicAndVersion;
> +    }
> +
> +    public void writeTo(ByteBuffer buffer) {
> +        putString(buffer, fileName, 100); // File name
> +        putOctal(buffer, fileMode, 8); // File mode
> +        putOctal(buffer, ownerId, 8); // Owner ID
> +        putOctal(buffer, groupId, 8); // Group ID
> +        putOctal(buffer, fileSize, 12); // File size
> +        putOctal(buffer, lastModifiedTime, 12); // Last modification time
> +        for (int i = 0; i < 8; i++) buffer.put((byte) ' '); // Checksum 
> placeholder
> +        buffer.put(linkIndicator); // Link indicator
> +        putString(buffer, linkName, 100); // Link name
> +        putString(buffer, magicAndVersion, 8); // Magic and version
> +        putString(buffer, ownerName, 32); // Owner name
> +        putString(buffer, groupName, 32); // Group name
> +        putOctal(buffer, deviceMajorNumber, 8); // Device major number
> +        putOctal(buffer, deviceMinorNumber, 8); // Device minor number
> +    }
> +
> +    protected void putString(ByteBuffer buffer, String value, int length) {
> +        final byte[] bytes = value.getBytes(US_ASCII);
> +        final int len = Math.min(bytes.length, length);
> +        buffer.put(bytes, 0, len);
> +        for (int i = len; i < length; i++) buffer.put((byte) 0);
> +    }
> +
> +    protected void putOctal(ByteBuffer buffer, long value, int length) {
> +        putString(buffer, Long.toOctalString(value), length - 1);
> +        buffer.put((byte) 0); // Null terminator
> +    }
> +
> +    protected void addChecksum(ByteBuffer buffer, int startPosition) {
> +        final ByteBuffer checksumBuffer = buffer.duplicate();
> +        checksumBuffer.flip();
> +        checksumBuffer.position(startPosition);
> +        final int checksum = getChecksum(checksumBuffer);
> +        checksumBuffer.position(startPosition + 148);
> +        putOctal(checksumBuffer, checksum, 8); // Write checksum
> +    }
> +
> +    private int getChecksum(ByteBuffer buffer) {
> +        int sum = 0;
> +        for (int i = 0; i < 512; i++) {
> +            sum += Byte.toUnsignedInt(buffer.get(i));
> +        }
> +        return sum;
> +    }
> +}
> \ No newline at end of file
> diff --git 
> a/src/test/java/org/apache/commons/compress/archivers/fuzzing/AbstractWritable.java
>  
> b/src/test/java/org/apache/commons/compress/archivers/fuzzing/AbstractWritable.java
> new file mode 100644
> index 000000000..e62997981
> --- /dev/null
> +++ 
> b/src/test/java/org/apache/commons/compress/archivers/fuzzing/AbstractWritable.java
> @@ -0,0 +1,40 @@
> +package org.apache.commons.compress.archivers.fuzzing;
> +
> +import static java.nio.charset.StandardCharsets.US_ASCII;
> +
> +import java.nio.ByteBuffer;
> +import java.nio.charset.Charset;
> +
> +public abstract class AbstractWritable {
> +
> +    public abstract int getRecordSize();
> +
> +    public abstract void writeTo(ByteBuffer buffer);
> +
> +    protected void writeOctalString(ByteBuffer buffer, long value, int 
> length) {
> +        final byte[] bytes = Long.toOctalString(value).getBytes(US_ASCII);
> +        if (bytes.length > length) {
> +            throw new IllegalArgumentException(
> +                    "Value " + value + " is too large to fit in " + length + 
> " octal digits");
> +        }
> +        buffer.put(bytes);
> +        pad(buffer, bytes.length, length, (byte) ' ');
> +    }
> +
> +    protected void writeString(ByteBuffer buffer, String value, Charset 
> charset, int length) {
> +        final byte[] bytes = value.getBytes(charset);
> +        if (bytes.length > length) {
> +            throw new IllegalArgumentException(
> +                    "String \"" + value + "\" is too long to fit in " + 
> length + " bytes");
> +        }
> +        buffer.put(bytes);
> +        pad(buffer, bytes.length, length, (byte) 0);
> +    }
> +
> +    protected void pad(ByteBuffer buffer, int written, int length, byte 
> padByte) {
> +        while (written % length != 0) {
> +            buffer.put(padByte);
> +            written++;
> +        }
> +    }
> +}
> diff --git 
> a/src/test/java/org/apache/commons/compress/archivers/fuzzing/ArHeader.java 
> b/src/test/java/org/apache/commons/compress/archivers/fuzzing/ArHeader.java
> new file mode 100644
> index 000000000..b0df2f0e7
> --- /dev/null
> +++ 
> b/src/test/java/org/apache/commons/compress/archivers/fuzzing/ArHeader.java
> @@ -0,0 +1,73 @@
> +/*
> + * Licensed to the Apache Software Foundation (ASF) under one
> + * or more contributor license agreements.  See the NOTICE file
> + * distributed with this work for additional information
> + * regarding copyright ownership.  The ASF licenses this file
> + * to you under the Apache License, Version 2.0 (the
> + * "License"); you may not use this file except in compliance
> + * with the License.  You may obtain a copy of the License at
> + *
> + *   https://www.apache.org/licenses/LICENSE-2.0
> + *
> + * Unless required by applicable law or agreed to in writing,
> + * software distributed under the License is distributed on an
> + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
> + * KIND, either express or implied.  See the License for the
> + * specific language governing permissions and limitations
> + * under the License.
> + */
> +package org.apache.commons.compress.archivers.fuzzing;
> +
> +import static java.nio.charset.StandardCharsets.US_ASCII;
> +
> +import com.code_intelligence.jazzer.mutation.annotation.InRange;
> +import com.code_intelligence.jazzer.mutation.annotation.WithUtf8Length;
> +import java.io.PrintWriter;
> +import java.nio.ByteBuffer;
> +
> +public class ArHeader extends AbstractWritable {
> +
> +    private final String name;
> +    private final long lastModified;
> +    private final int userId;
> +    private final int groupId;
> +    private final int mode;
> +    private final long size;
> +
> +    @SuppressWarnings("OctalInteger")
> +    public ArHeader(
> +            final @WithUtf8Length(min = 1, max = 16) String name,
> +            final @InRange(min = 0, max = 0777_777_777_777L) long 
> lastModified,
> +            final @InRange(min = 0, max = 0777_777L) int userId,
> +            final @InRange(min = 0, max = 0777_777L) int groupId,
> +            final @InRange(min = 0, max = 077_777_777L) int mode,
> +            final @InRange(min = 0, max = 07_777_777_777L) long size) {
> +        this.name = name;
> +        this.lastModified = lastModified;
> +        this.userId = userId;
> +        this.groupId = groupId;
> +        this.mode = mode;
> +        this.size = size;
> +    }
> +
> +    @Override
> +    public int getRecordSize() {
> +        return 60;
> +    }
> +
> +    public void writeTo(final PrintWriter writer) {
> +        writer.printf("%-16s%-12d%-6d%-6d%-8o%-10d`\n", name, lastModified, 
> userId, groupId, mode, size);
> +    }
> +
> +    @Override
> +    public void writeTo(final ByteBuffer buffer) {
> +        writeString(buffer, name, US_ASCII, 16);
> +        writeOctalString(buffer, lastModified, 12);
> +        writeOctalString(buffer, userId, 6);
> +        writeOctalString(buffer, groupId, 6);
> +        writeOctalString(buffer, mode, 8);
> +        writeOctalString(buffer, size, 10);
> +        buffer.put((byte) '`');
> +        buffer.put((byte) '\n');
> +    }
> +}
> diff --git 
> a/src/test/java/org/apache/commons/compress/archivers/fuzzing/ArjLocalHeader.java
>  
> b/src/test/java/org/apache/commons/compress/archivers/fuzzing/ArjLocalHeader.java
> new file mode 100644
> index 000000000..44b89d193
> --- /dev/null
> +++ 
> b/src/test/java/org/apache/commons/compress/archivers/fuzzing/ArjLocalHeader.java
> @@ -0,0 +1,49 @@
> +/*
> + * Licensed to the Apache Software Foundation (ASF) under one
> + * or more contributor license agreements.  See the NOTICE file
> + * distributed with this work for additional information
> + * regarding copyright ownership.  The ASF licenses this file
> + * to you under the Apache License, Version 2.0 (the
> + * "License"); you may not use this file except in compliance
> + * with the License.  You may obtain a copy of the License at
> + *
> + *   https://www.apache.org/licenses/LICENSE-2.0
> + *
> + * Unless required by applicable law or agreed to in writing,
> + * software distributed under the License is distributed on an
> + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
> + * KIND, either express or implied.  See the License for the
> + * specific language governing permissions and limitations
> + * under the License.
> + */
> +package org.apache.commons.compress.archivers.fuzzing;
> +
> +import java.io.DataOutput;
> +import java.io.IOException;
> +import java.nio.ByteBuffer;
> +import java.nio.charset.Charset;
> +
> +public class ArjLocalHeader extends AbstractArjHeader {
> +
> +    public ArjLocalHeader(Charset charset, String fileName, String comment) {
> +        super(charset, (byte) 0 /* stored */, (byte) 1 /* text */, fileName, 
> comment);
> +    }
> +
> +    @Override
> +    protected void writeBasicHeader(ByteBuffer output) throws IOException {
> +        super.writeBasicHeader(output);
> +        output.putInt(0); // modification time
> +        output.putInt(0); // compressed file size
> +        output.putInt(0); // uncompressed file size
> +        output.putInt(0); // file CRC32
> +        output.putShort((short) 0); // zero
> +        output.putShort((short) 0); // file access mode
> +        output.put((byte) 0); // zero
> +        output.put((byte) 0); // zero
> +    }
> +
> +    @Override
> +    protected int extraLength() {
> +        return 0;
> +    }
> +}
> diff --git 
> a/src/test/java/org/apache/commons/compress/archivers/fuzzing/ArjMainHeader.java
>  
> b/src/test/java/org/apache/commons/compress/archivers/fuzzing/ArjMainHeader.java
> new file mode 100644
> index 000000000..57beea5c1
> --- /dev/null
> +++ 
> b/src/test/java/org/apache/commons/compress/archivers/fuzzing/ArjMainHeader.java
> @@ -0,0 +1,49 @@
> +/*
> + * Licensed to the Apache Software Foundation (ASF) under one
> + * or more contributor license agreements.  See the NOTICE file
> + * distributed with this work for additional information
> + * regarding copyright ownership.  The ASF licenses this file
> + * to you under the Apache License, Version 2.0 (the
> + * "License"); you may not use this file except in compliance
> + * with the License.  You may obtain a copy of the License at
> + *
> + *   https://www.apache.org/licenses/LICENSE-2.0
> + *
> + * Unless required by applicable law or agreed to in writing,
> + * software distributed under the License is distributed on an
> + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
> + * KIND, either express or implied.  See the License for the
> + * specific language governing permissions and limitations
> + * under the License.
> + */
> +package org.apache.commons.compress.archivers.fuzzing;
> +
> +import java.io.DataOutput;
> +import java.io.IOException;
> +import java.nio.ByteBuffer;
> +import java.nio.charset.Charset;
> +
> +public class ArjMainHeader extends AbstractArjHeader {
> +
> +    public ArjMainHeader(Charset charset, String fileName, String comment) {
> +        super(charset, (byte) 2, (byte) 2, fileName, comment);
> +    }
> +
> +    @Override
> +    protected int extraLength() {
> +        return 0;
> +    }
> +
> +    @Override
> +    protected void writeBasicHeader(ByteBuffer output) throws IOException {
> +        super.writeBasicHeader(output);
> +        output.putInt(0); // creation time
> +        output.putInt(0); // modification time
> +        output.putInt(0); // zero
> +        output.putInt(0); // zero
> +        output.putShort((short) 0); // zero
> +        output.putShort((short) 0); // zero
> +        output.put((byte) 0); // zero
> +        output.put((byte) 0); // zero
> +    }
> +}
> diff --git 
> a/src/test/java/org/apache/commons/compress/archivers/fuzzing/CpioBinaryHeader.java
>  
> b/src/test/java/org/apache/commons/compress/archivers/fuzzing/CpioBinaryHeader.java
> new file mode 100644
> index 000000000..6ecdb9a23
> --- /dev/null
> +++ 
> b/src/test/java/org/apache/commons/compress/archivers/fuzzing/CpioBinaryHeader.java
> @@ -0,0 +1,57 @@
> +/*
> + * Licensed to the Apache Software Foundation (ASF) under one
> + * or more contributor license agreements.  See the NOTICE file
> + * distributed with this work for additional information
> + * regarding copyright ownership.  The ASF licenses this file
> + * to you under the Apache License, Version 2.0 (the
> + * "License"); you may not use this file except in compliance
> + * with the License.  You may obtain a copy of the License at
> + *
> + *   https://www.apache.org/licenses/LICENSE-2.0
> + *
> + * Unless required by applicable law or agreed to in writing,
> + * software distributed under the License is distributed on an
> + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
> + * KIND, either express or implied.  See the License for the
> + * specific language governing permissions and limitations
> + * under the License.
> + */
> +package org.apache.commons.compress.archivers.fuzzing;
> +
> +import java.nio.ByteBuffer;
> +import java.nio.ByteOrder;
> +import java.nio.charset.Charset;
> +
> +public class CpioBinaryHeader extends AbstractCpioHeader {
> +    public CpioBinaryHeader(Charset charset, String fileName, int fileSize) {
> +        super(charset, fileName, fileSize);
> +    }
> +
> +    public void writeTo(ByteBuffer output, ByteOrder byteOrder) {
> +        output.order(byteOrder);
> +
> +        output.putShort(magic);
> +        output.putShort(dev);
> +        output.putShort(ino);
> +        output.putShort(mode);
> +        output.putShort(uid);
> +        output.putShort(gid);
> +        output.putShort(nlink);
> +        output.putShort(rdev);
> +        output.putShort((short) (mtime >>> 16));
> +        output.putShort((short) (mtime & 0xFFFF));
> +        output.putShort((short) getNameSize());
> +        output.putShort((short) (fileSize >>> 16));
> +        output.putShort((short) (fileSize & 0xFFFF));
> +
> +        // name + NUL
> +        output.put(fileNameBytes);
> +        output.put((byte) 0);
> +
> +        // even-byte padding after header+name (binary header size is 26 
> bytes)
> +        long afterHeaderAndName = 26 + getNameSize();
> +        if ((afterHeaderAndName & 1) != 0) {
> +            output.put((byte) 0);
> +        }
> +    }
> +}
> diff --git 
> a/src/test/java/org/apache/commons/compress/archivers/fuzzing/CpioNewAsciiHeader.java
>  
> b/src/test/java/org/apache/commons/compress/archivers/fuzzing/CpioNewAsciiHeader.java
> new file mode 100644
> index 000000000..e1d172323
> --- /dev/null
> +++ 
> b/src/test/java/org/apache/commons/compress/archivers/fuzzing/CpioNewAsciiHeader.java
> @@ -0,0 +1,67 @@
> +/*
> + * Licensed to the Apache Software Foundation (ASF) under one
> + * or more contributor license agreements.  See the NOTICE file
> + * distributed with this work for additional information
> + * regarding copyright ownership.  The ASF licenses this file
> + * to you under the Apache License, Version 2.0 (the
> + * "License"); you may not use this file except in compliance
> + * with the License.  You may obtain a copy of the License at
> + *
> + *   https://www.apache.org/licenses/LICENSE-2.0
> + *
> + * Unless required by applicable law or agreed to in writing,
> + * software distributed under the License is distributed on an
> + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
> + * KIND, either express or implied.  See the License for the
> + * specific language governing permissions and limitations
> + * under the License.
> + */
> +package org.apache.commons.compress.archivers.fuzzing;
> +
> +import java.io.IOException;
> +import java.io.PrintWriter;
> +import java.nio.charset.Charset;
> +
> +public class CpioNewAsciiHeader extends AbstractCpioHeader {
> +
> +    private final int devMajor = 0; // Major device number
> +    private final int devMinor = 0; // Minor device number
> +    private final int rdevMajor = 0; // Major device number (if special file)
> +    private final int rdevMinor = 0; // Minor device number (if special file)
> +    private final int check = 0; // Checksum (empty file has zero checksum)
> +
> +    public CpioNewAsciiHeader(Charset charset, String fileName, int fileSize) {
> +        super(charset, fileName, fileSize);
> +    }
> +
> +    public CpioNewAsciiHeader(Charset charset, String fileName, long fileNameSize, int fileSize) {
> +        super(charset, fileName, fileNameSize, fileSize);
> +    }
> +
> +    public void writeTo(PrintWriter output, boolean includeCrc) throws IOException {
> +        output.append(includeCrc ? "070702" : "070701");
> +        output.printf(
> +                "%08x%08x%08x%08x%08x%08x%08x%08x%08x%08x%08x%08x%08x",
> +                (int) ino & 0xffff,
> +                (int) mode & 0xffff,
> +                (int) uid & 0xffff,
> +                (int) gid & 0xffff,
> +                (int) nlink & 0xffff,
> +                mtime,
> +                fileSize,
> +                devMajor,
> +                devMinor,
> +                rdevMajor,
> +                rdevMinor,
> +                getNameSize(),
> +                check);
> +
> +        // Name + NUL
> +        writeNameWithNull(output);
> +
> +        // Pad so the next thing (data) starts on a 4-byte boundary.
> +        // newc header is 110 bytes long.
> +        final long afterHeaderAndName = 110 + getNameSize();
> +        pad(output, pad4(afterHeaderAndName));
> +    }
> +}
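
The pad4(110 + getNameSize()) arithmetic is easy to get off by one, so a
concrete case in the tests would be cheap insurance. Assuming getNameSize()
counts the trailing NUL (as the newc namesize field does) and pad4(long)
returns the number of padding bytes needed to reach the next 4-byte boundary:

    // "test.txt": name size 9, header + name end at 110 + 9 = 119,
    // so one NUL of padding is needed to reach the data at offset 120.
    assertEquals(1, pad4(110 + 9));
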
> diff --git a/src/test/java/org/apache/commons/compress/archivers/fuzzing/CpioOldAsciiHeader.java b/src/test/java/org/apache/commons/compress/archivers/fuzzing/CpioOldAsciiHeader.java
> new file mode 100644
> index 000000000..468f82c16
> --- /dev/null
> +++ b/src/test/java/org/apache/commons/compress/archivers/fuzzing/CpioOldAsciiHeader.java
> @@ -0,0 +1,56 @@
> +/*
> + * Licensed to the Apache Software Foundation (ASF) under one
> + * or more contributor license agreements.  See the NOTICE file
> + * distributed with this work for additional information
> + * regarding copyright ownership.  The ASF licenses this file
> + * to you under the Apache License, Version 2.0 (the
> + * "License"); you may not use this file except in compliance
> + * with the License.  You may obtain a copy of the License at
> + *
> + *   https://www.apache.org/licenses/LICENSE-2.0
> + *
> + * Unless required by applicable law or agreed to in writing,
> + * software distributed under the License is distributed on an
> + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
> + * KIND, either express or implied.  See the License for the
> + * specific language governing permissions and limitations
> + * under the License.
> + */
> +package org.apache.commons.compress.archivers.fuzzing;
> +
> +import java.io.IOException;
> +import java.io.PrintWriter;
> +import java.nio.charset.Charset;
> +
> +public class CpioOldAsciiHeader extends AbstractCpioHeader {
> +
> +    public CpioOldAsciiHeader(Charset charset, String fileName, int fileSize) {
> +        super(charset, fileName, fileSize);
> +    }
> +
> +    public CpioOldAsciiHeader(Charset charset, String fileName, int fileNameSize, int fileSize) {
> +        super(charset, fileName, fileNameSize, fileSize);
> +    }
> +
> +    public void writeTo(PrintWriter output) throws IOException {
> +        output.printf(
> +                "070707%06o%06o%06o%06o%06o%06o%06o%011o%06o%011o",
> +                (int) dev & 0xffff,
> +                (int) ino & 0xffff,
> +                (int) mode & 0xffff,
> +                (int) uid & 0xffff,
> +                (int) gid & 0xffff,
> +                (int) nlink & 0xffff,
> +                (int) rdev & 0xffff,
> +                mtime,
> +                getNameSize(), // **6** octal digits
> +                fileSize // **11** octal digits
> +                );
> +
> +        // Name + NUL, then even-byte padding
> +        writeNameWithNull(output);
> +        // header length for odc is 76 bytes before the name
> +        final long afterHeaderAndName = 76 + getNameSize();
> +        pad(output, pad2(afterHeaderAndName));
> +    }
> +}
> diff --git a/src/test/java/org/apache/commons/compress/archivers/fuzzing/DumpDirectoryEntry.java b/src/test/java/org/apache/commons/compress/archivers/fuzzing/DumpDirectoryEntry.java
> new file mode 100644
> index 000000000..6454abd76
> --- /dev/null
> +++ b/src/test/java/org/apache/commons/compress/archivers/fuzzing/DumpDirectoryEntry.java
> @@ -0,0 +1,90 @@
> +/*
> + * Licensed to the Apache Software Foundation (ASF) under one
> + * or more contributor license agreements.  See the NOTICE file
> + * distributed with this work for additional information
> + * regarding copyright ownership.  The ASF licenses this file
> + * to you under the Apache License, Version 2.0 (the
> + * "License"); you may not use this file except in compliance
> + * with the License.  You may obtain a copy of the License at
> + *
> + *   https://www.apache.org/licenses/LICENSE-2.0
> + *
> + * Unless required by applicable law or agreed to in writing,
> + * software distributed under the License is distributed on an
> + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
> + * KIND, either express or implied.  See the License for the
> + * specific language governing permissions and limitations
> + * under the License.
> + */
> +package org.apache.commons.compress.archivers.fuzzing;
> +
> +import java.nio.ByteBuffer;
> +import java.nio.ByteOrder;
> +import java.nio.charset.StandardCharsets;
> +import java.util.Objects;
> +
> +/**
> + * One on-tape directory record in the BSD "direct" form written here:
> + *
> + *   u32 d_ino;
> + *   u16 d_reclen;  // total record length, incl. header, name+NUL, and padding
> + *   u8 d_type; u8 d_namlen;  // entry type, then name length (without the trailing NUL)
> + *   char d_name[d_namlen + 1]; // NUL-terminated
> + *   // pad with NULs to the next 4-byte boundary
> + *
> + * All integer fields are written little-endian, matching the writeTo implementation.
> + */
> +public final class DumpDirectoryEntry {
> +
> +    private static final int HEADER_SIZE = 4 /*ino*/ + 2 /*reclen*/ + 2 /*entry type and name length*/;
> +
> +    private final int inode;          // d_ino (unsigned 32)
> +    private final byte entryType = 0; // entry type
> +    private final byte nameLength;
> +    private final String name;        // ASCII preferred for portability
> +
> +    public DumpDirectoryEntry(final int inode, final String name) {
> +        this.name = Objects.requireNonNull(name, "name");
> +        if (name.length() > 255) {
> +            throw new IllegalArgumentException("Name too long: " + name.length());
> +        }
> +        this.inode = inode;
> +        this.nameLength = (byte) name.length();
> +    }
> +
> +    /** Length of this record when written, including header, NUL, and 4-byte padding. */
> +    public int recordLength() {
> +        final int namelen = nameBytes().length;
> +        final int withNul = namelen + 1;
> +        final int padded = (withNul + 3) & ~3; // round up to multiple of 4
> +        return HEADER_SIZE + padded;
> +    }
> +
> +    /** Writes this odirect record at the buffer's current position. */
> +    public void writeTo(final ByteBuffer out) {
> +        final byte[] nameBytes = nameBytes();
> +        final int length = recordLength();
> +
> +        if (out.remaining() < length) {
> +            throw new IllegalArgumentException("Not enough space: need " + length + " bytes");
> +        }
> +
> +        out.order(ByteOrder.LITTLE_ENDIAN);
> +        out.putInt(inode);                    // d_ino
> +        out.putShort((short) length);         // record length
> +        out.put(entryType);
> +        out.put(nameLength);                  // name length
> +
> +        out.put(nameBytes);
> +        out.put((byte) 0);                    // terminating NUL
> +
> +        // pad to 4-byte boundary
> +        int pad = (4 - ((nameBytes.length + 1) & 3)) & 3;
> +        for (int i = 0; i < pad; i++) out.put((byte) 0);
> +    }
> +
> +    private byte[] nameBytes() {
> +        // For test fixtures, ASCII keeps byte-for-char mapping simple.
> +        return name.getBytes(StandardCharsets.US_ASCII);
> +    }
> +}
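
Since d_reclen is what a parser uses to walk the directory block, a cheap
invariant worth asserting is that recordLength() matches what writeTo()
actually emits. Untested sketch:

    final DumpDirectoryEntry dot = new DumpDirectoryEntry(2, ".");
    final ByteBuffer buf = ByteBuffer.allocate(dot.recordLength());
    dot.writeTo(buf);
    // "." -> 8 header bytes + name + NUL padded to 4, i.e. 12 bytes in total
    assertEquals(dot.recordLength(), buf.position());
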
> diff --git a/src/test/java/org/apache/commons/compress/archivers/fuzzing/DumpLocalHeader.java b/src/test/java/org/apache/commons/compress/archivers/fuzzing/DumpLocalHeader.java
> new file mode 100644
> index 000000000..2fe5a772c
> --- /dev/null
> +++ b/src/test/java/org/apache/commons/compress/archivers/fuzzing/DumpLocalHeader.java
> @@ -0,0 +1,88 @@
> +/*
> + * Licensed to the Apache Software Foundation (ASF) under one
> + * or more contributor license agreements.  See the NOTICE file
> + * distributed with this work for additional information
> + * regarding copyright ownership.  The ASF licenses this file
> + * to you under the Apache License, Version 2.0 (the
> + * "License"); you may not use this file except in compliance
> + * with the License.  You may obtain a copy of the License at
> + *
> + *   https://www.apache.org/licenses/LICENSE-2.0
> + *
> + * Unless required by applicable law or agreed to in writing,
> + * software distributed under the License is distributed on an
> + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
> + * KIND, either express or implied.  See the License for the
> + * specific language governing permissions and limitations
> + * under the License.
> + */
> +package org.apache.commons.compress.archivers.fuzzing;
> +
> +import java.nio.ByteBuffer;
> +import java.nio.ByteOrder;
> +import org.apache.commons.compress.archivers.dump.DumpArchiveConstants;
> +import org.apache.commons.compress.archivers.dump.DumpArchiveConstants.SEGMENT_TYPE;
> +import org.apache.commons.compress.archivers.dump.DumpArchiveEntry;
> +import org.apache.commons.compress.archivers.dump.DumpArchiveEntry.TYPE;
> +
> +public class DumpLocalHeader {
> +    static final int RECORD_SIZE = 1024;
> +
> +    private final SEGMENT_TYPE segmentType;
> +    private final int dumpDate = 0; // date of this dump (seconds since epoch)
> +    private final int previousDumpDate = 0; // date of previous dump (seconds since epoch)
> +    private final int volume; // volume number
> +    private final int tapeAddress = 0; // tape address of this record
> +    private final int inode; // inode number
> +    private final TYPE entryType; // type of file
> +
> +    @SuppressWarnings("OctalInteger")
> +    private final short mode = (short) 00644; // mode
> +
> +    private final short nlink = 1; // number of links to file
> +    private final long fileSize; // size of file in bytes
> +    private final int count; // number of blocks (1024-byte) in this record
> +
> +    public DumpLocalHeader(SEGMENT_TYPE segmentType, TYPE entryType, int volume, int inode, long fileSize) {
> +        this.segmentType = segmentType;
> +        this.entryType = entryType;
> +        this.volume = volume;
> +        this.inode = inode;
> +        this.fileSize = fileSize;
> +        this.count = (int) ((fileSize + 1023) / 1024); // number of 1024-byte blocks
> +    }
> +
> +    public void writeTo(final ByteBuffer out) {
> +        final int need = RECORD_SIZE;
> +        if (out.remaining() < need) {
> +            throw new IllegalArgumentException("Not enough space: need " + need + " bytes");
> +        }
> +        final int offset = out.position();
> +        out.order(ByteOrder.LITTLE_ENDIAN);
> +        out.putInt(segmentType.code); // segment type
> +        out.putInt(dumpDate); // date of this dump
> +        out.putInt(previousDumpDate); // date of previous dump
> +        out.putInt(volume); // volume number
> +        out.putInt(tapeAddress); // tape address of this record
> +        out.putInt(inode); // inode number
> +        out.putInt(DumpArchiveConstants.NFS_MAGIC); // magic
> +        out.putInt(0); // clear checksum for now
> +        out.putShort((short) (entryType.code << 12 | mode & 0xfff)); // mode and type of file
> +        out.putShort(nlink); // number of links to file
> +        out.putInt(0); // inumber
> +        out.putLong(fileSize); // size of file in bytes
> +        // Fill the rest of the record with zeros
> +        for (int i = 48; i < RECORD_SIZE; i++) {
> +            out.put((byte) 0);
> +        }
> +        out.putInt(offset + 160, count); // number of 1024-byte blocks in this record
> +        // Compute and write checksum
> +        ByteBuffer checksumBuffer = out.duplicate();
> +        checksumBuffer.order(ByteOrder.LITTLE_ENDIAN).position(offset).limit(offset + RECORD_SIZE);
> +        int checksum = 0;
> +        for (int i = 0; i < RECORD_SIZE / 4; i++) {
> +            checksum += checksumBuffer.getInt();
> +        }
> +        out.putInt(offset + 28, DumpArchiveConstants.CHECKSUM - checksum);
> +    }
> +}
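
The checksum trick (store CHECKSUM minus the running sum at offset 28 so that
the record's 32-bit words sum back to DumpArchiveConstants.CHECKSUM) is also
worth pinning with a test, since that is the invariant a reader can verify.
Untested sketch, assuming the usual SEGMENT_TYPE.INODE and TYPE.FILE constants:

    final ByteBuffer buf = ByteBuffer.allocate(1024); // one RECORD_SIZE block
    new DumpLocalHeader(SEGMENT_TYPE.INODE, TYPE.FILE, 1, 2, 0).writeTo(buf);
    buf.flip();
    int sum = 0;
    while (buf.hasRemaining()) {
        sum += buf.getInt(); // little-endian; writeTo() sets the buffer order
    }
    assertEquals(DumpArchiveConstants.CHECKSUM, sum);
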
> diff --git a/src/test/java/org/apache/commons/compress/archivers/fuzzing/DumpSummaryHeader.java b/src/test/java/org/apache/commons/compress/archivers/fuzzing/DumpSummaryHeader.java
> new file mode 100644
> index 000000000..047c6919f
> --- /dev/null
> +++ b/src/test/java/org/apache/commons/compress/archivers/fuzzing/DumpSummaryHeader.java
> @@ -0,0 +1,127 @@
> +/*
> + * Licensed to the Apache Software Foundation (ASF) under one
> + * or more contributor license agreements.  See the NOTICE file
> + * distributed with this work for additional information
> + * regarding copyright ownership.  The ASF licenses this file
> + * to you under the Apache License, Version 2.0 (the
> + * "License"); you may not use this file except in compliance
> + * with the License.  You may obtain a copy of the License at
> + *
> + *   https://www.apache.org/licenses/LICENSE-2.0
> + *
> + * Unless required by applicable law or agreed to in writing,
> + * software distributed under the License is distributed on an
> + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
> + * KIND, either express or implied.  See the License for the
> + * specific language governing permissions and limitations
> + * under the License.
> + */
> +package org.apache.commons.compress.archivers.fuzzing;
> +
> +import static org.apache.commons.compress.archivers.fuzzing.DumpLocalHeader.RECORD_SIZE;
> +
> +import java.nio.ByteBuffer;
> +import java.nio.ByteOrder;
> +import java.nio.charset.StandardCharsets;
> +import java.util.Objects;
> +import org.apache.commons.compress.archivers.dump.DumpArchiveConstants;
> +
> +/**
> + * Minimal "summary/label" block to make a synthetic dump look plausible.
> + * Fixed-size, NUL-padded ASCII fields; little-endian integers, matching the writeTo implementation.
> + */
> +public final class DumpSummaryHeader {
> +
> +    private final int dumpDate; // seconds since epoch
> +    private final int previousDumpDate; // seconds since epoch
> +    private final int volume; // tape/volume number
> +    private final String label; // 16 bytes, NUL-padded
> +    private final int level; // dump level
> +    private final String filesys; // NAMELEN bytes, NUL-padded
> +    private final String devname; // NAMELEN bytes, NUL-padded
> +    private final String hostname; // NAMELEN bytes, NUL-padded
> +    private final int flags; // misc flags
> +    private final int firstRecord; // first record index
> +    private final int recordCount; // total records
> +
> +    public DumpSummaryHeader(int recordCount) {
> +        this(0, 0, 0, "label", 0, "filesys", "devname", "hostname", 0, 0, recordCount);
> +    }
> +
> +    public DumpSummaryHeader(
> +            int dumpDate,
> +            int previousDumpDate,
> +            int volume,
> +            String label,
> +            int level,
> +            String filesys,
> +            String devname,
> +            String hostname,
> +            int flags,
> +            int firstRecord,
> +            int recordCount) {
> +        this.dumpDate = dumpDate;
> +        this.previousDumpDate = previousDumpDate;
> +        this.volume = volume;
> +        this.label = Objects.requireNonNull(label, "label");
> +        this.level = level;
> +        this.filesys = Objects.requireNonNull(filesys, "filesys");
> +        this.devname = Objects.requireNonNull(devname, "devname");
> +        this.hostname = Objects.requireNonNull(hostname, "hostname");
> +        this.flags = flags;
> +        this.firstRecord = firstRecord;
> +        this.recordCount = recordCount;
> +    }
> +
> +    /** Size of this header when written: a full 1024-byte dump record. */
> +    public int length() {
> +        return RECORD_SIZE;
> +    }
> +
> +    public void writeTo(final ByteBuffer out) {
> +        final int need = length();
> +        if (out.remaining() < need) {
> +            throw new IllegalArgumentException("Not enough space: need " + need + " bytes");
> +        }
> +        int start = out.position();
> +        out.order(ByteOrder.LITTLE_ENDIAN);
> +        out.putInt(0); // unused
> +        out.putInt(dumpDate);
> +        out.putInt(previousDumpDate);
> +        out.putInt(volume);
> +        // Zero up to offset 676
> +        while (out.position() < start + 676) {
> +            out.put((byte) 0);
> +        }
> +        // Magic
> +        out.putInt(start + 24, DumpArchiveConstants.NFS_MAGIC);
> +        // Now the fixed-size fields
> +        putFixed(out, label, 16);
> +        out.putInt(level);
> +        putFixed(out, filesys, DumpArchiveConstants.NAMELEN);
> +        putFixed(out, devname, DumpArchiveConstants.NAMELEN);
> +        putFixed(out, hostname, DumpArchiveConstants.NAMELEN);
> +        out.putInt(flags);
> +        out.putInt(firstRecord);
> +        out.putInt(recordCount);
> +        // Zero up to 1024 bytes
> +        while (out.position() < start + 1024) {
> +            out.put((byte) 0);
> +        }
> +        // Compute and write checksum
> +        ByteBuffer checksumBuffer = out.duplicate();
> +        checksumBuffer.order(ByteOrder.LITTLE_ENDIAN).position(start).limit(start + RECORD_SIZE);
> +        int checksum = 0;
> +        for (int i = 0; i < RECORD_SIZE / 4; i++) {
> +            checksum += checksumBuffer.getInt();
> +        }
> +        out.putInt(start + 28, DumpArchiveConstants.CHECKSUM - checksum);
> +    }
> +
> +    private static void putFixed(final ByteBuffer out, final String s, final int len) {
> +        final byte[] b = s.getBytes(StandardCharsets.US_ASCII);
> +        final int n = Math.min(b.length, len);
> +        out.put(b, 0, n);
> +        for (int i = n; i < len; i++) out.put((byte) 0);
> +    }
> +}
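
A similar guard for the summary block: writeTo() zero-fills to a full
1024-byte record before checksumming, so the buffer handed in needs at least
that much room, and the position should land exactly on the record boundary.
Untested sketch:

    final ByteBuffer buf = ByteBuffer.allocate(1024);
    new DumpSummaryHeader(1).writeTo(buf);
    assertEquals(1024, buf.position()); // the whole record is consumed, not just the named fields
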
> diff --git a/src/test/java/org/apache/commons/compress/archivers/fuzzing/GnuTarHeader.java b/src/test/java/org/apache/commons/compress/archivers/fuzzing/GnuTarHeader.java
> new file mode 100644
> index 000000000..5db3e25d8
> --- /dev/null
> +++ b/src/test/java/org/apache/commons/compress/archivers/fuzzing/GnuTarHeader.java
> @@ -0,0 +1,37 @@
> +/*
> + * Licensed to the Apache Software Foundation (ASF) under one
> + * or more contributor license agreements.  See the NOTICE file
> + * distributed with this work for additional information
> + * regarding copyright ownership.  The ASF licenses this file
> + * to you under the Apache License, Version 2.0 (the
> + * "License"); you may not use this file except in compliance
> + * with the License.  You may obtain a copy of the License at
> + *
> + *   https://www.apache.org/licenses/LICENSE-2.0
> + *
> + * Unless required by applicable law or agreed to in writing,
> + * software distributed under the License is distributed on an
> + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
> + * KIND, either express or implied.  See the License for the
> + * specific language governing permissions and limitations
> + * under the License.
> + */
> +package org.apache.commons.compress.archivers.fuzzing;
> +
> +import java.nio.ByteBuffer;
> +
> +public class GnuTarHeader extends AbstractTarHeader {
> +    public GnuTarHeader(String fileName, long fileSize, long lastModifiedTime, byte linkIndicator, String linkName) {
> +        super(fileName, fileSize, lastModifiedTime, linkIndicator, linkName, GNU_MAGIC_AND_VERSION);
> +    }
> +
> +    @Override
> +    public void writeTo(ByteBuffer buffer) {
> +        final int startPosition = buffer.position();
> +        super.writeTo(buffer);
> +        while (buffer.position() - startPosition < 512) {
> +            buffer.put((byte) 0); // Pad to 512 bytes
> +        }
> +        addChecksum(buffer, startPosition);
> +    }
> +}
> diff --git a/src/test/java/org/apache/commons/compress/archivers/fuzzing/PosixTarHeader.java b/src/test/java/org/apache/commons/compress/archivers/fuzzing/PosixTarHeader.java
> new file mode 100644
> index 000000000..07d28d121
> --- /dev/null
> +++ b/src/test/java/org/apache/commons/compress/archivers/fuzzing/PosixTarHeader.java
> @@ -0,0 +1,38 @@
> +/*
> + * Licensed to the Apache Software Foundation (ASF) under one
> + * or more contributor license agreements.  See the NOTICE file
> + * distributed with this work for additional information
> + * regarding copyright ownership.  The ASF licenses this file
> + * to you under the Apache License, Version 2.0 (the
> + * "License"); you may not use this file except in compliance
> + * with the License.  You may obtain a copy of the License at
> + *
> + *   https://www.apache.org/licenses/LICENSE-2.0
> + *
> + * Unless required by applicable law or agreed to in writing,
> + * software distributed under the License is distributed on an
> + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
> + * KIND, either express or implied.  See the License for the
> + * specific language governing permissions and limitations
> + * under the License.
> + */
> +package org.apache.commons.compress.archivers.fuzzing;
> +
> +import java.nio.ByteBuffer;
> +
> +public class PosixTarHeader extends AbstractTarHeader {
> +    public PosixTarHeader(String fileName, long fileSize, long lastModifiedTime, byte linkIndicator, String linkName) {
> +        super(fileName, fileSize, lastModifiedTime, linkIndicator, linkName, USTAR_MAGIC_AND_VERSION);
> +    }
> +
> +    @Override
> +    public void writeTo(ByteBuffer buffer) {
> +        final int startPosition = buffer.position();
> +        super.writeTo(buffer);
> +        while (buffer.position() - startPosition < 512) {
> +            buffer.put((byte) 0); // Pad to 512 bytes
> +        }
> +        addChecksum(buffer, startPosition);
> +    }
> +
> +}
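
For both tar flavours a quick round-trip through TarArchiveInputStream would
catch most header-layout mistakes early. Untested sketch, assuming
AbstractTarHeader fills the ustar fields as its name suggests ('0' is the type
flag for a regular file, and two zeroed 512-byte blocks terminate the archive):

    final byte[] archive = new byte[3 * 512]; // header block + end-of-archive marker
    new PosixTarHeader("test.txt", 0, 0, (byte) '0', "").writeTo(ByteBuffer.wrap(archive));
    try (TarArchiveInputStream in = new TarArchiveInputStream(new ByteArrayInputStream(archive))) {
        assertEquals("test.txt", in.getNextEntry().getName());
    }
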
> diff --git a/src/test/java/org/apache/commons/compress/archivers/fuzzing/ZipCentralDirectoryHeader.java b/src/test/java/org/apache/commons/compress/archivers/fuzzing/ZipCentralDirectoryHeader.java
> new file mode 100644
> index 000000000..ec6d9c572
> --- /dev/null
> +++ b/src/test/java/org/apache/commons/compress/archivers/fuzzing/ZipCentralDirectoryHeader.java
> @@ -0,0 +1,149 @@
> +/*
> + * Licensed to the Apache Software Foundation (ASF) under one
> + * or more contributor license agreements.  See the NOTICE file
> + * distributed with this work for additional information
> + * regarding copyright ownership.  The ASF licenses this file
> + * to you under the Apache License, Version 2.0 (the
> + * "License"); you may not use this file except in compliance
> + * with the License.  You may obtain a copy of the License at
> + *
> + *   https://www.apache.org/licenses/LICENSE-2.0
> + *
> + * Unless required by applicable law or agreed to in writing,
> + * software distributed under the License is distributed on an
> + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
> + * KIND, either express or implied.  See the License for the
> + * specific language governing permissions and limitations
> + * under the License.
> + */
> +package org.apache.commons.compress.archivers.fuzzing;
> +
> +import java.nio.ByteBuffer;
> +import java.nio.ByteOrder;
> +import java.nio.charset.Charset;
> +import java.util.zip.CRC32;
> +
> +/**
> + * Writes a single Central Directory File Header (CD) for a simple file.
> + * This is NOT the local file header. Structure (per PKWARE APPNOTE):
> + *
> + *  Offset Size  Field
> + *  ------ ----  --------------------------------------------
> + *  0      4     Signature (0x02014b50)
> + *  4      2     Version made by
> + *  6      2     Version needed to extract
> + *  8      2     General purpose bit flag
> + *  10     2     Compression method
> + *  12     2     Last mod file time (DOS)
> + *  14     2     Last mod file date (DOS)
> + *  16     4     CRC-32
> + *  20     4     Compressed size
> + *  24     4     Uncompressed size
> + *  28     2     File name length (n)
> + *  30     2     Extra field length (m)
> + *  32     2     File comment length (k)
> + *  34     2     Disk number start
> + *  36     2     Internal file attributes
> + *  38     4     External file attributes
> + *  42     4     Relative offset of local header
> + *  46     n     File name
> + *  46+n   m     Extra field
> + *  46+n+m k     File comment
> + */
> +public class ZipCentralDirectoryHeader {
> +    private static final byte[] MAGIC = {0x50, 0x4b, 0x01, 0x02}; // 0x02014b50 little-endian in stream
> +    private static final int FIXED_SIZE = 46;
> +
> +    // For a minimal entry:
> +    private final short versionMadeBy; // (host OS << 8) | version; use Unix (3) + 2.0 (20)
> +    private final short minVersionToExtract = 20; // 2.0
> +    private final short generalPurposeBitFlag = 0; // no flags
> +    private final short compressionMethod = 0; // store (no compression)
> +    private final short lastModFileTime = 0; // optional: 0
> +    private final short lastModFileDate = 0; // optional: 0
> +    private final int crc32; // CRC32 of uncompressed data (0 for empty)
> +    private final int compressedSize; // 0 for empty+stored
> +    private final int uncompressedSize; // 0 for empty
> +    private final byte[] fileNameBytes; // file name (no directory separator normalization here)
> +    private final short extraFieldLength = 0; // none
> +    private final byte[] fileCommentBytes; // optional comment (empty here)
> +    private final short diskNumberStart = 0; // single-disk archives
> +    private final short internalFileAttributes = 0; // 0
> +    private final int externalFileAttributes = 0; // simple default (e.g., 0)
> +    private final int localHeaderOffset; // relative offset of the corresponding local header
> +
> +    /**
> +     * Minimal constructor for an empty, uncompressed file with no comment.
> +     *
> +     * @param charset charset for encoding the file name
> +     * @param fileName file name to store
> +     * @param localHeaderOffset offset (from start of ZIP) of this entry's local file header
> +     */
> +    public ZipCentralDirectoryHeader(Charset charset, String fileName, int localHeaderOffset) {
> +        this(charset, fileName, localHeaderOffset, new byte[0], 0, 0);
> +    }
> +
> +    /**
> +     * General constructor (still simple): lets you pass sizes and an optional comment.
> +     */
> +    public ZipCentralDirectoryHeader(
> +            Charset charset,
> +            String fileName,
> +            int localHeaderOffset,
> +            byte[] fileCommentBytes,
> +            int compressedSize,
> +            int uncompressedSize) {
> +        // Version made by: host OS = Unix (3), version = 2.0 (20).
> +        this.versionMadeBy = (short) ((3 << 8) | 20);
> +        this.fileNameBytes = fileName.getBytes(charset);
> +        this.fileCommentBytes = (fileCommentBytes == null) ? new byte[0] : fileCommentBytes;
> +        this.localHeaderOffset = localHeaderOffset;
> +        this.compressedSize = compressedSize;
> +        this.uncompressedSize = uncompressedSize;
> +
> +        // Compute CRC32 of uncompressed data if you have it; for empty it's 0.
> +        CRC32 crc = new CRC32();
> +        // For empty file we don't update the CRC with any data.
> +        this.crc32 = (int) crc.getValue(); // 0 for empty
> +    }
> +
> +    private int getHeaderLength() {
> +        return FIXED_SIZE + fileNameBytes.length + extraFieldLength + fileCommentBytes.length;
> +    }
> +
> +    public void writeTo(ByteBuffer output) throws java.io.IOException {
> +        if (output.remaining() < getHeaderLength()) {
> +            throw new java.io.IOException(
> +                    "Not enough space in output buffer: need " + getHeaderLength() + ", have " + output.remaining());
> +        }
> +        output.order(ByteOrder.LITTLE_ENDIAN);
> +
> +        // Signature
> +        output.put(MAGIC);
> +
> +        // Fixed fields
> +        output.putShort(versionMadeBy);
> +        output.putShort(minVersionToExtract);
> +        output.putShort(generalPurposeBitFlag);
> +        output.putShort(compressionMethod);
> +        output.putShort(lastModFileTime);
> +        output.putShort(lastModFileDate);
> +        output.putInt(crc32);
> +        output.putInt(compressedSize);
> +        output.putInt(uncompressedSize);
> +        output.putShort((short) fileNameBytes.length);
> +        output.putShort(extraFieldLength);
> +        output.putShort((short) fileCommentBytes.length);
> +        output.putShort(diskNumberStart);
> +        output.putShort(internalFileAttributes);
> +        output.putInt(externalFileAttributes);
> +        output.putInt(localHeaderOffset);
> +
> +        // Variable fields
> +        output.put(fileNameBytes);
> +        // no extra field
> +        if (fileCommentBytes.length > 0) {
> +            output.put(fileCommentBytes);
> +        }
> +    }
> +}
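
One nit: the general constructor accepts non-zero sizes but always stores a
CRC-32 of zero, which is only correct for empty entries. If the generator ever
emits entries with payload, the caller has to supply the real checksum, e.g.
something along these lines ("payload" being a hypothetical byte[]):

    final CRC32 crc = new CRC32();
    crc.update(payload);
    final int crc32 = (int) crc.getValue(); // must match the local file header as well

Passing the CRC (or the payload itself) into the constructor would keep the
local and central headers from drifting apart.
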
> diff --git a/src/test/java/org/apache/commons/compress/archivers/fuzzing/ZipEndOfCentralDirectory.java b/src/test/java/org/apache/commons/compress/archivers/fuzzing/ZipEndOfCentralDirectory.java
> new file mode 100644
> index 000000000..dd4e6a331
> --- /dev/null
> +++ b/src/test/java/org/apache/commons/compress/archivers/fuzzing/ZipEndOfCentralDirectory.java
> @@ -0,0 +1,148 @@
> +package org.apache.commons.compress.archivers.fuzzing;
> +
> +import java.nio.ByteBuffer;
> +import java.nio.ByteOrder;
> +import java.nio.charset.Charset;
> +
> +/**
> + * Writes a standard (non-ZIP64) End of Central Directory (EOCD) record.
> + *
> + * Structure (per PKWARE APPNOTE):
> + *
> + *  Offset Size  Field
> + *  ------ ----  --------------------------------------------
> + *  0      4     End of central dir signature (0x06054b50)
> + *  4      2     Number of this disk
> + *  6      2     Disk where central directory starts
> + *  8      2     Number of central directory records on this disk
> + * 10      2     Total number of central directory records
> + * 12      4     Size of central directory (bytes)
> + * 16      4     Offset of start of central directory (w.r.t. start of file)
> + * 20      2     ZIP file comment length (n)
> + * 22      n     ZIP file comment (optional)
> + *
> + * This class intentionally does not emit ZIP64 EOCD structures.
> + */
> +public final class ZipEndOfCentralDirectory {
> +    private static final byte[] MAGIC = { 0x50, 0x4b, 0x05, 0x06 }; // 0x06054b50 in stream
> +    private static final int FIXED_SIZE = 22;
> +
> +    // Fields (classic ZIP, single-disk by default)
> +    private final int numberOfThisDisk;                   // u16
> +    private final int diskWhereCentralDirectoryStarts;    // u16
> +    private final int entriesOnThisDisk;                  // u16
> +    private final int totalEntries;                       // u16
> +    private final long centralDirectorySize;              // u32
> +    private final long centralDirectoryOffset;            // u32
> +    private final byte[] comment;                         // length fits u16
> +
> +    /**
> +     * Minimal single-disk EOCD with no comment.
> +     *
> +     * @param totalEntries total number of CD file headers
> +     * @param centralDirectorySize size in bytes of the central directory
> +     * @param centralDirectoryOffset file offset (from BOF) where the central directory starts
> +     */
> +    public ZipEndOfCentralDirectory(int totalEntries,
> +                                    long centralDirectorySize,
> +                                    long centralDirectoryOffset) {
> +        this(0, 0, totalEntries, totalEntries, centralDirectorySize, centralDirectoryOffset, new byte[0]);
> +    }
> +
> +    /**
> +     * General constructor (still non-ZIP64).
> +     *
> +     * @param numberOfThisDisk typically 0
> +     * @param diskWhereCentralDirectoryStarts typically 0
> +     * @param entriesOnThisDisk number of CD records on this disk (<= 65535)
> +     * @param totalEntries total number of CD records in archive (<= 65535)
> +     * @param centralDirectorySize size of CD in bytes (<= 0xFFFFFFFF)
> +     * @param centralDirectoryOffset offset of CD start (<= 0xFFFFFFFF)
> +     * @param comment archive comment bytes (length <= 65535)
> +     */
> +    public ZipEndOfCentralDirectory(int numberOfThisDisk,
> +                                    int diskWhereCentralDirectoryStarts,
> +                                    int entriesOnThisDisk,
> +                                    int totalEntries,
> +                                    long centralDirectorySize,
> +                                    long centralDirectoryOffset,
> +                                    byte[] comment) {
> +        this.numberOfThisDisk = numberOfThisDisk;
> +        this.diskWhereCentralDirectoryStarts = diskWhereCentralDirectoryStarts;
> +        this.entriesOnThisDisk = entriesOnThisDisk;
> +        this.totalEntries = totalEntries;
> +        this.centralDirectorySize = centralDirectorySize;
> +        this.centralDirectoryOffset = centralDirectoryOffset;
> +        this.comment = (comment == null) ? new byte[0] : comment;
> +        validateClassicLimits();
> +    }
> +
> +    /**
> +     * Convenience factory for string comments.
> +     */
> +    public static ZipEndOfCentralDirectory withComment(Charset charset,
> +                                                       int totalEntries,
> +                                                       long centralDirectorySize,
> +                                                       long centralDirectoryOffset,
> +                                                       String comment) {
> +        byte[] bytes = (comment == null) ? new byte[0] : comment.getBytes(charset);
> +        return new ZipEndOfCentralDirectory(0, 0, totalEntries, totalEntries,
> +                centralDirectorySize, centralDirectoryOffset, bytes);
> +    }
> +
> +    private void validateClassicLimits() throws IllegalArgumentException {
> +        // u16 fields
> +        if (!fitsU16(numberOfThisDisk)
> +                || !fitsU16(diskWhereCentralDirectoryStarts)
> +                || !fitsU16(entriesOnThisDisk)
> +                || !fitsU16(totalEntries)
> +                || !fitsU16(comment.length)) {
> +            throw new IllegalArgumentException("EOCD u16 field out of range (ZIP64 needed?)");
> +        }
> +        // u32 fields
> +        if (!fitsU32(centralDirectorySize) || !fitsU32(centralDirectoryOffset)) {
> +            throw new IllegalArgumentException("Central directory size/offset out of range (ZIP64 needed?)");
> +        }
> +    }
> +
> +    private static boolean fitsU16(int v) {
> +        return (v & 0xFFFF_0000) == 0 && v >= 0;
> +    }
> +
> +    private static boolean fitsU16(int v, int extra) {
> +        int sum = v + extra;
> +        return sum >= 0 && sum <= 0xFFFF;
> +    }
> +
> +    private static boolean fitsU32(long v) {
> +        return (v & 0xFFFF_FFFF_0000_0000L) == 0 && v >= 0;
> +    }
> +
> +    public int getRecordLength() {
> +        return FIXED_SIZE + comment.length;
> +    }
> +
> +    /**
> +     * Writes the EOCD record into the buffer at its current position.
> +     */
> +    public void writeTo(ByteBuffer out) throws java.io.IOException {
> +        int need = getRecordLength();
> +        if (out.remaining() < need) {
> +            throw new java.io.IOException("Not enough space in output buffer: need " + need
> +                    + ", have " + out.remaining());
> +        }
> +        out.order(ByteOrder.LITTLE_ENDIAN);
> +
> +        out.put(MAGIC);
> +        out.putShort((short) numberOfThisDisk);
> +        out.putShort((short) diskWhereCentralDirectoryStarts);
> +        out.putShort((short) entriesOnThisDisk);
> +        out.putShort((short) totalEntries);
> +        out.putInt((int) centralDirectorySize);
> +        out.putInt((int) centralDirectoryOffset);
> +        out.putShort((short) comment.length);
> +        if (comment.length > 0) {
> +            out.put(comment);
> +        }
> +    }
> +}
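
Two small things in ZipEndOfCentralDirectory.java: it is the only new file
missing the Apache license header, so the RAT check will most likely flag it,
and the private fitsU16(int, int) overload is never called and can probably be
dropped.
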
> diff --git a/src/test/java/org/apache/commons/compress/archivers/fuzzing/ZipLocalHeader.java b/src/test/java/org/apache/commons/compress/archivers/fuzzing/ZipLocalHeader.java
> new file mode 100644
> index 000000000..e74897739
> --- /dev/null
> +++ b/src/test/java/org/apache/commons/compress/archivers/fuzzing/ZipLocalHeader.java
> @@ -0,0 +1,66 @@
> +/*
> + * Licensed to the Apache Software Foundation (ASF) under one
> + * or more contributor license agreements.  See the NOTICE file
> + * distributed with this work for additional information
> + * regarding copyright ownership.  The ASF licenses this file
> + * to you under the Apache License, Version 2.0 (the
> + * "License"); you may not use this file except in compliance
> + * with the License.  You may obtain a copy of the License at
> + *
> + *   https://www.apache.org/licenses/LICENSE-2.0
> + *
> + * Unless required by applicable law or agreed to in writing,
> + * software distributed under the License is distributed on an
> + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
> + * KIND, either express or implied.  See the License for the
> + * specific language governing permissions and limitations
> + * under the License.
> + */
> +package org.apache.commons.compress.archivers.fuzzing;
> +
> +import java.nio.ByteOrder;
> +import java.nio.charset.Charset;
> +import java.util.zip.CRC32;
> +
> +public class ZipLocalHeader {
> +    private static final byte[] MAGIC = {0x50, 0x4b, 0x03, 0x04};
> +    private final short minVersionToExtract = 20; // Version needed to extract
> +    private final short generalPurposeBitFlag = 0; // No special flags
> +    private final short compressionMethod = 0; // 0 = no compression
> +    private final short lastModFileTime = 0; // File modification time
> +    private final short lastModFileDate = 0; // File modification date
> +    private final int crc32 = (int) new CRC32().getValue(); // CRC32 checksum
> +    private final int compressedSize; // Compressed size
> +    private final int uncompressedSize; // Uncompressed size
> +    private final byte[] fileName;
> +    private final short extraFieldLength = 0; // No extra field
> +
> +    public ZipLocalHeader(Charset charset, String fileName, int compressedSize, int uncompressedSize) {
> +        this.compressedSize = compressedSize;
> +        this.uncompressedSize = uncompressedSize;
> +        this.fileName = fileName.getBytes(charset);
> +    }
> +
> +    private int getHeaderLength() {
> +        return 30 + fileName.length + extraFieldLength;
> +    }
> +
> +    public void writeTo(java.nio.ByteBuffer output) throws java.io.IOException {
> +        if (output.remaining() < getHeaderLength()) {
> +            throw new java.io.IOException("Not enough space in output buffer");
> +        }
> +        output.order(ByteOrder.LITTLE_ENDIAN);
> +        output.put(MAGIC);
> +        output.putShort(minVersionToExtract);
> +        output.putShort(generalPurposeBitFlag);
> +        output.putShort(compressionMethod);
> +        output.putShort(lastModFileTime);
> +        output.putShort(lastModFileDate);
> +        output.putInt(crc32);
> +        output.putInt(compressedSize);
> +        output.putInt(uncompressedSize);
> +        output.putShort((short) fileName.length);
> +        output.putShort(extraFieldLength);
> +        output.put(fileName);
> +    }
> +}
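
With the three ZIP pieces in place, a minimal end-to-end check would be to
assemble a one-entry, stored, empty archive and feed it back to the ZIP
parser. Untested sketch (the writeTo methods declare IOException, so the
surrounding test method needs to as well):

    final Charset cs = StandardCharsets.UTF_8;
    final ByteBuffer buf = ByteBuffer.allocate(4096);
    new ZipLocalHeader(cs, "empty.txt", 0, 0).writeTo(buf);         // local header, no data
    final int cdOffset = buf.position();
    new ZipCentralDirectoryHeader(cs, "empty.txt", 0).writeTo(buf); // points back to offset 0
    final int cdSize = buf.position() - cdOffset;
    new ZipEndOfCentralDirectory(1, cdSize, cdOffset).writeTo(buf);
    // buf now holds a tiny archive that ZipArchiveInputStream should accept
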
> diff --git a/src/test/java/org/apache/commons/compress/archivers/fuzzing/package-info.java b/src/test/java/org/apache/commons/compress/archivers/fuzzing/package-info.java
> new file mode 100644
> index 000000000..bc107e6ad
> --- /dev/null
> +++ b/src/test/java/org/apache/commons/compress/archivers/fuzzing/package-info.java
> @@ -0,0 +1,19 @@
> +/*
> + * Licensed to the Apache Software Foundation (ASF) under one
> + * or more contributor license agreements.  See the NOTICE file
> + * distributed with this work for additional information
> + * regarding copyright ownership.  The ASF licenses this file
> + * to you under the Apache License, Version 2.0 (the
> + * "License"); you may not use this file except in compliance
> + * with the License.  You may obtain a copy of the License at
> + *
> + *   https://www.apache.org/licenses/LICENSE-2.0
> + *
> + * Unless required by applicable law or agreed to in writing,
> + * software distributed under the License is distributed on an
> + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
> + * KIND, either express or implied.  See the License for the
> + * specific language governing permissions and limitations
> + * under the License.
> + */
> +package org.apache.commons.compress.archivers.fuzzing;
>
