Hi,

Quoth Roberto E. Vargas Caballero <k...@shike2.net>:
> I propose these changes to your patch (not tested yet):

In fact, my changes were wrong too.  In the end I decided it was
worth using memcpy(), avoiding the temporary buffer and dealing
manually with the multiple slashes at the end of the dirname.  This
is the current proposal (a standalone demo of the splitting logic
follows the patch):

---- >8 ----

From 746532584c1b306deebd0b41edd426f7289ea8a2 Mon Sep 17 00:00:00 2001
From: Andrea Calligaris <ac89.hk.pub...@gmail.com>
Date: Wed, 26 Feb 2025 11:03:58 +0100
Subject: [PATCH] tar: archive: improve fix for long names crashing

As requested, I am resending my old patch that fixes the crash when
archiving names longer than 100 characters.

The last patch dealing with the issue was [1], and the old patch was
[2]. The code before this commit did not handle multiple slashes
correctly, and switching to basename(3) and dirname(3) would have
required a temporary buffer, because they may modify their argument
and the path is used again later in several places. This solution
does not modify the path and uses pointer arithmetic to solve the
problem.

[1] https://lists.suckless.org/hackers/2412/19213.html
[2] https://lists.suckless.org/hackers/2402/19071.html

Co-authored-by: Roberto E. Vargas Caballero <k...@shike2.net>
---
 tar.c | 47 ++++++++++++++++++++++++++---------------------
 1 file changed, 26 insertions(+), 21 deletions(-)

diff --git a/tar.c b/tar.c
index e4701dc..44c790d 100644
--- a/tar.c
+++ b/tar.c
@@ -179,14 +179,15 @@ putoctal(char *dst, unsigned num, int size)
 static int
 archive(const char *path)
 {
-       char b[BLKSIZ];
        struct group *gr;
        struct header *h;
        struct passwd *pw;
        struct stat st;
-       size_t chksum, i;
+       size_t chksum, i, nlen, plen;
        ssize_t l, r;
        int fd = -1;
+       char b[BLKSIZ];
+       const char *base, *p;
 
        if (lstat(path, &st) < 0) {
                weprintf("lstat %s:", path);
@@ -202,27 +202,28 @@ archive(const char *path)
        h = (struct header *)b;
        memset(b, 0, sizeof(b));
 
-       if (strlen(path) > 255) {
-               const char *reason = "path exceeds 255 character limit";
-               eprintf("malformed tar archive: %s\n", reason);
-       } else if (strlen(path) >= 100) {
-               size_t prefix_len = 155;
-               const char *last_slash = strrchr(path, '/');
-
-               if (last_slash && last_slash < path + prefix_len) {
-                   prefix_len = last_slash - path + 1;
-               }
-
-               /* strlcpy is fine here - for path ONLY -,
-                * since we're splitting the path.
-                * It's not an issue if the prefix can't hold
-                * the full path — name will take the rest. */
-               strlcpy(h->prefix, path, prefix_len);
-               estrlcpy(h->name, path + prefix_len, sizeof(h->name));
-       } else {
-               estrlcpy(h->name, path,         sizeof(h->name));
+       plen = 0;
+       base = path;
+       if ((nlen = strlen(base)) >= sizeof(h->name)) {
+               /*
+                * Cover case where path name is too long (in which case we
+                * need to split it to prefix and name).
+                */
+               if ((base = strrchr(path, '/')) == NULL)
+                       goto too_long;
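+               /* strip the run of slashes between dirname and basename */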
+               for (p = base++; p > path && *p == '/'; --p)
+                       ;
+
+               nlen -= base - path;
+               plen = p - path + 1;
+               if (nlen >= sizeof(h->name) || plen >= sizeof(h->prefix))
+                       goto too_long;
        }
 
+       memcpy(h->name, base, nlen);
+       memcpy(h->prefix, path, plen);
+
        putoctal(h->mode,    (unsigned)st.st_mode & 0777, sizeof(h->mode));
        putoctal(h->uid,     (unsigned)st.st_uid,         sizeof(h->uid));
        putoctal(h->gid,     (unsigned)st.st_gid,         sizeof(h->gid));
@@ -270,6 +270,9 @@ archive(const char *path)
        }
 
        return 0;
+
+too_long:
+       eprintf("filename too long: %s\n", path);
 }
 
 static int
-- 
2.45.3


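For reviewers who want to poke at the splitting logic in isolation,
here is a minimal standalone sketch.  It is not part of the patch:
NAMESIZ, PREFSIZ and split() are made-up names that mirror the ustar
name[100] and prefix[155] fields, and the function body repeats the
same pointer arithmetic as archive() above:

#include <stdio.h>
#include <string.h>

/* illustrative sizes mirroring the ustar header fields */
#define NAMESIZ 100
#define PREFSIZ 155

static int
split(const char *path, char *prefix, char *name)
{
	const char *base, *p;
	size_t nlen, plen;

	plen = 0;
	base = path;
	if ((nlen = strlen(base)) >= NAMESIZ) {
		/* cut at the last slash, as in the patch */
		if ((base = strrchr(path, '/')) == NULL)
			return -1;
		/* strip the run of slashes between dirname and basename */
		for (p = base++; p > path && *p == '/'; --p)
			;
		nlen -= base - path;
		plen = p - path + 1;
		if (nlen >= NAMESIZ || plen >= PREFSIZ)
			return -1;
	}
	memcpy(name, base, nlen);
	name[nlen] = '\0';    /* the patch gets this from the zeroed block */
	memcpy(prefix, path, plen);
	prefix[plen] = '\0';
	return 0;
}

int
main(void)
{
	char prefix[PREFSIZ], name[NAMESIZ];
	const char *tests[] = {
		"short/name",
		"dir/with////this-component-is-deliberately-padded-to-"
		"push-the-total-path-length-past-one-hundred-characters",
	};
	size_t i;

	for (i = 0; i < sizeof(tests) / sizeof(tests[0]); i++) {
		if (split(tests[i], prefix, name) == 0)
			printf("prefix=\"%s\" name=\"%s\"\n", prefix, name);
		else
			printf("too long: %s\n", tests[i]);
	}
	return 0;
}

For the second test path this prints prefix="dir/with" and
name="this-component-...", and a ustar reader rebuilds the full path
as prefix + '/' + name, so the run of slashes collapses to a single
separator, which is exactly the normalization the patch is after.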
Please take a look; any review is welcome.

Regards,

