On 05/17/2014 01:13 AM, Paul Eggert wrote:
> Pádraig Brady wrote:
> 
>> The attached patch changes the output to:
>>
>>    $ dd if=/dev/zero of=/dev/null bs=256M count=2
>>    2+0 records in
>>    2+0 records out
>>    536870912 bytes (512 MiB) copied, 0.152887 s, 3.3 GiB/s
> 
> I recall considering this when I added this kind of diagnostic to
> GNU dd back in 2004 and going with powers-of-1000 abbreviations
> because secondary storage devices are normally measured that way.
> For this reason, I expect many users will prefer powers-of-1000 here.

This is a fair point, as it's common to transfer MB-based images in
MiB-sized blocks, for example.

Though the 512 MiB above is useful, as one can immediately see that
the requested amount was transferred.  It also imparts more
information than 537 MB, which is trivially inferred from the
preceding byte count.  In addition, MiB is unambiguous with respect
to base, though I suppose MB isn't too bad either, since MB, per the
standards and dd's input notation, is base 1000.
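
To make the information-loss point concrete:

  536870912 bytes = 512 × 1024²      (exactly 512 MiB, no rounding)
  536870912 bytes ≈ 536.87 × 1000²   (rounds to 537 MB)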

> This is particularly true for transfer rates: it's rare to see
> "GiB/s" in real-world prose.

Fair point. We'll leave that one as is.

> 
> So it'd be unwise to make this change.
> 
> The simplest thing to do is to leave "dd" alone, which is my mild preference.
> Alternatively, we could make the proposed behavior optional, with the default 
> being the current behavior.

> If we do that, though, the behavior shouldn't be affected by the
> abbreviation chosen for the block size.  Even if the block size is
> given in powers-of-1024 (which is common, because block sizes are
> about internal memory units, where powers-of-1024 are typical), the
> total number of bytes transferred and the transfer rates are more
> commonly interpreted in the external world, where powers-of-1000 are
> typical.

It's not worth a new option, but if it were to be conditional, perhaps
it could be based on the actual amount transferred rather than the
block size.  Or, from a mathematical viewpoint, output the number that
loses the least information.  Essentially:

  if ((count % 1000) && ! (count % 1024))
    options |= human_base_1024;
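
For concreteness, here is a minimal standalone sketch of that
heuristic built on gnulib's human_readable() with the same flags the
patch uses; pick_units() and the driver are just illustrative names,
not part of the patch:

  /* Sketch only: assumes a gnulib-using tree providing "human.h".  */
  #include <stdio.h>
  #include <stdint.h>
  #include "human.h"

  static char const *
  pick_units (uintmax_t count)
  {
    static char hbuf[LONGEST_HUMAN_READABLE + 1];
    int opts = (human_autoscale | human_round_to_nearest
                | human_space_before_unit | human_SI | human_B);

    /* Use IEC units only when the count divides evenly by 1024 but
       not by 1000, i.e. when base 1024 loses less information.  */
    if ((count % 1000) && ! (count % 1024))
      opts |= human_base_1024;

    return human_readable (count, hbuf, opts, 1, 1);
  }

  int
  main (void)
  {
    printf ("%s\n", pick_units (536870912));  /* -> "512 MiB" */
    printf ("%s\n", pick_units (256000000));  /* -> "256 MB" */
    return 0;
  }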

The attached patch now produces:

  $ dd if=/dev/zero of=/dev/null bs=256M count=2
  2+0 records in
  2+0 records out
  536870912 bytes (512 MiB) copied, 0.200283 s, 2.7 GB/s

  $ truncate -s 256MB disk.img
  $ dd if=disk.img of=/dev/null bs=2M
  122+1 records in
  122+1 records out
  256000000 bytes (256 MB) copied, 0.129617 s, 2.0 GB/s

cheers,
Pádraig.
From 660cf3ac60674a42618ad3c7f9ae3eaec7ab90e5 Mon Sep 17 00:00:00 2001
From: Pádraig Brady <p...@draigbrady.com>
Date: Fri, 16 May 2014 10:32:43 +0100
Subject: [PATCH] dd: output base 1024 transfer counts in IEC units

* src/dd.c (human_size): Add base and from/to block size params so we
can reuse this function for all output size conversions.
(print_stats): Use human_size() in all cases, which will change
the output units from SI to IEC when the size is a power of 1024
and not a power of 1000.
Fixes http://bugs.gnu.org/17505
---
 src/dd.c |   31 ++++++++++++++++++-------------
 1 files changed, 18 insertions(+), 13 deletions(-)

diff --git a/src/dd.c b/src/dd.c
index 1e387f3..f3700c9 100644
--- a/src/dd.c
+++ b/src/dd.c
@@ -653,13 +653,23 @@ Options are:\n\
 }
 
 static char *
-human_size (size_t n)
+human_size (size_t n, size_t base, uintmax_t from_bs, uintmax_t to_bs)
 {
   static char hbuf[LONGEST_HUMAN_READABLE + 1];
+
   int human_opts =
-    (human_autoscale | human_round_to_nearest | human_base_1024
+    (human_autoscale | human_round_to_nearest
      | human_space_before_unit | human_SI | human_B);
-  return human_readable (n, hbuf, human_opts, 1, 1);
+
+  if (! base)
+    {
+      if ((n % 1000) && ! (n % 1024))
+        human_opts |= human_base_1024;
+    }
+  else if (base == 1024)
+    human_opts |= human_base_1024;
+
+  return human_readable (n, hbuf, human_opts, from_bs, to_bs);
 }
 
 /* Ensure input buffer IBUF is allocated.  */
@@ -674,7 +684,7 @@ alloc_ibuf (void)
   if (!real_buf)
     error (EXIT_FAILURE, 0,
            _("memory exhausted by input buffer of size %zu bytes (%s)"),
-           input_blocksize, human_size (input_blocksize));
+           input_blocksize, human_size (input_blocksize, 1024, 1, 1));
 
   real_buf += SWAB_ALIGN_OFFSET;	/* allow space for swab */
 
@@ -696,7 +706,7 @@ alloc_obuf (void)
       if (!real_obuf)
         error (EXIT_FAILURE, 0,
                _("memory exhausted by output buffer of size %zu bytes (%s)"),
-               output_blocksize, human_size (output_blocksize));
+               output_blocksize, human_size (output_blocksize, 1024, 1, 1));
       obuf = ptr_align (real_obuf, page_size);
     }
   else
@@ -734,10 +744,6 @@ multiple_bits_set (int i)
 static void
 print_stats (void)
 {
-  char hbuf[LONGEST_HUMAN_READABLE + 1];
-  int human_opts =
-    (human_autoscale | human_round_to_nearest
-     | human_space_before_unit | human_SI | human_B);
   double delta_s;
   char const *bytes_per_second;
 
@@ -766,8 +772,7 @@ print_stats (void)
            ngettext ("%"PRIuMAX" byte (%s) copied",
                      "%"PRIuMAX" bytes (%s) copied",
                      select_plural (w_bytes)),
-           w_bytes,
-           human_readable (w_bytes, hbuf, human_opts, 1, 1));
+           w_bytes, human_size (w_bytes, 0, 1, 1));
 
   xtime_t now = gethrxtime ();
   if (start_time < now)
@@ -776,8 +781,8 @@ print_stats (void)
       uintmax_t delta_xtime = now;
       delta_xtime -= start_time;
       delta_s = delta_xtime / XTIME_PRECISIONe0;
-      bytes_per_second = human_readable (w_bytes, hbuf, human_opts,
-                                         XTIME_PRECISION, delta_xtime);
+      bytes_per_second = human_size (w_bytes, 1000, XTIME_PRECISION,
+                                     delta_xtime);
     }
   else
     {
-- 
1.7.7.6
