On Wed, 14 Aug 2024 19:11:58 GMT, Gerard Ziemski <gziem...@openjdk.org> wrote:

>> - `VMATree` is used instead of `SortedLinkList` in new class 
>> `VirtualMemoryTrackerWithTree`.
>>  -  A wrapper/helper `RegionTree` is made around VMATree to make some calls 
>> easier.
>>  -  Both old and new versions exist in the code and can be selected via 
>> `MemTracker::set_version()`
>>  - `find_reserved_region()` is used in 4 cases, it will be removed in 
>> further PRs.
>>  - All tier1 tests pass except one ~that expects a 50% increase in committed 
>> memory but it does not happen~  https://bugs.openjdk.org/browse/JDK-8335167.
>>  - Adding a runtime flag for selecting the old or new version can be added 
>> later.
>>  - Some performance tests are added for the new version, VMATree and Treap, to 
>> show the idea; they should be improved later. Based on the results of 
>> comparing the speed of VMATree and VMT, VMATree shows ~40x faster response time.
>
> src/hotspot/share/nmt/memReporter.cpp line 467:
> 
>> 465: 
>> 466:       if (reserved_and_committed)
>> 467:         return;
> 
> This looks better, but now that I got more comfortable with anonymous local 
> functions using lambda, I think we can do better. We can write a single 
> `print_rgn` lambda function that we can use in both "reserved" and 
> "committed" cases:
> 
> 
>   outputStream* out = output();
>   const char* scale = current_scale();
>   auto print_rgn = [&](const VirtualMemoryRegion* rgn, const NativeCallStack* 
> stack,
>                        MEMFLAGS mem_type = MEMFLAGS::mtNone,
>                        const char* region_type = "committed",
>                        bool committed = true) {
>     // Don't report if size is too small
>     if (amount_in_current_scale(rgn->size()) == 0) return;
>     out->cr();
>     INDENT_BY(committed?8:0,
>       print_virtual_memory_region(region_type, rgn->base(), rgn->size());
>       if (stack->is_empty()) {
>         out->cr();
>       } else {
>         if (!committed) {
>           out->print(" for %s", NMTUtil::flag_to_name(mem_type));
>         }
>         out->print_cr(" from ");
>         INDENT_BY(4, _stackprinter.print_stack(stack);)
>       }
>     )
>   };

The whole method here then becomes:


void MemDetailReporter::report_virtual_memory_region(const 
ReservedMemoryRegion* reserved_rgn) {
  assert(reserved_rgn != nullptr, "null pointer");

  // We don't bother about reporting peaks here.
  // That is because peaks - in the context of virtual memory, peak of 
committed areas - make little sense
  // when we report *by region*, which are identified by their location in 
memory. There is a philosophical
  // question about identity here: e.g. a committed region that has been split 
into three regions by
  // uncommitting a middle section of it, should that still count as "having 
peaked" before the split? If
  // yes, which of the three new regions would be the spiritual successor? 
Rather than introducing more
  // complexity, we avoid printing peaks altogether. Note that peaks should 
still be printed when reporting
  // usage *by callsite*.

  if (reserved_rgn->flag() == mtTest) {
    log_debug(nmt)("report vmem, rgn base: " INTPTR_FORMAT " size: " 
SIZE_FORMAT "cur-scale: " SIZE_FORMAT,
      p2i(reserved_rgn->base()), 
reserved_rgn->size(),amount_in_current_scale(reserved_rgn->size()));
  }

  outputStream* out = output();
  const char* scale = current_scale();
  auto print_rgn = [&](const VirtualMemoryRegion* rgn, const NativeCallStack* 
stack,
                       MEMFLAGS mem_type = MEMFLAGS::mtNone,
                       const char* region_type = "committed",
                       bool committed = true) {
    // Don't report if size is too small
    if (amount_in_current_scale(rgn->size()) == 0) return;
    out->cr();
    INDENT_BY(committed?8:0,
      print_virtual_memory_region(region_type, rgn->base(), rgn->size());
      if (stack->is_empty()) {
        out->cr();
      } else {
        if (!committed) {
          out->print(" for %s", NMTUtil::flag_to_name(mem_type));
        }
        out->print_cr(" from ");
        INDENT_BY(4, _stackprinter.print_stack(stack);)
      }
    )
  };

  bool all_committed = reserved_rgn->size() == reserved_rgn->committed_size();
  const char* region_type = (all_committed ? "reserved and committed" : 
"reserved");
  print_rgn(reserved_rgn, reserved_rgn->call_stack(), reserved_rgn->flag(), 
region_type, false);

  if (all_committed) {
    if (MemTracker::is_using_sorted_link_list()) {
      CommittedRegionIterator itr = reserved_rgn->iterate_committed_regions();
      const CommittedMemoryRegion* committed_rgn = itr.next();
      if (committed_rgn->size() == reserved_rgn->size() &&
          committed_rgn->call_stack()->equals(*reserved_rgn->call_stack())) {
        // One region spanning the entire reserved region, with the same stack 
trace.
        // Don't print this region because the "reserved and committed" line 
above
        // already indicates that the region is committed.
        assert(itr.next() == nullptr, "Unexpectedly more than one region");
        return;
      }
    }
    if (MemTracker::is_using_tree()) {
      bool reserved_and_committed = false;
      
VirtualMemoryTrackerWithTree::Instance::tree()->visit_committed_regions(*reserved_rgn,
                                                                    
[&](CommittedMemoryRegion& committed_rgn) {
        if (committed_rgn.size() == reserved_rgn->size() &&
            committed_rgn.call_stack()->equals(*reserved_rgn->call_stack())) {
          // One region spanning the entire reserved region, with the same 
stack trace.
          // Don't print this region because the "reserved and committed" line 
above
          // already indicates that the region is committed.
          reserved_and_committed = true;
          return false;
        }
        return true;
      });

      if (reserved_and_committed)
        return;
    }
  }

  if (MemTracker::is_using_sorted_link_list()) {
    CommittedRegionIterator itr = reserved_rgn->iterate_committed_regions();
    CommittedMemoryRegion* crgn;
    while ((crgn = itr.next()) != nullptr) {
      print_rgn(crgn, crgn->call_stack());
    }
  }
  if (MemTracker::is_using_tree()) {
    
VirtualMemoryTrackerWithTree::Instance::tree()->visit_committed_regions(*reserved_rgn,
 [&](CommittedMemoryRegion& crgn) {
      print_rgn(&crgn, crgn.call_stack());
      return true;
    });
  }
}

-------------

PR Review Comment: https://git.openjdk.org/jdk/pull/20425#discussion_r1717435317

Reply via email to