> > here's a snippet. if it attacks you, put your hand in front of it and
> > say "down!". it's just the intro, it's not even the area i'm working on
>
>         # ==> i think work ended up in the read_many function despite the read_many_tensors function existing
>         # ==> basically, one outputs partial tensors unless there is a region of all-cached or all-uncached
>         # ==> the special cases are for runs of 0s on the left and right and can be ignored to start

updated this line to:
        # ==> the special cases are for disk exhaustion and runs of 0s on the left and right, and can be ignored to start
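
to make the "region of all-cached or all-uncached" point above concrete, here's a minimal, hypothetical sketch: split a requested block range into maximal runs with the same cached state, so each run can be fetched or handed out whole instead of as a partial tensor. the cached mask is an invented stand-in for whatever cache bookkeeping read_many actually keeps.

        def split_runs(cached):
            # walk the per-block cached flags and emit maximal same-valued runs
            runs, start = [], 0
            for i in range(1, len(cached) + 1):
                if i == len(cached) or cached[i] != cached[start]:
                    runs.append((start, i, cached[start]))  # [start, end) blocks, cached?
                    start = i
            return runs

        split_runs([True, True, False, False, False, True])
        # [(0, 2, True), (2, 5, False), (5, 6, True)]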

>
>         def read_many(self, offset_lengths, progress, validate_sorted=True):
>             if validate_sorted:
>                 sorted_offset_lengths = list(offset_lengths)
>                 sorted_offset_lengths.sort()
>                 assert sorted_offset_lengths == offset_lengths
>             OP_FETCH, OP_PLACE, OP_OUTPUT = 1, 2, 4
>             offset_length_tail_idx_ops = torch.zeros([offset_lengths.shape[0]*2, 5])
>             OFFSET, LENGTH, TAIL, IDX, OP = range(offset_length_tail_idx_ops.shape[-1])
>             op_ct = 0
>             tails = (offset_lengths[:,0] + offset_lengths[:,1]).clamp(max=len(self.mmap))
>             aligned_offsets = offset_lengths[:,0] // self.blksize; aligned_offsets *= self.blksize
>             aligned_tails = (tails - 1); aligned_tails //= self.blksize; aligned_tails += 1; aligned_tails *= self.blksize; torch.clamp(aligned_tails, max=self.size(), out=aligned_tails)
>             cls = type(self.fetchers)
>             avail_disk_space = (psutil.disk_usage(self.fn).free + cls.sparse_usage) * self.fetchers.usage_frac - cls.sparse_usage
>             min_hole = 0
>             pbar = range(len(offset_lengths))
>             if progress:
>                 pbar = tqdm.tqdm(pbar, total=len(offset_lengths), desc=progress, leave=False, unit='rd')
>             idx = 0
>             while idx < len(offset_lengths):
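
for reference, a standalone sketch of the block-alignment arithmetic in the snippet (round each start offset down to a block boundary, round each tail up, clamp to the file size). blksize and file_size are made-up stand-ins for self.blksize and self.size():

        import torch

        blksize, file_size = 4096, 20000
        offset_lengths = torch.tensor([[100, 50], [5000, 10000]])

        tails = (offset_lengths[:, 0] + offset_lengths[:, 1]).clamp(max=file_size)
        aligned_offsets = (offset_lengths[:, 0] // blksize) * blksize   # round start down
        aligned_tails = (((tails - 1) // blksize) + 1) * blksize        # round tail up
        aligned_tails = aligned_tails.clamp(max=file_size)

        # aligned_offsets -> [0, 4096], aligned_tails -> [4096, 16384]

(OP_FETCH, OP_PLACE, OP_OUTPUT = 1, 2, 4 look like bit flags, so a single row of the op table can presumably encode more than one op.)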
