Mirror of https://code.blicky.net/yorhel/ncdu.git
binfmt: Remove CBOR-null-based padding hack

Seems like unnecessary complexity.

commit 9418079da3
parent 18f322c532

2 changed files with 6 additions and 9 deletions
@@ -104,7 +104,6 @@ sub cbordata($blknum, $data) {
         my($val, $len) = $cbor->decode_prefix(substr $data, $off);
         my $itemref = ($blknum << 24) | $off;
         $off += $len;
-        next if !defined $val;
         $nitems++;
 
         # Basic validation of the CBOR data. Doesn't validate that every value
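The $itemref computed in the validation loop above packs two values into a single integer: the block number in the upper bits and the byte offset of the item within that block in the lower 24 bits. A minimal standalone sketch of that layout (helper names are made up for illustration; this is not code from ncdu):

const std = @import("std");

// Hypothetical helpers illustrating the itemref layout used above:
// upper bits hold the block number, the low 24 bits hold the byte
// offset of the item inside that block.
fn packItemref(block_num: u64, off: u64) u64 {
    return (block_num << 24) | off;
}

fn blockOf(ref: u64) u64 {
    return ref >> 24;
}

fn offsetOf(ref: u64) u64 {
    return ref & 0xffffff;
}

pub fn main() void {
    const ref = packItemref(3, 0x0042);
    std.debug.print("ref=0x{x} block={d} offset=0x{x}\n", .{ ref, blockOf(ref), offsetOf(ref) });
    // prints: ref=0x3000042 block=3 offset=0x42
}

With the padding null gone, offset 0 of block 0 is a legal packed value, which is also why the validation loop above no longer needs to skip undefined (null) values.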
@@ -120,8 +120,6 @@ pub const Thread = struct {
         t.off = 0;
         t.block_num = @intCast((global.index.items.len - 4) / 8);
         global.index.appendSlice(&[1]u8{0}**8) catch unreachable;
-        // Start the first block with a CBOR 'null', so that itemrefs can never be 0.
-        if (t.block_num == 0) t.cborHead(.simple, 22);
     }
 
     fn cborHead(t: *Thread, major: CborMajor, arg: u64) void {
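The two lines removed here wrote a single CBOR 'null' at the start of the first block so that no real item could ever have itemref 0, letting 0 act as a "no reference" sentinel elsewhere. In CBOR (RFC 8949), null is simple value 22 in major type 7, which encodes as the single head byte 0xF6. A small sketch of how such a one-byte head is formed, using a standalone helper rather than ncdu's own cborHead:

const std = @import("std");

// Sketch under assumptions (not ncdu's cborHead): for arguments below 24,
// a CBOR head byte packs the major type into the top 3 bits and the
// argument into the low 5 bits. Major type 7 with argument 22 is 'null'.
fn cborHeadByte(major: u3, arg: u5) u8 {
    return (@as(u8, major) << 5) | arg;
}

pub fn main() void {
    const null_head = cborHeadByte(7, 22);
    std.debug.print("CBOR null head byte: 0x{x}\n", .{null_head}); // 0xf6
}

Dropping the padding means itemref 0 can now refer to a real item, so the 0 sentinel is replaced by optionals in the hunks below.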
@@ -156,18 +154,18 @@ pub const Thread = struct {
         t.cborHead(.pos, @intFromEnum(key));
     }
 
-    fn itemRef(t: *Thread, key: ItemKey, ref: u64) void {
-        if (ref == 0) return;
+    fn itemRef(t: *Thread, key: ItemKey, ref: ?u64) void {
+        const r = ref orelse return;
         t.itemKey(key);
         // Full references compress like shit and most of the references point
         // into the same block, so optimize that case by using a negative
         // offset instead.
-        if ((ref >> 24) == t.block_num) t.cborHead(.neg, t.itemref - ref - 1)
-        else t.cborHead(.pos, ref);
+        if ((r >> 24) == t.block_num) t.cborHead(.neg, t.itemref - r - 1)
+        else t.cborHead(.pos, r);
     }
 
     // Reserve space for a new item, write out the type, prev and name fields and return the itemref.
-    fn itemStart(t: *Thread, itype: model.EType, prev_item: u64, name: []const u8) u64 {
+    fn itemStart(t: *Thread, itype: model.EType, prev_item: ?u64, name: []const u8) u64 {
         const min_len = name.len + MAX_ITEM_LEN;
         if (t.off + min_len > t.buf.len) t.flush(min_len);
 
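itemRef now takes a ?u64 and unwraps it with orelse, so an absent reference is expressed as null rather than 0. The same-block optimization itself is unchanged: when the referenced item lives in the block currently being written, only the backward distance is stored as a CBOR negative integer (major type 1, which encodes the value -1 - arg), which stays small and compresses well. A worked example with assumed values, not taken from ncdu:

const std = @import("std");

// Standalone sketch (assumed values) of the same-block optimization: the
// distance back to the referenced item is encoded instead of the full ref.
pub fn main() void {
    const block_num: u64 = 3; // block currently being written
    const cur: u64 = (block_num << 24) | 0x80; // itemref of the item being written
    const ref: u64 = (block_num << 24) | 0x42; // earlier item it points at

    if ((ref >> 24) == block_num) {
        const arg = cur - ref - 1; // 0x3d == 61, i.e. the CBOR integer -62
        std.debug.print("same block: encode negative-integer argument {d}\n", .{arg});
    } else {
        std.debug.print("other block: encode full ref {d}\n", .{ref});
    }
}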
@@ -215,7 +213,7 @@ pub const Dir = struct {
     // last_item into an atomic integer and other fields could be split up for
     // subdir use.
     lock: std.Thread.Mutex = .{},
-    last_sub: u64 = 0,
+    last_sub: ?u64 = null,
     stat: sink.Stat,
     items: u64 = 0,
     size: u64 = 0,
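Dir.last_sub previously used 0 as its "nothing written yet" value, which only worked while the padding null guaranteed that no item could have itemref 0. With that guarantee gone it becomes an optional initialized to null, matching the ?u64 parameters of itemRef and itemStart above. A minimal sketch of the pattern, with variable names assumed for illustration:

const std = @import("std");

// Minimal sketch: 0 is now a legal itemref, so "no reference yet" has to be
// a distinct state, expressed with an optional instead of a 0 sentinel.
pub fn main() void {
    var last_sub: ?u64 = null; // nothing written yet

    last_sub = 0; // itemref 0 is now a perfectly valid reference

    if (last_sub) |ref| {
        std.debug.print("previous sub-item at itemref {d}\n", .{ref});
    } else {
        std.debug.print("no previous sub-item\n", .{});
    }
}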