Support direct browsing of a binary export

Code is more hacky than I prefer, but this approach does work and isn't
even as involved as I had anticipated.

Still a few known bugs and limitations left to resolve.
Yorhel 2024-08-06 09:46:17 +02:00
parent 8fb2290d5e
commit 30d6ddf149
7 changed files with 235 additions and 85 deletions

View file

@@ -188,6 +188,10 @@ const CborVal = struct {
}
}
fn isTrue(v: *const CborVal) bool {
return v.major == .simple and v.arg == 21;
}
// Read either a byte or text string.
// Doesn't validate UTF-8 strings, doesn't support indefinite-length strings.
fn bytes(v: *const CborVal) []const u8 {
@@ -379,7 +383,7 @@ const Import = struct {
.asize => ctx.stat.size = kv.val.int(u64),
.dsize => ctx.stat.blocks = @intCast(kv.val.int(u64)/512),
.dev => ctx.stat.dev = kv.val.int(u64),
-.rderr => ctx.fields.rderr = kv.val.major == .simple and kv.val.arg == 21,
+.rderr => ctx.fields.rderr = kv.val.isTrue(),
.sub => ctx.fields.sub = kv.val.itemref(ref),
.ino => ctx.stat.ino = kv.val.int(u64),
.nlink => ctx.stat.nlink = kv.val.int(u31),
@@ -427,6 +431,61 @@ const Import = struct {
}
};
// Resolve an itemref and return a newly allocated entry.
// Dir.parent and Link.next/prev are left uninitialized.
pub fn get(ref: u64, alloc: std.mem.Allocator) *model.Entry {
const parser = readItem(ref);
var etype: ?model.EType = null;
var name: []const u8 = "";
var p = parser;
while (p.next()) |kv| {
switch (kv.key) {
.type => etype = kv.val.etype(),
.name => name = kv.val.bytes(),
else => kv.val.skip(),
}
if (etype != null and name.len != 0) break;
}
if (etype == null or name.len == 0) die();
// XXX: 'extended' should really depend on whether the info is in the file.
var entry = model.Entry.create(alloc, etype.?, main.config.extended, name);
entry.next = .{ .ref = std.math.maxInt(u64) };
if (entry.dir()) |d| d.sub = .{ .ref = std.math.maxInt(u64) };
while (p.next()) |kv| switch (kv.key) {
.prev => entry.next = .{ .ref = kv.val.itemref(ref) },
.asize => { if (entry.pack.etype != .dir) entry.size = kv.val.int(u64); },
.dsize => { if (entry.pack.etype != .dir) entry.pack.blocks = @intCast(kv.val.int(u64)/512); },
.rderr => { if (entry.dir()) |d| {
if (kv.val.isTrue()) d.pack.err = true
else d.pack.suberr = true;
} },
.dev => { if (entry.dir()) |d| d.pack.dev = model.devices.getId(kv.val.int(u64)); },
.cumasize => entry.size = kv.val.int(u64),
.cumdsize => entry.pack.blocks = @intCast(kv.val.int(u64)/512),
.shrasize => { if (entry.dir()) |d| d.shared_size = kv.val.int(u64); },
.shrdsize => { if (entry.dir()) |d| d.shared_blocks = kv.val.int(u64)/512; },
.items => { if (entry.dir()) |d| d.items = kv.val.int(u32); },
.sub => { if (entry.dir()) |d| d.sub = .{ .ref = kv.val.itemref(ref) }; },
.ino => { if (entry.link()) |l| l.ino = kv.val.int(u64); },
.nlink => { if (entry.link()) |l| l.pack.nlink = kv.val.int(u31); },
.uid => { if (entry.ext()) |e| e.uid = kv.val.int(u32); },
.gid => { if (entry.ext()) |e| e.gid = kv.val.int(u32); },
.mode => { if (entry.ext()) |e| e.mode = kv.val.int(u16); },
.mtime => { if (entry.ext()) |e| e.mtime = kv.val.int(u64); },
else => kv.val.skip(),
};
return entry;
}
pub fn getRoot() u64 {
return bigu64(global.index[global.index.len-8..][0..8].*);
}
// Walk through the directory tree in depth-first order and pass results to sink.zig.
// Depth-first is required for JSON export, but more efficient strategies are
// possible for other sinks. Parallel import is also an option, but that's more
@@ -434,7 +493,7 @@ const Import = struct {
pub fn import() void {
const sink_threads = sink.createThreads(1);
var ctx = Import{.sink = &sink_threads[0]};
-ctx.import(bigu64(global.index[global.index.len-8..][0..8].*), null, 0);
+ctx.import(getRoot(), null, 0);
sink.done();
}
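For orientation, here is a rough sketch (not part of this commit) of how the get()/getRoot() helpers added above could be combined to walk one directory level of a binary export, mirroring what browser.loadDir() does further down. It assumes open() has already been called on the export and that config.binreader is set; error handling is omitted and the debugListRoot name is purely illustrative.

    // Hypothetical helper inside this file: print the root directory's
    // direct children. Each get() allocates a fresh Entry, so free it once
    // the sibling ref has been read.
    fn debugListRoot(alloc: std.mem.Allocator) void {
        const root = get(getRoot(), alloc);
        defer root.destroy(alloc);
        var ref = if (root.dir()) |d| d.sub else return;
        while (!ref.isNull()) {
            const e = get(ref.ref, alloc);
            std.debug.print("{s}\t{d} bytes\n", .{ e.name(), e.size });
            ref = e.next; // read the next-sibling ref before freeing
            e.destroy(alloc);
        }
    }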

View file

@@ -6,6 +6,7 @@ const main = @import("main.zig");
const model = @import("model.zig");
const sink = @import("sink.zig");
const mem_sink = @import("mem_sink.zig");
const bin_reader = @import("bin_reader.zig");
const delete = @import("delete.zig");
const ui = @import("ui.zig");
const c = @cImport(@cInclude("time.h"));
@@ -13,6 +14,13 @@ const util = @import("util.zig");
// Currently opened directory.
pub var dir_parent: *model.Dir = undefined;
pub var dir_path: [:0]u8 = undefined;
var dir_parents = std.ArrayList(model.Ref).init(main.allocator);
var dir_alloc = std.heap.ArenaAllocator.init(main.allocator);
// Used to keep track of which dir is which ref, so we can enter it.
// Only used for binreader browsing.
var dir_refs = std.ArrayList(struct { ptr: *model.Dir, ref: u64 }).init(main.allocator);
// Sorted list of all items in the currently opened directory.
// (first item may be null to indicate the "parent directory" item)
@@ -33,28 +41,28 @@ const View = struct {
// The hash(name) of the selected entry (cursor), this is used to derive
// cursor_idx after sorting or changing directory.
// (collisions may cause the wrong entry to be selected, but dealing with
// string allocations sucks and I expect collisions to be rare enough)
cursor_hash: u64 = 0,
-fn hashEntry(entry: ?*model.Entry) u64 {
-return if (entry) |e| std.hash.Wyhash.hash(0, e.name()) else 0;
+fn dirHash() u64 {
+return std.hash.Wyhash.hash(0, dir_path);
}
// Update cursor_hash and save the current view to the hash table.
fn save(self: *@This()) void {
self.cursor_hash = if (dir_items.items.len == 0) 0
-else hashEntry(dir_items.items[cursor_idx]);
-opened_dir_views.put(@intFromPtr(dir_parent), self.*) catch {};
+else if (dir_items.items[cursor_idx]) |e| e.nameHash()
+else 0;
+opened_dir_views.put(dirHash(), self.*) catch {};
}
// Should be called after dir_parent or dir_items has changed, will load the last saved view and find the proper cursor_idx.
-fn load(self: *@This(), sel: ?*const model.Entry) void {
-if (opened_dir_views.get(@intFromPtr(dir_parent))) |v| self.* = v
+fn load(self: *@This(), sel: u64) void {
+if (opened_dir_views.get(dirHash())) |v| self.* = v
else self.* = @This(){};
cursor_idx = 0;
for (dir_items.items, 0..) |e, i| {
-if (if (sel != null) e == sel else self.cursor_hash == hashEntry(e)) {
+const h = if (e) |x| x.nameHash() else 0;
+if (if (sel != 0) h == sel else self.cursor_hash == h) {
cursor_idx = i;
break;
}
@@ -65,10 +73,8 @@ const View = struct {
var current_view = View{};
// Directories the user has browsed to before, and which item was last selected.
-// The key is the @intFromPtr() of the opened *Dir; An int because the pointer
-// itself may have gone stale after deletion or refreshing. They're only for
-// lookups, not dereferencing.
-var opened_dir_views = std.AutoHashMap(usize, View).init(main.allocator);
+// The key is the hash of dir_path;
+var opened_dir_views = std.AutoHashMap(u64, View).init(main.allocator);
fn sortIntLt(a: anytype, b: @TypeOf(a)) ?bool {
return if (a == b) null else if (main.config.sort_order == .asc) a < b else a > b;
@@ -114,7 +120,7 @@ fn sortLt(_: void, ap: ?*model.Entry, bp: ?*model.Entry) bool {
// - config.sort_* changes
// - dir_items changes (i.e. from loadDir())
// - files in this dir have changed in a way that affects their ordering
-fn sortDir(next_sel: ?*const model.Entry) void {
+fn sortDir(next_sel: u64) void {
// No need to sort the first item if that's the parent dir reference,
// excluding that allows sortLt() to ignore null values.
const lst = dir_items.items[(if (dir_items.items.len > 0 and dir_items.items[0] == null) @as(usize, 1) else 0)..];
@@ -126,16 +132,22 @@ fn sortDir(next_sel: ?*const model.Entry) void {
// - dir_parent changes (i.e. we change directory)
// - config.show_hidden changes
// - files in this dir have been added or removed
-pub fn loadDir(next_sel: ?*const model.Entry) void {
+pub fn loadDir(next_sel: u64) void {
_ = dir_alloc.reset(.free_all);
dir_items.shrinkRetainingCapacity(0);
dir_refs.shrinkRetainingCapacity(0);
dir_max_size = 1;
dir_max_blocks = 1;
dir_has_shared = false;
-if (dir_parent != model.root)
+if (dir_parents.items.len > 1)
dir_items.append(null) catch unreachable;
-var it = dir_parent.sub;
-while (it) |e| : (it = e.next) {
+var ref = dir_parent.sub;
+while (!ref.isNull()) {
+const e =
+if (main.config.binreader) bin_reader.get(ref.ref, dir_alloc.allocator())
+else ref.ptr.?;
if (e.pack.blocks > dir_max_blocks) dir_max_blocks = e.pack.blocks;
if (e.size > dir_max_size) dir_max_size = e.size;
const shown = main.config.show_hidden or blk: {
@@ -148,12 +160,67 @@ pub fn loadDir(next_sel: ?*const model.Entry) void {
};
if (shown) {
dir_items.append(e) catch unreachable;
-if (e.dir()) |d| if (d.shared_blocks > 0 or d.shared_size > 0) { dir_has_shared = true; };
+if (e.dir()) |d| {
+if (d.shared_blocks > 0 or d.shared_size > 0) dir_has_shared = true;
+if (main.config.binreader) dir_refs.append(.{ .ptr = d, .ref = ref.ref }) catch unreachable;
+}
}
+ref = e.next;
}
sortDir(next_sel);
}
pub fn initRoot() void {
if (main.config.binreader) {
const ref = bin_reader.getRoot();
dir_parent = bin_reader.get(ref, main.allocator).dir() orelse ui.die("Invalid import\n", .{});
dir_parents.append(.{ .ref = ref }) catch unreachable;
} else {
dir_parent = model.root;
dir_parents.append(.{ .ptr = &dir_parent.entry }) catch unreachable;
}
dir_path = main.allocator.dupeZ(u8, dir_parent.entry.name()) catch unreachable;
loadDir(0);
}
fn enterSub(e: *model.Dir) void {
if (main.config.binreader) {
const ref = blk: {
for (dir_refs.items) |r| if (r.ptr == e) break :blk r.ref;
return;
};
dir_parent.entry.destroy(main.allocator);
dir_parent = bin_reader.get(ref, main.allocator).dir() orelse unreachable;
dir_parents.append(.{ .ref = ref }) catch unreachable;
} else {
dir_parent = e;
dir_parents.append(.{ .ptr = &e.entry }) catch unreachable;
}
const newpath = std.fs.path.joinZ(main.allocator, &[_][]const u8{ dir_path, e.entry.name() }) catch unreachable;
main.allocator.free(dir_path);
dir_path = newpath;
}
fn enterParent() void {
std.debug.assert(dir_parents.items.len > 1);
_ = dir_parents.pop();
const p = dir_parents.items[dir_parents.items.len-1];
if (main.config.binreader) {
dir_parent.entry.destroy(main.allocator);
dir_parent = bin_reader.get(p.ref, main.allocator).dir() orelse unreachable;
} else
dir_parent = p.ptr.?.dir() orelse unreachable;
const newpath = main.allocator.dupeZ(u8, std.fs.path.dirname(dir_path) orelse unreachable) catch unreachable;
main.allocator.free(dir_path);
dir_path = newpath;
}
const Row = struct {
row: u32,
col: u32 = 0,
@@ -168,7 +235,7 @@ const Row = struct {
const ch: u7 = switch (item.pack.etype) {
.dir => if (item.dir().?.pack.err) '!'
else if (item.dir().?.pack.suberr) '.'
-else if (item.dir().?.sub == null) 'e'
+else if (item.dir().?.sub.isNull()) 'e'
else return,
.link => 'H',
.pattern => '<',
@@ -561,7 +628,7 @@ const info = struct {
if (ch == 10) { // Enter - go to selected entry
const l = links.?.items[links_idx];
dir_parent = l.parent;
-loadDir(&l.entry);
+loadDir(l.entry.nameHash());
set(null, .info);
}
}
@@ -748,12 +815,7 @@ pub fn draw() void {
ui.move(1,3);
ui.addch(' ');
ui.style(.dir);
-var pathbuf = std.ArrayList(u8).init(main.allocator);
-dir_parent.fmtPath(true, &pathbuf);
-ui.addstr(ui.shorten(ui.toUtf8(util.arrayListBufZ(&pathbuf)), ui.cols -| 5));
-pathbuf.deinit();
+ui.addstr(ui.shorten(ui.toUtf8(dir_path), ui.cols -| 5));
ui.style(.default);
ui.addch(' ');
@@ -811,7 +873,7 @@ fn sortToggle(col: main.config.SortCol, default_order: main.config.SortOrder) void {
else if (main.config.sort_order == .asc) main.config.sort_order = .desc
else main.config.sort_order = .asc;
main.config.sort_col = col;
-sortDir(null);
+sortDir(0);
}
fn keyInputSelection(ch: i32, idx: *usize, len: usize, page: u32) bool {
@@ -886,22 +948,22 @@ pub fn keyInput(ch: i32) void {
'M' => if (main.config.extended) sortToggle(.mtime, .desc),
'e' => {
main.config.show_hidden = !main.config.show_hidden;
-loadDir(null);
+loadDir(0);
state = .main;
},
't' => {
main.config.sort_dirsfirst = !main.config.sort_dirsfirst;
-sortDir(null);
+sortDir(0);
},
'a' => {
main.config.show_blocks = !main.config.show_blocks;
if (main.config.show_blocks and main.config.sort_col == .size) {
main.config.sort_col = .blocks;
-sortDir(null);
+sortDir(0);
}
if (!main.config.show_blocks and main.config.sort_col == .blocks) {
main.config.sort_col = .size;
-sortDir(null);
+sortDir(0);
}
},
@@ -910,21 +972,22 @@ pub fn keyInput(ch: i32) void {
if (dir_items.items.len == 0) {
} else if (dir_items.items[cursor_idx]) |e| {
if (e.dir()) |d| {
-dir_parent = d;
-loadDir(null);
+enterSub(d);
+//dir_parent = d;
+loadDir(0);
state = .main;
}
-} else if (dir_parent.parent) |p| {
-dir_parent = p;
-loadDir(null);
+} else if (dir_parents.items.len > 1) {
+enterParent();
+loadDir(0);
state = .main;
}
},
'h', '<', ui.c.KEY_BACKSPACE, ui.c.KEY_LEFT => {
-if (dir_parent.parent) |p| {
-const e = dir_parent;
-dir_parent = p;
-loadDir(&e.entry);
+if (dir_parents.items.len > 1) {
+//const h = dir_parent.entry.nameHash();
+enterParent();
+loadDir(0);
state = .main;
}
},

View file

@@ -46,7 +46,7 @@ fn deleteItem(dir: std.fs.Dir, path: [:0]const u8, ptr: *align(1) ?*model.Entry)
if (entry.dir()) |d| {
var fd = dir.openDirZ(path, .{ .no_follow = true, .iterate = false }) catch |e| return err(e);
-var it = &d.sub;
+var it = &d.sub.ptr;
parent = d;
defer parent = parent.parent.?;
while (it.*) |n| {
@@ -55,15 +55,15 @@ fn deleteItem(dir: std.fs.Dir, path: [:0]const u8, ptr: *align(1) ?*model.Entry)
return true;
}
if (it.* == n) // item deletion failed, make sure to still advance to next
-it = &n.next;
+it = &n.next.ptr;
}
fd.close();
dir.deleteDirZ(path) catch |e|
-return if (e != error.DirNotEmpty or d.sub == null) err(e) else false;
+return if (e != error.DirNotEmpty or d.sub.ptr == null) err(e) else false;
} else
dir.deleteFileZ(path) catch |e| return err(e);
ptr.*.?.zeroStats(parent);
-ptr.* = ptr.*.?.next;
+ptr.* = ptr.*.?.next.ptr;
return false;
}
@@ -76,8 +76,8 @@ pub fn delete() ?*model.Entry {
// Find the pointer to this entry
const e = entry;
-var it = &parent.sub;
-while (it.*) |n| : (it = &n.next)
+var it = &parent.sub.ptr;
+while (it.*) |n| : (it = &n.next.ptr)
if (it.* == entry)
break;

View file

@@ -95,6 +95,7 @@ pub const config = struct {
pub var sort_natural: bool = true;
pub var imported: bool = false;
pub var binreader: bool = false;
pub var can_delete: ?bool = null;
pub var can_shell: ?bool = null;
pub var can_refresh: ?bool = null;
@@ -373,10 +374,6 @@ fn spawnShell() void {
ui.deinit();
defer ui.init();
-var path = std.ArrayList(u8).init(allocator);
-defer path.deinit();
-browser.dir_parent.fmtPath(true, &path);
var env = std.process.getEnvMap(allocator) catch unreachable;
defer env.deinit();
// NCDU_LEVEL can only count to 9, keeps the implementation simple.
@@ -391,7 +388,7 @@ fn spawnShell() void {
const shell = std.posix.getenvZ("NCDU_SHELL") orelse std.posix.getenvZ("SHELL") orelse "/bin/sh";
var child = std.process.Child.init(&.{shell}, allocator);
-child.cwd = path.items;
+child.cwd = browser.dir_path;
child.env_map = &env;
const stdin = std.io.getStdIn();
@@ -451,16 +448,18 @@ fn readImport(path: [:0]const u8) !void {
const fd =
if (std.mem.eql(u8, "-", path)) std.io.getStdIn()
else try std.fs.cwd().openFileZ(path, .{});
-defer fd.close();
+errdefer fd.close();
// TODO: While we're at it, recognize and handle compressed JSON
var buf: [8]u8 = undefined;
try fd.reader().readNoEof(&buf);
if (std.mem.eql(u8, &buf, bin_export.SIGNATURE)) {
try bin_reader.open(fd);
-bin_reader.import();
+config.binreader = true;
-} else
+} else {
json_import.import(fd, &buf);
+fd.close();
+}
}
pub fn main() void {
@@ -571,6 +570,8 @@ pub fn main() void {
if (import_file) |f| {
readImport(f) catch |e| ui.die("Error reading file '{s}': {s}.\n", .{f, ui.errorString(e)});
config.imported = true;
if (config.binreader and export_json != null or export_bin != null)
bin_reader.import();
} else {
var buf = [_]u8{0} ** (std.fs.MAX_PATH_BYTES+1);
const path =
@@ -587,8 +588,7 @@ pub fn main() void {
config.scan_ui = .full; // in case we're refreshing from the UI, always in full mode.
ui.init();
state = .browse;
-browser.dir_parent = model.root;
-browser.loadDir(null);
+browser.initRoot();
while (true) {
switch (state) {
@@ -602,7 +602,7 @@ pub fn main() void {
while (state == .refresh) handleEvent(true, true);
};
state = .browse;
-browser.loadDir(null);
+browser.loadDir(0);
},
.shell => {
spawnShell();
@@ -611,7 +611,7 @@ pub fn main() void {
.delete => {
const next = delete.delete();
state = .browse;
-browser.loadDir(next);
+browser.loadDir(if (next) |n| n.nameHash() else 0);
},
else => handleEvent(true, false)
}

View file

@@ -62,12 +62,12 @@ pub const Dir = struct {
};
var count: Map.Size = 0;
-var it = dir.sub;
-while (it) |e| : (it = e.next) count += 1;
+var it = dir.sub.ptr;
+while (it) |e| : (it = e.next.ptr) count += 1;
self.entries.ensureUnusedCapacity(count) catch unreachable;
-it = dir.sub;
-while (it) |e| : (it = e.next)
+it = dir.sub.ptr;
+while (it) |e| : (it = e.next.ptr)
self.entries.putAssumeCapacity(e, {});
return self;
}
@@ -83,8 +83,8 @@ pub const Dir = struct {
}
}
const e = model.Entry.create(t.arena.allocator(), etype, isext, name);
-e.next = self.dir.sub;
-self.dir.sub = e;
+e.next.ptr = self.dir.sub.ptr;
+self.dir.sub.ptr = e;
return e;
}
@@ -136,10 +136,10 @@ pub const Dir = struct {
pub fn final(self: *Dir, parent: ?*Dir) void {
// Remove entries we've not seen
if (self.entries.count() > 0) {
-var it = &self.dir.sub;
+var it = &self.dir.sub.ptr;
while (it.*) |e| {
-if (self.entries.getKey(e) == e) it.* = e.next
-else it = &e.next;
+if (self.entries.getKey(e) == e) it.* = e.next.ptr
+else it = &e.next.ptr;
}
}
self.entries.deinit();

View file

@@ -42,8 +42,8 @@ fn rec(ctx: *Ctx, dir: *sink.Dir, entry: *model.Entry) void {
var ndir = dir.addDir(ctx.sink, entry.name(), &ctx.stat);
ctx.sink.setDir(ndir);
if (d.pack.err) ndir.setReadError(ctx.sink);
-var it = d.sub;
-while (it) |e| : (it = e.next) rec(ctx, ndir, e);
+var it = d.sub.ptr;
+while (it) |e| : (it = e.next.ptr) rec(ctx, ndir, e);
ctx.sink.setDir(dir);
ndir.unref(ctx.sink);
},
@@ -65,8 +65,8 @@ pub fn run(d: *model.Dir) void {
const root = sink.createRoot(buf.items, &ctx.stat);
buf.deinit();
-var it = d.sub;
-while (it) |e| : (it = e.next) rec(&ctx, root, e);
+var it = d.sub.ptr;
+while (it) |e| : (it = e.next.ptr) rec(&ctx, root, e);
root.unref(ctx.sink);
sink.done();

View file

@@ -37,6 +37,20 @@ pub const EType = enum(i3) {
// Type for the Entry.Packed.blocks field. Smaller than a u64 to make room for flags.
pub const Blocks = u60;
// Entries read from bin_reader may refer to other entries by itemref rather than pointer.
// This is a hack that allows browser.zig to use the same types for in-memory
// and bin_reader-backed directory trees. Most code can only deal with
// in-memory trees and accesses the .ptr field directly.
pub const Ref = extern union {
ptr: ?*Entry align(1),
ref: u64 align(1),
pub fn isNull(r: Ref) bool {
if (main.config.binreader) return r.ref == std.math.maxInt(u64)
else return r.ptr == null;
}
};
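As a hedged illustration (not in the commit) of how the two end-of-list sentinels line up, a small test along the lines of the existing test at the bottom of this file could look like this; it only exercises isNull() under both interpretations:

    test "Ref end-of-list sentinels" {
        // In-memory tree: a null pointer terminates a sibling list.
        main.config.binreader = false;
        var r: Ref = .{ .ptr = null };
        try std.testing.expect(r.isNull());
        // Binary export: entries are referenced by itemref; maxInt(u64) terminates.
        main.config.binreader = true;
        r = .{ .ref = std.math.maxInt(u64) };
        try std.testing.expect(r.isNull());
    }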
// Memory layout:
// (Ext +) Dir + name
// or: (Ext +) Link + name
@@ -51,7 +65,7 @@ pub const Blocks = u60;
pub const Entry = extern struct {
pack: Packed align(1),
size: u64 align(1) = 0,
-next: ?*Entry align(1) = null,
+next: Ref = .{ .ptr = null },
pub const Packed = packed struct(u64) {
etype: EType,
@@ -83,6 +97,10 @@ pub const Entry = extern struct {
return std.mem.sliceTo(name_ptr, 0);
}
pub fn nameHash(self: *const Self) u64 {
return std.hash.Wyhash.hash(0, self.name());
}
pub fn ext(self: *Self) ?*Ext {
if (!self.pack.isext) return null;
return @ptrCast(@as([*]Ext, @ptrCast(self)) - 1);
@@ -115,6 +133,17 @@ pub const Entry = extern struct {
};
}
pub fn destroy(self: *Self, allocator: std.mem.Allocator) void {
const ptr: [*]u8 = if (self.ext()) |e| @ptrCast(e) else @ptrCast(self);
const esize: usize = switch (self.pack.etype) {
.dir => @sizeOf(Dir),
.link => @sizeOf(Link),
else => @sizeOf(File),
};
const size = (if (self.pack.isext) @as(usize, @sizeOf(Ext)) else 0) + esize + self.name().len + 1;
allocator.free(ptr[0..size]);
}
fn hasErr(self: *Self) bool {
return
if(self.dir()) |d| d.pack.err or d.pack.suberr
@@ -123,8 +152,8 @@ pub const Entry = extern struct {
fn removeLinks(self: *Entry) void {
if (self.dir()) |d| {
-var it = d.sub;
-while (it) |e| : (it = e.next) e.removeLinks();
+var it = d.sub.ptr;
+while (it) |e| : (it = e.next.ptr) e.removeLinks();
}
if (self.link()) |l| l.removeLink();
}
@@ -136,8 +165,8 @@ pub const Entry = extern struct {
d.items = 0;
d.pack.err = false;
d.pack.suberr = false;
-var it = d.sub;
-while (it) |e| : (it = e.next) e.zeroStatsRec();
+var it = d.sub.ptr;
+while (it) |e| : (it = e.next.ptr) e.zeroStatsRec();
}
}
@@ -163,7 +192,7 @@ const DevId = u30; // Can be reduced to make room for more flags in Dir.Packed.
pub const Dir = extern struct {
entry: Entry,
-sub: ?*Entry align(1) = null,
+sub: Ref = .{ .ptr = null },
parent: ?*Dir align(1) = null,
// entry.{blocks,size}: Total size of all unique files + dirs. Non-shared hardlinks are counted only once.
@@ -210,8 +239,8 @@ pub const Dir = extern struct {
// been updated and does not propagate to parents.
pub fn updateSubErr(self: *@This()) void {
self.pack.suberr = false;
-var sub = self.sub;
-while (sub) |e| : (sub = e.next) {
+var sub = self.sub.ptr;
+while (sub) |e| : (sub = e.next.ptr) {
if (e.hasErr()) {
self.pack.suberr = true;
break;
@@ -460,9 +489,8 @@ pub var root: *Dir = undefined;
test "entry" {
-var arena = std.heap.ArenaAllocator.init(std.testing.allocator);
-defer arena.deinit();
-var e = Entry.create(arena.allocator(), .reg, false, "hello");
+var e = Entry.create(std.testing.allocator, .reg, false, "hello");
+defer e.destroy(std.testing.allocator);
try std.testing.expectEqual(e.pack.etype, .reg);
try std.testing.expect(!e.pack.isext);
try std.testing.expectEqualStrings(e.name(), "hello");