From cc12c90dbc3bc7066df0b2a25103f61e4be70749 Mon Sep 17 00:00:00 2001
From: Yorhel
Date: Sun, 14 Jul 2024 16:21:59 +0200
Subject: [PATCH] Re-add scan progress UI + directory refreshing

---
 src/browser.zig |   4 +-
 src/delete.zig  |   2 +-
 src/main.zig    |   9 +-
 src/model.zig   | 142 ++++++---------------------
 src/scan.zig    |   9 +-
 src/sink.zig    | 252 +++++++++++++++++++++++++++++++++++++++++++++---
 6 files changed, 284 insertions(+), 134 deletions(-)

diff --git a/src/browser.zig b/src/browser.zig
index e48c916..caef8f4 100644
--- a/src/browser.zig
+++ b/src/browser.zig
@@ -4,7 +4,7 @@
 const std = @import("std");
 const main = @import("main.zig");
 const model = @import("model.zig");
-const scan = @import("scan.zig");
+const sink = @import("sink.zig");
 const delete = @import("delete.zig");
 const ui = @import("ui.zig");
 const c = @cImport(@cInclude("time.h"));
@@ -848,7 +848,7 @@ pub fn keyInput(ch: i32) void {
                 message = "Directory refresh feature disabled."
             else {
                 main.state = .refresh;
-                //scan.setupRefresh(dir_parent);
+                sink.state.out = sink.state.Out{ .mem = dir_parent };
             }
         },
         'b' => {
diff --git a/src/delete.zig b/src/delete.zig
index ae262a8..b2653e0 100644
--- a/src/delete.zig
+++ b/src/delete.zig
@@ -62,7 +62,7 @@ fn deleteItem(dir: std.fs.Dir, path: [:0]const u8, ptr: *align(1) ?*model.Entry)
             return if (e != error.DirNotEmpty or d.sub == null) err(e) else false;
     } else dir.deleteFileZ(path) catch |e| return err(e);
-    ptr.*.?.delStats(parent);
+    ptr.*.?.zeroStats(parent);
     ptr.* = ptr.*.?.next;
     return false;
 }
diff --git a/src/main.zig b/src/main.zig
index a592b7d..41fce0a 100644
--- a/src/main.zig
+++ b/src/main.zig
@@ -542,7 +542,14 @@ pub fn main() void {
     while (true) {
         switch (state) {
             .refresh => {
-                //scan.scan();
+                var full_path = std.ArrayList(u8).init(allocator);
+                defer full_path.deinit();
+                sink.state.out.mem.?.fmtPath(true, &full_path);
+                scan.scan(util.arrayListBufZ(&full_path)) catch {
+                    sink.state.last_error = allocator.dupeZ(u8, full_path.items) catch unreachable;
+                    sink.state.status = .err;
+                    while (state == .refresh) handleEvent(true, true);
+                };
                 state = .browse;
                 browser.loadDir(null);
             },
diff --git a/src/model.zig b/src/model.zig
index 791e439..2e70db7 100644
--- a/src/model.zig
+++ b/src/model.zig
@@ -9,7 +9,7 @@ const util = @import("util.zig");
 pub const EType = enum(u2) { dir, link, file };
 
 // Type for the Entry.Packed.blocks field. Smaller than a u64 to make room for flags.
-pub const Blocks = u60;
+pub const Blocks = u61;
 
 // Memory layout:
 //      (Ext +) Dir + name
@@ -30,12 +30,6 @@ pub const Entry = extern struct {
     pub const Packed = packed struct(u64) {
         etype: EType,
         isext: bool,
-        // Whether or not this entry's size has been counted in its parents.
-        // Counting of Link entries is deferred until the scan/delete operation has
-        // completed, so for those entries this flag indicates an intention to be
-        // counted.
-        // TODO: Think we can remove this
-        counted: bool = false,
         blocks: Blocks = 0, // 512-byte blocks
     };
 
@@ -108,100 +102,33 @@ pub const Entry = extern struct {
             else false;
     }
 
-    pub fn addStats(self: *Entry, parent: *Dir, nlink: u31) void {
-        if (self.pack.counted) return;
-        self.pack.counted = true;
-
-        // Add link to the inode map, but don't count its size (yet).
-        if (self.link()) |l| {
-            l.parent = parent;
-            var d = inodes.map.getOrPut(l) catch unreachable;
-            if (!d.found_existing) {
-                d.value_ptr.* = .{ .counted = false, .nlink = nlink };
-                inodes.total_blocks +|= self.pack.blocks;
-                l.next = l;
-            } else {
-                inodes.setStats(.{ .key_ptr = d.key_ptr, .value_ptr = d.value_ptr }, false);
-                // If the nlink counts are not consistent, reset to 0 so we calculate with what we have instead.
-                if (d.value_ptr.nlink != nlink)
-                    d.value_ptr.nlink = 0;
-                l.next = d.key_ptr.*.next;
-                d.key_ptr.*.next = l;
-            }
-            inodes.addUncounted(l);
-        }
-
-        var it: ?*Dir = parent;
-        while(it) |p| : (it = p.parent) {
-            if (self.ext()) |e|
-                if (p.entry.ext()) |pe|
-                    if (e.mtime > pe.mtime) { pe.mtime = e.mtime; };
-            p.items +|= 1;
-            if (self.pack.etype != .link) {
-                p.entry.size +|= self.size;
-                p.entry.pack.blocks +|= self.pack.blocks;
-            }
-        }
-    }
-
-    // Opposite of addStats(), but has some limitations:
-    // - If addStats() saturated adding sizes, then the sizes after delStats()
-    //   will be incorrect.
-    // - mtime of parents is not adjusted (but that's a feature, possibly?)
-    //
-    // This function assumes that, for directories, all sub-entries have
-    // already been un-counted.
-    //
-    // When removing a Link, the entry's nlink counter is reset to zero, so
-    // that it will be recalculated based on our view of the tree. This means
-    // that links outside of the scanned directory will not be considered
-    // anymore, meaning that delStats() followed by addStats() with the same
-    // data may cause information to be lost.
-    pub fn delStats(self: *Entry, parent: *Dir) void {
-        if (!self.pack.counted) return;
-        defer self.pack.counted = false; // defer, to make sure inodes.setStats() still sees it as counted.
-
-        if (self.link()) |l| {
-            var d = inodes.map.getEntry(l).?;
-            inodes.setStats(d, false);
-            d.value_ptr.nlink = 0;
-            if (l.next == l) {
-                _ = inodes.map.remove(l);
-                _ = inodes.uncounted.remove(l);
-                inodes.total_blocks -|= self.pack.blocks;
-            } else {
-                if (d.key_ptr.* == l)
-                    d.key_ptr.* = l.next;
-                inodes.addUncounted(l.next);
-                // This is O(n), which in this context has the potential to
-                // slow ncdu down to a crawl. But this function is only called
-                // on refresh/delete operations and even then it's not common
-                // to have very long lists, so this blowing up should be very
-                // rare. This removal can also be deferred to setStats() to
-                // amortize the costs, if necessary.
-                var it = l.next;
-                while (it.next != l) it = it.next;
-                it.next = l.next;
-            }
-        }
-
-        var it: ?*Dir = parent;
-        while(it) |p| : (it = p.parent) {
-            p.items -|= 1;
-            if (self.pack.etype != .link) {
-                p.entry.size -|= self.size;
-                p.entry.pack.blocks -|= self.pack.blocks;
-            }
-        }
-    }
-
-    pub fn delStatsRec(self: *Entry, parent: *Dir) void {
+    fn zeroStatsRec(self: *Entry) void {
+        self.pack.blocks = 0;
+        self.size = 0;
+        if (self.file()) |f| f.pack = .{};
         if (self.dir()) |d| {
+            d.items = 0;
+            d.pack.err = false;
+            d.pack.suberr = false;
             var it = d.sub;
-            while (it) |e| : (it = e.next)
-                e.delStatsRec(d);
+            while (it) |e| : (it = e.next) zeroStatsRec(e);
         }
-        self.delStats(parent);
+    }
+
+    // Recursively sets the stats of this entry and its sub-items to zero and
+    // removes the counts from parent directories, as if this item does not
+    // exist in the tree.
+    // XXX: Does not update the 'suberr' flag of parent directories, make sure
+    // to call updateSubErr() afterwards.
+    pub fn zeroStats(self: *Entry, parent: ?*Dir) void {
+        // TODO: Uncount nested links.
+
+        var it = parent;
+        while (it) |p| : (it = p.parent) {
+            p.entry.pack.blocks -|= self.pack.blocks;
+            p.entry.size -|= self.size;
+            p.items -|= 1 + (if (self.dir()) |d| d.items else 0);
+        }
+        self.zeroStatsRec();
     }
 };
@@ -341,11 +268,6 @@ pub const inodes = struct {
     const Map = std.HashMap(*Link, Inode, HashContext, 80);
     pub var map = Map.init(main.allocator);
 
-    // Cumulative size of all unique hard links in the map. This is a somewhat
-    // ugly workaround to provide accurate sizes during the initial scan, when
-    // the hard links are not counted as part of the parent directories yet.
-    pub var total_blocks: Blocks = 0;
-
     // List of nodes in 'map' with !counted, to speed up addAllStats().
     // If this list grows large relative to the number of nodes in 'map', then
     // this list is cleared and uncounted_full is set instead, so that
@@ -399,14 +321,12 @@ pub const inodes = struct {
         defer dirs.deinit();
         var it = entry.key_ptr.*;
         while (true) {
-            if (it.entry.pack.counted) {
-                nlink += 1;
-                var parent: ?*Dir = it.parent;
-                while (parent) |p| : (parent = p.parent) {
-                    const de = dirs.getOrPut(p) catch unreachable;
-                    if (de.found_existing) de.value_ptr.* += 1
-                    else de.value_ptr.* = 1;
-                }
+            nlink += 1;
+            var parent: ?*Dir = it.parent;
+            while (parent) |p| : (parent = p.parent) {
+                const de = dirs.getOrPut(p) catch unreachable;
+                if (de.found_existing) de.value_ptr.* += 1
+                else de.value_ptr.* = 1;
             }
             it = it.next;
             if (it == entry.key_ptr.*)
diff --git a/src/scan.zig b/src/scan.zig
index 286bd09..8f13b7d 100644
--- a/src/scan.zig
+++ b/src/scan.zig
@@ -282,13 +282,12 @@ const Thread = struct {
 
 
 pub fn scan(path: [:0]const u8) !void {
+    const sink_threads = sink.createThreads(main.config.threads);
+    defer sink.done();
+
     const stat = try statAt(std.fs.cwd(), path, true);
     const fd = try std.fs.cwd().openDirZ(path, .{ .iterate = true });
 
-    sink.state.threads = main.allocator.alloc(sink.Thread, main.config.threads) catch unreachable;
-    for (sink.state.threads) |*t| t.* = .{};
-    defer main.allocator.free(sink.state.threads);
-
     var state = State{
         .threads = main.allocator.alloc(Thread, main.config.threads) catch unreachable,
     };
@@ -298,7 +297,7 @@ pub fn scan(path: [:0]const u8) !void {
     const dir = Dir.create(fd, stat.dev, exclude.getPatterns(path), root);
     _ = state.tryPush(dir);
 
-    for (sink.state.threads, state.threads, 0..) |*s, *t, n|
+    for (sink_threads, state.threads, 0..) |*s, *t, n|
         t.* = .{ .sink = s, .state = &state, .thread_num = n };
 
     // XXX: Continue with fewer threads on error?
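
The scan.zig hunk above also shows the new calling convention between a scan source and the sink: the source first claims its per-thread sink slots with sink.createThreads(), hands one slot to each of its own worker threads, and relies on a deferred sink.done() to publish completion and free the slots. A minimal sketch of that lifecycle (not part of the patch; doWalk() is a hypothetical stand-in for the real worker logic):

    // Sketch only: createThreads() and done() are the sink calls added by
    // this patch; doWalk() is hypothetical.
    fn runSource(path: [:0]const u8) !void {
        // Resets sink.state.status/.last_error and allocates the slots.
        const sink_threads = sink.createThreads(main.config.threads);
        // Marks the scan as done and frees the slots, even on error.
        defer sink.done();
        for (sink_threads) |*slot| try doWalk(slot, path);
    }

Because createThreads() clears last_error and resets the status to .running, a refresh that follows a failed scan starts from a clean slate.
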
diff --git a/src/sink.zig b/src/sink.zig
index bae419f..cfa5775 100644
--- a/src/sink.zig
+++ b/src/sink.zig
@@ -220,6 +220,14 @@ pub const Dir = struct {
         switch (d.out) {
             .mem => |*m| m.addSpecial(t.arena.allocator(), name, sp),
         }
+        if (sp == .err) {
+            state.last_error_lock.lock();
+            defer state.last_error_lock.unlock();
+            if (state.last_error) |p| main.allocator.free(p);
+            const p = d.path();
+            state.last_error = std.fs.path.joinZ(main.allocator, &.{ p, name }) catch unreachable;
+            main.allocator.free(p);
+        }
     }
 
     pub fn addStat(d: *Dir, t: *Thread, name: []const u8, stat: *const Stat) void {
@@ -255,7 +263,7 @@ pub const Dir = struct {
         }
     }
 
-    fn path(d: *Dir) []const u8 {
+    fn path(d: *Dir) [:0]const u8 {
         var components = std.ArrayList([]const u8).init(main.allocator);
         defer components.deinit();
         var it: ?*Dir = d;
@@ -269,7 +277,7 @@ pub const Dir = struct {
             if (i == 0) break;
             i -= 1;
         }
-        return out.toOwnedSlice() catch unreachable;
+        return out.toOwnedSliceSentinel(0) catch unreachable;
     }
 
     fn ref(d: *Dir) void {
@@ -308,47 +316,263 @@ pub const Thread = struct {
 
 
 pub const state = struct {
+    pub var status: enum { done, err, zeroing, hlcnt, running } = .running;
     pub var threads: []Thread = undefined;
+    pub var out: Out = .{ .mem = null };
+
+    pub var last_error: ?[:0]u8 = null;
+    var last_error_lock = std.Thread.Mutex{};
+    var need_confirm_quit = false;
+
+    pub const Out = union(enum) {
+        mem: ?*model.Dir,
+    };
 };
 
 
+// Must be the first thing to call from a source; initializes global state.
+pub fn createThreads(num: usize) []Thread {
+    state.status = .running;
+    if (state.last_error) |p| main.allocator.free(p);
+    state.last_error = null;
+    state.threads = main.allocator.alloc(Thread, num) catch unreachable;
+    for (state.threads) |*t| t.* = .{};
+    return state.threads;
+}
+
+
+// Must be the last thing to call from a source.
+pub fn done() void {
+    // TODO: Do hardlink stuff.
+    state.status = .done;
+    main.allocator.free(state.threads);
+    // Clear the screen when done.
+    if (main.config.scan_ui == .line) main.handleEvent(false, true);
+}
+
+
 pub fn createRoot(path: []const u8, stat: *const Stat) *Dir {
-    // TODO: Handle other outputs
-    model.root = model.Entry.create(main.allocator, .dir, main.config.extended, path).dir().?;
-    model.root.entry.pack.blocks = stat.blocks;
-    model.root.entry.size = stat.size;
-    model.root.pack.dev = model.devices.getId(stat.dev);
+    const out = switch (state.out) {
+        .mem => |parent| sw: {
+            const p = parent orelse blk: {
+                model.root = model.Entry.create(main.allocator, .dir, main.config.extended, path).dir().?;
+                break :blk model.root;
+            };
+            state.status = .zeroing;
+            if (p.items > 10_000) main.handleEvent(false, true);
+            // Do the zeroStats() here, after the "root" entry has been
+            // stat'ed and opened, so that a fatal error on refresh won't
+            // zero-out the requested directory.
+            p.entry.zeroStats(p.parent);
+            state.status = .running;
+            p.entry.pack.blocks = stat.blocks;
+            p.entry.size = stat.size;
+            p.pack.dev = model.devices.getId(stat.dev);
+            break :sw .{ .mem = MemDir.init(p) };
+        },
+    };
 
     const d = main.allocator.create(Dir) catch unreachable;
     d.* = .{
         .name = main.allocator.dupe(u8, path) catch unreachable,
         .parent = null,
-        .out = .{ .mem = MemDir.init(model.root) },
+        .out = out,
     };
     return d;
 }
 
 
-pub fn draw() void {
+fn drawConsole() void {
+    const st = struct {
+        var ansi: ?bool = null;
+        var lines_written: usize = 0;
+    };
+    const stderr = std.io.getStdErr();
+    const ansi = st.ansi orelse blk: {
+        const t = stderr.supportsAnsiEscapeCodes();
+        st.ansi = t;
+        break :blk t;
+    };
+
+    var buf: [4096]u8 = undefined;
+    var strm = std.io.fixedBufferStream(buf[0..]);
+    var wr = strm.writer();
+    while (ansi and st.lines_written > 0) {
+        wr.writeAll("\x1b[1F\x1b[2K") catch {};
+        st.lines_written -= 1;
+    }
+
+    if (state.status == .running) {
+        var bytes: u64 = 0;
+        var files: u64 = 0;
+        for (state.threads) |*t| {
+            bytes +|= t.bytes_seen.load(.monotonic);
+            files += t.files_seen.load(.monotonic);
+        }
+        const r = ui.FmtSize.fmt(bytes);
+        wr.print("{} files / {s}{s}\n", .{files, r.num(), r.unit}) catch {};
+        st.lines_written += 1;
+
+        for (state.threads, 0..) |*t, i| {
+            const dir = blk: {
+                t.lock.lock();
+                defer t.lock.unlock();
+                break :blk if (t.current_dir) |d| d.path() else null;
+            };
+            wr.print(" #{}: {s}\n", .{i+1, ui.shorten(ui.toUtf8(dir orelse "(waiting)"), 73)}) catch {};
+            st.lines_written += 1;
+            if (dir) |p| main.allocator.free(p);
+        }
+    }
+
+    stderr.writeAll(strm.getWritten()) catch {};
+}
+
+
+fn drawProgress() void {
+    const st = struct { var animation_pos: usize = 0; };
+
     var bytes: u64 = 0;
     var files: u64 = 0;
     for (state.threads) |*t| {
         bytes +|= t.bytes_seen.load(.monotonic);
         files += t.files_seen.load(.monotonic);
     }
-    const r = ui.FmtSize.fmt(bytes);
-    std.debug.print("{} files / {s}{s}\n", .{files, r.num(), r.unit});
-    for (state.threads, 0..) |*t, i| {
+    ui.init();
+    const width = ui.cols -| 5;
+    const numthreads: u32 = @intCast(@min(state.threads.len, @max(1, ui.rows -| 10)));
+    const box = ui.Box.create(8 + numthreads, width, "Scanning...");
+    box.move(2, 2);
+    ui.addstr("Total items: ");
+    ui.addnum(.default, files);
+
+    if (width > 48) {
+        box.move(2, 30);
+        ui.addstr("size: ");
+        ui.addsize(.default, bytes);
+    }
+
+    for (0..numthreads) |i| {
+        box.move(3+@as(u32, @intCast(i)), 4);
         const dir = blk: {
+            const t = &state.threads[i];
             t.lock.lock();
             defer t.lock.unlock();
             break :blk if (t.current_dir) |d| d.path() else null;
         };
-        std.debug.print(" #{}: {s}\n", .{i, dir orelse "(waiting)"});
+        ui.addstr(ui.shorten(ui.toUtf8(dir orelse "(waiting)"), width -| 6));
         if (dir) |p| main.allocator.free(p);
     }
+
+    blk: {
+        state.last_error_lock.lock();
+        defer state.last_error_lock.unlock();
+        const err = state.last_error orelse break :blk;
+        box.move(4 + numthreads, 2);
+        ui.style(.bold);
+        ui.addstr("Warning: ");
+        ui.style(.default);
+        ui.addstr("error scanning ");
+        ui.addstr(ui.shorten(ui.toUtf8(err), width -| 28));
+        box.move(5 + numthreads, 3);
+        ui.addstr("some directory sizes may not be correct.");
+    }
+
+    if (state.need_confirm_quit) {
+        box.move(6 + numthreads, width -| 20);
+        ui.addstr("Press ");
+        ui.style(.key);
+        ui.addch('y');
+        ui.style(.default);
+        ui.addstr(" to confirm");
+    } else {
+        box.move(6 + numthreads, width -| 18);
+        ui.addstr("Press ");
+        ui.style(.key);
+        ui.addch('q');
+        ui.style(.default);
+        ui.addstr(" to abort");
+    }
+
+    if (main.config.update_delay < std.time.ns_per_s and width > 40) {
+        const txt = "Scanning...";
+        st.animation_pos += 1;
+        if (st.animation_pos >= txt.len*2) st.animation_pos = 0;
+        if (st.animation_pos < txt.len) {
+            box.move(6 + numthreads, 2);
+            for (txt[0..st.animation_pos + 1]) |t| ui.addch(t);
+        } else {
+            var i: u32 = txt.len-1;
+            while (i > st.animation_pos-txt.len) : (i -= 1) {
+                box.move(6 + numthreads, 2+i);
+                ui.addch(txt[i]);
+            }
+        }
+    }
 }
 
-pub fn keyInput(_: i32) void {
+
+fn drawError() void {
+    const width = ui.cols -| 5;
+    const box = ui.Box.create(6, width, "Scan error");
+
+    box.move(2, 2);
+    ui.addstr("Unable to open directory:");
+    box.move(3, 4);
+    ui.addstr(ui.shorten(ui.toUtf8(state.last_error.?), width -| 10));
+
+    box.move(4, width -| 27);
+    ui.addstr("Press any key to continue");
+}
+
+
+fn drawMessage(msg: []const u8) void {
+    const width = ui.cols -| 5;
+    const box = ui.Box.create(4, width, "Scan error");
+    box.move(2, 2);
+    ui.addstr(msg);
+}
+
+
+pub fn draw() void {
+    switch (main.config.scan_ui.?) {
+        .none => {},
+        .line => drawConsole(),
+        .full => switch (state.status) {
+            .done => {},
+            .err => drawError(),
+            .zeroing => {
+                const box = ui.Box.create(4, ui.cols -| 5, "Initializing");
+                box.move(2, 2);
+                ui.addstr("Clearing directory counts...");
+            },
+            .hlcnt => {
+                const box = ui.Box.create(4, ui.cols -| 5, "Finalizing");
+                box.move(2, 2);
+                ui.addstr("Counting hardlinks...");
+            },
+            .running => drawProgress(),
+        },
+    }
+}
+
+
+pub fn keyInput(ch: i32) void {
+    switch (state.status) {
+        .done => {},
+        .err => main.state = .browse,
+        .zeroing => {},
+        .hlcnt => {},
+        .running => {
+            switch (ch) {
+                'q' => {
+                    if (main.config.confirm_quit) state.need_confirm_quit = !state.need_confirm_quit
+                    else ui.quit();
+                },
+                'y', 'Y' => if (state.need_confirm_quit) ui.quit(),
+                else => state.need_confirm_quit = false,
+            }
+        },
+    }
 }
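
Taken together, the refresh flow this patch re-enables is: browser.zig points the sink at the directory to refresh and switches to the .refresh state, main.zig re-runs the scanner on that directory's full path, and sink.createRoot() zeroes the old counts before the MemDir output fills the existing model.Dir back in; a fatal error sets the status to .err so drawError() is shown until a key press drops back to the browser. A condensed sketch of that control flow, paraphrased from the browser.zig and main.zig hunks above (ArrayList setup and cleanup omitted):

    // browser.zig, 'r' key: pick the refresh target.
    main.state = .refresh;
    sink.state.out = sink.state.Out{ .mem = dir_parent };

    // main.zig, .refresh state: full_path is built with fmtPath() as shown above.
    scan.scan(util.arrayListBufZ(&full_path)) catch {
        sink.state.last_error = allocator.dupeZ(u8, full_path.items) catch unreachable;
        sink.state.status = .err; // draw() switches to drawError()
        while (state == .refresh) handleEvent(true, true);
    };
    state = .browse;
    browser.loadDir(null);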