mirror of
https://code.blicky.net/yorhel/ncdu.git
synced 2026-01-12 17:08:39 -09:00
Rewrite scan/import code, experiment with multithreaded scanning (again)
Benchmarks are looking very promising this time. This commit breaks a lot, though:

- Hard link counting
- Refreshing
- JSON import
- JSON export
- Progress UI
- OOM handling (not thread-safe)

All of these need to be reimplemented and fixed. I also haven't tested this code very well yet, so there are likely to be bugs.

There's also a behavioral change: --exclude-kernfs is no longer checked on the given root directory, meaning the filesystem the user asked to scan is scanned even if it is a 'kernfs'. I suspect that's the more sensible behavior.

The old scan.zig was quite messy and hard for me to reason about and extend; the new sink API is looking to be less confusing. I hope it stays that way as more features are added.
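For a sense of what the new sink API looks like in use, here is a minimal sketch of a source driving it, based on the call-sequence comment and the signatures in src/sink.zig below; the path, names, and stat values are invented, and this snippet is not part of the commit:

    const sink = @import("sink.zig");

    fn exampleSource() void {
        // Per-scanner-thread state: progress counters plus an arena for entries.
        var t = sink.Thread{};

        const root = sink.createRoot("/some/dir", &.{ .dir = true });
        root.addStat(&t, "file.txt", &.{ .size = 42, .blocks = 1 });

        const sub = root.addDir(&t, "subdir", &.{ .dir = true });
        root.unref(); // no more direct children; 'sub' may still be active

        sub.addSpecial(&t, "unreadable", .err);
        sub.unref(); // last reference: counts now propagate up into the root
    }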
parent c41467f240
commit f2541d42ba

6 changed files with 611 additions and 1061 deletions

src/browser.zig
@@ -848,7 +848,7 @@ pub fn keyInput(ch: i32) void {
                 message = "Directory refresh feature disabled."
             else {
                 main.state = .refresh;
-                scan.setupRefresh(dir_parent);
+                //scan.setupRefresh(dir_parent);
             }
         },
         'b' => {

src/main.zig (27 changes)
@@ -6,6 +6,7 @@ pub const program_version = "2.4";
 const std = @import("std");
 const model = @import("model.zig");
 const scan = @import("scan.zig");
+const sink = @import("sink.zig");
 const ui = @import("ui.zig");
 const browser = @import("browser.zig");
 const delete = @import("delete.zig");
@@ -16,6 +17,7 @@ const c = @cImport(@cInclude("locale.h"));
 test "imports" {
     _ = model;
     _ = scan;
+    _ = sink;
     _ = ui;
     _ = browser;
     _ = delete;
@@ -57,6 +59,7 @@ pub const config = struct {
     pub var exclude_caches: bool = false;
     pub var exclude_kernfs: bool = false;
     pub var exclude_patterns: std.ArrayList([:0]const u8) = std.ArrayList([:0]const u8).init(allocator);
+    pub var threads: usize = 1;
 
     pub var update_delay: u64 = 100*std.time.ns_per_ms;
     pub var scan_ui: ?enum { none, line, full } = null;
@@ -456,7 +459,7 @@ pub fn main() void {
         }
     }
 
-    var scan_dir: ?[]const u8 = null;
+    var scan_dir: ?[:0]const u8 = null;
     var import_file: ?[:0]const u8 = null;
     var export_file: ?[:0]const u8 = null;
     var quit_after_scan = false;
@@ -480,11 +483,14 @@ pub fn main() void {
             else if (opt.is("-f")) import_file = allocator.dupeZ(u8, args.arg()) catch unreachable
             else if (opt.is("--ignore-config")) {}
             else if (opt.is("--quit-after-scan")) quit_after_scan = true // undocumented feature to help with benchmarking scan/import
+            else if (opt.is("--experimental-threads")) config.threads = std.fmt.parseInt(u8, args.arg(), 10) catch ui.die("Invalid number of threads.\n", .{})
             else if (argConfig(&args, opt)) {}
             else ui.die("Unrecognized option '{s}'.\n", .{opt.val});
         }
     }
 
+    if (config.threads == 0) config.threads = std.Thread.getCpuCount() catch 1;
+
     if (@import("builtin").os.tag != .linux and config.exclude_kernfs)
         ui.die("The --exclude-kernfs flag is currently only supported on Linux.\n", .{});
 
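The new flag can be exercised like so (hypothetical invocations; the flag and its parsing appear in the hunk above, and a value of 0 falls back to the detected CPU count via the std.Thread.getCpuCount() line):

    ncdu --experimental-threads 4 /home    # scan with 4 threads
    ncdu --experimental-threads 0 /home    # one thread per detected CPU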
@@ -511,11 +517,16 @@ pub fn main() void {
             catch |e| ui.die("Error opening export file: {s}.\n", .{ui.errorString(e)})
         ) else null;
 
-    if (import_file) |f| {
-        scan.importRoot(f, out_file);
+    if (import_file) |_| {
+        //scan.importRoot(f, out_file);
         config.imported = true;
-    } else scan.scanRoot(scan_dir orelse ".", out_file)
-        catch |e| ui.die("Error opening directory: {s}.\n", .{ui.errorString(e)});
+    } else {
+        var buf = [_]u8{0} ** (std.fs.MAX_PATH_BYTES+1);
+        const path =
+            if (std.posix.realpathZ(scan_dir orelse ".", buf[0..buf.len-1])) |p| buf[0..p.len:0]
+            else |_| (scan_dir orelse ".");
+        scan.scan(path) catch |e| ui.die("Error opening directory: {s}.\n", .{ui.errorString(e)});
+    }
     if (quit_after_scan or out_file != null) return;
 
     config.can_shell = config.can_shell orelse !config.imported;
@@ -531,7 +542,7 @@ pub fn main() void {
     while (true) {
         switch (state) {
             .refresh => {
-                scan.scan();
+                //scan.scan();
                 state = .browse;
                 browser.loadDir(null);
             },
@@ -557,7 +568,7 @@ pub fn handleEvent(block: bool, force_draw: bool) void {
     if (block or force_draw or event_delay_timer.read() > config.update_delay) {
         if (ui.inited) _ = ui.c.erase();
         switch (state) {
-            .scan, .refresh => scan.draw(),
+            .scan, .refresh => sink.draw(),
             .browse => browser.draw(),
             .delete => delete.draw(),
             .shell => unreachable,
@@ -576,7 +587,7 @@ pub fn handleEvent(block: bool, force_draw: bool) void {
     if (ch == 0) return;
     if (ch == -1) return handleEvent(firstblock, true);
     switch (state) {
-        .scan, .refresh => scan.keyInput(ch),
+        .scan, .refresh => sink.keyInput(ch),
         .browse => browser.keyInput(ch),
        .delete => delete.keyInput(ch),
        .shell => unreachable,

src/model.zig
@@ -6,15 +6,6 @@ const main = @import("main.zig");
 const ui = @import("ui.zig");
 const util = @import("util.zig");
 
-// While an arena allocator is optimimal for almost all scenarios in which ncdu
-// is used, it doesn't allow for re-using deleted nodes after doing a delete or
-// refresh operation, so a long-running ncdu session with regular refreshes
-// will leak memory, but I'd say that's worth the efficiency gains.
-// TODO: Can still implement a simple bucketed free list on top of this arena
-// allocator to reuse nodes, if necessary.
-var allocator_state = std.heap.ArenaAllocator.init(std.heap.page_allocator);
-const allocator = allocator_state.allocator();
-
 pub const EType = enum(u2) { dir, link, file };
 
 // Type for the Entry.Packed.blocks field. Smaller than a u64 to make room for flags.
@@ -43,6 +34,7 @@ pub const Entry = extern struct {
         // Counting of Link entries is deferred until the scan/delete operation has
         // completed, so for those entries this flag indicates an intention to be
         // counted.
+        // TODO: Think we can remove this
         counted: bool = false,
         blocks: Blocks = 0, // 512-byte blocks
     };
@@ -82,7 +74,7 @@ pub const Entry = extern struct {
         return @ptrCast(@as([*]Ext, @ptrCast(self)) - 1);
     }
 
-    fn alloc(comptime T: type, etype: EType, isext: bool, ename: []const u8) *Entry {
+    fn alloc(comptime T: type, allocator: std.mem.Allocator, etype: EType, isext: bool, ename: []const u8) *Entry {
         const size = (if (isext) @as(usize, @sizeOf(Ext)) else 0) + @sizeOf(T) + ename.len + 1;
         var ptr = blk: while (true) {
             if (allocator.allocWithOptions(u8, size, 1, null)) |p| break :blk p
@@ -101,11 +93,11 @@ pub const Entry = extern struct {
         return &e.entry;
     }
 
-    pub fn create(etype: EType, isext: bool, ename: []const u8) *Entry {
+    pub fn create(allocator: std.mem.Allocator, etype: EType, isext: bool, ename: []const u8) *Entry {
         return switch (etype) {
-            .dir => alloc(Dir, etype, isext, ename),
-            .file => alloc(File, etype, isext, ename),
-            .link => alloc(Link, etype, isext, ename),
+            .dir => alloc(Dir, allocator, etype, isext, ename),
+            .file => alloc(File, allocator, etype, isext, ename),
+            .link => alloc(Link, allocator, etype, isext, ename),
         };
     }
 
@@ -321,12 +313,15 @@ pub const Ext = extern struct {
 // List of st_dev entries. Those are typically 64bits, but that's quite a waste
 // of space when a typical scan won't cover many unique devices.
 pub const devices = struct {
+    var lock = std.Thread.Mutex{};
     // id -> dev
     pub var list = std.ArrayList(u64).init(main.allocator);
     // dev -> id
     var lookup = std.AutoHashMap(u64, DevId).init(main.allocator);
 
     pub fn getId(dev: u64) DevId {
+        lock.lock();
+        defer lock.unlock();
         const d = lookup.getOrPut(dev) catch unreachable;
         if (!d.found_existing) {
             d.value_ptr.* = @as(DevId, @intCast(list.items.len));
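For context on the hunk above: the devices table interns full 64-bit st_dev values into small DevId indices, with list mapping id back to dev and lookup doing the reverse; the new mutex makes the interning safe to call from multiple scan threads. A hedged usage sketch (the device number is invented):

    const id = model.devices.getId(2049);     // intern a raw st_dev, now thread-safe
    const dev = model.devices.list.items[id]; // map the DevId back to the raw st_dev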
@@ -462,7 +457,9 @@ pub var root: *Dir = undefined;
 
 
 test "entry" {
-    var e = Entry.create(.file, false, "hello");
+    var arena = std.heap.ArenaAllocator.init(std.testing.allocator);
+    defer arena.deinit();
+    var e = Entry.create(arena.allocator(), .file, false, "hello");
     try std.testing.expectEqual(e.pack.etype, .file);
     try std.testing.expect(!e.pack.isext);
     try std.testing.expectEqualStrings(e.name(), "hello");
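The create()/alloc() signature change threads the allocator through from the caller instead of using a model-global arena; in the new sink code every scan thread owns its own arena (see Thread.arena in src/sink.zig below). A small sketch of that pattern, with hypothetical names:

    const std = @import("std");
    const model = @import("model.zig");

    fn workerExample() void {
        // One arena per worker thread: allocations don't contend across
        // threads, and entries are freed only when the arena is torn down.
        var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator);
        const e = model.Entry.create(arena.allocator(), .file, false, "example.txt");
        _ = e;
    }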

src/scan.zig (1261 changes)
(file diff suppressed because it is too large)

src/sink.zig (new file, 354 lines)
@@ -0,0 +1,354 @@
+// SPDX-FileCopyrightText: Yorhel <projects@yorhel.nl>
+// SPDX-License-Identifier: MIT
+
+const std = @import("std");
+const main = @import("main.zig");
+const model = @import("model.zig");
+const ui = @import("ui.zig");
+const util = @import("util.zig");
+
+// "sink" in this case is where the scan/import results (from scan.zig and
+// json_import.zig) are being forwarded to and processed. This code handles
+// aggregating the tree structure into memory or exporting it as JSON. Also
+// handles progress display.
+
+// API for sources:
+//
+// Single-threaded:
+//
+//   dir = createRoot(name, stat)
+//   dir.addSpecial(name, opt)
+//   dir.addFile(name, stat)
+//   sub = dir.addDir(name, stat)
+//     (no dir.stuff here)
+//     sub.addstuff();
+//     sub.unref();
+//   dir.unref();
+//
+// Multi-threaded interleaving:
+//
+//   dir = createRoot(name, stat)
+//   dir.addSpecial(name, opt)
+//   dir.addFile(name, stat)
+//   sub = dir.addDir(...)
+//     sub.addstuff();
+//   sub2 = dir.addDir(..);
+//     sub.unref();
+//   dir.unref(); // <- no more direct descendants for x, but subdirs could still be active
+//     sub2.addStuff();
+//     sub2.unref(); // <- this is where 'dir' is really done.
+//
+// Rule:
+//   No concurrent method calls on a single Dir object, but objects may be passed between threads.
+
+
+// Concise stat struct for fields we're interested in, with the types used by the model.
+pub const Stat = struct {
+    blocks: model.Blocks = 0,
+    size: u64 = 0,
+    dev: u64 = 0,
+    ino: u64 = 0,
+    nlink: u31 = 0,
+    hlinkc: bool = false,
+    dir: bool = false,
+    reg: bool = true,
+    symlink: bool = false,
+    ext: model.Ext = .{},
+};
+
+
+pub const Special = enum { err, other_fs, kernfs, excluded };
+
+
+const MemDir = struct {
+    dir: *model.Dir,
+    entries: Map,
+
+    own_blocks: model.Blocks,
+    own_bytes: u64,
+
+    // Additional counts collected from subdirectories. Subdirs may run final()
+    // from separate threads so these need to be protected.
+    blocks: model.Blocks = 0,
+    bytes: u64 = 0,
+    items: u32 = 0,
+    mtime: u64 = 0,
+    suberr: bool = false,
+    lock: std.Thread.Mutex = .{},
+
+    const Map = std.HashMap(*model.Entry, void, HashContext, 80);
+
+    const HashContext = struct {
+        pub fn hash(_: @This(), e: *model.Entry) u64 {
+            return std.hash.Wyhash.hash(0, e.name());
+        }
+        pub fn eql(_: @This(), a: *model.Entry, b: *model.Entry) bool {
+            return a == b or std.mem.eql(u8, a.name(), b.name());
+        }
+    };
+
+    const HashContextAdapted = struct {
+        pub fn hash(_: @This(), v: []const u8) u64 {
+            return std.hash.Wyhash.hash(0, v);
+        }
+        pub fn eql(_: @This(), a: []const u8, b: *model.Entry) bool {
+            return std.mem.eql(u8, a, b.name());
+        }
+    };
+
+    fn init(dir: *model.Dir) MemDir {
+        var self = MemDir{
+            .dir = dir,
+            .entries = Map.initContext(main.allocator, HashContext{}),
+            .own_blocks = dir.entry.pack.blocks,
+            .own_bytes = dir.entry.size,
+        };
+
+        var count: Map.Size = 0;
+        var it = dir.sub;
+        while (it) |e| : (it = e.next) count += 1;
+        self.entries.ensureUnusedCapacity(count) catch unreachable;
+
+        it = dir.sub;
+        while (it) |e| : (it = e.next)
+            self.entries.putAssumeCapacity(e, {});
+        return self;
+    }
+
+    fn getEntry(self: *MemDir, alloc: std.mem.Allocator, etype: model.EType, isext: bool, name: []const u8) *model.Entry {
+        if (self.entries.getKeyAdapted(name, HashContextAdapted{})) |e| {
+            // XXX: In-place conversion may be possible in some cases.
+            if (e.pack.etype == etype and (!isext or e.pack.isext)) {
+                e.pack.isext = isext;
+                _ = self.entries.removeAdapted(name, HashContextAdapted{});
+                return e;
+            }
+        }
+        const e = model.Entry.create(alloc, etype, isext, name);
+        e.next = self.dir.sub;
+        self.dir.sub = e;
+        return e;
+    }
+
+    fn addSpecial(self: *MemDir, alloc: std.mem.Allocator, name: []const u8, t: Special) void {
+        self.dir.items += 1;
+
+        const e = self.getEntry(alloc, .file, false, name);
+        e.file().?.pack = switch (t) {
+            .err => .{ .err = true },
+            .other_fs => .{ .other_fs = true },
+            .kernfs => .{ .kernfs = true },
+            .excluded => .{ .excluded = true },
+        };
+    }
+
+    fn addStat(self: *MemDir, alloc: std.mem.Allocator, name: []const u8, stat: *const Stat) *model.Entry {
+        self.dir.items +|= 1;
+        if (!stat.hlinkc) {
+            self.dir.entry.pack.blocks +|= stat.blocks;
+            self.dir.entry.size +|= stat.size;
+        }
+
+        const etype = if (stat.dir) model.EType.dir
+            else if (stat.hlinkc) model.EType.link
+            else model.EType.file;
+        const e = self.getEntry(alloc, etype, main.config.extended, name);
+        e.pack.blocks = stat.blocks;
+        e.size = stat.size;
+        if (e.dir()) |d| {
+            d.parent = self.dir;
+            d.pack.dev = model.devices.getId(stat.dev);
+        }
+        if (e.file()) |f| f.pack = .{ .notreg = !stat.dir and !stat.reg };
+        if (e.link()) |l| l.ino = stat.ino; // TODO: Add to inodes table
+        if (e.ext()) |ext| ext.* = stat.ext;
+        return e;
+    }
+
+    fn setReadError(self: *MemDir) void {
+        self.dir.pack.err = true;
+    }
+
+    fn final(self: *MemDir, parent: ?*MemDir) void {
+        // Remove entries we've not seen
+        if (self.entries.count() > 0) {
+            var it = &self.dir.sub;
+            while (it.*) |e| {
+                if (self.entries.contains(e)) it.* = e.next
+                else it = &e.next;
+            }
+        }
+
+        // Grab counts collected from subdirectories
+        self.dir.entry.pack.blocks +|= self.blocks;
+        self.dir.entry.size +|= self.bytes;
+        self.dir.items +|= self.items;
+        if (self.suberr) self.dir.pack.suberr = true;
+        if (self.dir.entry.ext()) |e| {
+            if (self.mtime > e.mtime) self.mtime = e.mtime;
+        }
+
+        // Add own counts to parent
+        if (parent) |p| {
+            p.lock.lock();
+            defer p.lock.unlock();
+            p.blocks +|= self.dir.entry.pack.blocks - self.own_blocks;
+            p.bytes +|= self.dir.entry.size - self.own_bytes;
+            p.items +|= self.dir.items;
+            if (self.dir.entry.ext()) |e| {
+                if (e.mtime > p.mtime) e.mtime = p.mtime;
+            }
+            if (self.suberr or self.dir.pack.err) p.suberr = true;
+        }
+        self.entries.deinit();
+    }
+};
+
+
+pub const Dir = struct {
+    refcnt: std.atomic.Value(usize) = std.atomic.Value(usize).init(1),
+    // (XXX: This allocation can be avoided when scanning to a MemDir)
+    name: []const u8,
+    parent: ?*Dir,
+    out: Out,
+
+    const Out = union(enum) {
+        mem: MemDir,
+    };
+
+    pub fn addSpecial(d: *Dir, t: *Thread, name: []const u8, sp: Special) void {
+        _ = t.files_seen.fetchAdd(1, .monotonic);
+        switch (d.out) {
+            .mem => |*m| m.addSpecial(t.arena.allocator(), name, sp),
+        }
+    }
+
+    pub fn addStat(d: *Dir, t: *Thread, name: []const u8, stat: *const Stat) void {
+        _ = t.files_seen.fetchAdd(1, .monotonic);
+        _ = t.bytes_seen.fetchAdd((stat.blocks *| 512) / @max(1, stat.nlink), .monotonic);
+        switch (d.out) {
+            .mem => |*m| _ = m.addStat(t.arena.allocator(), name, stat),
+        }
+    }
+
+    pub fn addDir(d: *Dir, t: *Thread, name: []const u8, stat: *const Stat) *Dir {
+        _ = t.files_seen.fetchAdd(1, .monotonic);
+        _ = t.bytes_seen.fetchAdd(stat.blocks *| 512, .monotonic);
+
+        const s = main.allocator.create(Dir) catch unreachable;
+        s.* = .{
+            .name = main.allocator.dupe(u8, name) catch unreachable,
+            .parent = d,
+            .out = switch (d.out) {
+                .mem => |*m| .{
+                    .mem = MemDir.init(m.addStat(t.arena.allocator(), name, stat).dir().?)
+                },
+            },
+        };
+        d.ref();
+        return s;
+    }
+
+    pub fn setReadError(d: *Dir, t: *Thread) void {
+        _ = t;
+        switch (d.out) {
+            .mem => |*m| m.setReadError(),
+        }
+    }
+
+    fn path(d: *Dir) []const u8 {
+        var components = std.ArrayList([]const u8).init(main.allocator);
+        defer components.deinit();
+        var it: ?*Dir = d;
+        while (it) |e| : (it = e.parent) components.append(e.name) catch unreachable;
+
+        var out = std.ArrayList(u8).init(main.allocator);
+        var i: usize = components.items.len-1;
+        while (true) {
+            if (i != components.items.len-1 and !(out.items.len != 0 and out.items[out.items.len-1] == '/')) out.append('/') catch unreachable;
+            out.appendSlice(components.items[i]) catch unreachable;
+            if (i == 0) break;
+            i -= 1;
+        }
+        return out.toOwnedSlice() catch unreachable;
+    }
+
+    fn ref(d: *Dir) void {
+        _ = d.refcnt.fetchAdd(1, .monotonic);
+    }
+
+    pub fn unref(d: *Dir) void {
+        if (d.refcnt.fetchSub(1, .release) != 1) return;
+        d.refcnt.fence(.acquire);
+
+        switch (d.out) {
+            .mem => |*m| m.final(if (d.parent) |p| &p.out.mem else null),
+        }
+
+        if (d.parent) |p| p.unref();
+        if (d.name.len > 0) main.allocator.free(d.name);
+        main.allocator.destroy(d);
+    }
+};
+
+
+pub const Thread = struct {
+    current_dir: ?*Dir = null,
+    lock: std.Thread.Mutex = .{},
+    bytes_seen: std.atomic.Value(u64) = std.atomic.Value(u64).init(0),
+    files_seen: std.atomic.Value(u64) = std.atomic.Value(u64).init(0),
+    // Arena allocator for model.Entry structs, these are never freed.
+    arena: std.heap.ArenaAllocator = std.heap.ArenaAllocator.init(std.heap.page_allocator),
+
+    pub fn setDir(t: *Thread, d: ?*Dir) void {
+        t.lock.lock();
+        defer t.lock.unlock();
+        t.current_dir = d;
+    }
+};
+
+
+pub const state = struct {
+    pub var threads: []Thread = undefined;
+};
+
+
+pub fn createRoot(path: []const u8, stat: *const Stat) *Dir {
+    // TODO: Handle other outputs
+    model.root = model.Entry.create(main.allocator, .dir, main.config.extended, path).dir().?;
+    model.root.entry.pack.blocks = stat.blocks;
+    model.root.entry.size = stat.size;
+    model.root.pack.dev = model.devices.getId(stat.dev);
+
+    const d = main.allocator.create(Dir) catch unreachable;
+    d.* = .{
+        .name = main.allocator.dupe(u8, path) catch unreachable,
+        .parent = null,
+        .out = .{ .mem = MemDir.init(model.root) },
+    };
+    return d;
+}
+
+
+pub fn draw() void {
+    var bytes: u64 = 0;
+    var files: u64 = 0;
+    for (state.threads) |*t| {
+        bytes +|= t.bytes_seen.load(.monotonic);
+        files += t.files_seen.load(.monotonic);
+    }
+    const r = ui.FmtSize.fmt(bytes);
+    std.debug.print("{} files / {s}{s}\n", .{files, r.num(), r.unit});
+
+    for (state.threads, 0..) |*t, i| {
+        const dir = blk: {
+            t.lock.lock();
+            defer t.lock.unlock();
+            break :blk if (t.current_dir) |d| d.path() else null;
+        };
+        std.debug.print(" #{}: {s}\n", .{i, dir orelse "(waiting)"});
+        if (dir) |p| main.allocator.free(p);
+    }
+}
+
+pub fn keyInput(_: i32) void {
+}
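The ownership model above leans entirely on the refcount: a parent Dir stays alive until its own unref() plus one unref() per subdirectory have run, and the last drop is what triggers MemDir.final() and the upward propagation of counts. The release/acquire pair in unref() is the standard pattern for tearing down a shared object; a distilled sketch (hypothetical type, not part of the commit):

    const std = @import("std");

    const Shared = struct {
        refcnt: std.atomic.Value(usize) = std.atomic.Value(usize).init(1),

        fn unref(s: *Shared) void {
            // .release publishes this thread's writes; the .acquire fence on
            // the final drop makes all threads' writes visible before teardown.
            if (s.refcnt.fetchSub(1, .release) != 1) return;
            s.refcnt.fence(.acquire);
            // ... finalize and free here ...
        }
    };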

src/ui.zig
@@ -43,6 +43,7 @@ pub fn quit() noreturn {
 // Also, init() and other ncurses-related functions may have hidden allocation,
 // no clue if ncurses will consistently report OOM, but we're not handling that
 // right now.
+// TODO: Make thread-safe!
 pub fn oom() void {
     const haveui = inited;
     deinit();