mirror of https://code.blicky.net/yorhel/ncdu.git (synced 2026-01-15 10:18:39 -09:00)

Improved error reporting + minor cleanup

parent 2390308883, commit 59ef5fd27b

4 changed files with 150 additions and 113 deletions
src/main.zig (15 changed lines)
@@ -184,8 +184,7 @@ fn readExcludeFile(path: []const u8) !void {
     }
 }
 
-// TODO: Better error reporting
-pub fn main() !void {
+pub fn main() void {
     // Grab thousands_sep from the current C locale.
     _ = c.setlocale(c.LC_ALL, "");
     if (c.localeconv()) |locale| {

@@ -228,7 +227,7 @@ pub fn main() !void {
         else if(opt.is("--exclude")) config.exclude_patterns.append(args.arg()) catch unreachable
         else if(opt.is("-X") or opt.is("--exclude-from")) {
             const arg = args.arg();
-            readExcludeFile(arg) catch |e| ui.die("Error reading excludes from {s}: {}.\n", .{ arg, e });
+            readExcludeFile(arg) catch |e| ui.die("Error reading excludes from {s}: {s}.\n", .{ arg, ui.errorString(e) });
         } else if(opt.is("--exclude-caches")) config.exclude_caches = true
         else if(opt.is("--exclude-kernfs")) config.exclude_kernfs = true
         else if(opt.is("--confirm-quit")) config.confirm_quit = true

@@ -255,17 +254,19 @@ pub fn main() !void {
         ui.die("Standard input is not a TTY. Did you mean to import a file using '-f -'?\n", .{});
     config.nc_tty = !in_tty or (if (export_file) |f| std.mem.eql(u8, f, "-") else false);
 
-    event_delay_timer = try std.time.Timer.start();
+    event_delay_timer = std.time.Timer.start() catch unreachable;
     defer ui.deinit();
     state = .scan;
 
     var out_file = if (export_file) |f| (
         if (std.mem.eql(u8, f, "-")) std.io.getStdOut()
-        else try std.fs.cwd().createFileZ(f, .{})
+        else std.fs.cwd().createFileZ(f, .{})
+        catch |e| ui.die("Error opening export file: {s}.\n", .{ui.errorString(e)})
     ) else null;
 
-    try if (import_file) |f| scan.importRoot(f, out_file)
-    else scan.scanRoot(scan_dir orelse ".", out_file);
+    if (import_file) |f| scan.importRoot(f, out_file)
+    else scan.scanRoot(scan_dir orelse ".", out_file)
+    catch |e| ui.die("Error opening directory: {s}.\n", .{ui.errorString(e)});
     if (out_file != null) return;
 
     config.scan_ui = .full; // in case we're refreshing from the UI, always in full mode.
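The pattern running through this file: fallible calls no longer propagate out of main() with `try`, which ends in a bare error-return trace, but are caught at the call site and reported via ui.die() with a readable message from the new ui.errorString() helper. (Timer.start() instead gets `catch unreachable`, treating that failure as impossible in practice.) A minimal standalone sketch of the idiom, with die() as a hypothetical stand-in for ncdu's ui.die() and @errorName() standing in for errorString(), written against the same Zig-0.8-era std APIs as this diff:

    const std = @import("std");

    // Stand-in for ncdu's ui.die(): print a message and exit, instead of
    // letting the error bubble up out of a `!void` main() as a bare trace.
    fn die(comptime fmt: []const u8, args: anytype) noreturn {
        std.debug.print(fmt, args);
        std.process.exit(1);
    }

    pub fn main() void {
        // Before: `var f = try std.fs.cwd().openFile(...)` in `pub fn main() !void`.
        // After: catch at the call site; main() drops its error union entirely.
        var f = std.fs.cwd().openFile("does-not-exist.txt", .{}) catch |e|
            die("Error opening file: {s}.\n", .{@errorName(e)});
        defer f.close();
    }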
src/model.zig (94 changed lines)
@@ -126,8 +126,8 @@ pub const Entry = packed struct {
                 add_total = new_hl;
 
             } else if (self.link()) |l| {
-                const n = HardlinkNode{ .ino = l.ino, .dir = p, .num_files = 1 };
-                var d = devices.items[dev].hardlinks.getOrPut(n) catch unreachable;
+                const n = devices.HardlinkNode{ .ino = l.ino, .dir = p, .num_files = 1 };
+                var d = devices.list.items[dev].hardlinks.getOrPut(n) catch unreachable;
                 new_hl = !d.found_existing;
                 if (d.found_existing) d.entry.key.num_files += 1;
                 // First time we encounter this file in this dir, count it.

@@ -167,7 +167,7 @@ pub const Dir = packed struct {
     shared_size: u64,
     items: u32,
 
-    // Indexes into the global 'devices' array
+    // Indexes into the global 'devices.list' array
     dev: DevId,
 
     err: bool,
@@ -252,58 +252,64 @@ comptime {
 // with the same dev,ino. ncdu provides this list in the info window. Doesn't
 // seem too commonly used, can still be provided by a slow full scan of the
 // tree.
 
-// 20 bytes per hardlink/Dir entry, everything in a single allocation.
-// (Should really be aligned to 8 bytes and hence take up 24 bytes, but let's see how this works out)
 //
-// getEntry() allows modification of the key without re-insertion (this is unsafe in the general case, but works fine for modifying num_files)
-//
-// Potential problem: HashMap uses a 32bit item counter, which may be exceeded in extreme scenarios.
-// (ncdu itself doesn't support more than 31bit-counted files, but this table is hardlink_count*parent_dirs and may grow a bit)
+// Problem: A file's st_nlink count may have changed during a scan and hence be
+// inconsistent with other entries for the same file. Not ~too~ common so a
+// few glitches are fine, but I haven't worked out the impact of this yet.
 
-const HardlinkNode = packed struct {
-    ino: u64,
-    dir: *Dir,
-    num_files: u32,
-
-    const Self = @This();
-
-    // hash() assumes a struct layout, hence the 'packed struct'
-    fn hash(self: Self) u64 { return std.hash.Wyhash.hash(0, @ptrCast([*]const u8, &self)[0..@byteOffsetOf(Self, "dir")+@sizeOf(*Dir)]); }
-    fn eql(a: Self, b: Self) bool { return a.ino == b.ino and a.dir == b.dir; }
-};
-
-// Device entry, this is used for two reasons:
-// 1. st_dev ids are 64-bit, but in a typical filesystem there's only a few
-//    unique ids, hence we can save RAM by only storing smaller DevId's in Dir
-//    entries and using that as an index to a lookup table.
-// 2. Keeping track of hardlink counts for each dir and inode, as described above.
-//
-// (Device entries are never deallocated)
-const Device = struct {
-    dev: u64,
-    hardlinks: Hardlinks = Hardlinks.init(main.allocator),
-
-    const Hardlinks = std.HashMap(HardlinkNode, void, HardlinkNode.hash, HardlinkNode.eql, 80);
-};
-
-var devices: std.ArrayList(Device) = std.ArrayList(Device).init(main.allocator);
-var dev_lookup: std.AutoHashMap(u64, DevId) = std.AutoHashMap(u64, DevId).init(main.allocator);
-
-pub fn getDevId(dev: u64) DevId {
-    var d = dev_lookup.getOrPut(dev) catch unreachable;
-    if (!d.found_existing) {
-        errdefer dev_lookup.removeAssertDiscard(dev);
-        d.entry.value = @intCast(DevId, devices.items.len);
-        devices.append(.{ .dev = dev }) catch unreachable;
-    }
-    return d.entry.value;
-}
-
-pub fn getDev(id: DevId) u64 {
-    return devices.items[id].dev;
-}
+pub const devices = struct {
+    var list: std.ArrayList(Device) = std.ArrayList(Device).init(main.allocator);
+    var lookup: std.AutoHashMap(u64, DevId) = std.AutoHashMap(u64, DevId).init(main.allocator);
+
+    // 20 bytes per hardlink/Dir entry, everything in a single allocation.
+    // (Should really be aligned to 8 bytes and hence take up 24 bytes, but let's see how this works out)
+    //
+    // getEntry() allows modification of the key without re-insertion (this is unsafe in the general case, but works fine for modifying num_files)
+    //
+    // Potential problem: HashMap uses a 32bit item counter, which may be exceeded in extreme scenarios.
+    // (ncdu itself doesn't support more than 31bit-counted files, but this table is hardlink_count*parent_dirs and may grow a bit)
+    const HardlinkNode = packed struct {
+        ino: u64,
+        dir: *Dir,
+        num_files: u32,
+
+        const Self = @This();
+
+        // hash() assumes a struct layout, hence the 'packed struct'
+        fn hash(self: Self) u64 { return std.hash.Wyhash.hash(0, @ptrCast([*]const u8, &self)[0..@byteOffsetOf(Self, "dir")+@sizeOf(*Dir)]); }
+        fn eql(a: Self, b: Self) bool { return a.ino == b.ino and a.dir == b.dir; }
+    };
+
+    const Hardlinks = std.HashMap(HardlinkNode, void, HardlinkNode.hash, HardlinkNode.eql, 80);
+
+    // Device entry, this is used for two reasons:
+    // 1. st_dev ids are 64-bit, but in a typical filesystem there's only a few
+    //    unique ids, hence we can save RAM by only storing smaller DevId's in Dir
+    //    entries and using that as an index to a lookup table.
+    // 2. Keeping track of hardlink counts for each dir and inode, as described above.
+    //
+    // (Device entries are never deallocated)
+    const Device = struct {
+        dev: u64,
+        hardlinks: Hardlinks = Hardlinks.init(main.allocator),
+    };
+
+    pub fn getId(dev: u64) DevId {
+        var d = lookup.getOrPut(dev) catch unreachable;
+        if (!d.found_existing) {
+            errdefer lookup.removeAssertDiscard(dev);
+            d.entry.value = @intCast(DevId, list.items.len);
+            list.append(.{ .dev = dev }) catch unreachable;
+        }
+        return d.entry.value;
+    }
+
+    pub fn getDev(id: DevId) u64 {
+        return list.items[id].dev;
+    }
+};
 
 pub var root: *Dir = undefined;
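The cleanup side of this commit: the module-level `devices` array and `dev_lookup` map move into a single `devices` namespace, and getDevId() becomes devices.getId(). The underlying idea is interning: st_dev values are 64-bit, but a scan typically sees only a handful of distinct ones, so each Dir stores a small DevId index into `list` rather than the full u64. A reduced sketch of that scheme, using the same Zig-0.8-era getOrPut()/@intCast API shapes as the code above (the u16 width and page_allocator are illustrative choices, not ncdu's):

    const std = @import("std");

    const DevId = u16; // small on purpose; Dir entries store this, not the u64

    var list = std.ArrayList(u64).init(std.heap.page_allocator);
    var lookup = std.AutoHashMap(u64, DevId).init(std.heap.page_allocator);

    // Intern a 64-bit device id: known ids are looked up, new ones appended.
    fn getId(dev: u64) !DevId {
        var d = try lookup.getOrPut(dev);
        if (!d.found_existing) {
            d.entry.value = @intCast(DevId, list.items.len);
            try list.append(dev);
        }
        return d.entry.value;
    }

    // The reverse mapping is a plain array index.
    fn getDev(id: DevId) u64 {
        return list.items[id];
    }

    pub fn main() !void {
        std.debug.print("{d} {d} {d}\n", .{ try getId(0x801), try getId(0x802), try getId(0x801) }); // 0 1 0
        std.debug.print("{x}\n", .{getDev(1)}); // 802
    }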
src/scan.zig (134 changed lines)
@@ -131,14 +131,18 @@ const Context = struct {
     const Writer = std.io.BufferedWriter(4096, std.fs.File.Writer);
     const Self = @This();
 
-    fn initFile(out: std.fs.File) !Self {
+    fn writeErr(e: anyerror) noreturn {
+        ui.die("Error writing to file: {s}.\n", .{ ui.errorString(e) });
+    }
+
+    fn initFile(out: std.fs.File) Self {
         var buf = main.allocator.create(Writer) catch unreachable;
         errdefer main.allocator.destroy(buf);
         buf.* = std.io.bufferedWriter(out.writer());
         var wr = buf.writer();
-        try wr.writeAll("[1,2,{\"progname\":\"ncdu\",\"progver\":\"" ++ main.program_version ++ "\",\"timestamp\":");
-        try wr.print("{d}", .{std.time.timestamp()});
-        try wr.writeByte('}');
+        wr.writeAll("[1,2,{\"progname\":\"ncdu\",\"progver\":\"" ++ main.program_version ++ "\",\"timestamp\":") catch |e| writeErr(e);
+        wr.print("{d}", .{std.time.timestamp()}) catch |e| writeErr(e);
+        wr.writeByte('}') catch |e| writeErr(e);
         return Self{ .wr = buf };
     }
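Note the shape of the new error handling inside Context: initFile()'s writes (and the writeSpecial()/writeStat() helpers added further down) stay fallible internally, and every call site collapses the error union through writeErr(), a noreturn funnel that dies with one readable message. Sketched in isolation, with die() as a hypothetical stand-in for ui.die():

    const std = @import("std");

    fn die(comptime fmt: []const u8, args: anytype) noreturn {
        std.debug.print(fmt, args);
        std.process.exit(1);
    }

    // The noreturn funnel: any write failure becomes one fatal message.
    fn writeErr(e: anyerror) noreturn {
        die("Error writing to file: {s}.\n", .{@errorName(e)});
    }

    // Helpers keep ordinary `try` plumbing and stay easy to compose...
    fn writeHeader(wr: anytype) !void {
        try wr.writeAll("[1,2,");
    }

    pub fn main() void {
        // ...while callers stay `void` by catching in a single place:
        writeHeader(std.io.getStdOut().writer()) catch |e| writeErr(e);
    }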
@@ -146,10 +150,10 @@ const Context = struct {
         return Self{ .parents = model.Parents{} };
     }
 
-    fn final(self: *Self) !void {
+    fn final(self: *Self) void {
         if (self.wr) |wr| {
-            try wr.writer().writeByte(']');
-            try wr.flush();
+            wr.writer().writeByte(']') catch |e| writeErr(e);
+            wr.flush() catch |e| writeErr(e);
         }
     }
 
@@ -171,7 +175,7 @@ const Context = struct {
 
         if (self.stat.dir) {
             if (self.parents) |*p| if (p.top() != model.root) p.pop();
-            if (self.wr) |w| w.writer().writeByte(']') catch ui.die("Error writing to file.", .{});
+            if (self.wr) |w| w.writer().writeByte(']') catch |e| writeErr(e);
         } else
             self.stat.dir = true; // repeated popPath()s mean we're closing parent dirs.
     }
 
@@ -188,9 +192,24 @@ const Context = struct {
 
     const Special = enum { err, other_fs, kernfs, excluded };
 
+    fn writeSpecial(self: *Self, w: anytype, t: Special) !void {
+        try w.writeAll(",\n");
+        if (self.stat.dir) try w.writeByte('[');
+        try w.writeAll("{\"name\":");
+        try writeJsonString(w, self.name);
+        switch (t) {
+            .err => try w.writeAll(",\"read_error\":true"),
+            .other_fs => try w.writeAll(",\"excluded\":\"othfs\""),
+            .kernfs => try w.writeAll(",\"excluded\":\"kernfs\""),
+            .excluded => try w.writeAll(",\"excluded\":\"pattern\""),
+        }
+        try w.writeByte('}');
+        if (self.stat.dir) try w.writeByte(']');
+    }
+
     // Insert the current path as a special entry (i.e. a file/dir that is not counted)
     // Ignores self.stat except for the 'dir' option.
-    fn addSpecial(self: *Self, t: Special) !void {
+    fn addSpecial(self: *Self, t: Special) void {
         std.debug.assert(self.items_seen > 0); // root item can't be a special
 
         if (t == .err) {
 
@@ -209,26 +228,30 @@ const Context = struct {
                 .excluded => f.excluded = true,
             }
 
-        } else if (self.wr) |wr| {
-            var w = wr.writer();
-            try w.writeAll(",\n");
-            if (self.stat.dir) try w.writeByte('[');
-            try w.writeAll("{\"name\":");
-            try writeJsonString(w, self.name);
-            switch (t) {
-                .err => try w.writeAll(",\"read_error\":true"),
-                .other_fs => try w.writeAll(",\"excluded\":\"othfs\""),
-                .kernfs => try w.writeAll(",\"excluded\":\"kernfs\""),
-                .excluded => try w.writeAll(",\"excluded\":\"pattern\""),
-            }
-            try w.writeByte('}');
-            if (self.stat.dir) try w.writeByte(']');
-        }
+        } else if (self.wr) |wr|
+            self.writeSpecial(wr.writer(), t) catch |e| writeErr(e);
+
         self.items_seen += 1;
     }
 
+    fn writeStat(self: *Self, w: anytype, dir_dev: u64) !void {
+        try w.writeAll(",\n");
+        if (self.stat.dir) try w.writeByte('[');
+        try w.writeAll("{\"name\":");
+        try writeJsonString(w, self.name);
+        if (self.stat.size > 0) try w.print(",\"asize\":{d}", .{ self.stat.size });
+        if (self.stat.blocks > 0) try w.print(",\"dsize\":{d}", .{ blocksToSize(self.stat.blocks) });
+        if (self.stat.dir and self.stat.dev != dir_dev) try w.print(",\"dev\":{d}", .{ self.stat.dev });
+        if (self.stat.hlinkc) try w.print(",\"ino\":{d},\"hlnkc\":true,\"nlink\":{d}", .{ self.stat.ino, self.stat.nlink });
+        if (!self.stat.dir and !self.stat.reg) try w.writeAll(",\"notreg\":true");
+        if (main.config.extended)
+            try w.print(",\"uid\":{d},\"gid\":{d},\"mode\":{d},\"mtime\":{d}",
+                .{ self.stat.ext.uid, self.stat.ext.gid, self.stat.ext.mode, self.stat.ext.mtime });
+        try w.writeByte('}');
+    }
+
     // Insert current path as a counted file/dir/hardlink, with information from self.stat
-    fn addStat(self: *Self, dir_dev: u64) !void {
+    fn addStat(self: *Self, dir_dev: u64) void {
         if (self.parents) |*p| {
             const etype = if (self.stat.dir) model.EType.dir
                 else if (self.stat.hlinkc) model.EType.link
 
@@ -236,7 +259,7 @@ const Context = struct {
             var e = model.Entry.create(etype, main.config.extended, self.name);
             e.blocks = self.stat.blocks;
             e.size = self.stat.size;
-            if (e.dir()) |d| d.dev = model.getDevId(self.stat.dev);
+            if (e.dir()) |d| d.dev = model.devices.getId(self.stat.dev);
             if (e.file()) |f| f.notreg = !self.stat.dir and !self.stat.reg;
             // TODO: Handle the scenario where we don't know the hard link count
             // (i.e. on imports from old ncdu versions that don't have the "nlink" field)
 
@@ -253,22 +276,8 @@ const Context = struct {
                 if (e.dir()) |d| p.push(d); // Enter the directory
             }
 
-        } else if (self.wr) |wr| {
-            var w = wr.writer();
-            try w.writeAll(",\n");
-            if (self.stat.dir) try w.writeByte('[');
-            try w.writeAll("{\"name\":");
-            try writeJsonString(w, self.name);
-            if (self.stat.size > 0) try w.print(",\"asize\":{d}", .{ self.stat.size });
-            if (self.stat.blocks > 0) try w.print(",\"dsize\":{d}", .{ blocksToSize(self.stat.blocks) });
-            if (self.stat.dir and self.stat.dev != dir_dev) try w.print(",\"dev\":{d}", .{ self.stat.dev });
-            if (self.stat.hlinkc) try w.print(",\"ino\":{d},\"hlnkc\":true,\"nlink\":{d}", .{ self.stat.ino, self.stat.nlink });
-            if (!self.stat.dir and !self.stat.reg) try w.writeAll(",\"notreg\":true");
-            if (main.config.extended)
-                try w.print(",\"uid\":{d},\"gid\":{d},\"mode\":{d},\"mtime\":{d}",
-                    .{ self.stat.ext.uid, self.stat.ext.gid, self.stat.ext.mode, self.stat.ext.mtime });
-            try w.writeByte('}');
-        }
+        } else if (self.wr) |wr|
+            self.writeStat(wr.writer(), dir_dev) catch |e| writeErr(e);
 
         self.items_seen += 1;
     }
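Taken together, initFile(), writeSpecial(), writeStat(), popPath(), and final() emit ncdu's JSON export stream: initFile() opens `[1,2,{...}` with the header object, every entry is prefixed with `,\n`, a directory entry additionally opens a `[`, and popPath()/final() write the closing `]`s. Purely as an illustration (all names, sizes, and the timestamp are invented here, and progver is elided), a scan of a root containing one regular file plus one subdirectory with an unreadable entry would serialize along these lines:

    [1,2,{"progname":"ncdu","progver":"...","timestamp":1617000000},
    [{"name":"/home/user/scan"},
    {"name":"data.bin","asize":1234,"dsize":4096},
    [{"name":"subdir"},
    {"name":"unreadable","read_error":true}]]]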
@@ -286,7 +295,7 @@ const Context = struct {
 var active_context: ?*Context = null;
 
 // Read and index entries of the given dir.
-fn scanDir(ctx: *Context, dir: std.fs.Dir, dir_dev: u64) std.fs.File.Writer.Error!void {
+fn scanDir(ctx: *Context, dir: std.fs.Dir, dir_dev: u64) void {
     // XXX: The iterator allocates 8k+ bytes on the stack, may want to do heap allocation here?
     var it = dir.iterate();
     while(true) {
 
@@ -313,17 +322,17 @@ fn scanDir(ctx: *Context, dir: std.fs.Dir, dir_dev: u64) std.fs.File.Writer.Error!void {
             break :blk false;
         };
         if (excluded) {
-            try ctx.addSpecial(.excluded);
+            ctx.addSpecial(.excluded);
             continue;
         }
 
         ctx.stat = Stat.read(dir, ctx.name, false) catch {
-            try ctx.addSpecial(.err);
+            ctx.addSpecial(.err);
             continue;
         };
 
         if (main.config.same_fs and ctx.stat.dev != dir_dev) {
-            try ctx.addSpecial(.other_fs);
+            ctx.addSpecial(.other_fs);
             continue;
         }
 
@@ -341,13 +350,13 @@ fn scanDir(ctx: *Context, dir: std.fs.Dir, dir_dev: u64) std.fs.File.Writer.Error!void {
 
         var edir =
             if (ctx.stat.dir) dir.openDirZ(ctx.name, .{ .access_sub_paths = true, .iterate = true, .no_follow = true }) catch {
-                try ctx.addSpecial(.err);
+                ctx.addSpecial(.err);
                 continue;
             } else null;
         defer if (edir != null) edir.?.close();
 
         if (std.builtin.os.tag == .linux and main.config.exclude_kernfs and ctx.stat.dir and isKernfs(edir.?, ctx.stat.dev)) {
-            try ctx.addSpecial(.kernfs);
+            ctx.addSpecial(.kernfs);
             continue;
         }
 
@@ -357,20 +366,20 @@ fn scanDir(ctx: *Context, dir: std.fs.Dir, dir_dev: u64) std.fs.File.Writer.Error!void {
                 var buf: [sig.len]u8 = undefined;
                 if (f.reader().readAll(&buf)) |len| {
                     if (len == sig.len and std.mem.eql(u8, &buf, sig)) {
-                        try ctx.addSpecial(.excluded);
+                        ctx.addSpecial(.excluded);
                         continue;
                     }
                 } else |_| {}
             } else |_| {}
         }
 
-        try ctx.addStat(dir_dev);
-        if (ctx.stat.dir) try scanDir(ctx, edir.?, ctx.stat.dev);
+        ctx.addStat(dir_dev);
+        if (ctx.stat.dir) scanDir(ctx, edir.?, ctx.stat.dev);
     }
 }
 
 pub fn scanRoot(path: []const u8, out: ?std.fs.File) !void {
-    var ctx = if (out) |f| try Context.initFile(f) else Context.initMem();
+    var ctx = if (out) |f| Context.initFile(f) else Context.initMem();
     active_context = &ctx;
     defer active_context = null;
     defer ctx.deinit();
 
@@ -380,14 +389,14 @@ pub fn scanRoot(path: []const u8, out: ?std.fs.File) !void {
     ctx.pushPath(full_path orelse path);
 
     ctx.stat = try Stat.read(std.fs.cwd(), ctx.pathZ(), true);
-    if (!ctx.stat.dir) return error.NotADirectory;
-    try ctx.addStat(0);
+    if (!ctx.stat.dir) return error.NotDir;
+    ctx.addStat(0);
 
     var dir = try std.fs.cwd().openDirZ(ctx.pathZ(), .{ .access_sub_paths = true, .iterate = true });
     defer dir.close();
-    try scanDir(&ctx, dir, ctx.stat.dev);
+    scanDir(&ctx, dir, ctx.stat.dev);
     ctx.popPath();
-    try ctx.final();
+    ctx.final();
 }
 
 // Using a custom recursive descent JSON parser here. std.json is great, but
 
@@ -704,8 +713,8 @@ const Import = struct {
         }
         if (name) |n| self.ctx.pushPath(n)
         else self.die("missing \"name\" field");
-        if (special) |s| self.ctx.addSpecial(s) catch unreachable
-        else self.ctx.addStat(dir_dev) catch unreachable;
+        if (special) |s| self.ctx.addSpecial(s)
+        else self.ctx.addStat(dir_dev);
     }
 
     fn item(self: *Self, dev: u64) void {
 
@@ -771,20 +780,21 @@ const Import = struct {
     }
 };
 
-pub fn importRoot(path: [:0]const u8, out: ?std.fs.File) !void {
+pub fn importRoot(path: [:0]const u8, out: ?std.fs.File) void {
     var fd = if (std.mem.eql(u8, "-", path)) std.io.getStdIn()
-             else try std.fs.cwd().openFileZ(path, .{});
+             else std.fs.cwd().openFileZ(path, .{})
+             catch |e| ui.die("Error reading file: {s}.\n", .{ui.errorString(e)});
     defer fd.close();
 
     var imp = Import{
-        .ctx = if (out) |f| try Context.initFile(f) else Context.initMem(),
+        .ctx = if (out) |f| Context.initFile(f) else Context.initMem(),
         .rd = std.io.bufferedReader(fd.reader()),
     };
     active_context = &imp.ctx;
     defer active_context = null;
     defer imp.ctx.deinit();
     imp.root();
-    try imp.ctx.final();
+    imp.ctx.final();
 }
 
 var animation_pos: u32 = 0;
src/ui.zig (20 changed lines)
@@ -47,6 +47,26 @@ pub fn oom() void {
     init();
 }
 
+// Lazy strerror() for Zig file I/O, not complete.
+// (Would be nicer if Zig just exposed errno so I could call strerror() directly)
+pub fn errorString(e: anyerror) []const u8 {
+    return switch (e) {
+        error.DiskQuota => "Disk quota exceeded",
+        error.FileTooBig => "File too big",
+        error.InputOutput => "I/O error",
+        error.NoSpaceLeft => "No space left on device",
+        error.AccessDenied => "Access denied",
+        error.SymlinkLoop => "Symlink loop",
+        error.ProcessFdQuotaExceeded => "Process file descriptor limit exceeded",
+        error.SystemFdQuotaExceeded => "System file descriptor limit exceeded",
+        error.NameTooLong => "Filename too long",
+        error.FileNotFound => "No such file or directory",
+        error.IsDir => "Is a directory",
+        error.NotDir => "Not a directory",
+        else => "Unknown error", // rather useless :(
+    };
+}
+
 var to_utf8_buf = std.ArrayList(u8).init(main.allocator);
 
 fn toUtf8BadChar(ch: u8) bool {
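errorString() takes anyerror on purpose: values from any error set coerce to it, so one helper serves every std.fs call site in this commit, and unmapped codes degrade to "Unknown error". A reduced, self-contained copy to illustrate the coercion (two mapped arms here instead of ncdu's dozen):

    const std = @import("std");

    fn errorString(e: anyerror) []const u8 {
        return switch (e) {
            error.FileNotFound => "No such file or directory",
            error.NotDir => "Not a directory",
            else => "Unknown error",
        };
    }

    pub fn main() void {
        // Any error value, from any error set, coerces to anyerror here:
        std.debug.print("{s}\n", .{errorString(error.FileNotFound)});
        std.debug.print("{s}\n", .{errorString(error.OutOfMemory)}); // "Unknown error"
    }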