Mirror of https://code.blicky.net/yorhel/ncdu.git (synced 2026-01-13 17:28:40 -09:00)

Compare commits: 157 commits
| SHA1 |
|---|
| 1b3d0a670e |
| f452244576 |
| 14bb8d0dd1 |
| 19cfdcf543 |
| 5129de737e |
| 68671a1af1 |
| 74c91768a0 |
| ac4d689e22 |
| 66b875eb00 |
| 67f34090fb |
| 5b96a48f53 |
| 58e6458130 |
| 653c3bfe70 |
| beac59fb12 |
| d97a7f73dd |
| 35a9faadb2 |
| e43d22ba3f |
| f4e4694612 |
| c9f3d39d3e |
| 2b4c1ca03e |
| af7163acf6 |
| 5438312440 |
| 0918096301 |
| ee1d80da6a |
| 93a81a3898 |
| cf3a8f3043 |
| f7fe61194b |
| 456cde16df |
| 3c77dc458a |
| ce9921846c |
| e0ab5d40c7 |
| 607b07a30e |
| b4dc9f1d4d |
| 2e5c767d4c |
| 5d5182ede3 |
| db96bc698c |
| 4873a7c765 |
| 49d43f89a1 |
| e5a6a1c5ea |
| 5593fa2233 |
| 9d51df02c1 |
| 7ed209a8e5 |
| 4bd6e3daba |
| 2fcd7f370c |
| 232a4f8741 |
| bdc730f1e5 |
| df5845baad |
| 0e6967498f |
| bd442673d2 |
| 28d9eaecab |
| 61d7fc8473 |
| e142d012f0 |
| 39517c01a8 |
| cc26ead5f8 |
| ca46c7241f |
| e324804cdd |
| 26229d7a63 |
| 4ef9c3e817 |
| c30699f93b |
| 6b7983b2f5 |
| 9418079da3 |
| 18f322c532 |
| 252f7fc253 |
| 49ef7cc34e |
| 17e384b485 |
| ad166de925 |
| 22dca22450 |
| 30d6ddf149 |
| 8fb2290d5e |
| 90b43755b8 |
| 8ad61e87c1 |
| 85e12beb1c |
| 025e5ee99e |
| cd00ae50d1 |
| 5a0c8c6175 |
| ebaa9b6a89 |
| f25bc5cbf4 |
| 87d336baeb |
| 0a6bcee32b |
| 3c055810d0 |
| f6bffa40c7 |
| 08d373881c |
| dc42c91619 |
| 2b2b4473e5 |
| 9cbe1bc91f |
| f28f69d831 |
| a5e57ee5ad |
| b0d4fbe94f |
| 99f92934c6 |
| 9b517f27b1 |
| 705bd8907d |
| e5508ba9b4 |
| 6bb31a4653 |
| 7558fd7f8e |
| 1e56c8604e |
| d2e8dd8a90 |
| ddbed8b07f |
| db51987446 |
| cc12c90dbc |
| f2541d42ba |
| c41467f240 |
| 2f97601736 |
| 574a4348a3 |
| 0215f3569d |
| f4f4af4ee5 |
| 6db150cc98 |
| a4484f27f3 |
| d0d064aaf9 |
| 0e54ca775c |
| d60bcb2113 |
| e1818430b7 |
| 29bbab64b3 |
| 5944b738d0 |
| 946d2a0316 |
| 8ce5bae872 |
| c41e3f5828 |
| 1fa40ae498 |
| f03eee5443 |
| 491988d9a5 |
| a2eb84e7d3 |
| c83159f076 |
| 115de253a8 |
| a71bc6eca5 |
| ec99218645 |
| 83d3630ca7 |
| ab6dc5be75 |
| 0d99781c67 |
| e6cfacfa06 |
| 74be277249 |
| 46b88bcb5c |
| ca1f293310 |
| 07a13d9c73 |
| 54d50e0443 |
| ec233ff33a |
| c002d9fa92 |
| cebaaf0972 |
| 4d124c7c3d |
| 890e5a4af7 |
| 91281ef11f |
| 1452b91032 |
| f7e774ee6e |
| f37362af36 |
| 0d16b9f33e |
| 34dafffc62 |
| 1548f9276f |
| d6728bca95 |
| d523a77fdc |
| f0764ea24e |
| 058b26bf9a |
| e6806059e6 |
| bb98939e24 |
| 0fc14173f2 |
| 2e4f0f0bce |
| 5f383966a9 |
| 3942722eba |
| 1a3de55e68 |
| 1f46dacf12 |
24 changed files with 5343 additions and 2477 deletions
.gitignore (vendored, 5 lines changed)

```diff
@@ -1,10 +1,11 @@
-# SPDX-FileCopyrightText: 2021 Yoran Heling <projects@yorhel.nl>
+# SPDX-FileCopyrightText: Yorhel <projects@yorhel.nl>
 # SPDX-License-Identifier: MIT
 
 *.swp
 *~
-ncdu.1
 ncurses
+zstd
 static-*/
 zig-cache/
 zig-out/
+.zig-cache/
```
ChangeLog (106 lines changed)

```diff
@@ -1,6 +1,110 @@
-# SPDX-FileCopyrightText: 2021-2022 Yoran Heling <projects@yorhel.nl>
+# SPDX-FileCopyrightText: Yorhel <projects@yorhel.nl>
 # SPDX-License-Identifier: MIT
 
+2.9.2 - 2025-10-24
+- Still requires Zig 0.14 or 0.15
+- Fix hang on loading config file when compiled with Zig 0.15.2
+
+2.9.1 - 2025-08-21
+- Add support for building with Zig 0.15
+- Zig 0.14 is still supported
+
+2.9 - 2025-08-16
+- Still requires Zig 0.14
+- Add --delete-command option to replace the built-in file deletion
+- Move term cursor to selected option in delete confirmation window
+- Support binary import on older Linux kernels lacking statx() (may break
+  again in the future, Zig does not officially support such old kernels)
+
+2.8.2 - 2025-05-01
+- Still requires Zig 0.14
+- Fix a build error on MacOS
+
+2.8.1 - 2025-04-28
+- Still requires Zig 0.14
+- Fix integer overflow in binary export
+- Fix crash when `fstatat()` returns EINVAL
+- Minor build system improvements
+
+2.8 - 2025-03-05
+- Now requires Zig 0.14
+- Add support for @-prefixed lines to ignore errors in config file
+- List all supported options in `--help`
+- Use `kB` instead of `KB` in `--si` mode
+
+2.7 - 2024-11-19
+- Still requires Zig 0.12 or 0.13
+- Support transparent reading/writing of zstandard-compressed JSON
+- Add `--compress` and `--export-block-size` options
+- Perform tilde expansion on paths in the config file
+- Fix JSON import of escaped UTF-16 surrogate pairs
+- Fix incorrect field in root item when exporting to the binary format
+- Add -Dstrip build flag
+
+2.6 - 2024-09-27
+- Still requires Zig 0.12 or 0.13
+- Add dependency on libzstd
+- Add new export format to support threaded export and low-memory browsing
+- Add `-O` and `--compress-level` CLI flags
+- Add progress indicator to hardlink counting stage
+- Fix displaying and exporting zero values when extended info is not available
+- Fix clearing screen in some error cases
+- Fix uncommon edge case in hardlink counting on refresh
+- Use integer math instead of floating point to format numbers
+
+2.5 - 2024-07-24
+- Still requires Zig 0.12 or 0.13
+- Add parallel scanning with `-t,--threads` CLI flags
+- Improve JSON export and import performance
+- `--exclude-kernfs` is no longer checked on the top-level scan path
+- Fix entries sometimes not showing up after refresh
+- Fix file descriptor leak with `--exclude-caches` checking
+- Fix possible crash on invalid UTF8 when scanning in `-1` UI mode
+- Fix JSON export and import of the "other filesystem" flag
+- Fix JSON import containing directories with a read error
+- Fix mtime display of 'special' files
+- Fix edge case bad performance when deleting hardlinks with many links
+- Increased memory use for hardlinks (by ~10% in extreme cases, sorry)
+
+2.4 - 2024-04-21
+- Now requires Zig 0.12
+- Revert default color scheme back to 'off'
+- Rewrite man page in mdoc, drop pod2man dependency
+- Fix updating parent dir error status on refresh
+
+2.3 - 2023-08-04
+- Now requires Zig 0.11
+- Add --(enable|disable)-natsort options
+- Add indicator to apparent size/disk usage selection in the footer
+- Fix build on armv7l (hopefully)
+- Minor build system additions
+
+2.2.2 - 2023-01-19
+- Now requires Zig 0.10 or 0.10.1
+- That's it, pretty much.
+
+2.2.1 - 2022-10-25
+- Still requires Zig 0.9.0 or 0.9.1
+- Fix bug with 'dark' and 'off' color themes on FreeBSD and MacOS
+
+2.2 - 2022-10-17
+- Still requires Zig 0.9.0 or 0.9.1
+- (breaking) Wildcards in exclude patterns don't cross directory boundary anymore
+- Improve exclude pattern matching performance
+- Set full background in default dark-bg color scheme
+- Fix broken JSON export when a filename contains control characters below 0x10
+
+2.1.2 - 2022-04-28
+- Still requires Zig 0.9.0 or 0.9.1
+- Fix possible crash on shortening file names with unicode variation
+  selectors or combining marks
+
+2.1.1 - 2022-03-25
+- Still requires Zig 0.9.0 or 0.9.1
+- Fix potential crash when refreshing
+- Fix typo in --graph-style=eighth-block
+- Revert default --graph-style to hash characters
+
 2.1 - 2022-02-07
 - Still requires Zig 0.9.0
 - Use natural sort order when sorting by file name
```
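Read together with the man page further down, the flags introduced in 2.5 through 2.9 combine naturally on the command line; a hedged sketch (file names and thread counts are illustrative):

```sh
# 2.5: parallel scan (-t); 2.7: zstd-compressed JSON export (-c with -o):
ncdu -t4 -1co export.json.zst /home

# 2.6: binary export format (-O), compressed and browsable with low memory:
ncdu -t4 -1O export.ncdu /home

# 2.9: replace the built-in deletion with a custom command:
ncdu --delete-command 'gio trash --'
```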
Makefile (72 lines changed)

```diff
@@ -1,40 +1,37 @@
-# SPDX-FileCopyrightText: 2021-2022 Yoran Heling <projects@yorhel.nl>
+# SPDX-FileCopyrightText: Yorhel <projects@yorhel.nl>
 # SPDX-License-Identifier: MIT
 
 # Optional semi-standard Makefile with some handy tools.
 # Ncdu itself can be built with just the zig build system.
 
+ZIG ?= zig
+
 PREFIX ?= /usr/local
 BINDIR ?= ${PREFIX}/bin
 MANDIR ?= ${PREFIX}/share/man/man1
-ZIG_FLAGS ?= -Drelease-fast
+ZIG_FLAGS ?= --release=fast -Dstrip
 
 NCDU_VERSION=$(shell grep 'program_version = "' src/main.zig | sed -e 's/^.*"\(.\+\)".*$$/\1/')
 
 .PHONY: build test
 
 build: release
 
 release:
-	zig build ${ZIG_FLAGS}
+	$(ZIG) build ${ZIG_FLAGS}
 
 debug:
-	zig build
+	$(ZIG) build
 
 clean:
 	rm -rf zig-cache zig-out
 
-distclean: clean
-	rm -f ncdu.1
-
-doc: ncdu.1
-
-ncdu.1: ncdu.pod src/main.zig
-	pod2man --center "ncdu manual" --release "ncdu-${NCDU_VERSION}" ncdu.pod >ncdu.1
-
 install: install-bin install-doc
 
 install-bin: release
 	mkdir -p ${BINDIR}
 	install -m0755 zig-out/bin/ncdu ${BINDIR}/
 
-install-doc: doc
+install-doc:
 	mkdir -p ${MANDIR}
 	install -m0644 ncdu.1 ${MANDIR}/
@@ -47,43 +44,57 @@ uninstall-bin:
 uninstall-doc:
 	rm -f ${MANDIR}/ncdu.1
 
-dist: doc
+dist:
 	rm -f ncdu-${NCDU_VERSION}.tar.gz
 	mkdir ncdu-${NCDU_VERSION}
-	for f in ncdu.1 `git ls-files | grep -v ^\.gitignore`; do mkdir -p ncdu-${NCDU_VERSION}/`dirname $$f`; ln -s "`pwd`/$$f" ncdu-${NCDU_VERSION}/$$f; done
+	for f in `git ls-files | grep -v ^\.gitignore`; do mkdir -p ncdu-${NCDU_VERSION}/`dirname $$f`; ln -s "`pwd`/$$f" ncdu-${NCDU_VERSION}/$$f; done
 	tar -cophzf ncdu-${NCDU_VERSION}.tar.gz --sort=name ncdu-${NCDU_VERSION}
 	rm -rf ncdu-${NCDU_VERSION}
 
 
-# ASSUMPTION: the ncurses source tree has been extracted into ncurses/
+# ASSUMPTION:
+# - the ncurses source tree has been extracted into ncurses/
+# - the zstd source tree has been extracted into zstd/
 # Would be nicer to do all this with the Zig build system, but no way am I
 # going to write build.zig's for these projects.
 static-%.tar.gz:
 	mkdir -p static-$*/nc static-$*/inst/pkg
+	cp -R zstd/lib static-$*/zstd
+	make -C static-$*/zstd -j8 libzstd.a V=1\
+		ZSTD_LIB_DICTBUILDER=0\
+		ZSTD_LIB_MINIFY=1\
+		ZSTD_LIB_EXCLUDE_COMPRESSORS_DFAST_AND_UP=1\
+		CC="${ZIG} cc --target=$*"\
+		LD="${ZIG} cc --target=$*"\
+		AR="${ZIG} ar" RANLIB="${ZIG} ranlib"
 	cd static-$*/nc && ../../ncurses/configure --prefix="`pwd`/../inst"\
 		--with-pkg-config-libdir="`pwd`/../inst/pkg"\
 		--without-cxx --without-cxx-binding --without-ada --without-manpages --without-progs\
-		--without-tests --enable-pc-files --without-pkg-config --without-shared --without-debug\
+		--without-tests --disable-pc-files --without-pkg-config --without-shared --without-debug\
 		--without-gpm --without-sysmouse --enable-widec --with-default-terminfo-dir=/usr/share/terminfo\
 		--with-terminfo-dirs=/usr/share/terminfo:/lib/terminfo:/usr/local/share/terminfo\
 		--with-fallbacks="screen linux vt100 xterm xterm-256color" --host=$*\
-		CC="zig cc --target=$*"\
-		LD="zig cc --target=$*"\
-		AR="zig ar" RANLIB="zig ranlib"\
-		CPPFLAGS=-D_GNU_SOURCE && make && make install.libs
+		CC="${ZIG} cc --target=$*"\
+		LD="${ZIG} cc --target=$*"\
+		AR="${ZIG} ar" RANLIB="${ZIG} ranlib"\
+		CPPFLAGS=-D_GNU_SOURCE && make -j8
-	@# zig-build - cleaner approach but doesn't work, results in a dynamically linked binary.
-	@#cd static-$* && PKG_CONFIG_LIBDIR="`pwd`/inst/pkg" zig build -Dtarget=$*
-	@#  --build-file ../build.zig --search-prefix inst/ --cache-dir zig -Drelease-fast=true
-	@# Alternative approach, bypassing zig-build
-	cd static-$* && zig build-exe -target $*\
-		-Iinst/include -Iinst/include/ncursesw -lc inst/lib/libncursesw.a\
-		--cache-dir zig-cache -static --strip -O ReleaseFast ../src/main.zig ../src/ncurses_refs.c
+	cd static-$* && ${ZIG} build-exe -target $*\
+		-Inc/include -Izstd -lc nc/lib/libncursesw.a zstd/libzstd.a\
+		--cache-dir zig-cache -static -fstrip -O ReleaseFast ../src/main.zig
 	@# My system's strip can't deal with arm binaries and zig doesn't wrap a strip alternative.
 	@# Whatever, just let it error for those.
 	strip -R .eh_frame -R .eh_frame_hdr static-$*/main || true
 	cd static-$* && mv main ncdu && tar -czf ../static-$*.tar.gz ncdu
 	rm -rf static-$*
 
 static-linux-x86_64: static-x86_64-linux-musl.tar.gz
 	mv $< ncdu-${NCDU_VERSION}-linux-x86_64.tar.gz
 
-static-linux-i386: static-i386-linux-musl.tar.gz
-	mv $< ncdu-${NCDU_VERSION}-linux-i386.tar.gz
+static-linux-x86: static-x86-linux-musl.tar.gz
+	mv $< ncdu-${NCDU_VERSION}-linux-x86.tar.gz
 
 static-linux-aarch64: static-aarch64-linux-musl.tar.gz
 	mv $< ncdu-${NCDU_VERSION}-linux-aarch64.tar.gz
@@ -93,6 +104,11 @@ static-linux-arm: static-arm-linux-musleabi.tar.gz
 
 static:\
 	static-linux-x86_64 \
-	static-linux-i386 \
+	static-linux-x86 \
 	static-linux-aarch64 \
 	static-linux-arm
+
+test:
+	zig build test
+	mandoc -T lint ncdu.1
+	reuse lint
```
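Typical invocations of the retooled Makefile look like this; a sketch under the Makefile's own stated assumptions (a Zig toolchain, and for the static targets the ncurses/ and zstd/ source trees extracted into the repo root; the toolchain path and prefix below are illustrative):

```sh
# Release build with the default ZIG_FLAGS (--release=fast -Dstrip):
make release

# Point the build at a specific Zig toolchain and install elsewhere:
make install ZIG=/opt/zig-0.15.2/zig PREFIX=/opt/ncdu

# Cross-compiled static tarball, per the ASSUMPTION comment above:
make static-linux-aarch64
```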
README.md

```diff
@@ -1,5 +1,5 @@
 <!--
-SPDX-FileCopyrightText: 2021-2022 Yoran Heling <projects@yorhel.nl>
+SPDX-FileCopyrightText: Yorhel <projects@yorhel.nl>
 SPDX-License-Identifier: MIT
 -->
 
@@ -19,9 +19,10 @@ C version (1.x).
 
 ## Requirements
 
-- Zig 0.9.0
+- Zig 0.14 or 0.15
 - Some sort of POSIX-like OS
-- ncurses libraries and header files
+- ncurses
+- libzstd
 
 ## Install
```
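A quick way to check the updated requirements before building; a sketch assuming pkg-config is available and that the development packages ship .pc files named ncursesw and libzstd (the names can differ per platform):

```sh
zig version                                  # expect 0.14.x or 0.15.x
pkg-config --exists ncursesw && echo "ncursesw: found"
pkg-config --exists libzstd && echo "libzstd: found"
```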
build.zig (54 lines changed)

```diff
@@ -1,21 +1,38 @@
-// SPDX-FileCopyrightText: 2021-2022 Yoran Heling <projects@yorhel.nl>
+// SPDX-FileCopyrightText: Yorhel <projects@yorhel.nl>
 // SPDX-License-Identifier: MIT
 
 const std = @import("std");
 
-pub fn build(b: *std.build.Builder) void {
+pub fn build(b: *std.Build) void {
     const target = b.standardTargetOptions(.{});
-    const mode = b.standardReleaseOptions();
+    const optimize = b.standardOptimizeOption(.{});
 
-    const exe = b.addExecutable("ncdu", "src/main.zig");
-    exe.setTarget(target);
-    exe.setBuildMode(mode);
-    exe.addCSourceFile("src/ncurses_refs.c", &[_][]const u8{});
-    exe.linkLibC();
-    exe.linkSystemLibrary("ncursesw");
-    exe.install();
+    const pie = b.option(bool, "pie", "Build with PIE support (by default: target-dependant)");
+    const strip = b.option(bool, "strip", "Strip debugging info (by default false)") orelse false;
 
-    const run_cmd = exe.run();
+    const main_mod = b.createModule(.{
+        .root_source_file = b.path("src/main.zig"),
+        .target = target,
+        .optimize = optimize,
+        .strip = strip,
+        .link_libc = true,
+    });
+    main_mod.linkSystemLibrary("ncursesw", .{});
+    main_mod.linkSystemLibrary("zstd", .{});
+
+    const exe = b.addExecutable(.{
+        .name = "ncdu",
+        .root_module = main_mod,
+    });
+    exe.pie = pie;
+    // https://github.com/ziglang/zig/blob/faccd79ca5debbe22fe168193b8de54393257604/build.zig#L745-L748
+    if (target.result.os.tag.isDarwin()) {
+        // useful for package maintainers
+        exe.headerpad_max_install_names = true;
+    }
+    b.installArtifact(exe);
+
+    const run_cmd = b.addRunArtifact(exe);
     run_cmd.step.dependOn(b.getInstallStep());
     if (b.args) |args| {
         run_cmd.addArgs(args);
@@ -24,10 +41,13 @@ pub fn build(b: *std.build.Builder) void {
     const run_step = b.step("run", "Run the app");
     run_step.dependOn(&run_cmd.step);
 
-    const tst = b.addTest("src/main.zig");
-    tst.linkLibC();
-    tst.linkSystemLibrary("ncursesw");
-    tst.addCSourceFile("src/ncurses_refs.c", &[_][]const u8{});
-    const tst_step = b.step("test", "Run tests");
-    tst_step.dependOn(&tst.step);
+    const unit_tests = b.addTest(.{
+        .root_module = main_mod,
+    });
+    unit_tests.pie = pie;
+
+    const run_unit_tests = b.addRunArtifact(unit_tests);
+
+    const test_step = b.step("test", "Run unit tests");
+    test_step.dependOn(&run_unit_tests.step);
 }
```
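The options and steps declared in the rewritten build script are exercised from the command line like this (a sketch; flag values are illustrative):

```sh
# Optimized build, stripped, with PIE enabled; the -D options map to the
# b.option() declarations above:
zig build --release=fast -Dstrip=true -Dpie=true

# Build and run ncdu in place; arguments after -- reach run_cmd.addArgs():
zig build run -- -x /

# Run the "test" step wired to the unit tests:
zig build test
```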
ncdu.1 (new file, 620 lines)

```roff
.\" SPDX-FileCopyrightText: Yorhel <projects@yorhel.nl>
.\" SPDX-License-Identifier: MIT
.Dd August 16, 2025
.Dt NCDU 1
.Os
.Sh NAME
.Nm ncdu
.Nd NCurses Disk Usage
.
.Sh SYNOPSIS
.Nm
.Op Fl f Ar file
.Op Fl o Ar file
.Op Fl O Ar file
.Op Fl e , \-extended , \-no\-extended
.Op Fl \-ignore\-config
.Op Fl x , \-one\-file\-system , \-cross\-file\-system
.Op Fl \-exclude Ar pattern
.Op Fl X , \-exclude\-from Ar file
.Op Fl \-include\-caches , \-exclude\-caches
.Op Fl L , \-follow\-symlinks , \-no\-follow\-symlinks
.Op Fl \-include\-kernfs , \-exclude\-kernfs
.Op Fl t , \-threads Ar num
.Op Fl c , \-compress , \-no\-compress
.Op Fl \-compress\-level Ar num
.Op Fl \-export\-block\-size Ar num
.Op Fl 0 , 1 , 2
.Op Fl q , \-slow\-ui\-updates , \-fast\-ui\-updates
.Op Fl \-enable\-shell , \-disable\-shell
.Op Fl \-enable\-delete , \-disable\-delete
.Op Fl \-enable\-refresh , \-disable\-refresh
.Op Fl r
.Op Fl \-si , \-no\-si
.Op Fl \-disk\-usage , \-apparent\-size
.Op Fl \-show\-hidden , \-hide\-hidden
.Op Fl \-show\-itemcount , \-hide\-itemcount
.Op Fl \-show\-mtime , \-hide\-mtime
.Op Fl \-show\-graph , \-hide\-graph
.Op Fl \-show\-percent , \-hide\-percent
.Op Fl \-graph\-style Ar hash | half\-block | eighth\-block
.Op Fl \-shared\-column Ar off | shared | unique
.Op Fl \-sort Ar column
.Op Fl \-enable\-natsort , \-disable\-natsort
.Op Fl \-group\-directories\-first , \-no\-group\-directories\-first
.Op Fl \-confirm\-quit , \-no\-confirm\-quit
.Op Fl \-confirm\-delete , \-no\-confirm\-delete
.Op Fl \-delete\-command Ar command
.Op Fl \-color Ar off | dark | dark-bg
.Op Ar path
.Nm
.Op Fl h , \-help
.Nm
.Op Fl v , V , \-version
.
.Sh DESCRIPTION
.Nm
(NCurses Disk Usage) is an interactive curses-based version of the well-known
.Xr du 1 ,
and provides a fast way to see what directories are using your disk space.
.
.Sh OPTIONS
.Ss Mode Selection
.Bl -tag -width Ds
.It Fl h , \-help
Print a short help message and quit.
.It Fl v , V , \-version
Print version and quit.
.It Fl f Ar file
Load the given file, which has earlier been created with the
.Fl o
or
.Fl O
flag.
If
.Ar file
is equivalent to '\-', the file is read from standard input.
Reading from standard input is only supported for the JSON format.
.Pp
For the sake of preventing a screw-up, the current version of
.Nm
will assume that the directory information in the imported file does not
represent the filesystem on which the file is being imported.
That is, the refresh, file deletion and shell spawning options in the browser
will be disabled.
.It Ar dir
Scan the given directory.
.It Fl o Ar file
Export the directory tree in JSON format to
.Ar file
instead of opening the browser interface.
If
.Ar file
is '\-', the data is written to standard output.
See the examples section below for some handy use cases.
.Pp
Be warned that the exported data may grow quite large when exporting a
directory with many files.
10.000 files will get you an export in the order of 600 to 700 KiB
uncompressed, or a little over 100 KiB when compressed with gzip.
This scales linearly, so be prepared to handle a few tens of megabytes when
dealing with millions of files.
.Pp
Consider enabling
.Fl c
to output Zstandard-compressed JSON, which can significantly reduce the size
of the exported data.
.Pp
When running a multi-threaded scan or when scanning a directory tree that may
not fit in memory, consider using
.Fl O
instead.
.It Fl O Ar file
Export the directory tree in binary format to
.Ar file
instead of opening the browser interface.
If
.Ar file
is '\-', the data is written to standard output.
The binary format has built-in compression, supports low-memory multi-threaded
export (in combination with
.Fl t )
and can be browsed without importing the entire directory tree into memory.
.It Fl e , \-extended , \-no\-extended
Enable/disable extended information mode.
This will, in addition to the usual file information, also read the ownership,
permissions and last modification time for each file.
This will result in higher memory usage (by roughly ~30%) and in a larger
output file when exporting.
.Pp
When using the file export/import function, this flag should be added both when
exporting (to make sure the information is added to the export) and when
importing (to read this extra information in memory).
This flag has no effect when importing a file that has been exported without
the extended information.
.Pp
This enables viewing and sorting by the latest child mtime, or modified time,
using 'm' and 'M', respectively.
.It Fl \-ignore\-config
Do not attempt to load any configuration files.
.El
.
.Ss Scan Options
These options affect the scanning progress; they have no effect when importing
directory information from a file.
.Bl -tag -width Ds
.It Fl x , \-one\-file\-system
Do not cross filesystem boundaries, i.e. only count files and directories on
the same filesystem as the directory being scanned.
.It Fl \-cross\-file\-system
Do cross filesystem boundaries.
This is the default, but can be specified to overrule a previously configured
.Fl x .
.It Fl \-exclude Ar pattern
Exclude files that match
.Ar pattern .
The files are still displayed by default, but are not counted towards the disk
usage statistics.
This argument can be added multiple times to add more patterns.
.It Fl X , \-exclude\-from Ar file
Exclude files that match any pattern in
.Ar file .
Patterns should be separated by a newline.
.It Fl \-include\-caches , \-exclude\-caches
Include (default) or exclude directories containing
.Pa CACHEDIR.TAG .
Excluded cache directories are still displayed, but their contents will not be
scanned or counted towards the disk usage statistics.
.Lk https://bford.info/cachedir/
.It Fl L , \-follow\-symlinks , \-no\-follow\-symlinks
Follow (or not) symlinks and count the size of the file they point to.
This option does not follow symlinks to directories and will cause each
symlinked file to count as a unique file.
This is different from how hard links are handled.
The exact counting behavior of this flag is subject to change in the future.
.It Fl \-include\-kernfs , \-exclude\-kernfs
(Linux only) Include (default) or exclude Linux pseudo filesystems such as
.Pa /proc
(procfs) and
.Pa /sys
(sysfs).
.Pp
The complete list of currently known pseudo filesystems is: binfmt, bpf, cgroup,
cgroup2, debug, devpts, proc, pstore, security, selinux, sys, trace.
.It Fl t , \-threads Ar num
Number of threads to use when scanning the filesystem, defaults to 1.
.Pp
In single-threaded mode, the JSON export (see
.Fl o )
can operate with very little memory, but in multi-threaded mode the entire
directory tree is first constructed in memory and written out after the
filesystem scan has completed.
This causes a delay in output and requires significantly more memory for large
directory trees.
The binary format (see
.Fl O )
does not have this problem and supports efficient exporting with any number of
threads.
.El
.
.Ss Export Options
These options affect behavior when exporting to file with the
.Fl o
or
.Fl O
options.
.Bl -tag -width Ds
.It Fl c , \-compress , \-no\-compress
Enable or disable Zstandard compression when exporting to JSON (see
.Fl o ) .
.It Fl \-compress\-level Ar num
Set the Zstandard compression level when using
.Fl O
or
.Fl c .
Valid values are 1 (fastest) to 19 (slowest).
Defaults to 4.
.It Fl \-export\-block\-size Ar num
Set the block size, in kibibytes, for the binary export format (see
.Fl O ) .
Larger blocks require more memory but result in better compression efficiency.
This option can be combined with a higher
.Fl \-compress\-level
for even better compression.
.Pp
Accepted values are between 4 and 16000.
The default is to start at 64 KiB and then gradually increase the block size
for large exports.
.El
.
.Ss Interface Options
.Bl -tag -width Ds
.It Fl 0
Don't give any feedback while scanning a directory or importing a file, except
when a fatal error occurs.
Ncurses will not be initialized until the scan is complete.
When exporting the data with
.Fl o ,
ncurses will not be initialized at all.
This option is the default when exporting to standard output.
.It Fl 1
Write progress information to the terminal, but don't open a full-screen
ncurses interface.
This option is the default when exporting to a file.
.Pp
In some cases, the ncurses browser interface which you'll see after the
scan/import is complete may look garbled when using this option.
If you're not exporting to a file,
.Fl 2
is usually a better choice.
.It Fl 2
Show a full-screen ncurses interface while scanning a directory or importing
a file.
This is the only interface that provides feedback on any non-fatal errors while
scanning.
.It Fl q , \-slow\-ui\-updates , \-fast\-ui\-updates
Change the UI update interval while scanning or importing.
.Nm
updates the screen 10 times a second by default (with
.Fl \-fast\-ui\-updates
); this can be decreased to once every 2 seconds with
.Fl q
or
.Fl \-slow\-ui\-updates .
This option can be used to save bandwidth over remote connections.
This option has no effect in combination with
.Fl 0 .
.It Fl \-enable\-shell , \-disable\-shell
Enable or disable shell spawning from the file browser.
This feature is enabled by default when scanning a live directory and disabled
when importing from file.
.It Fl \-enable\-delete , \-disable\-delete
Enable or disable the built-in file deletion feature.
This feature is enabled by default when scanning a live directory and disabled
when importing from file.
Explicitly disabling the deletion feature can work as a safeguard to prevent
accidental data loss.
.It Fl \-enable\-refresh , \-disable\-refresh
Enable or disable directory refreshing from the file browser.
This feature is enabled by default when scanning a live directory and disabled
when importing from file.
.It Fl r
Read-only mode.
When given once, this is an alias for
.Fl \-disable\-delete ,
when given twice it will also add
.Fl \-disable\-shell ,
thus ensuring that there is no way to modify the file system from within
.Nm .
.It Fl \-si , \-no\-si
List sizes using base 10 prefixes, that is, powers of 1000 (kB, MB, etc), as
defined in the International System of Units (SI), instead of the usual base 2
prefixes (KiB, MiB, etc).
.It Fl \-disk\-usage , \-apparent\-size
Select whether to display disk usage (default) or apparent sizes.
Can also be toggled in the file browser with the 'a' key.
.It Fl \-show\-hidden , \-hide\-hidden
Show (default) or hide "hidden" and excluded files.
Can also be toggled in the file browser with the 'e' key.
.It Fl \-show\-itemcount , \-hide\-itemcount
Show or hide (default) the item counts column.
Can also be toggled in the file browser with the 'c' key.
.It Fl \-show\-mtime , \-hide\-mtime
Show or hide (default) the last modification time column.
Can also be toggled in the file browser with the 'm' key.
This option is ignored when not in extended mode, see
.Fl e .
.It Fl \-show\-graph , \-hide\-graph
Show (default) or hide the relative size bar column.
Can also be toggled in the file browser with the 'g' key.
.It Fl \-show\-percent , \-hide\-percent
Show (default) or hide the relative size percent column.
Can also be toggled in the file browser with the 'g' key.
.It Fl \-graph\-style Ar hash | half\-block | eighth\-block
Change the way that the relative size bar column is drawn.
Recognized values are
.Ar hash
to draw ASCII '#' characters (default and most portable),
.Ar half\-block
to use half-block drawing characters or
.Ar eighth\-block
to use eighth-block drawing characters.
Eighth-block characters are the most precise but may not render correctly in
all terminals.
.It Fl \-shared\-column Ar off | shared | unique
Set to
.Ar off
to disable the shared size column for directories,
.Ar shared
(default) to display shared directory sizes as a separate column or
.Ar unique
to display unique directory sizes as a separate column.
These options can also be cycled through in the file browser with the 'u' key.
.It Fl \-sort Ar column
Change the default column to sort on.
Accepted values are
.Ar disk\-usage
(the default),
.Ar name , apparent\-size , itemcount
or
.Ar mtime .
The latter only makes sense in extended mode, see
.Fl e .
.Pp
The column name can be suffixed with
.Li \-asc
or
.Li \-desc
to change the order to ascending or descending, respectively.
For example,
.Li \-\-sort=name\-desc
to sort by name in descending order.
.It Fl \-enable\-natsort , \-disable\-natsort
Enable (default) or disable natural sort when sorting by file name.
.It Fl \-group\-directories\-first , \-no\-group\-directories\-first
Sort (or not) directories before files.
.It Fl \-confirm\-quit , \-no\-confirm\-quit
Require a confirmation before quitting ncdu.
Can be helpful when you accidentally press 'q' during or after a very long scan.
.It Fl \-confirm\-delete , \-no\-confirm\-delete
Require a confirmation before deleting a file or directory.
Enabled by default, but can be disabled if you're absolutely sure you won't
accidentally press 'd'.
.It Fl \-delete\-command Ar command
When set to a non-empty string, replace the built-in file deletion feature with
a custom shell command.
.Pp
The absolute path of the item to be deleted is appended to the given command
and the result is evaluated in a shell.
The command is run from the same directory that ncdu itself was started in.
The
.Ev NCDU_DELETE_PATH
environment variable is set to the absolute path of the item to be deleted and
.Ev NCDU_LEVEL
is set in the same fashion as when spawning a shell from within ncdu.
.Pp
After command completion, the in-memory view of the selected item is refreshed
and directory sizes are adjusted as necessary.
This is not a full refresh of the complete directory tree, so if the item has
been renamed or moved to another directory, its new location is not
automatically picked up.
.Pp
For example, to use
.Xr rm 1
in interactive mode to prompt before each deletion:
.Dl ncdu --no-confirm-delete --delete-command \[aq]rm -ri --\[aq]
Or to move files to trash:
.Dl ncdu --delete-command \[aq]gio trash --\[aq]
.It Fl \-color Ar off | dark | dark-bg
Set the color scheme.
The following schemes are recognized:
.Ar off
to disable colors,
.Ar dark
for a color scheme intended for dark backgrounds and
.Ar dark\-bg
for a variation of the
.Ar dark
color scheme that also works in terminals with a light background.
.Pp
The default is
.Ar off .
.El
.
.Sh CONFIGURATION
.Nm
can be configured by placing command-line options in
.Pa /etc/ncdu.conf
or
.Pa $HOME/.config/ncdu/config .
If both files exist, the system configuration will be loaded before the user
configuration, allowing users to override options set in the system
configuration.
Options given on the command line will override options set in the
configuration files.
The files will not be read at all when
.Fl \-ignore\-config
is given on the command line.
.Pp
The configuration file format is simply one command line option per line.
Lines starting with '#' are ignored.
A line can be prefixed with '@' to suppress errors while parsing the option.
Example configuration file:
.Bd -literal -offset indent
# Always enable extended mode
\-e

# Disable file deletion
\-\-disable\-delete

# Exclude .git directories
\-\-exclude .git

# Read excludes from ~/.ncduexcludes, ignore error if the file does not exist
@--exclude-from ~/.ncduexcludes
.Ed
.
.Sh KEYS
.Bl -tag -width Ds
.It ?
Open help + keys + about screen
.It up , down , j , k
Cycle through the items
.It right, enter, l
Open selected directory
.It left, <, h
Go to parent directory
.It n
Order by filename (press again for descending order)
.It s
Order by filesize (press again for descending order)
.It C
Order by number of items (press again for descending order)
.It a
Toggle between showing disk usage and showing apparent size.
.It M
Order by latest child mtime, or modified time (press again for descending
order).
Requires the
.Fl e
flag.
.It d
Delete the selected file or directory.
An error message will be shown when the contents of the directory do not match
or do not exist anymore on the filesystem.
.It t
Toggle dirs before files when sorting.
.It g
Toggle between showing percentage, graph, both, or none.
Percentage is relative to the size of the current directory; graph is relative
to the largest item in the current directory.
.It u
Toggle display of the shared / unique size column for directories that share
hard links.
This column is only visible if the current listing contains directories with
shared hard links.
.It c
Toggle display of child item counts.
.It m
Toggle display of latest child mtime, or modified time.
Requires the
.Fl e
flag.
.It e
Show/hide 'hidden' or 'excluded' files and directories.
Be aware that even if you can't see the hidden files and directories, they are
still there and they are still included in the directory sizes.
If you suspect that the totals shown at the bottom of the screen are not
correct, make sure you haven't enabled this option.
.It i
Show information about the current selected item.
.It r
Refresh/recalculate the current directory.
.It b
Spawn shell in current directory.
.Pp
.Nm
determines your preferred shell from the
.Ev NCDU_SHELL
or
.Ev SHELL
environment variable (in that order), or calls
.Pa /bin/sh
if neither are set.
This allows you to also configure another command to be run when the 'b' key is
pressed.
For example, to spawn the
.Xr vifm 1
file manager instead of a shell, run
.Nm
as follows:
.Dl NCDU_SHELL=vifm ncdu
The
.Ev NCDU_LEVEL
environment variable is set or incremented before spawning the shell, allowing
you to detect if your shell is running from within
.Nm .
This can be useful to avoid nesting multiple instances, although
.Nm
itself does not (currently) warn about or prevent this situation.
.It q
Quit
.El
.
.Sh FILE FLAGS
Entries in the browser interface may be prefixed by a one\-character flag.
These flags have the following meaning:
.Bl -tag -width Ds
.It !
An error occurred while reading this directory.
.It \.
An error occurred while reading a subdirectory, so the indicated size may not
be correct.
.It <
File or directory is excluded from the statistics by using exclude patterns.
.It >
Directory is on another filesystem.
.It ^
Directory is excluded from the statistics due to being a Linux pseudo
filesystem.
.It @
This is neither a file nor a folder (symlink, socket, ...).
.It H
Same file was already counted (hard link).
.It e
Empty directory.
.El
.
.Sh EXAMPLES
To scan and browse the directory you're currently in, all you need is a simple:
.Dl ncdu
To scan a full filesystem, for example your root filesystem, you'll want to use
.Fl x :
.Dl ncdu \-x /
.Pp
Since scanning a large directory may take a while, you can scan a directory and
export the results for later viewing:
.Bd -literal -offset indent
ncdu \-1xO export.ncdu /
# ...some time later:
ncdu \-f export.ncdu
.Ed
To export from a cron job, make sure to replace
.Fl 1
with
.Fl 0
to suppress unnecessary progress output.
.Pp
You can also export a directory and browse it once scanning is done:
.Dl ncdu \-co\- | tee export.json.zst | ./ncdu \-f\-
.Pp
To scan a system remotely, but browse through the files locally:
.Dl ssh user@system ncdu \-co\- / | ./ncdu \-f\-
Remote scanning and local viewing has two major advantages when
compared to running
.Nm
directly on the remote system: You can browse through the scanned directory on
the local system without any network latency, and
.Nm
does not keep the entire directory structure in memory when exporting, so this
won't consume much memory on the remote system.
.
.Sh SEE ALSO
.Xr du 1 ,
.Xr tree 1 .
.Pp
.Nm
has a website:
.Lk https://dev.yorhel.nl/ncdu
.
.Sh AUTHORS
Written by
.An Yorhel Aq Mt projects@yorhel.nl
.
.Sh BUGS
Directory hard links and firmlinks (MacOS) are not supported.
They are not detected as being hard links and will thus get scanned and counted
multiple times.
.Pp
Some minor glitches may appear when displaying filenames that contain multibyte
or multicolumn characters.
.Pp
The unique and shared directory sizes are calculated based on the assumption
that the link count of hard links does not change during a filesystem scan or
in between refreshes.
If this does happen, for example when a hard link is deleted, then these
numbers will be very much incorrect and a full refresh by restarting ncdu is
needed to get correct numbers again.
.Pp
All sizes are internally represented as a signed 64bit integer.
If you have a directory larger than 8 EiB minus one byte, ncdu will clip its
size to 8 EiB minus one byte.
When deleting or refreshing items in a directory with a clipped size, the
resulting sizes will be incorrect.
Likewise, item counts are stored in a 32-bit integer, so will be incorrect in
the unlikely event that you happen to have more than 4 billion items in a
directory.
.Pp
Please report any other bugs you may find at the bug tracker, which can be
found on the web site at
.Lk https://dev.yorhel.nl/ncdu
```
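The man page's cron advice above maps to a command a scheduled job would run; a sketch (the export path is a placeholder):

```sh
# Silent scan (-0), one filesystem (-x), binary export (-O) for later browsing:
ncdu -0xO /var/lib/ncdu/root.ncdu /
# Later, interactively:
ncdu -f /var/lib/ncdu/root.ncdu
```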
564
ncdu.pod
564
ncdu.pod
|
|
@ -1,564 +0,0 @@
|
|||
SPDX-FileCopyrightText: 2021-2022 Yoran Heling <projects@yorhel.nl>
|
||||
SPDX-License-Identifier: MIT
|
||||
|
||||
=head1 NAME
|
||||
|
||||
B<ncdu> - NCurses Disk Usage
|
||||
|
||||
|
||||
=head1 SYNOPSIS
|
||||
|
||||
B<ncdu> [I<options>] I<dir>
|
||||
|
||||
|
||||
=head1 DESCRIPTION
|
||||
|
||||
ncdu (NCurses Disk Usage) is a curses-based version of the well-known 'du', and
|
||||
provides a fast way to see what directories are using your disk space.
|
||||
|
||||
|
||||
=head1 OPTIONS
|
||||
|
||||
=head2 Mode Selection
|
||||
|
||||
=over
|
||||
|
||||
=item B<-h>, -B<-help>
|
||||
|
||||
Print a short help message and quit.
|
||||
|
||||
=item B<-v>, B<-V>, B<--version>
|
||||
|
||||
Print ncdu version and quit.
|
||||
|
||||
=item B<-f> I<FILE>
|
||||
|
||||
Load the given file, which has earlier been created with the C<-o> option. If
|
||||
I<FILE> is equivalent to C<->, the file is read from standard input.
|
||||
|
||||
For the sake of preventing a screw-up, the current version of ncdu will assume
|
||||
that the directory information in the imported file does not represent the
|
||||
filesystem on which the file is being imported. That is, the refresh, file
|
||||
deletion and shell spawning options in the browser will be disabled.
|
||||
|
||||
=item I<dir>
|
||||
|
||||
Scan the given directory.
|
||||
|
||||
=item B<-o> I<FILE>
|
||||
|
||||
Export all necessary information to I<FILE> instead of opening the browser
|
||||
interface. If I<FILE> is C<->, the data is written to standard output. See the
|
||||
examples section below for some handy use cases.
|
||||
|
||||
Be warned that the exported data may grow quite large when exporting a
|
||||
directory with many files. 10.000 files will get you an export in the order of
|
||||
600 to 700 KiB uncompressed, or a little over 100 KiB when compressed with
|
||||
gzip. This scales linearly, so be prepared to handle a few tens of megabytes
|
||||
when dealing with millions of files.
|
||||
|
||||
=item B<-e>, B<--extended>, B<--no-extended>
|
||||
|
||||
Enable/disable extended information mode. This will, in addition to the usual
|
||||
file information, also read the ownership, permissions and last modification
|
||||
time for each file. This will result in higher memory usage (by roughly ~30%)
|
||||
and in a larger output file when exporting.
|
||||
|
||||
When using the file export/import function, this flag will need to be added
|
||||
both when exporting (to make sure the information is added to the export), and
|
||||
when importing (to read this extra information in memory). This flag has no
|
||||
effect when importing a file that has been exported without the extended
|
||||
information.
|
||||
|
||||
This enables viewing and sorting by the latest child mtime, or modified time,
|
||||
using 'm' and 'M', respectively.
|
||||
|
||||
=item B<--ignore-config>
|
||||
|
||||
Do not attempt to load any configuration files.
|
||||
|
||||
=back
|
||||
|
||||
=head2 Scan Options
|
||||
|
||||
These options affect the scanning progress, and have no effect when importing
|
||||
directory information from a file.
|
||||
|
||||
=over
|
||||
|
||||
=item B<-x>, B<--one-file-system>
|
||||
|
||||
Do not cross filesystem boundaries, i.e. only count files and directories on
|
||||
the same filesystem as the directory being scanned.
|
||||
|
||||
=item B<--cross-file-system>
|
||||
|
||||
Do cross filesystem boundaries. This is the default, but can be specified to
|
||||
overrule a previously given C<-x>.
|
||||
|
||||
=item B<--exclude> I<PATTERN>
|
||||
|
||||
Exclude files that match I<PATTERN>. The files will still be displayed by
|
||||
default, but are not counted towards the disk usage statistics. This argument
|
||||
can be added multiple times to add more patterns.
|
||||
|
||||
=item B<-X> I<FILE>, B<--exclude-from> I<FILE>
|
||||
|
||||
Exclude files that match any pattern in I<FILE>. Patterns should be separated
|
||||
by a newline.
|
||||
|
||||
=item B<--include-caches>, B<--exclude-caches>
|
||||
|
||||
Include (default) or exclude directories containing CACHEDIR.TAG. The
|
||||
directories will still be displayed, but their contents will not be scanned or
|
||||
counted towards the disk usage statistics.
|
||||
L<http://www.brynosaurus.com/cachedir/>
|
||||
|
||||
=item B<-L>, B<--follow-symlinks>, B<--no-follow-symlinks>
|
||||
|
||||
Follow (or not) symlinks and count the size of the file they point to. As of
|
||||
ncdu 1.14, this option will not follow symlinks to directories and will count
|
||||
each symlinked file as a unique file (i.e. unlike how hard links are handled).
|
||||
This is subject to change in later versions.
|
||||
|
||||
=item B<--include-kernfs>, B<--exclude-kernfs>
|
||||
|
||||
(Linux only) Include (default) or exclude Linux pseudo filesystems, e.g. /proc
|
||||
(procfs), /sys (sysfs).
|
||||
|
||||
The complete list of currently known pseudo filesystems is: binfmt, bpf, cgroup,
|
||||
cgroup2, debug, devpts, proc, pstore, security, selinux, sys, trace.
|
||||
|
||||
=back
|
||||
|
||||
=head2 Interface options
|
||||
|
||||
=over
|
||||
|
||||
=item B<-0>
|
||||
|
||||
Don't give any feedback while scanning a directory or importing a file, other
|
||||
than when a fatal error occurs. Ncurses will not be initialized until the scan
|
||||
is complete. When exporting the data with C<-o>, ncurses will not be
|
||||
initialized at all. This option is the default when exporting to standard
|
||||
output.
|
||||
|
||||
=item B<-1>
|
||||
|
||||
Similar to C<-0>, but does give feedback on the scanning progress with a single
|
||||
line of output. This option is the default when exporting to a file.
|
||||
|
||||
In some cases, the ncurses browser interface which you'll see after the
|
||||
scan/import is complete may look garbled when using this option. If you're not
|
||||
exporting to a file, C<-2> is probably a better choice.
|
||||
|
||||
=item B<-2>
|
||||
|
||||
Provide a full-screen ncurses interface while scanning a directory or importing
|
||||
a file. This is the only interface that provides feedback on any non-fatal
|
||||
errors while scanning.
|
||||
|
||||
=item B<-q>, B<--slow-ui-updates>, B<--fast-ui-updates>
|
||||
|
||||
Change the UI update interval while scanning or importing. Ncdu will update the
|
||||
screen 10 times a second by default (C<--fast-ui-updates>), this can be
|
||||
decreased to once every 2 seconds with C<-q> or C<--slow-ui-updates>. This
|
||||
feature can be used to save bandwidth over remote connections. This option has
|
||||
no effect when C<-0> is used.
|
||||
|
||||
=item B<--enable-shell>, B<--disable-shell>
|
||||
|
||||
Enable or disable shell spawning from the browser. This feature is enabled by
|
||||
default when scanning a live directory and disabled when importing from file.
|
||||
|
||||
=item B<--enable-delete>, B<--disable-delete>
|
||||
|
||||
Enable or disable the built-in file deletion feature. This feature is enabled
|
||||
by default when scanning a live directory and disabled when importing from
|
||||
file. Explicitly disabling the deletion feature can work as a safeguard to
|
||||
prevent accidental data loss.
|
||||
|
||||
=item B<--enable-refresh>, B<--disable-refresh>
|
||||
|
||||
Enable or disable directory refreshing from the browser. This feature is
|
||||
enabled by default when scanning a live directory and disabled when importing
|
||||
from file.
|
||||
|
||||
=item B<-r>
|
||||
|
||||
Read-only mode. When given once, this is an alias for C<--disable-delete>, when
|
||||
given twice it will also add C<--disable-shell>, thus ensuring that there is no
|
||||
way to modify the file system from within ncdu.
|
||||
|
||||
=item B<--si>, B<--no-si>
|
||||
|
||||
List sizes using base 10 prefixes, that is, powers of 1000 (KB, MB, etc), as
|
||||
defined in the International System of Units (SI), instead of the usual base 2
|
||||
prefixes, that is, powers of 1024 (KiB, MiB, etc).
|
||||
|
||||
=item B<--disk-usage>, B<--apparent-size>
|
||||
|
||||
Select whether to display disk usage (default) or apparent sizes. Can also be
|
||||
toggled in the browser with the 'a' key.
|
||||
|
||||
=item B<--show-hidden>, B<--hide-hidden>
|
||||
|
||||
Show (default) or hide "hidden" and excluded files. Can also be toggled in the
|
||||
browser with the 'e' key.
|
||||
|
||||
=item B<--show-itemcount>, B<--hide-itemcount>
|
||||
|
||||
Show or hide (default) the item counts column. Can also be toggled in the
|
||||
browser with the 'c' key.
|
||||
|
||||
=item B<--show-mtime>, B<--hide-mtime>
|
||||
|
||||
Show or hide (default) the last modification time column. Can also be toggled
|
||||
in the browser with the 'm' key. This option is ignored when not in extended
|
||||
mode (see C<-e>).
|
||||
|
||||
=item B<--show-graph>, B<--hide-graph>
|
||||
|
||||
Show (default) or hide the relative size bar column. Can also be toggled in the
|
||||
browser with the 'g' key.
|
||||
|
||||
=item B<--show-percent>, B<--hide-percent>
|
||||
|
||||
Show (default) or hide the relative size percent column. Can also be toggled in
|
||||
the browser with the 'g' key.
|
||||
|
||||
=item B<--graph-style> I<OPTION>
|
||||
|
||||
Change the way that the relative size bar column is drawn. Recognized values
|
||||
are I<hash> to draw ASCII C<#> characters (most portable), I<half-block> to use
|
||||
half-block drawing characters (the default) or I<eigth-block> to use
|
||||
eigth-block drawing characters. Eigth-block characters are the most precise but
|
||||
may not render correctly in all terminals.
|
||||
|
||||
=item B<--shared-column> I<OPTION>
|
||||
|
||||
Set to I<off> to disable the shared size column for directories, I<shared>
|
||||
(default) to display shared directory sizes as a separate column or I<unique>
|
||||
to display unique directory sizes as a separate column. These options can also
|
||||
be cycled through in the browser with the 'u' key.
|
||||
|
||||
=item B<--sort> I<COLUMN>
|
||||
|
||||
Change the default column to sort on. Accepted values are I<disk-usage> (the
|
||||
default), I<name>, I<apparent-size>, I<itemcount> or I<mtime>. The latter only
|
||||
makes sense in extended mode, see C<-e>.
|
||||
|
||||
The column can be suffixed with I<-asc> or I<-desc> to set the order to
|
||||
ascending or descending, respectively. e.g. C<--sort=name-desc> will sort by
|
||||
name in descending order.
|
||||
|
||||
=item B<--group-directories-first>, B<--no-group-directories-first>
|
||||
|
||||
Sort (or not) directories before files.
|
||||
|
||||
=item B<--confirm-quit>, B<--no-confirm-quit>
|
||||
|
||||
Require a confirmation before quitting ncdu. Very helpful when you accidentally
|
||||
press 'q' during or after a very long scan.
|
||||
|
||||
=item B<--confirm-delete>, B<--no-confirm-delete>
|
||||
|
||||
Require a confirmation before deleting a file or directory. Enabled by default,
|
||||
but can be disabled if you're absolutely sure you won't accidentally press 'd'.
|
||||
|
||||
=item B<--color> I<SCHEME>
|
||||
|
||||
Select a color scheme. The following schemes are recognized: I<off> to disable
|
||||
colors, I<dark> for a color scheme intended for dark backgrounds and I<dark-bg>
|
||||
for a variation of the I<dark> color scheme that also works in terminals with a
|
||||
light background.
|
||||
|
||||
The default is I<dark-bg> unless the C<NO_COLOR> environment variable is set.
|
||||
|
||||
=back
|
||||
|
||||
|
||||
=head1 CONFIGURATION
|
||||
|
||||
Ncdu can be configured by placing command-line options in C</etc/ncdu.conf> or
|
||||
C<$HOME/.config/ncdu/config>. If both files exist, the system configuration
|
||||
will be loaded before the user configuration, allowing users to override
|
||||
options set in the system configuration. Options given on the command line will
|
||||
override options set in the configuration files. The files will not be read at
|
||||
all when C<--ignore-config> is given on the command line.
|
||||
|
||||
The configuration file format is simply one command line option per line. Lines
|
||||
starting with C<#> are ignored. Example configuration file:
|
||||
|
||||
# Always enable extended mode
|
||||
-e
|
||||
|
||||
# Disable file deletion
|
||||
--disable-delete
|
||||
|
||||
# Exclude .git directories
|
||||
--exclude .git
|
||||
|
||||
|
||||
=head1 KEYS
|
||||
|
||||
=over
|
||||
|
||||
=item B<?>
|
||||
|
||||
Show help + keys + about screen
|
||||
|
||||
=item B<up>, B<down>, B<j>, B<k>
|
||||
|
||||
Cycle through the items
|
||||
|
||||
=item B<right>, B<enter>, B<l>
|
||||
|
||||
Open selected directory
|
||||
|
||||
=item B<left>, B<< < >>, B<h>
|
||||
|
||||
Go to parent directory
|
||||
|
||||
=item B<n>
|
||||
|
||||
Order by filename (press again for descending order)
|
||||
|
||||
=item B<s>
|
||||
|
||||
Order by filesize (press again for descending order)
|
||||
|
||||
=item B<C>
|
||||
|
||||
Order by number of items (press again for descending order)
|
||||
|
||||
=item B<a>
|
||||
|
||||
Toggle between showing disk usage and showing apparent size.
|
||||
|
||||
=item B<M>
|
||||
|
||||
Order by latest child mtime, or modified time. (press again for descending order)
|
||||
Requires the -e flag.

=item B<d>

Delete the selected file or directory. An error message will be shown when the
contents of the directory do not match or do not exist anymore on the
filesystem.

=item B<t>

Toggle dirs before files when sorting.

=item B<g>

Toggle between showing percentage, graph, both, or none. Percentage is relative
to the size of the current directory, graph is relative to the largest item in
the current directory.

=item B<u>

Toggle display of the shared / unique size column for directories that share
hard links. This column is only visible if the current listing contains
directories with shared hard links.

=item B<c>

Toggle display of child item counts.

=item B<m>

Toggle display of latest child mtime (modified time). Requires the -e flag.

=item B<e>

Show/hide 'hidden' or 'excluded' files and directories. Please note that even
though you can't see the hidden files and directories, they are still there and
they are still included in the directory sizes. If you suspect that the totals
shown at the bottom of the screen are not correct, make sure you haven't
enabled this option.

=item B<i>

Show information about the currently selected item.

=item B<r>

Refresh/recalculate the current directory.

=item B<b>

Spawn shell in current directory.

Ncdu will determine your preferred shell from the C<NCDU_SHELL> or C<SHELL>
variable (in that order), or will call C</bin/sh> if neither is set. This
allows you to also configure another command to be run when the 'b' key is
pressed. For example, to spawn the L<vifm(1)> file manager instead of a shell,
run ncdu as follows:

  export NCDU_SHELL=vifm
  ncdu

Ncdu will set the C<NCDU_LEVEL> environment variable (or increment it, if
already set) before spawning the shell. This variable allows you to detect
when your shell is running from within ncdu, which can be useful to avoid
nesting multiple instances of ncdu. Ncdu itself does not (currently) warn when
attempting to run nested instances.
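
For example, a shell startup file could use this variable to flag nested
sessions (an illustrative snippet for POSIX-style shells):

  # In ~/.bashrc or similar: warn when this shell was spawned from ncdu
  if [ -n "$NCDU_LEVEL" ]; then
      echo "Note: this shell is running inside ncdu (level $NCDU_LEVEL)"
  fi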

=item B<q>

Quit

=back


=head1 FILE FLAGS

Entries in the browser interface may be prefixed by a one-character flag. These
flags have the following meaning:

=over

=item B<!>

An error occurred while reading this directory.

=item B<.>

An error occurred while reading a subdirectory, so the indicated size may not
be correct.

=item B<< < >>

File or directory is excluded from the statistics by using exclude patterns.

=item B<< > >>

Directory is on another filesystem.

=item B<^>

Directory is excluded from the statistics due to being a Linux pseudo
filesystem.

=item B<@>

This is neither a file nor a folder (symlink, socket, ...).

=item B<H>

Same file was already counted (hard link).

=item B<e>

Empty directory.

=back


=head1 EXAMPLES

To scan and browse the directory you're currently in, all you need is a simple:

  ncdu

If you want to scan a full filesystem, your root filesystem, for example, then
you'll want to use C<-x>:

  ncdu -x /

Since scanning a large directory may take a while, you can scan a directory and
export the results for later viewing:

  ncdu -1xo- / | gzip >export.gz
  # ...some time later:
  zcat export.gz | ncdu -f-

To export from a cron job, make sure to replace C<-1> with C<-0> to suppress
any unnecessary output.
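
For instance, a crontab entry along these lines (the destination path is
illustrative) would export the root filesystem every night:

  0 4 * * * ncdu -0xo- / | gzip >/var/backups/ncdu-export.gz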

You can also export a directory and browse it once scanning is done:

  ncdu -o- | tee export.file | ./ncdu -f-

The same is possible with gzip compression, but is a bit kludgey:

  ncdu -o- | gzip | tee export.gz | gunzip | ./ncdu -f-

To scan a system remotely, but browse through the files locally:

  ssh -C user@system ncdu -o- / | ./ncdu -f-

The C<-C> option to ssh enables compression, which will be very useful over
slow links. Remote scanning and local viewing have two major advantages when
compared to running ncdu directly on the remote system: you can browse through
the scanned directory on the local system without any network latency, and ncdu
does not keep the entire directory structure in memory when exporting, so you
won't consume much memory on the remote system.


=head1 HARD LINKS

Every disk usage analysis utility has its own way of (not) counting hard links.
There does not seem to be any universally agreed method of handling hard links,
and it is even inconsistent among different versions of ncdu. This section
explains what each version of ncdu does.

ncdu 1.5 and below does not support any hard link detection at all: each link
is considered a separate inode and its size is counted for every link. This
means that the displayed directory sizes are incorrect when analyzing
directories which contain hard links.

ncdu 1.6 has basic hard link detection: when a link to a previously encountered
inode is detected, the link is considered to have a file size of zero bytes.
Its size is not counted again, and the link is indicated in the browser
interface with a 'H' mark. The displayed directory sizes are only correct when
all links to an inode reside within that directory. When this is not the case,
the sizes may or may not be correct, depending on which links were considered
as "duplicate" and which as "original". The indicated size of the topmost
directory (that is, the one specified on the command line upon starting ncdu)
is always correct.

ncdu 1.7 and later has improved hard link detection. Each file that has more
than two links has the "H" mark visible in the browser interface. Each hard
link is counted exactly once for every directory it appears in. The indicated
size of each directory is therefore, correctly, the sum of the sizes of all
unique inodes that can be found in that directory. Note, however, that this may
not always be the same as the space that will be reclaimed after deleting the
directory, as some inodes may still be accessible from hard links outside it.
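
As an illustration, consider a single 1 MiB inode with three links:

  a/file1   (1 MiB)
  a/file2   (hard link to a/file1)
  b/file3   (hard link to a/file1)

Both C<a> and C<b> are reported as 1 MiB, as is their common parent directory.
Deleting only C<a> frees no space, however, since the inode remains reachable
through C<b/file3>.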

=head1 BUGS

Directory hard links and firmlinks (macOS) are not supported. They will not be
detected as being hard links, and may thus be scanned and counted multiple
times.

Some minor glitches may appear when displaying filenames that contain multibyte
or multicolumn characters.

The unique and shared directory sizes are calculated based on the assumption
that the link count of hard links does not change during a filesystem scan or
in between refreshes. If it does, for example after deleting a hard link, then
these numbers will be very much incorrect and a full refresh by restarting ncdu
is needed to get correct numbers again.

All sizes are internally represented as a signed 64-bit integer. If you have a
directory larger than 8 EiB minus one byte, ncdu will clip its size to 8 EiB
minus one byte. When deleting or refreshing items in a directory with a clipped
size, the resulting sizes will be incorrect. Likewise, item counts are stored
in a 32-bit integer, so they will be incorrect in the unlikely event that you
happen to have more than 4 billion items in a directory.

Please report any other bugs you may find at the bug tracker, which can be
found on the web site at https://dev.yorhel.nl/ncdu


=head1 AUTHOR

Written by Yoran Heling <projects@yorhel.nl>.


=head1 SEE ALSO

L<du(1)>


src/bin_export.zig (new file, 468 lines)

@@ -0,0 +1,468 @@
// SPDX-FileCopyrightText: Yorhel <projects@yorhel.nl>
// SPDX-License-Identifier: MIT

const std = @import("std");
const main = @import("main.zig");
const model = @import("model.zig");
const sink = @import("sink.zig");
const util = @import("util.zig");
const ui = @import("ui.zig");
const c = @import("c.zig").c;

pub const global = struct {
    var fd: std.fs.File = undefined;
    var index: std.ArrayListUnmanaged(u8) = .empty;
    var file_off: u64 = 0;
    var lock: std.Thread.Mutex = .{};
    var root_itemref: u64 = 0;
};

pub const SIGNATURE = "\xbfncduEX1";

pub const ItemKey = enum(u5) {
    // all items
    type = 0, // EType
    name = 1, // bytes
    prev = 2, // itemref
    // Only for non-specials
    asize = 3, // u64
    dsize = 4, // u64
    // Only for .dir
    dev = 5, // u64, only if different from parent dir
    rderr = 6, // bool: true = error reading directory list, false = error in sub-item, absent = no error
    cumasize = 7, // u64
    cumdsize = 8, // u64
    shrasize = 9, // u64
    shrdsize = 10, // u64
    items = 11, // u64
    sub = 12, // itemref, only if dir is not empty
    // Only for .link
    ino = 13, // u64
    nlink = 14, // u32
    // Extended mode
    uid = 15, // u32
    gid = 16, // u32
    mode = 17, // u16
    mtime = 18, // u64
    _,
};

// Pessimistic upper bound on the encoded size of an item, excluding the name field.
// 2 bytes for map start/end, 11 per field (2 for the key, 9 for a full u64).
const MAX_ITEM_LEN = 2 + 11 * @typeInfo(ItemKey).@"enum".fields.len;
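// With the 19 keys defined above this works out to 2 + 11*19 = 211 bytes.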

pub const CborMajor = enum(u3) { pos, neg, bytes, text, array, map, tag, simple };

inline fn bigu16(v: u16) [2]u8 { return @bitCast(std.mem.nativeToBig(u16, v)); }
inline fn bigu32(v: u32) [4]u8 { return @bitCast(std.mem.nativeToBig(u32, v)); }
inline fn bigu64(v: u64) [8]u8 { return @bitCast(std.mem.nativeToBig(u64, v)); }

inline fn blockHeader(id: u4, len: u28) [4]u8 { return bigu32((@as(u32, id) << 28) | len); }

inline fn cborByte(major: CborMajor, arg: u5) u8 { return (@as(u8, @intFromEnum(major)) << 5) | arg; }
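
// Some initial bytes this produces, for reference:
//   cborByte(.pos, 5)     -> 0x05  (major 0, small value stored in the low bits)
//   cborByte(.bytes, 24)  -> 0x58  (major 2, a one-byte length argument follows)
//   cborByte(.map, 31)    -> 0xbf  (indefinite-length map, see cborIndef() below)
//   cborByte(.simple, 31) -> 0xff  (the break marker that ends such a map)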


// (Uncompressed) data block size.
// Start with 64k, then use increasingly larger block sizes as the export file
// grows. This is both to stay within the block number limit of the index block
// and because, with a larger index block, the reader will end up using more
// memory anyway.
fn blockSize(num: u32) usize {
    //                          block size   uncompressed data in this num range
    //              # mil          # KiB                  # GiB
    return main.config.export_block_size
        orelse if (num < ( 1<<20))   64<<10 //     64
        else   if (num < ( 2<<20))  128<<10 //    128
        else   if (num < ( 4<<20))  256<<10 //    512
        else   if (num < ( 8<<20))  512<<10 //   2048
        else   if (num < (16<<20)) 1024<<10 //   8192
        else                       2048<<10; // 32768
}

// Upper bound on the return value of blockSize()
// (config.export_block_size may be larger than the sizes listed above, let's
// stick with the maximum block size supported by the file format to be safe)
const MAX_BLOCK_SIZE: usize = 1<<28;


pub const Thread = struct {
    buf: []u8 = undefined,
    off: usize = MAX_BLOCK_SIZE, // pretend we have a full block to trigger a flush() for the first write
    block_num: u32 = std.math.maxInt(u32),
    itemref: u64 = 0, // ref of item currently being written

    // unused, but kept around for easy debugging
    fn compressNone(in: []const u8, out: []u8) usize {
        @memcpy(out[0..in.len], in);
        return in.len;
    }

    fn compressZstd(in: []const u8, out: []u8) usize {
        while (true) {
            const r = c.ZSTD_compress(out.ptr, out.len, in.ptr, in.len, main.config.complevel);
            if (c.ZSTD_isError(r) == 0) return r;
            ui.oom(); // That *ought* to be the only reason the above call can fail.
        }
    }

    fn createBlock(t: *Thread) std.ArrayListUnmanaged(u8) {
        var out: std.ArrayListUnmanaged(u8) = .empty;
        if (t.block_num == std.math.maxInt(u32) or t.off == 0) return out;

        out.ensureTotalCapacityPrecise(main.allocator, 12 + @as(usize, @intCast(c.ZSTD_COMPRESSBOUND(@as(c_int, @intCast(t.off)))))) catch unreachable;
        out.items.len = out.capacity;
        const bodylen = compressZstd(t.buf[0..t.off], out.items[8..]);
        out.items.len = 12 + bodylen;

        out.items[0..4].* = blockHeader(0, @intCast(out.items.len));
        out.items[4..8].* = bigu32(t.block_num);
        out.items[8+bodylen..][0..4].* = blockHeader(0, @intCast(out.items.len));
        return out;
    }

    fn flush(t: *Thread, expected_len: usize) void {
        @branchHint(.unlikely);
        var block = createBlock(t);
        defer block.deinit(main.allocator);

        global.lock.lock();
        defer global.lock.unlock();
        // This can only really happen when the root path exceeds our block size,
        // in which case we would probably have error'ed out earlier anyway.
        if (expected_len > t.buf.len) ui.die("Error writing data: path too long.\n", .{});

        if (block.items.len > 0) {
            if (global.file_off >= (1<<40)) ui.die("Export data file has grown too large, please report a bug.\n", .{});
            global.index.items[4..][t.block_num*8..][0..8].* = bigu64((global.file_off << 24) + block.items.len);
            global.file_off += block.items.len;
            global.fd.writeAll(block.items) catch |e|
                ui.die("Error writing to file: {s}.\n", .{ ui.errorString(e) });
        }

        t.off = 0;
        t.block_num = @intCast((global.index.items.len - 4) / 8);
        global.index.appendSlice(main.allocator, &[1]u8{0}**8) catch unreachable;
        if (global.index.items.len + 12 >= (1<<28)) ui.die("Too many data blocks, please report a bug.\n", .{});

        const newsize = blockSize(t.block_num);
        if (t.buf.len != newsize) t.buf = main.allocator.realloc(t.buf, newsize) catch unreachable;
    }

    fn cborHead(t: *Thread, major: CborMajor, arg: u64) void {
        if (arg <= 23) {
            t.buf[t.off] = cborByte(major, @intCast(arg));
            t.off += 1;
        } else if (arg <= std.math.maxInt(u8)) {
            t.buf[t.off] = cborByte(major, 24);
            t.buf[t.off+1] = @truncate(arg);
            t.off += 2;
        } else if (arg <= std.math.maxInt(u16)) {
            t.buf[t.off] = cborByte(major, 25);
            t.buf[t.off+1..][0..2].* = bigu16(@intCast(arg));
            t.off += 3;
        } else if (arg <= std.math.maxInt(u32)) {
            t.buf[t.off] = cborByte(major, 26);
            t.buf[t.off+1..][0..4].* = bigu32(@intCast(arg));
            t.off += 5;
        } else {
            t.buf[t.off] = cborByte(major, 27);
            t.buf[t.off+1..][0..8].* = bigu64(arg);
            t.off += 9;
        }
    }

    fn cborIndef(t: *Thread, major: CborMajor) void {
        t.buf[t.off] = cborByte(major, 31);
        t.off += 1;
    }

    fn itemKey(t: *Thread, key: ItemKey) void {
        t.cborHead(.pos, @intFromEnum(key));
    }

    fn itemRef(t: *Thread, key: ItemKey, ref: ?u64) void {
        const r = ref orelse return;
        t.itemKey(key);
        // Full references compress like shit and most of the references point
        // into the same block, so optimize that case by using a negative
        // offset instead.
        if ((r >> 24) == t.block_num) t.cborHead(.neg, t.itemref - r - 1)
        else t.cborHead(.pos, r);
    }

    // Reserve space for a new item, write out the type, prev and name fields and return the itemref.
    fn itemStart(t: *Thread, itype: model.EType, prev_item: ?u64, name: []const u8) u64 {
        const min_len = name.len + MAX_ITEM_LEN;
        if (t.off + min_len > t.buf.len) t.flush(min_len);

        t.itemref = (@as(u64, t.block_num) << 24) | t.off;
        t.cborIndef(.map);
        t.itemKey(.type);
        if (@intFromEnum(itype) >= 0) t.cborHead(.pos, @intCast(@intFromEnum(itype)))
        else t.cborHead(.neg, @intCast(-1 - @intFromEnum(itype)));
        t.itemKey(.name);
        t.cborHead(.bytes, name.len);
        @memcpy(t.buf[t.off..][0..name.len], name);
        t.off += name.len;
        t.itemRef(.prev, prev_item);
        return t.itemref;
    }
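
    // Example: an item written at offset 0x30 into block 5 gets itemref
    // (5 << 24) | 0x30 == 0x0500_0030. A later reference to it from within
    // block 5 is then stored as a small negative offset (see itemRef() above),
    // which compresses much better than the full reference.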

    fn itemExt(t: *Thread, stat: *const sink.Stat) void {
        if (!main.config.extended) return;
        if (stat.ext.pack.hasuid) {
            t.itemKey(.uid);
            t.cborHead(.pos, stat.ext.uid);
        }
        if (stat.ext.pack.hasgid) {
            t.itemKey(.gid);
            t.cborHead(.pos, stat.ext.gid);
        }
        if (stat.ext.pack.hasmode) {
            t.itemKey(.mode);
            t.cborHead(.pos, stat.ext.mode);
        }
        if (stat.ext.pack.hasmtime) {
            t.itemKey(.mtime);
            t.cborHead(.pos, stat.ext.mtime);
        }
    }

    fn itemEnd(t: *Thread) void {
        t.cborIndef(.simple);
    }
};


pub const Dir = struct {
    // TODO: When items are written out into blocks depth-first, parent dirs
    // will end up getting their items distributed over many blocks, which will
    // significantly slow down reading that dir's listing. It may be worth
    // buffering some items at the Dir level before flushing them out to the
    // Thread buffer.

    // The lock protects all of the below, and is necessary because final()
    // accesses the parent dir and may be called from other threads.
    // I'm not expecting much lock contention, but it's possible to turn
    // last_item into an atomic integer and other fields could be split up for
    // subdir use.
    lock: std.Thread.Mutex = .{},
    last_sub: ?u64 = null,
    stat: sink.Stat,
    items: u64 = 0,
    size: u64 = 0,
    blocks: u64 = 0,
    err: bool = false,
    suberr: bool = false,
    shared_size: u64 = 0,
    shared_blocks: u64 = 0,
    inodes: Inodes = Inodes.init(main.allocator),

    const Inodes = std.AutoHashMap(u64, Inode);
    const Inode = struct {
        size: u64,
        blocks: u64,
        nlink: u32,
        nfound: u32,
    };


    pub fn addSpecial(d: *Dir, t: *Thread, name: []const u8, sp: model.EType) void {
        d.lock.lock();
        defer d.lock.unlock();
        d.items += 1;
        if (sp == .err) d.suberr = true;
        d.last_sub = t.itemStart(sp, d.last_sub, name);
        t.itemEnd();
    }

    pub fn addStat(d: *Dir, t: *Thread, name: []const u8, stat: *const sink.Stat) void {
        d.lock.lock();
        defer d.lock.unlock();
        d.items += 1;
        if (stat.etype != .link) {
            d.size +|= stat.size;
            d.blocks +|= stat.blocks;
        }
        d.last_sub = t.itemStart(stat.etype, d.last_sub, name);
        t.itemKey(.asize);
        t.cborHead(.pos, stat.size);
        t.itemKey(.dsize);
        t.cborHead(.pos, util.blocksToSize(stat.blocks));

        if (stat.etype == .link) {
            const lnk = d.inodes.getOrPut(stat.ino) catch unreachable;
            if (!lnk.found_existing) lnk.value_ptr.* = .{
                .size = stat.size,
                .blocks = stat.blocks,
                .nlink = stat.nlink,
                .nfound = 1,
            } else lnk.value_ptr.nfound += 1;
            t.itemKey(.ino);
            t.cborHead(.pos, stat.ino);
            t.itemKey(.nlink);
            t.cborHead(.pos, stat.nlink);
        }

        t.itemExt(stat);
        t.itemEnd();
    }

    pub fn addDir(d: *Dir, stat: *const sink.Stat) Dir {
        d.lock.lock();
        defer d.lock.unlock();
        d.items += 1;
        d.size +|= stat.size;
        d.blocks +|= stat.blocks;
        return .{ .stat = stat.* };
    }

    pub fn setReadError(d: *Dir) void {
        d.lock.lock();
        defer d.lock.unlock();
        d.err = true;
    }

    // XXX: older JSON exports did not include the nlink count and have
    // this field set to '0'. We can deal with that when importing to
    // mem_sink, but the hardlink counting algorithm used here really does need
    // that information. Current code makes sure to count such links only once
    // per dir, but does not count them towards the shared_* fields. That
    // behavior is similar to ncdu 1.x, but the difference between memory
    // import and this file export might be surprising.
    fn countLinks(d: *Dir, parent: ?*Dir) void {
        var parent_new: u32 = 0;
        var it = d.inodes.iterator();
        while (it.next()) |kv| {
            const v = kv.value_ptr;
            d.size +|= v.size;
            d.blocks +|= v.blocks;
            if (v.nlink > 1 and v.nfound < v.nlink) {
                d.shared_size +|= v.size;
                d.shared_blocks +|= v.blocks;
            }

            const p = parent orelse continue;
            // All contained in this dir, no need to keep this entry around
            if (v.nlink > 0 and v.nfound >= v.nlink) {
                p.size +|= v.size;
                p.blocks +|= v.blocks;
                _ = d.inodes.remove(kv.key_ptr.*);
            } else if (!p.inodes.contains(kv.key_ptr.*))
                parent_new += 1;
        }

        // Merge remaining inodes into parent
        const p = parent orelse return;
        if (d.inodes.count() == 0) return;

        // If parent is empty, just transfer
        if (p.inodes.count() == 0) {
            p.inodes.deinit();
            p.inodes = d.inodes;
            d.inodes = Inodes.init(main.allocator); // So we can deinit() without affecting parent
        // Otherwise, merge
        } else {
            p.inodes.ensureUnusedCapacity(parent_new) catch unreachable;
            it = d.inodes.iterator();
            while (it.next()) |kv| {
                const v = kv.value_ptr;
                const plnk = p.inodes.getOrPutAssumeCapacity(kv.key_ptr.*);
                if (!plnk.found_existing) plnk.value_ptr.* = v.*
                else plnk.value_ptr.*.nfound += v.nfound;
            }
        }
    }

    pub fn final(d: *Dir, t: *Thread, name: []const u8, parent: ?*Dir) void {
        if (parent) |p| p.lock.lock();
        defer if (parent) |p| p.lock.unlock();

        if (parent) |p| {
            // Different dev? Don't merge the 'inodes' sets, just count the
            // links here first so the sizes get added to the parent.
            if (p.stat.dev != d.stat.dev) d.countLinks(null);

            p.items += d.items;
            p.size +|= d.size;
            p.blocks +|= d.blocks;
            if (d.suberr or d.err) p.suberr = true;

            // Same dev, merge inodes
            if (p.stat.dev == d.stat.dev) d.countLinks(p);

            p.last_sub = t.itemStart(.dir, p.last_sub, name);
        } else {
            d.countLinks(null);
            global.root_itemref = t.itemStart(.dir, null, name);
        }
        d.inodes.deinit();

        t.itemKey(.asize);
        t.cborHead(.pos, d.stat.size);
        t.itemKey(.dsize);
        t.cborHead(.pos, util.blocksToSize(d.stat.blocks));
        if (parent == null or parent.?.stat.dev != d.stat.dev) {
            t.itemKey(.dev);
            t.cborHead(.pos, d.stat.dev);
        }
        if (d.err or d.suberr) {
            t.itemKey(.rderr);
            t.cborHead(.simple, if (d.err) 21 else 20);
        }
        t.itemKey(.cumasize);
        t.cborHead(.pos, d.size +| d.stat.size);
        t.itemKey(.cumdsize);
        t.cborHead(.pos, util.blocksToSize(d.blocks +| d.stat.blocks));
        if (d.shared_size > 0) {
            t.itemKey(.shrasize);
            t.cborHead(.pos, d.shared_size);
        }
        if (d.shared_blocks > 0) {
            t.itemKey(.shrdsize);
            t.cborHead(.pos, util.blocksToSize(d.shared_blocks));
        }
        t.itemKey(.items);
        t.cborHead(.pos, d.items);
        t.itemRef(.sub, d.last_sub);
        t.itemExt(&d.stat);
        t.itemEnd();
    }
};


pub fn createRoot(stat: *const sink.Stat, threads: []sink.Thread) Dir {
    for (threads) |*t| {
        t.sink.bin.buf = main.allocator.alloc(u8, blockSize(0)) catch unreachable;
    }

    return .{ .stat = stat.* };
}

pub fn done(threads: []sink.Thread) void {
    for (threads) |*t| {
        t.sink.bin.flush(0);
        main.allocator.free(t.sink.bin.buf);
    }

    while (std.mem.endsWith(u8, global.index.items, &[1]u8{0}**8))
        global.index.shrinkRetainingCapacity(global.index.items.len - 8);
    global.index.appendSlice(main.allocator, &bigu64(global.root_itemref)) catch unreachable;
    global.index.appendSlice(main.allocator, &blockHeader(1, @intCast(global.index.items.len + 4))) catch unreachable;
    global.index.items[0..4].* = blockHeader(1, @intCast(global.index.items.len));
    global.fd.writeAll(global.index.items) catch |e|
        ui.die("Error writing to file: {s}.\n", .{ ui.errorString(e) });
    global.index.clearAndFree(main.allocator);

    global.fd.close();
}

pub fn setupOutput(fd: std.fs.File) void {
    global.fd = fd;
    fd.writeAll(SIGNATURE) catch |e|
        ui.die("Error writing to file: {s}.\n", .{ ui.errorString(e) });
    global.file_off = 8;

    // Placeholder for the index block header.
    global.index.appendSlice(main.allocator, "aaaa") catch unreachable;
}

src/bin_reader.zig (new file, 521 lines)

@@ -0,0 +1,521 @@
// SPDX-FileCopyrightText: Yorhel <projects@yorhel.nl>
// SPDX-License-Identifier: MIT

const std = @import("std");
const main = @import("main.zig");
const model = @import("model.zig");
const util = @import("util.zig");
const sink = @import("sink.zig");
const ui = @import("ui.zig");
const bin_export = @import("bin_export.zig");
const c = @import("c.zig").c;


const CborMajor = bin_export.CborMajor;
const ItemKey = bin_export.ItemKey;

// Two ways to read a bin export:
//
// 1. Streaming import
//    - Read blocks sequentially, assemble items into model.Entry's and stitch
//      them together on the go.
//    - Does not use the sink.zig API, since sub-level items are read before their parent dirs.
//    - Useful when:
//      - User attempts to do a refresh or delete while browsing a file through (2)
//      - Reading from a stream
//
// 2. Random access browsing
//    - Read final block first to get the root item, then have browser.zig fetch
//      dir listings from this file.
//    - The default reader mode, requires much less memory than (1) and provides
//      a snappier first-browsing experience.
//
// The approach from (2) can also be used to walk through the entire directory
// tree and stream it to sink.zig (either for importing or converting to JSON).
// That would allow for better code reuse and low-memory conversion, but
// performance will not be as good as a direct streaming read. Needs
// benchmarks.
//
// This file only implements (2) at the moment.

pub const global = struct {
    var fd: std.fs.File = undefined;
    var index: []u8 = undefined;
    var blocks: [8]Block = [1]Block{.{}}**8;
    var counter: u64 = 0;

    // Last itemref being read/parsed. This is a hack to provide *some* context on error.
    // Providing more context mainly just bloats the binary and decreases
    // performance for fairly little benefit. Nobody's going to debug a corrupted export.
    var lastitem: ?u64 = null;
};


const Block = struct {
    num: u32 = std.math.maxInt(u32),
    last: u64 = 0,
    data: []u8 = undefined,
};


inline fn bigu16(v: [2]u8) u16 { return std.mem.bigToNative(u16, @bitCast(v)); }
inline fn bigu32(v: [4]u8) u32 { return std.mem.bigToNative(u32, @bitCast(v)); }
inline fn bigu64(v: [8]u8) u64 { return std.mem.bigToNative(u64, @bitCast(v)); }

fn die() noreturn {
    @branchHint(.cold);
    if (global.lastitem) |e| ui.die("Error reading item {x} from file\n", .{e})
    else ui.die("Error reading from file\n", .{});
}


fn readBlock(num: u32) []const u8 {
    // Simple linear search, only suitable if we keep the number of in-memory blocks small.
    var block: *Block = &global.blocks[0];
    for (&global.blocks) |*b| {
        if (b.num == num) {
            if (b.last != global.counter) {
                global.counter += 1;
                b.last = global.counter;
            }
            return b.data;
        }
        if (block.last > b.last) block = b;
    }
    if (block.num != std.math.maxInt(u32))
        main.allocator.free(block.data);
    block.num = num;
    global.counter += 1;
    block.last = global.counter;

    if (num > global.index.len/8 - 1) die();
    const offlen = bigu64(global.index[num*8..][0..8].*);
    const off = offlen >> 24;
    const len = offlen & 0xffffff;
    if (len <= 12) die();

    // Only read the compressed data part, assume block header, number and footer are correct.
    const buf = main.allocator.alloc(u8, @intCast(len - 12)) catch unreachable;
    defer main.allocator.free(buf);
    const rdlen = global.fd.preadAll(buf, off + 8)
        catch |e| ui.die("Error reading from file: {s}\n", .{ui.errorString(e)});
    if (rdlen != buf.len) die();

    const rawlen = c.ZSTD_getFrameContentSize(buf.ptr, buf.len);
    if (rawlen <= 0 or rawlen >= (1<<24)) die();
    block.data = main.allocator.alloc(u8, @intCast(rawlen)) catch unreachable;

    const res = c.ZSTD_decompress(block.data.ptr, block.data.len, buf.ptr, buf.len);
    if (res != block.data.len) ui.die("Error decompressing block {} (expected {} got {})\n", .{ num, block.data.len, res });

    return block.data;
}


const CborReader = struct {
    buf: []const u8,

    fn head(r: *CborReader) CborVal {
        if (r.buf.len < 1) die();
        var v = CborVal{
            .rd = r,
            .major = @enumFromInt(r.buf[0] >> 5),
            .indef = false,
            .arg = 0,
        };
        switch (r.buf[0] & 0x1f) {
            0x00...0x17 => |n| {
                v.arg = n;
                r.buf = r.buf[1..];
            },
            0x18 => {
                if (r.buf.len < 2) die();
                v.arg = r.buf[1];
                r.buf = r.buf[2..];
            },
            0x19 => {
                if (r.buf.len < 3) die();
                v.arg = bigu16(r.buf[1..3].*);
                r.buf = r.buf[3..];
            },
            0x1a => {
                if (r.buf.len < 5) die();
                v.arg = bigu32(r.buf[1..5].*);
                r.buf = r.buf[5..];
            },
            0x1b => {
                if (r.buf.len < 9) die();
                v.arg = bigu64(r.buf[1..9].*);
                r.buf = r.buf[9..];
            },
            0x1f => switch (v.major) {
                .bytes, .text, .array, .map, .simple => {
                    v.indef = true;
                    r.buf = r.buf[1..];
                },
                else => die(),
            },
            else => die(),
        }
        return v;
    }

    // Read the next CBOR value, skipping any tags
    fn next(r: *CborReader) CborVal {
        while (true) {
            const v = r.head();
            if (v.major != .tag) return v;
        }
    }
};

const CborVal = struct {
    rd: *CborReader,
    major: CborMajor,
    indef: bool,
    arg: u64,

    fn end(v: *const CborVal) bool {
        return v.major == .simple and v.indef;
    }

    fn int(v: *const CborVal, T: type) T {
        switch (v.major) {
            .pos => return std.math.cast(T, v.arg) orelse die(),
            .neg => {
                if (std.math.minInt(T) == 0) die();
                if (v.arg > std.math.maxInt(T)) die();
                return -@as(T, @intCast(v.arg)) + (-1);
            },
            else => die(),
        }
    }

    fn isTrue(v: *const CborVal) bool {
        return v.major == .simple and v.arg == 21;
    }

    // Read either a byte or text string.
    // Doesn't validate UTF-8 strings, doesn't support indefinite-length strings.
    fn bytes(v: *const CborVal) []const u8 {
        if (v.indef or (v.major != .bytes and v.major != .text)) die();
        if (v.rd.buf.len < v.arg) die();
        defer v.rd.buf = v.rd.buf[@intCast(v.arg)..];
        return v.rd.buf[0..@intCast(v.arg)];
    }

    // Skip current value.
    fn skip(v: *const CborVal) void {
        // indefinite-length bytes, text, array or map; skip till break marker.
        if (v.major != .simple and v.indef) {
            while (true) {
                const n = v.rd.next();
                if (n.end()) return;
                n.skip();
            }
        }
        switch (v.major) {
            .bytes, .text => {
                if (v.rd.buf.len < v.arg) die();
                v.rd.buf = v.rd.buf[@intCast(v.arg)..];
            },
            .array => {
                if (v.arg > (1<<24)) die();
                for (0..@intCast(v.arg)) |_| v.rd.next().skip();
            },
            .map => {
                if (v.arg > (1<<24)) die();
                for (0..@intCast(v.arg*|2)) |_| v.rd.next().skip();
            },
            else => {},
        }
    }

    fn etype(v: *const CborVal) model.EType {
        const n = v.int(i32);
        return std.meta.intToEnum(model.EType, n)
            catch if (n < 0) .pattern else .nonreg;
    }

    fn itemref(v: *const CborVal, cur: u64) u64 {
        if (v.major == .pos) return v.arg;
        if (v.major == .neg) {
            if (v.arg >= (cur & 0xffffff)) die();
            return cur - v.arg - 1;
        }
        return die();
    }
};


test "CBOR int parsing" {
    inline for (.{
        .{ .in = "\x00", .t = u1, .exp = 0 },
        .{ .in = "\x01", .t = u1, .exp = 1 },
        .{ .in = "\x18\x18", .t = u8, .exp = 0x18 },
        .{ .in = "\x18\xff", .t = u8, .exp = 0xff },
        .{ .in = "\x19\x07\xff", .t = u64, .exp = 0x7ff },
        .{ .in = "\x19\xff\xff", .t = u64, .exp = 0xffff },
        .{ .in = "\x1a\x00\x01\x00\x00", .t = u64, .exp = 0x10000 },
        .{ .in = "\x1b\x7f\xff\xff\xff\xff\xff\xff\xff", .t = i64, .exp = std.math.maxInt(i64) },
        .{ .in = "\x1b\xff\xff\xff\xff\xff\xff\xff\xff", .t = u64, .exp = std.math.maxInt(u64) },
        .{ .in = "\x1b\xff\xff\xff\xff\xff\xff\xff\xff", .t = i65, .exp = std.math.maxInt(u64) },
        .{ .in = "\x20", .t = i1, .exp = -1 },
        .{ .in = "\x38\x18", .t = i8, .exp = -0x19 },
        .{ .in = "\x39\x01\xf3", .t = i16, .exp = -500 },
        .{ .in = "\x3a\xfe\xdc\xba\x97", .t = i33, .exp = -0xfedc_ba98 },
        .{ .in = "\x3b\x7f\xff\xff\xff\xff\xff\xff\xff", .t = i64, .exp = std.math.minInt(i64) },
        .{ .in = "\x3b\xff\xff\xff\xff\xff\xff\xff\xff", .t = i65, .exp = std.math.minInt(i65) },
    }) |t| {
        var r = CborReader{.buf = t.in};
        try std.testing.expectEqual(@as(t.t, t.exp), r.next().int(t.t));
        try std.testing.expectEqual(0, r.buf.len);
    }
}

test "CBOR string parsing" {
    var r = CborReader{.buf="\x40"};
    try std.testing.expectEqualStrings("", r.next().bytes());
    r.buf = "\x45\x00\x01\x02\x03\x04x";
    try std.testing.expectEqualStrings("\x00\x01\x02\x03\x04", r.next().bytes());
    try std.testing.expectEqualStrings("x", r.buf);
    r.buf = "\x78\x241234567890abcdefghijklmnopqrstuvwxyz-end";
    try std.testing.expectEqualStrings("1234567890abcdefghijklmnopqrstuvwxyz", r.next().bytes());
    try std.testing.expectEqualStrings("-end", r.buf);
}

test "CBOR skip parsing" {
    inline for (.{
        "\x00",
        "\x40",
        "\x41a",
        "\x5f\xff",
        "\x5f\x41a\xff",
        "\x80",
        "\x81\x00",
        "\x9f\xff",
        "\x9f\x9f\xff\xff",
        "\x9f\x9f\x81\x00\xff\xff",
        "\xa0",
        "\xa1\x00\x01",
        "\xbf\xff",
        "\xbf\xc0\x00\x9f\xff\xff",
    }) |s| {
        var r = CborReader{.buf = s ++ "garbage"};
        r.next().skip();
        try std.testing.expectEqualStrings(r.buf, "garbage");
    }
}

const ItemParser = struct {
    r: CborReader,
    len: ?u64 = null,

    const Field = struct {
        key: ItemKey,
        val: CborVal,
    };

    fn init(buf: []const u8) ItemParser {
        var r = ItemParser{.r = .{.buf = buf}};
        const head = r.r.next();
        if (head.major != .map) die();
        if (!head.indef) r.len = head.arg;
        return r;
    }

    fn key(r: *ItemParser) ?CborVal {
        if (r.len) |*l| {
            if (l.* == 0) return null;
            l.* -= 1;
            return r.r.next();
        } else {
            const v = r.r.next();
            return if (v.end()) null else v;
        }
    }

    // Skips over any fields that don't fit into an ItemKey.
    fn next(r: *ItemParser) ?Field {
        while (r.key()) |k| {
            if (k.major == .pos and k.arg <= std.math.maxInt(@typeInfo(ItemKey).@"enum".tag_type)) return .{
                .key = @enumFromInt(k.arg),
                .val = r.r.next(),
            } else {
                k.skip();
                r.r.next().skip();
            }
        }
        return null;
    }
};

// Returned buffer is valid until the next readItem().
fn readItem(ref: u64) ItemParser {
    global.lastitem = ref;
    if (ref >= (1 << (24 + 32))) die();
    const block = readBlock(@intCast(ref >> 24));
    if ((ref & 0xffffff) >= block.len) die();
    return ItemParser.init(block[@intCast(ref & 0xffffff)..]);
}

const Import = struct {
    sink: *sink.Thread,
    stat: sink.Stat = .{},
    fields: Fields = .{},
    p: ItemParser = undefined,

    const Fields = struct {
        name: []const u8 = "",
        rderr: bool = false,
        prev: ?u64 = null,
        sub: ?u64 = null,
    };

    fn readFields(ctx: *Import, ref: u64) void {
        ctx.p = readItem(ref);
        var hastype = false;

        while (ctx.p.next()) |kv| switch (kv.key) {
            .type => {
                ctx.stat.etype = kv.val.etype();
                hastype = true;
            },
            .name => ctx.fields.name = kv.val.bytes(),
            .prev => ctx.fields.prev = kv.val.itemref(ref),
            .asize => ctx.stat.size = kv.val.int(u64),
            .dsize => ctx.stat.blocks = @intCast(kv.val.int(u64)/512),
            .dev => ctx.stat.dev = kv.val.int(u64),
            .rderr => ctx.fields.rderr = kv.val.isTrue(),
            .sub => ctx.fields.sub = kv.val.itemref(ref),
            .ino => ctx.stat.ino = kv.val.int(u64),
            .nlink => ctx.stat.nlink = kv.val.int(u31),
            .uid => { ctx.stat.ext.uid = kv.val.int(u32); ctx.stat.ext.pack.hasuid = true; },
            .gid => { ctx.stat.ext.gid = kv.val.int(u32); ctx.stat.ext.pack.hasgid = true; },
            .mode => { ctx.stat.ext.mode = kv.val.int(u16); ctx.stat.ext.pack.hasmode = true; },
            .mtime => { ctx.stat.ext.mtime = kv.val.int(u64); ctx.stat.ext.pack.hasmtime = true; },
            else => kv.val.skip(),
        };

        if (!hastype) die();
        if (ctx.fields.name.len == 0) die();
    }

    fn import(ctx: *Import, ref: u64, parent: ?*sink.Dir, dev: u64) void {
        ctx.stat = .{ .dev = dev };
        ctx.fields = .{};
        ctx.readFields(ref);

        if (ctx.stat.etype == .dir) {
            const prev = ctx.fields.prev;
            const dir =
                if (parent) |d| d.addDir(ctx.sink, ctx.fields.name, &ctx.stat)
                else sink.createRoot(ctx.fields.name, &ctx.stat);
            ctx.sink.setDir(dir);
            if (ctx.fields.rderr) dir.setReadError(ctx.sink);

            ctx.fields.prev = ctx.fields.sub;
            while (ctx.fields.prev) |n| ctx.import(n, dir, ctx.stat.dev);

            ctx.sink.setDir(parent);
            dir.unref(ctx.sink);
            ctx.fields.prev = prev;

        } else {
            const p = parent orelse die();
            if (@intFromEnum(ctx.stat.etype) < 0)
                p.addSpecial(ctx.sink, ctx.fields.name, ctx.stat.etype)
            else
                p.addStat(ctx.sink, ctx.fields.name, &ctx.stat);
        }

        if ((ctx.sink.files_seen.load(.monotonic) & 65) == 0)
            main.handleEvent(false, false);
    }
};

// Resolve an itemref and return a newly allocated entry.
// Dir.parent and Link.next/prev are left uninitialized.
pub fn get(ref: u64, alloc: std.mem.Allocator) *model.Entry {
    const parser = readItem(ref);

    var etype: ?model.EType = null;
    var name: []const u8 = "";
    var p = parser;
    var ext = model.Ext{};
    while (p.next()) |kv| {
        switch (kv.key) {
            .type => etype = kv.val.etype(),
            .name => name = kv.val.bytes(),
            .uid => { ext.uid = kv.val.int(u32); ext.pack.hasuid = true; },
            .gid => { ext.gid = kv.val.int(u32); ext.pack.hasgid = true; },
            .mode => { ext.mode = kv.val.int(u16); ext.pack.hasmode = true; },
            .mtime => { ext.mtime = kv.val.int(u64); ext.pack.hasmtime = true; },
            else => kv.val.skip(),
        }
    }
    if (etype == null or name.len == 0) die();

    var entry = model.Entry.create(alloc, etype.?, main.config.extended and !ext.isEmpty(), name);
    entry.next = .{ .ref = std.math.maxInt(u64) };
    if (entry.ext()) |e| e.* = ext;
    if (entry.dir()) |d| d.sub = .{ .ref = std.math.maxInt(u64) };
    p = parser;
    while (p.next()) |kv| switch (kv.key) {
        .prev => entry.next = .{ .ref = kv.val.itemref(ref) },
        .asize => { if (entry.pack.etype != .dir) entry.size = kv.val.int(u64); },
        .dsize => { if (entry.pack.etype != .dir) entry.pack.blocks = @intCast(kv.val.int(u64)/512); },

        .rderr => { if (entry.dir()) |d| {
            if (kv.val.isTrue()) d.pack.err = true
            else d.pack.suberr = true;
        } },
        .dev => { if (entry.dir()) |d| d.pack.dev = model.devices.getId(kv.val.int(u64)); },
        .cumasize => entry.size = kv.val.int(u64),
        .cumdsize => entry.pack.blocks = @intCast(kv.val.int(u64)/512),
        .shrasize => { if (entry.dir()) |d| d.shared_size = kv.val.int(u64); },
        .shrdsize => { if (entry.dir()) |d| d.shared_blocks = kv.val.int(u64)/512; },
        .items => { if (entry.dir()) |d| d.items = util.castClamp(u32, kv.val.int(u64)); },
        .sub => { if (entry.dir()) |d| d.sub = .{ .ref = kv.val.itemref(ref) }; },

        .ino => { if (entry.link()) |l| l.ino = kv.val.int(u64); },
        .nlink => { if (entry.link()) |l| l.pack.nlink = kv.val.int(u31); },
        else => kv.val.skip(),
    };
    return entry;
}

pub fn getRoot() u64 {
    return bigu64(global.index[global.index.len-8..][0..8].*);
}

// Walk through the directory tree in depth-first order and pass results to sink.zig.
// Depth-first is required for JSON export, but more efficient strategies are
// possible for other sinks. Parallel import is also an option, but that's more
// complex and likely less efficient than a streaming import.
pub fn import() void {
    const sink_threads = sink.createThreads(1);
    var ctx = Import{.sink = &sink_threads[0]};
    ctx.import(getRoot(), null, 0);
    sink.done();
}

// Assumes that the file signature has already been read and validated.
pub fn open(fd: std.fs.File) !void {
    global.fd = fd;

    // Do not use fd.getEndPos() because that requires newer kernels supporting statx() #261.
    try fd.seekFromEnd(0);
    const size = try fd.getPos();
    if (size < 16) return error.EndOfStream;

    // Read index block
    var buf: [4]u8 = undefined;
    if (try fd.preadAll(&buf, size - 4) != 4) return error.EndOfStream;
    const index_header = bigu32(buf);
    if ((index_header >> 28) != 1 or (index_header & 7) != 0) die();
    const len = (index_header & 0x0fffffff) - 8; // excluding block header & footer
    if (len >= size) die();
    global.index = main.allocator.alloc(u8, len) catch unreachable;
    if (try fd.preadAll(global.index, size - len - 4) != global.index.len) return error.EndOfStream;
}
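
// For reference, the overall file layout produced by bin_export.zig is: an
// 8-byte signature, a sequence of zstd-compressed data blocks, and a final
// index block holding one 8-byte offset+length entry per data block followed
// by the root itemref.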

src/browser.zig (426 changed lines)

@@ -1,25 +1,35 @@
// SPDX-FileCopyrightText: 2021-2022 Yoran Heling <projects@yorhel.nl>
// SPDX-FileCopyrightText: Yorhel <projects@yorhel.nl>
// SPDX-License-Identifier: MIT

const std = @import("std");
const main = @import("main.zig");
const model = @import("model.zig");
const scan = @import("scan.zig");
const sink = @import("sink.zig");
const mem_sink = @import("mem_sink.zig");
const bin_reader = @import("bin_reader.zig");
const delete = @import("delete.zig");
const ui = @import("ui.zig");
const c = @cImport(@cInclude("time.h"));
const c = @import("c.zig").c;
const util = @import("util.zig");

// Currently opened directory.
pub var dir_parent: *model.Dir = undefined;
pub var dir_path: [:0]u8 = undefined;
var dir_parents: std.ArrayListUnmanaged(model.Ref) = .empty;
var dir_alloc = std.heap.ArenaAllocator.init(main.allocator);

// Used to keep track of which dir is which ref, so we can enter it.
// Only used for binreader browsing.
var dir_refs: std.ArrayListUnmanaged(struct { ptr: *model.Dir, ref: u64 }) = .empty;

// Sorted list of all items in the currently opened directory.
// (first item may be null to indicate the "parent directory" item)
var dir_items = std.ArrayList(?*model.Entry).init(main.allocator);
var dir_items: std.ArrayListUnmanaged(?*model.Entry) = .empty;

var dir_max_blocks: u64 = 0;
var dir_max_size: u64 = 0;
var dir_has_shared: bool = false;
var dir_loading: u64 = 0;

// Index into dir_items that is currently selected.
var cursor_idx: usize = 0;

@@ -32,28 +42,28 @@ const View = struct {

    // The hash(name) of the selected entry (cursor), this is used to derive
    // cursor_idx after sorting or changing directory.
    // (collisions may cause the wrong entry to be selected, but dealing with
    // string allocations sucks and I expect collisions to be rare enough)
    cursor_hash: u64 = 0,

    fn hashEntry(entry: ?*model.Entry) u64 {
        return if (entry) |e| std.hash.Wyhash.hash(0, e.name()) else 0;
    fn dirHash() u64 {
        return std.hash.Wyhash.hash(0, dir_path);
    }

    // Update cursor_hash and save the current view to the hash table.
    fn save(self: *@This()) void {
        self.cursor_hash = if (dir_items.items.len == 0) 0
            else hashEntry(dir_items.items[cursor_idx]);
        opened_dir_views.put(@ptrToInt(dir_parent), self.*) catch {};
            else if (dir_items.items[cursor_idx]) |e| e.nameHash()
            else 0;
        opened_dir_views.put(dirHash(), self.*) catch {};
    }

    // Should be called after dir_parent or dir_items has changed, will load the last saved view and find the proper cursor_idx.
    fn load(self: *@This(), sel: ?*const model.Entry) void {
        if (opened_dir_views.get(@ptrToInt(dir_parent))) |v| self.* = v
    fn load(self: *@This(), sel: u64) void {
        if (opened_dir_views.get(dirHash())) |v| self.* = v
        else self.* = @This(){};
        cursor_idx = 0;
        for (dir_items.items) |e, i| {
            if (if (sel != null) e == sel else self.cursor_hash == hashEntry(e)) {
        for (dir_items.items, 0..) |e, i| {
            const h = if (e) |x| x.nameHash() else 0;
            if (if (sel != 0) h == sel else self.cursor_hash == h) {
                cursor_idx = i;
                break;
            }

@@ -64,10 +74,8 @@ const View = struct {
var current_view = View{};

// Directories the user has browsed to before, and which item was last selected.
// The key is the @ptrToInt() of the opened *Dir; An int because the pointer
// itself may have gone stale after deletion or refreshing. They're only for
// lookups, not dereferencing.
var opened_dir_views = std.AutoHashMap(usize, View).init(main.allocator);
// The key is the hash of dir_path;
var opened_dir_views = std.AutoHashMap(u64, View).init(main.allocator);

fn sortIntLt(a: anytype, b: @TypeOf(a)) ?bool {
    return if (a == b) null else if (main.config.sort_order == .asc) a < b else a > b;

@@ -77,47 +85,47 @@ fn sortLt(_: void, ap: ?*model.Entry, bp: ?*model.Entry) bool {
    const a = ap.?;
    const b = bp.?;

    if (main.config.sort_dirsfirst and a.isDirectory() != b.isDirectory())
        return a.isDirectory();
    if (main.config.sort_dirsfirst and a.pack.etype.isDirectory() != b.pack.etype.isDirectory())
        return a.pack.etype.isDirectory();

    switch (main.config.sort_col) {
        .name => {}, // name sorting is the fallback
        .blocks => {
            if (sortIntLt(a.blocks, b.blocks)) |r| return r;
            if (sortIntLt(a.pack.blocks, b.pack.blocks)) |r| return r;
            if (sortIntLt(a.size, b.size)) |r| return r;
        },
        .size => {
            if (sortIntLt(a.size, b.size)) |r| return r;
            if (sortIntLt(a.blocks, b.blocks)) |r| return r;
            if (sortIntLt(a.pack.blocks, b.pack.blocks)) |r| return r;
        },
        .items => {
            const ai = if (a.dir()) |d| d.items else 0;
            const bi = if (b.dir()) |d| d.items else 0;
            if (sortIntLt(ai, bi)) |r| return r;
            if (sortIntLt(a.blocks, b.blocks)) |r| return r;
            if (sortIntLt(a.pack.blocks, b.pack.blocks)) |r| return r;
            if (sortIntLt(a.size, b.size)) |r| return r;
        },
        .mtime => {
            if (!a.isext or !b.isext) return a.isext;
            if (!a.pack.isext or !b.pack.isext) return a.pack.isext;
            if (sortIntLt(a.ext().?.mtime, b.ext().?.mtime)) |r| return r;
        },
    }

    const an = a.name();
    const bn = b.name();
    return if (main.config.sort_order == .asc) util.strnatcmp(an, bn) == .lt
        else util.strnatcmp(bn, an) == .lt;
    const an = (if (main.config.sort_order == .asc) a else b).name();
    const bn = (if (main.config.sort_order == .asc) b else a).name();
    return if (main.config.sort_natural) util.strnatcmp(an, bn) == .lt
        else std.mem.lessThan(u8, an, bn);
}

// Should be called when:
// - config.sort_* changes
// - dir_items changes (i.e. from loadDir())
// - files in this dir have changed in a way that affects their ordering
fn sortDir(next_sel: ?*const model.Entry) void {
fn sortDir(next_sel: u64) void {
    // No need to sort the first item if that's the parent dir reference,
    // excluding that allows sortLt() to ignore null values.
    const lst = dir_items.items[(if (dir_items.items.len > 0 and dir_items.items[0] == null) @as(usize, 1) else 0)..];
    std.sort.sort(?*model.Entry, lst, @as(void, undefined), sortLt);
    std.mem.sort(?*model.Entry, lst, {}, sortLt);
    current_view.load(next_sel);
}

@@ -125,32 +133,103 @@ fn sortDir(next_sel: u64) void {
// - dir_parent changes (i.e. we change directory)
// - config.show_hidden changes
// - files in this dir have been added or removed
pub fn loadDir(next_sel: ?*const model.Entry) void {
pub fn loadDir(next_sel: u64) void {
    // XXX: The current dir listing is wiped before loading the new one, which
    // causes the screen to flicker a bit when the loading indicator is drawn.
    // Should we keep the old listing around?
    main.event_delay_timer.reset();
    _ = dir_alloc.reset(.free_all);
    dir_items.shrinkRetainingCapacity(0);
    dir_refs.shrinkRetainingCapacity(0);
    dir_max_size = 1;
    dir_max_blocks = 1;
    dir_has_shared = false;

    if (dir_parent != model.root)
        dir_items.append(null) catch unreachable;
    var it = dir_parent.sub;
    while (it) |e| {
        if (e.blocks > dir_max_blocks) dir_max_blocks = e.blocks;
    if (dir_parents.items.len > 1)
        dir_items.append(main.allocator, null) catch unreachable;
    var ref = dir_parent.sub;
    while (!ref.isNull()) {
        const e =
            if (main.config.binreader) bin_reader.get(ref.ref, dir_alloc.allocator())
            else ref.ptr.?;

        if (e.pack.blocks > dir_max_blocks) dir_max_blocks = e.pack.blocks;
        if (e.size > dir_max_size) dir_max_size = e.size;
        const shown = main.config.show_hidden or blk: {
            const excl = if (e.file()) |f| f.excluded else false;
            const excl = switch (e.pack.etype) {
                .pattern, .otherfs, .kernfs => true,
                else => false,
            };
            const name = e.name();
            break :blk !excl and name[0] != '.' and name[name.len-1] != '~';
        };
        if (shown) {
            dir_items.append(e) catch unreachable;
            if (e.dir()) |d| if (d.shared_blocks > 0 or d.shared_size > 0) { dir_has_shared = true; };
            dir_items.append(main.allocator, e) catch unreachable;
            if (e.dir()) |d| {
                if (d.shared_blocks > 0 or d.shared_size > 0) dir_has_shared = true;
                if (main.config.binreader) dir_refs.append(main.allocator, .{ .ptr = d, .ref = ref.ref }) catch unreachable;
            }
        it = e.next;
        }

        ref = e.next;
        dir_loading += 1;
        if ((dir_loading & 65) == 0)
            main.handleEvent(false, false);
    }
    sortDir(next_sel);
    dir_loading = 0;
}


pub fn initRoot() void {
    if (main.config.binreader) {
        const ref = bin_reader.getRoot();
        dir_parent = bin_reader.get(ref, main.allocator).dir() orelse ui.die("Invalid import\n", .{});
        dir_parents.append(main.allocator, .{ .ref = ref }) catch unreachable;
    } else {
        dir_parent = model.root;
        dir_parents.append(main.allocator, .{ .ptr = &dir_parent.entry }) catch unreachable;
    }
    dir_path = main.allocator.dupeZ(u8, dir_parent.entry.name()) catch unreachable;
    loadDir(0);
}

fn enterSub(e: *model.Dir) void {
    if (main.config.binreader) {
        const ref = blk: {
            for (dir_refs.items) |r| if (r.ptr == e) break :blk r.ref;
            return;
        };
        dir_parent.entry.destroy(main.allocator);
        dir_parent = bin_reader.get(ref, main.allocator).dir() orelse unreachable;
        dir_parents.append(main.allocator, .{ .ref = ref }) catch unreachable;
    } else {
        dir_parent = e;
        dir_parents.append(main.allocator, .{ .ptr = &e.entry }) catch unreachable;
    }

    const newpath = std.fs.path.joinZ(main.allocator, &[_][]const u8{ dir_path, e.entry.name() }) catch unreachable;
    main.allocator.free(dir_path);
    dir_path = newpath;
}

fn enterParent() void {
    std.debug.assert(dir_parents.items.len > 1);

    _ = dir_parents.pop();
    const p = dir_parents.items[dir_parents.items.len-1];
    if (main.config.binreader) {
        dir_parent.entry.destroy(main.allocator);
        dir_parent = bin_reader.get(p.ref, main.allocator).dir() orelse unreachable;
    } else
        dir_parent = p.ptr.?.dir() orelse unreachable;

    const newpath = main.allocator.dupeZ(u8, std.fs.path.dirname(dir_path) orelse unreachable) catch unreachable;
    main.allocator.free(dir_path);
    dir_path = newpath;
}


const Row = struct {
    row: u32,
    col: u32 = 0,

@@ -162,19 +241,17 @@
    fn flag(self: *Self) void {
        defer self.col += 2;
        const item = self.item orelse return;
        const ch: u7 = ch: {
            if (item.file()) |f| {
                if (f.err) break :ch '!';
                if (f.excluded) break :ch '<';
                if (f.other_fs) break :ch '>';
                if (f.kernfs) break :ch '^';
                if (f.notreg) break :ch '@';
            } else if (item.dir()) |d| {
                if (d.err) break :ch '!';
                if (d.suberr) break :ch '.';
                if (d.sub == null) break :ch 'e';
            } else if (item.link()) |_| break :ch 'H';
            return;
        const ch: u7 = switch (item.pack.etype) {
            .dir => if (item.dir().?.pack.err) '!'
                else if (item.dir().?.pack.suberr) '.'
                else if (item.dir().?.sub.isNull()) 'e'
                else return,
            .link => 'H',
            .pattern => '<',
            .otherfs => '>',
            .kernfs => '^',
            .nonreg => '@',
            else => return,
        };
        ui.move(self.row, self.col);
        self.bg.fg(.flag);

@@ -187,7 +264,7 @@ const Row = struct {
        width += 2 + width;
        defer self.col += width;
        const item = self.item orelse return;
        const siz = if (main.config.show_blocks) util.blocksToSize(item.blocks) else item.size;
        const siz = if (main.config.show_blocks) util.blocksToSize(item.pack.blocks) else item.size;
        var shr = if (item.dir()) |d| (if (main.config.show_blocks) util.blocksToSize(d.shared_blocks) else d.shared_size) else 0;
        if (main.config.show_shared == .unique) shr = siz -| shr;

@@ -203,7 +280,7 @@ const Row = struct {
    fn graph(self: *Self) void {
        if ((!main.config.show_graph and !main.config.show_percent) or self.col + 20 > ui.cols) return;

        const bar_size = std.math.max(ui.cols/7, 10);
        const bar_size = @max(ui.cols/7, 10);
        defer self.col += 3
            + (if (main.config.show_graph) bar_size else 0)
            + (if (main.config.show_percent) @as(u32, 6) else 0)

@@ -215,30 +292,33 @@ const Row = struct {
        ui.addch('[');
        if (main.config.show_percent) {
            self.bg.fg(.num);
            ui.addprint("{d:>5.1}", .{ 100*
                if (main.config.show_blocks) @intToFloat(f32, item.blocks) / @intToFloat(f32, std.math.max(1, dir_parent.entry.blocks))
                else @intToFloat(f32, item.size) / @intToFloat(f32, std.math.max(1, dir_parent.entry.size))
            });
            var num : u64 = if (main.config.show_blocks) item.pack.blocks else item.size;
            var denom : u64 = if (main.config.show_blocks) dir_parent.entry.pack.blocks else dir_parent.entry.size;
            if (num > (1<<54)) { // avoid overflow
                num >>= 10;
                denom >>= 10;
            }
            ui.addstr(&util.fmt5dec(@intCast( @min(1000, (num * 1000 + (denom / 2)) / @max(1, denom) ))));
            self.bg.fg(.default);
            ui.addch('%');
        }
        if (main.config.show_graph and main.config.show_percent) ui.addch(' ');
        if (main.config.show_graph) {
            var max = if (main.config.show_blocks) dir_max_blocks else dir_max_size;
            var num = if (main.config.show_blocks) item.blocks else item.size;
            var num = if (main.config.show_blocks) item.pack.blocks else item.size;
            if (max < bar_size) {
                max *= bar_size;
                num *= bar_size;
            }

            const perblock = std.math.divFloor(u64, max, bar_size) catch unreachable;
            var i: u32 = 0;
            self.bg.fg(.graph);
            while (i < bar_size) : (i += 1) {
                const frac = std.math.min(@as(usize, 8), (num *| 8) / perblock);
            for (0..bar_size) |_| {
                const frac = @min(@as(usize, 8), (num *| 8) / perblock);
                ui.addstr(switch (main.config.graph_style) {
                    .hash => ([_][:0]const u8{ " ", " ", " ", " ", " ", " ", " ", " ", "#" })[frac],
                    .half => ([_][:0]const u8{ " ", " ", " ", " ", "▌", "▌", "▌", "▌", "█" })[frac],
                    .eigth => ([_][:0]const u8{ " ", "▏", "▎", "▍", "▌", "▋", "▊", "▉", "█" })[frac],
                    .eighth => ([_][:0]const u8{ " ", "▏", "▎", "▍", "▌", "▋", "▊", "▉", "█" })[frac],
                });
                num -|= perblock;
            }

@@ -260,12 +340,12 @@ const Row = struct {
            ui.addnum(self.bg, n);
        } else if (n < 100_000)
            ui.addnum(self.bg, n)
        else if (n < 1000_000) {
            ui.addprint("{d:>5.1}", .{ @intToFloat(f32, n) / 1000 });
        else if (n < 999_950) {
            ui.addstr(&util.fmt5dec(@intCast( (n + 50) / 100 )));
            self.bg.fg(.default);
            ui.addch('k');
        } else if (n < 1000_000_000) {
            ui.addprint("{d:>5.1}", .{ @intToFloat(f32, n) / 1000_000 });
        } else if (n < 999_950_000) {
            ui.addstr(&util.fmt5dec(@intCast( (n + 50_000) / 100_000 )));
            self.bg.fg(.default);
            ui.addch('M');
        } else {

@@ -282,16 +362,21 @@ const Row = struct {
        if (!main.config.show_mtime or self.col + 37 > ui.cols) return;
        defer self.col += 27;
ui.move(self.row, self.col+1);
|
||||
const ext = (if (self.item) |e| e.ext() else @as(?*model.Ext, null)) orelse dir_parent.entry.ext();
|
||||
if (ext) |e| ui.addts(self.bg, e.mtime)
|
||||
else ui.addstr(" no mtime");
|
||||
const ext = if (self.item) |e| e.ext() else dir_parent.entry.ext();
|
||||
if (ext) |e| {
|
||||
if (e.pack.hasmtime) {
|
||||
ui.addts(self.bg, e.mtime);
|
||||
return;
|
||||
}
|
||||
}
|
||||
ui.addstr(" no mtime");
|
||||
}
|
||||
|
||||
fn name(self: *Self) void {
|
||||
ui.move(self.row, self.col);
|
||||
if (self.item) |i| {
|
||||
self.bg.fg(if (i.etype == .dir) .dir else .default);
|
||||
ui.addch(if (i.isDirectory()) '/' else ' ');
|
||||
self.bg.fg(if (i.pack.etype == .dir) .dir else .default);
|
||||
ui.addch(if (i.pack.etype.isDirectory()) '/' else ' ');
|
||||
ui.addstr(ui.shorten(ui.toUtf8(i.name()), ui.cols -| self.col -| 1));
|
||||
} else {
|
||||
self.bg.fg(.dir);
|
||||
|
|
@ -315,7 +400,7 @@ const Row = struct {
|
|||
};
|
||||
|
||||
var state: enum { main, quit, help, info } = .main;
|
||||
var message: ?[:0]const u8 = null;
|
||||
var message: ?[]const [:0]const u8 = null;
|
||||
|
||||
const quit = struct {
|
||||
fn draw() void {
|
||||
|
|
@ -345,13 +430,13 @@ const info = struct {
|
|||
|
||||
var tab: Tab = .info;
|
||||
var entry: ?*model.Entry = null;
|
||||
var links: ?std.ArrayList(*model.Link) = null;
|
||||
var links: ?std.ArrayListUnmanaged(*model.Link) = null;
|
||||
var links_top: usize = 0;
|
||||
var links_idx: usize = 0;
|
||||
|
||||
fn lt(_: void, a: *model.Link, b: *model.Link) bool {
|
||||
var pa = a.path(false);
|
||||
var pb = b.path(false);
|
||||
const pa = a.path(false);
|
||||
const pb = b.path(false);
|
||||
defer main.allocator.free(pa);
|
||||
defer main.allocator.free(pb);
|
||||
return std.mem.lessThan(u8, pa, pb);
|
||||
|
|
@ -360,7 +445,7 @@ const info = struct {
|
|||
// Set the displayed entry to the currently selected item and open the tab.
|
||||
fn set(e: ?*model.Entry, t: Tab) void {
|
||||
if (e != entry) {
|
||||
if (links) |*l| l.deinit();
|
||||
if (links) |*l| l.deinit(main.allocator);
|
||||
links = null;
|
||||
links_top = 0;
|
||||
links_idx = 0;
|
||||
|
|
@ -372,31 +457,32 @@ const info = struct {
|
|||
}
|
||||
state = .info;
|
||||
tab = t;
|
||||
if (tab == .links and links == null) {
|
||||
var list = std.ArrayList(*model.Link).init(main.allocator);
|
||||
if (tab == .links and links == null and !main.config.binreader) {
|
||||
var list: std.ArrayListUnmanaged(*model.Link) = .empty;
|
||||
var l = e.?.link().?;
|
||||
while (true) {
|
||||
list.append(l) catch unreachable;
|
||||
list.append(main.allocator, l) catch unreachable;
|
||||
l = l.next;
|
||||
if (&l.entry == e)
|
||||
break;
|
||||
}
|
||||
// TODO: Zig's sort() implementation is type-generic and not very
|
||||
// small. I suspect we can get a good save on our binary size by using
|
||||
// a smaller or non-generic sort. This doesn't have to be very fast.
|
||||
std.sort.sort(*model.Link, list.items, @as(void, undefined), lt);
|
||||
for (list.items) |n,i| if (&n.entry == e) { links_idx = i; };
|
||||
std.sort.heap(*model.Link, list.items, {}, lt);
|
||||
for (list.items, 0..) |n,i| if (&n.entry == e) { links_idx = i; };
|
||||
links = list;
|
||||
}
|
||||
}
|
||||
|
||||
fn drawLinks(box: ui.Box, row: *u32, rows: u32, cols: u32) void {
|
||||
if (main.config.binreader) {
|
||||
box.move(2, 2);
|
||||
ui.addstr("This feature is not available when reading from file.");
|
||||
return;
|
||||
}
|
||||
const numrows = rows -| 4;
|
||||
if (links_idx < links_top) links_top = links_idx;
|
||||
if (links_idx >= links_top + numrows) links_top = links_idx - numrows + 1;
|
||||
|
||||
var i: u32 = 0;
|
||||
while (i < numrows) : (i += 1) {
|
||||
for (0..numrows) |i| {
|
||||
if (i + links_top >= links.?.items.len) break;
|
||||
const e = links.?.items[i+links_top];
|
||||
ui.style(if (i+links_top == links_idx) .sel else .default);
|
||||
|
|
@ -445,36 +531,49 @@ const info = struct {
|
|||
box.move(row.*, 3);
|
||||
ui.style(.bold);
|
||||
if (e.ext()) |ext| {
|
||||
var buf: [32]u8 = undefined;
|
||||
if (ext.pack.hasmode) {
|
||||
ui.addstr("Mode: ");
|
||||
ui.style(.default);
|
||||
ui.addmode(ext.mode);
|
||||
var buf: [32]u8 = undefined;
|
||||
ui.style(.bold);
|
||||
}
|
||||
if (ext.pack.hasuid) {
|
||||
ui.addstr(" UID: ");
|
||||
ui.style(.default);
|
||||
ui.addstr(std.fmt.bufPrintZ(&buf, "{d:<6}", .{ ext.uid }) catch unreachable);
|
||||
ui.style(.bold);
|
||||
}
|
||||
if (ext.pack.hasgid) {
|
||||
ui.addstr(" GID: ");
|
||||
ui.style(.default);
|
||||
ui.addstr(std.fmt.bufPrintZ(&buf, "{d:<6}", .{ ext.gid }) catch unreachable);
|
||||
}
|
||||
} else {
|
||||
ui.addstr("Type: ");
|
||||
ui.style(.default);
|
||||
ui.addstr(if (e.isDirectory()) "Directory" else if (if (e.file()) |f| f.notreg else false) "Other" else "File");
|
||||
ui.addstr(switch (e.pack.etype) {
|
||||
.dir => "Directory",
|
||||
.nonreg => "Other",
|
||||
.reg, .link => "File",
|
||||
else => "Excluded",
|
||||
});
|
||||
}
|
||||
row.* += 1;
|
||||
|
||||
// Last modified
|
||||
if (e.ext()) |ext| {
|
||||
if (ext.pack.hasmtime) {
|
||||
box.move(row.*, 3);
|
||||
ui.style(.bold);
|
||||
ui.addstr("Last modified: ");
|
||||
ui.addts(.default, ext.mtime);
|
||||
row.* += 1;
|
||||
}
|
||||
}
|
||||
|
||||
// Disk usage & Apparent size
|
||||
drawSize(box, row, " Disk usage: ", util.blocksToSize(e.blocks), if (e.dir()) |d| util.blocksToSize(d.shared_blocks) else 0);
|
||||
drawSize(box, row, " Disk usage: ", util.blocksToSize(e.pack.blocks), if (e.dir()) |d| util.blocksToSize(d.shared_blocks) else 0);
|
||||
drawSize(box, row, "Apparent size: ", e.size, if (e.dir()) |d| d.shared_size else 0);
|
||||
|
||||
// Number of items
|
||||
|
|
@ -491,7 +590,7 @@ const info = struct {
|
|||
box.move(row.*, 3);
|
||||
ui.style(.bold);
|
||||
ui.addstr(" Link count: ");
|
||||
ui.addnum(.default, model.inodes.map.get(l).?.nlink);
|
||||
ui.addnum(.default, l.pack.nlink);
|
||||
box.move(row.*, 23);
|
||||
ui.style(.bold);
|
||||
ui.addstr(" Inode: ");
|
||||
|
|
@ -509,7 +608,7 @@ const info = struct {
|
|||
// for each item. Think it's better to have a dynamic height based on
|
||||
// terminal size and scroll if the content doesn't fit.
|
||||
const rows = 5 // border + padding + close message
|
||||
+ if (tab == .links) 8 else
|
||||
+ if (tab == .links and !main.config.binreader) 8 else
|
||||
4 // name + type + disk usage + apparent size
|
||||
+ (if (e.ext() != null) @as(u32, 1) else 0) // last modified
|
||||
+ (if (e.link() != null) @as(u32, 1) else 0) // link count
|
||||
|
|
@ -522,7 +621,7 @@ const info = struct {
|
|||
var row: u32 = 2;
|
||||
|
||||
// Tabs
|
||||
if (e.etype == .link) {
|
||||
if (e.pack.etype == .link) {
|
||||
box.tab(cols-19, tab == .info, 1, "Info");
|
||||
box.tab(cols-10, tab == .links, 2, "Links");
|
||||
}
|
||||
|
|
@ -543,20 +642,20 @@ const info = struct {
|
|||
}
|
||||
|
||||
fn keyInput(ch: i32) bool {
|
||||
if (entry.?.etype == .link) {
|
||||
if (entry.?.pack.etype == .link) {
|
||||
switch (ch) {
|
||||
'1', 'h', ui.c.KEY_LEFT => { set(entry, .info); return true; },
|
||||
'2', 'l', ui.c.KEY_RIGHT => { set(entry, .links); return true; },
|
||||
'1', 'h', c.KEY_LEFT => { set(entry, .info); return true; },
|
||||
'2', 'l', c.KEY_RIGHT => { set(entry, .links); return true; },
|
||||
else => {},
|
||||
}
|
||||
}
|
||||
if (tab == .links) {
|
||||
if (tab == .links and !main.config.binreader) {
|
||||
if (keyInputSelection(ch, &links_idx, links.?.items.len, 5))
|
||||
return true;
|
||||
if (ch == 10) { // Enter - go to selected entry
|
||||
const l = links.?.items[links_idx];
|
||||
dir_parent = l.parent;
|
||||
loadDir(&l.entry);
|
||||
loadDir(l.entry.nameHash());
|
||||
set(null, .info);
|
||||
}
|
||||
}
|
||||
|
|
@ -625,7 +724,7 @@ const help = struct {
|
|||
var i = offset*2;
|
||||
while (i < (offset + keylines)*2) : (i += 2) {
|
||||
line += 1;
|
||||
box.move(line, 13 - @intCast(u32, keys[i].len));
|
||||
box.move(line, 13 - @as(u32, @intCast(keys[i].len)));
|
||||
ui.style(.key);
|
||||
ui.addstr(keys[i]);
|
||||
ui.style(.default);
|
||||
|
|
@ -657,15 +756,12 @@ const help = struct {
|
|||
}
|
||||
|
||||
fn drawAbout(box: ui.Box) void {
|
||||
for (logo) |s, n| {
|
||||
box.move(@intCast(u32, n)+3, 12);
|
||||
for (logo, 0..) |s, n| {
|
||||
box.move(@as(u32, @intCast(n+3)), 12);
|
||||
var i: u5 = 28;
|
||||
while (true) {
|
||||
while (i != 0) : (i -= 1) {
|
||||
ui.style(if (s & (@as(u29,1)<<i) > 0) .sel else .default);
|
||||
ui.addch(' ');
|
||||
if (i == 0)
|
||||
break;
|
||||
i -= 1;
|
||||
}
|
||||
}
|
||||
ui.style(.default);
|
||||
|
|
@ -675,7 +771,7 @@ const help = struct {
|
|||
ui.style(.num);
|
||||
box.move(7, 43); ui.addstr(main.program_version);
|
||||
ui.style(.default);
|
||||
box.move(9, 9); ui.addstr("Written by Yoran Heling <projects@yorhel.nl>");
|
||||
box.move(9, 11); ui.addstr("Written by Yorhel <projects@yorhel.nl>");
|
||||
box.move(10,16); ui.addstr("https://dev.yorhel.nl/ncdu");
|
||||
}
|
||||
|
||||
|
|
@ -706,9 +802,9 @@ const help = struct {
|
|||
'1' => tab = .keys,
|
||||
'2' => tab = .flags,
|
||||
'3' => tab = .about,
|
||||
'h', ui.c.KEY_LEFT => tab = if (tab == .about) .flags else .keys,
|
||||
'l', ui.c.KEY_RIGHT => tab = if (tab == .keys) .flags else .about,
|
||||
'j', ' ', ui.c.KEY_DOWN, ui.c.KEY_NPAGE => {
|
||||
'h', c.KEY_LEFT => tab = if (tab == .about) .flags else .keys,
|
||||
'l', c.KEY_RIGHT => tab = if (tab == .keys) .flags else .about,
|
||||
'j', ' ', c.KEY_DOWN, c.KEY_NPAGE => {
|
||||
const max = switch (tab) {
|
||||
.keys => keys.len/2 - keylines,
|
||||
else => @as(u32, 0),
|
||||
|
|
@ -716,7 +812,7 @@ const help = struct {
|
|||
if (offset < max)
|
||||
offset += 1;
|
||||
},
|
||||
'k', ui.c.KEY_UP, ui.c.KEY_PPAGE => { if (offset > 0) offset -= 1; },
|
||||
'k', c.KEY_UP, c.KEY_PPAGE => { if (offset > 0) offset -= 1; },
|
||||
else => state = .main,
|
||||
}
|
||||
}
|
||||
|
|
@ -732,7 +828,10 @@ pub fn draw() void {
|
|||
ui.addch('?');
|
||||
ui.style(.hd);
|
||||
ui.addstr(" for help");
|
||||
if (main.config.imported) {
|
||||
if (main.config.binreader) {
|
||||
ui.move(0, ui.cols -| 11);
|
||||
ui.addstr("[from file]");
|
||||
} else if (main.config.imported) {
|
||||
ui.move(0, ui.cols -| 10);
|
||||
ui.addstr("[imported]");
|
||||
} else if (!main.config.can_delete.?) {
|
||||
|
|
@ -746,12 +845,7 @@ pub fn draw() void {
|
|||
ui.move(1,3);
|
||||
ui.addch(' ');
|
||||
ui.style(.dir);
|
||||
|
||||
var pathbuf = std.ArrayList(u8).init(main.allocator);
|
||||
dir_parent.fmtPath(true, &pathbuf);
|
||||
ui.addstr(ui.shorten(ui.toUtf8(util.arrayListBufZ(&pathbuf)), ui.cols -| 5));
|
||||
pathbuf.deinit();
|
||||
|
||||
ui.addstr(ui.shorten(ui.toUtf8(dir_path), ui.cols -| 5));
|
||||
ui.style(.default);
|
||||
ui.addch(' ');
|
||||
|
||||
|
|
@ -759,7 +853,7 @@ pub fn draw() void {
|
|||
if (cursor_idx < current_view.top) current_view.top = cursor_idx;
|
||||
if (cursor_idx >= current_view.top + numrows) current_view.top = cursor_idx - numrows + 1;
|
||||
|
||||
var i: u32 = 0;
|
||||
var i: u32 = if (dir_loading > 0) numrows else 0;
|
||||
var sel_row: u32 = 0;
|
||||
while (i < numrows) : (i += 1) {
|
||||
if (i+current_view.top >= dir_items.items.len) break;
|
||||
|
|
@ -775,15 +869,23 @@ pub fn draw() void {
|
|||
ui.style(.hd);
|
||||
ui.move(ui.rows-1, 0);
|
||||
ui.hline(' ', ui.cols);
|
||||
ui.move(ui.rows-1, 1);
|
||||
ui.move(ui.rows-1, 0);
|
||||
if (dir_loading > 0) {
|
||||
ui.addstr(" Loading... ");
|
||||
ui.addnum(.hd, dir_loading);
|
||||
} else {
|
||||
ui.addch(if (main.config.show_blocks) '*' else ' ');
|
||||
ui.style(if (main.config.show_blocks) .bold_hd else .hd);
|
||||
ui.addstr("Total disk usage: ");
|
||||
ui.addsize(.hd, util.blocksToSize(dir_parent.entry.blocks));
|
||||
ui.addsize(.hd, util.blocksToSize(dir_parent.entry.pack.blocks));
|
||||
ui.style(if (main.config.show_blocks) .hd else .bold_hd);
|
||||
ui.addstr(" Apparent size: ");
|
||||
ui.addstr(" ");
|
||||
ui.addch(if (main.config.show_blocks) ' ' else '*');
|
||||
ui.addstr("Apparent size: ");
|
||||
ui.addsize(.hd, dir_parent.entry.size);
|
||||
ui.addstr(" Items: ");
|
||||
ui.addnum(.hd, dir_parent.items);
|
||||
}
|
||||
|
||||
switch (state) {
|
||||
.main => {},
|
||||
|
|
@ -792,10 +894,14 @@ pub fn draw() void {
|
|||
.info => info.draw(),
|
||||
}
|
||||
if (message) |m| {
|
||||
const box = ui.Box.create(6, 60, "Message");
|
||||
box.move(2, 2);
|
||||
ui.addstr(m);
|
||||
box.move(4, 33);
|
||||
const box = ui.Box.create(@intCast(m.len + 5), 60, "Message");
|
||||
i = 2;
|
||||
for (m) |ln| {
|
||||
box.move(i, 2);
|
||||
ui.addstr(ln);
|
||||
i += 1;
|
||||
}
|
||||
box.move(i+1, 33);
|
||||
ui.addstr("Press any key to continue");
|
||||
}
|
||||
if (sel_row > 0) ui.move(sel_row, 0);
|
||||
|
|
@ -806,27 +912,29 @@ fn sortToggle(col: main.config.SortCol, default_order: main.config.SortOrder) vo
|
|||
else if (main.config.sort_order == .asc) main.config.sort_order = .desc
|
||||
else main.config.sort_order = .asc;
|
||||
main.config.sort_col = col;
|
||||
sortDir(null);
|
||||
sortDir(0);
|
||||
}
|
||||
|
||||
fn keyInputSelection(ch: i32, idx: *usize, len: usize, page: u32) bool {
|
||||
switch (ch) {
|
||||
'j', ui.c.KEY_DOWN => {
|
||||
'j', c.KEY_DOWN => {
|
||||
if (idx.*+1 < len) idx.* += 1;
|
||||
},
|
||||
'k', ui.c.KEY_UP => {
|
||||
'k', c.KEY_UP => {
|
||||
if (idx.* > 0) idx.* -= 1;
|
||||
},
|
||||
ui.c.KEY_HOME => idx.* = 0,
|
||||
ui.c.KEY_END, ui.c.KEY_LL => idx.* = len -| 1,
|
||||
ui.c.KEY_PPAGE => idx.* = idx.* -| page,
|
||||
ui.c.KEY_NPAGE => idx.* = std.math.min(len -| 1, idx.* + page),
|
||||
c.KEY_HOME => idx.* = 0,
|
||||
c.KEY_END, c.KEY_LL => idx.* = len -| 1,
|
||||
c.KEY_PPAGE => idx.* = idx.* -| page,
|
||||
c.KEY_NPAGE => idx.* = @min(len -| 1, idx.* + page),
|
||||
else => return false,
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
pub fn keyInput(ch: i32) void {
|
||||
if (dir_loading > 0) return;
|
||||
|
||||
defer current_view.save();
|
||||
|
||||
if (message != null) {
|
||||
|
|
@ -846,23 +954,32 @@ pub fn keyInput(ch: i32) void {
|
|||
'?' => state = .help,
|
||||
'i' => if (dir_items.items.len > 0) info.set(dir_items.items[cursor_idx], .info),
|
||||
'r' => {
|
||||
if (!main.config.can_refresh.?)
|
||||
message = "Directory refresh feature disabled."
|
||||
if (main.config.binreader)
|
||||
message = &.{"Refresh feature is not available when reading from file."}
|
||||
else if (!main.config.can_refresh.? and main.config.imported)
|
||||
message = &.{"Refresh feature disabled.", "Re-run with --enable-refresh to enable this option."}
|
||||
else if (!main.config.can_refresh.?)
|
||||
message = &.{"Directory refresh feature disabled."}
|
||||
else {
|
||||
main.state = .refresh;
|
||||
scan.setupRefresh(dir_parent);
|
||||
sink.global.sink = .mem;
|
||||
mem_sink.global.root = dir_parent;
|
||||
}
|
||||
},
|
||||
'b' => {
|
||||
if (!main.config.can_shell.?)
|
||||
message = "Shell feature disabled."
|
||||
message = &.{"Shell feature disabled.", "Re-run with --enable-shell to enable this option."}
|
||||
else
|
||||
main.state = .shell;
|
||||
},
|
||||
'd' => {
|
||||
if (dir_items.items.len == 0) {
|
||||
} else if (!main.config.can_delete.?)
|
||||
message = "Deletion feature disabled."
|
||||
} else if (main.config.binreader)
|
||||
message = &.{"File deletion is not available when reading from file."}
|
||||
else if (!main.config.can_delete.? and main.config.imported)
|
||||
message = &.{"File deletion is disabled.", "Re-run with --enable-delete to enable this option."}
|
||||
else if (!main.config.can_delete.?)
|
||||
message = &.{"File deletion is disabled."}
|
||||
else if (dir_items.items[cursor_idx]) |e| {
|
||||
main.state = .delete;
|
||||
const next =
|
||||
|
|
@ -880,45 +997,46 @@ pub fn keyInput(ch: i32) void {
|
|||
'M' => if (main.config.extended) sortToggle(.mtime, .desc),
|
||||
'e' => {
|
||||
main.config.show_hidden = !main.config.show_hidden;
|
||||
loadDir(null);
|
||||
loadDir(0);
|
||||
state = .main;
|
||||
},
|
||||
't' => {
|
||||
main.config.sort_dirsfirst = !main.config.sort_dirsfirst;
|
||||
sortDir(null);
|
||||
sortDir(0);
|
||||
},
|
||||
'a' => {
|
||||
main.config.show_blocks = !main.config.show_blocks;
|
||||
if (main.config.show_blocks and main.config.sort_col == .size) {
|
||||
main.config.sort_col = .blocks;
|
||||
sortDir(null);
|
||||
sortDir(0);
|
||||
}
|
||||
if (!main.config.show_blocks and main.config.sort_col == .blocks) {
|
||||
main.config.sort_col = .size;
|
||||
sortDir(null);
|
||||
sortDir(0);
|
||||
}
|
||||
},
|
||||
|
||||
// Navigation
|
||||
10, 'l', ui.c.KEY_RIGHT => {
|
||||
10, 'l', c.KEY_RIGHT => {
|
||||
if (dir_items.items.len == 0) {
|
||||
} else if (dir_items.items[cursor_idx]) |e| {
|
||||
if (e.dir()) |d| {
|
||||
dir_parent = d;
|
||||
loadDir(null);
|
||||
enterSub(d);
|
||||
//dir_parent = d;
|
||||
loadDir(0);
|
||||
state = .main;
|
||||
}
|
||||
} else if (dir_parent.parent) |p| {
|
||||
dir_parent = p;
|
||||
loadDir(null);
|
||||
} else if (dir_parents.items.len > 1) {
|
||||
enterParent();
|
||||
loadDir(0);
|
||||
state = .main;
|
||||
}
|
||||
},
|
||||
'h', '<', ui.c.KEY_BACKSPACE, ui.c.KEY_LEFT => {
|
||||
if (dir_parent.parent) |p| {
|
||||
const e = dir_parent;
|
||||
dir_parent = p;
|
||||
loadDir(&e.entry);
|
||||
'h', '<', c.KEY_BACKSPACE, c.KEY_LEFT => {
|
||||
if (dir_parents.items.len > 1) {
|
||||
//const h = dir_parent.entry.nameHash();
|
||||
enterParent();
|
||||
loadDir(0);
|
||||
state = .main;
|
||||
}
|
||||
},
|
||||
|
|
|
|||
20
src/c.zig
Normal file
20
src/c.zig
Normal file
|
|
@ -0,0 +1,20 @@
|
|||
// SPDX-FileCopyrightText: Yorhel <projects@yorhel.nl>
|
||||
// SPDX-License-Identifier: MIT
|
||||
|
||||
pub const c = @cImport({
|
||||
@cDefine("_XOPEN_SOURCE", "1"); // for wcwidth()
|
||||
@cInclude("stdio.h"); // fopen(), used to initialize ncurses
|
||||
@cInclude("string.h"); // strerror()
|
||||
@cInclude("time.h"); // strftime()
|
||||
@cInclude("wchar.h"); // wcwidth()
|
||||
@cInclude("locale.h"); // setlocale() and localeconv()
|
||||
@cInclude("fnmatch.h"); // fnmatch()
|
||||
@cInclude("unistd.h"); // getuid()
|
||||
@cInclude("sys/types.h"); // struct passwd
|
||||
@cInclude("pwd.h"); // getpwnam(), getpwuid()
|
||||
if (@import("builtin").os.tag == .linux) {
|
||||
@cInclude("sys/vfs.h"); // statfs()
|
||||
}
|
||||
@cInclude("curses.h");
|
||||
@cInclude("zstd.h");
|
||||
});
|
||||
141
src/delete.zig
141
src/delete.zig
|
|
@ -1,4 +1,4 @@
|
|||
// SPDX-FileCopyrightText: 2021-2022 Yoran Heling <projects@yorhel.nl>
|
||||
// SPDX-FileCopyrightText: Yorhel <projects@yorhel.nl>
|
||||
// SPDX-License-Identifier: MIT
|
||||
|
||||
const std = @import("std");
|
||||
|
|
@ -6,7 +6,11 @@ const main = @import("main.zig");
|
|||
const model = @import("model.zig");
|
||||
const ui = @import("ui.zig");
|
||||
const browser = @import("browser.zig");
|
||||
const scan = @import("scan.zig");
|
||||
const sink = @import("sink.zig");
|
||||
const mem_sink = @import("mem_sink.zig");
|
||||
const util = @import("util.zig");
|
||||
const c = @import("c.zig").c;
|
||||
|
||||
var parent: *model.Dir = undefined;
|
||||
var entry: *model.Entry = undefined;
|
||||
|
|
@ -45,9 +49,8 @@ fn deleteItem(dir: std.fs.Dir, path: [:0]const u8, ptr: *align(1) ?*model.Entry)
|
|||
return true;
|
||||
|
||||
if (entry.dir()) |d| {
|
||||
var fd = dir.openDirZ(path, .{ .access_sub_paths = true, .iterate = false })
|
||||
catch |e| return err(e);
|
||||
var it = &d.sub;
|
||||
var fd = dir.openDirZ(path, .{ .no_follow = true, .iterate = false }) catch |e| return err(e);
|
||||
var it = &d.sub.ptr;
|
||||
parent = d;
|
||||
defer parent = parent.parent.?;
|
||||
while (it.*) |n| {
|
||||
|
|
@ -56,15 +59,66 @@ fn deleteItem(dir: std.fs.Dir, path: [:0]const u8, ptr: *align(1) ?*model.Entry)
|
|||
return true;
|
||||
}
|
||||
if (it.* == n) // item deletion failed, make sure to still advance to next
|
||||
it = &n.next;
|
||||
it = &n.next.ptr;
|
||||
}
|
||||
fd.close();
|
||||
dir.deleteDirZ(path) catch |e|
|
||||
return if (e != error.DirNotEmpty or d.sub == null) err(e) else false;
|
||||
return if (e != error.DirNotEmpty or d.sub.ptr == null) err(e) else false;
|
||||
} else
|
||||
dir.deleteFileZ(path) catch |e| return err(e);
|
||||
ptr.*.?.delStats(parent);
|
||||
ptr.* = ptr.*.?.next;
|
||||
ptr.*.?.zeroStats(parent);
|
||||
ptr.* = ptr.*.?.next.ptr;
|
||||
return false;
|
||||
}
|
||||
|
||||
// Returns true if the item has been deleted successfully.
|
||||
fn deleteCmd(path: [:0]const u8, ptr: *align(1) ?*model.Entry) bool {
|
||||
{
|
||||
var env = std.process.getEnvMap(main.allocator) catch unreachable;
|
||||
defer env.deinit();
|
||||
env.put("NCDU_DELETE_PATH", path) catch unreachable;
|
||||
|
||||
// Since we're passing the path as an environment variable and go through
|
||||
// the shell anyway, we can refer to the variable and avoid error-prone
|
||||
// shell escaping.
|
||||
const cmd = std.fmt.allocPrint(main.allocator, "{s} \"$NCDU_DELETE_PATH\"", .{main.config.delete_command}) catch unreachable;
|
||||
defer main.allocator.free(cmd);
|
||||
ui.runCmd(&.{"/bin/sh", "-c", cmd}, null, &env, true);
|
||||
}
|
||||
|
||||
const stat = scan.statAt(std.fs.cwd(), path, false, null) catch {
|
||||
// Stat failed. Would be nice to display an error if it's not
|
||||
// 'FileNotFound', but w/e, let's just assume the item has been
|
||||
// deleted as expected.
|
||||
ptr.*.?.zeroStats(parent);
|
||||
ptr.* = ptr.*.?.next.ptr;
|
||||
return true;
|
||||
};
|
||||
|
||||
// If either old or new entry is not a dir, remove & re-add entry in the in-memory tree.
|
||||
if (ptr.*.?.pack.etype != .dir or stat.etype != .dir) {
|
||||
ptr.*.?.zeroStats(parent);
|
||||
const e = model.Entry.create(main.allocator, stat.etype, main.config.extended and !stat.ext.isEmpty(), ptr.*.?.name());
|
||||
e.next.ptr = ptr.*.?.next.ptr;
|
||||
mem_sink.statToEntry(&stat, e, parent);
|
||||
ptr.* = e;
|
||||
|
||||
var it : ?*model.Dir = parent;
|
||||
while (it) |p| : (it = p.parent) {
|
||||
if (stat.etype != .link) {
|
||||
p.entry.pack.blocks +|= e.pack.blocks;
|
||||
p.entry.size +|= e.size;
|
||||
}
|
||||
p.items +|= 1;
|
||||
}
|
||||
}
|
||||
|
||||
// If new entry is a dir, recursively scan.
|
||||
if (ptr.*.?.dir()) |d| {
|
||||
main.state = .refresh;
|
||||
sink.global.sink = .mem;
|
||||
mem_sink.global.root = d;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
|
|
@ -77,36 +131,52 @@ pub fn delete() ?*model.Entry {
|
|||
|
||||
// Find the pointer to this entry
|
||||
const e = entry;
|
||||
var it = &parent.sub;
|
||||
while (it.*) |n| : (it = &n.next)
|
||||
var it = &parent.sub.ptr;
|
||||
while (it.*) |n| : (it = &n.next.ptr)
|
||||
if (it.* == entry)
|
||||
break;
|
||||
|
||||
var path = std.ArrayList(u8).init(main.allocator);
|
||||
defer path.deinit();
|
||||
parent.fmtPath(true, &path);
|
||||
var path: std.ArrayListUnmanaged(u8) = .empty;
|
||||
defer path.deinit(main.allocator);
|
||||
parent.fmtPath(main.allocator, true, &path);
|
||||
if (path.items.len == 0 or path.items[path.items.len-1] != '/')
|
||||
path.append('/') catch unreachable;
|
||||
path.appendSlice(entry.name()) catch unreachable;
|
||||
path.append(main.allocator, '/') catch unreachable;
|
||||
path.appendSlice(main.allocator, entry.name()) catch unreachable;
|
||||
|
||||
_ = deleteItem(std.fs.cwd(), util.arrayListBufZ(&path), it);
|
||||
if (main.config.delete_command.len == 0) {
|
||||
_ = deleteItem(std.fs.cwd(), util.arrayListBufZ(&path, main.allocator), it);
|
||||
model.inodes.addAllStats();
|
||||
return if (it.* == e) e else next_sel;
|
||||
} else {
|
||||
const isdel = deleteCmd(util.arrayListBufZ(&path, main.allocator), it);
|
||||
model.inodes.addAllStats();
|
||||
return if (isdel) next_sel else it.*;
|
||||
}
|
||||
}
|
||||
|
||||
fn drawConfirm() void {
|
||||
browser.draw();
|
||||
const box = ui.Box.create(6, 60, "Confirm delete");
|
||||
box.move(1, 2);
|
||||
if (main.config.delete_command.len == 0) {
|
||||
ui.addstr("Are you sure you want to delete \"");
|
||||
ui.addstr(ui.shorten(ui.toUtf8(entry.name()), 21));
|
||||
ui.addch('"');
|
||||
if (entry.etype != .dir)
|
||||
if (entry.pack.etype != .dir)
|
||||
ui.addch('?')
|
||||
else {
|
||||
box.move(2, 18);
|
||||
ui.addstr("and all of its contents?");
|
||||
}
|
||||
} else {
|
||||
ui.addstr("Are you sure you want to run \"");
|
||||
ui.addstr(ui.shorten(ui.toUtf8(main.config.delete_command), 25));
|
||||
ui.addch('"');
|
||||
box.move(2, 4);
|
||||
ui.addstr("on \"");
|
||||
ui.addstr(ui.shorten(ui.toUtf8(entry.name()), 50));
|
||||
ui.addch('"');
|
||||
}
|
||||
|
||||
box.move(4, 15);
|
||||
ui.style(if (confirm == .yes) .sel else .default);
|
||||
|
|
@ -119,20 +189,25 @@ fn drawConfirm() void {
|
|||
box.move(4, 31);
|
||||
ui.style(if (confirm == .ignore) .sel else .default);
|
||||
ui.addstr("don't ask me again");
|
||||
box.move(4, switch (confirm) {
|
||||
.yes => 15,
|
||||
.no => 25,
|
||||
.ignore => 31
|
||||
});
|
||||
}
|
||||
|
||||
fn drawProgress() void {
|
||||
var path = std.ArrayList(u8).init(main.allocator);
|
||||
defer path.deinit();
|
||||
parent.fmtPath(false, &path);
|
||||
path.append('/') catch unreachable;
|
||||
path.appendSlice(entry.name()) catch unreachable;
|
||||
var path: std.ArrayListUnmanaged(u8) = .empty;
|
||||
defer path.deinit(main.allocator);
|
||||
parent.fmtPath(main.allocator, false, &path);
|
||||
path.append(main.allocator, '/') catch unreachable;
|
||||
path.appendSlice(main.allocator, entry.name()) catch unreachable;
|
||||
|
||||
// TODO: Item counts and progress bar would be nice.
|
||||
|
||||
const box = ui.Box.create(6, 60, "Deleting...");
|
||||
box.move(2, 2);
|
||||
ui.addstr(ui.shorten(ui.toUtf8(util.arrayListBufZ(&path)), 56));
|
||||
ui.addstr(ui.shorten(ui.toUtf8(util.arrayListBufZ(&path, main.allocator)), 56));
|
||||
box.move(4, 41);
|
||||
ui.addstr("Press ");
|
||||
ui.style(.key);
|
||||
|
|
@ -142,16 +217,16 @@ fn drawProgress() void {
|
|||
}
|
||||
|
||||
fn drawErr() void {
|
||||
var path = std.ArrayList(u8).init(main.allocator);
|
||||
defer path.deinit();
|
||||
parent.fmtPath(false, &path);
|
||||
path.append('/') catch unreachable;
|
||||
path.appendSlice(entry.name()) catch unreachable;
|
||||
var path: std.ArrayListUnmanaged(u8) = .empty;
|
||||
defer path.deinit(main.allocator);
|
||||
parent.fmtPath(main.allocator, false, &path);
|
||||
path.append(main.allocator, '/') catch unreachable;
|
||||
path.appendSlice(main.allocator, entry.name()) catch unreachable;
|
||||
|
||||
const box = ui.Box.create(6, 60, "Error");
|
||||
box.move(1, 2);
|
||||
ui.addstr("Error deleting ");
|
||||
ui.addstr(ui.shorten(ui.toUtf8(util.arrayListBufZ(&path)), 41));
|
||||
ui.addstr(ui.shorten(ui.toUtf8(util.arrayListBufZ(&path, main.allocator)), 41));
|
||||
box.move(2, 4);
|
||||
ui.addstr(ui.errorString(error_code));
|
||||
|
||||
|
|
@ -179,11 +254,11 @@ pub fn draw() void {
|
|||
pub fn keyInput(ch: i32) void {
|
||||
switch (state) {
|
||||
.confirm => switch (ch) {
|
||||
'h', ui.c.KEY_LEFT => confirm = switch (confirm) {
|
||||
'h', c.KEY_LEFT => confirm = switch (confirm) {
|
||||
.ignore => .no,
|
||||
else => .yes,
|
||||
},
|
||||
'l', ui.c.KEY_RIGHT => confirm = switch (confirm) {
|
||||
'l', c.KEY_RIGHT => confirm = switch (confirm) {
|
||||
.yes => .no,
|
||||
else => .ignore,
|
||||
},
|
||||
|
|
@ -203,11 +278,11 @@ pub fn keyInput(ch: i32) void {
|
|||
main.state = .browse;
|
||||
},
|
||||
.err => switch (ch) {
|
||||
'h', ui.c.KEY_LEFT => error_option = switch (error_option) {
|
||||
'h', c.KEY_LEFT => error_option = switch (error_option) {
|
||||
.all => .ignore,
|
||||
else => .abort,
|
||||
},
|
||||
'l', ui.c.KEY_RIGHT => error_option = switch (error_option) {
|
||||
'l', c.KEY_RIGHT => error_option = switch (error_option) {
|
||||
.abort => .ignore,
|
||||
else => .all,
|
||||
},
|
||||
|
|
|
|||
322
src/exclude.zig
Normal file
322
src/exclude.zig
Normal file
|
|
@ -0,0 +1,322 @@
|
|||
// SPDX-FileCopyrightText: Yorhel <projects@yorhel.nl>
|
||||
// SPDX-License-Identifier: MIT
|
||||
|
||||
const std = @import("std");
|
||||
const main = @import("main.zig");
|
||||
const c = @import("c.zig").c;
|
||||
|
||||
// Reference:
|
||||
// https://manned.org/glob.7
|
||||
// https://manned.org/man.b4c7391e/rsync#head17
|
||||
// https://manned.org/man.401d6ade/arch/gitignore#head4
|
||||
// Patterns:
|
||||
// Single component (none of these patterns match a '/'):
|
||||
// * -> match any character sequence
|
||||
// ? -> match single character
|
||||
// [abc] -> match a single character in the given list
|
||||
// [a-c] -> match a single character in the given range
|
||||
// [!a-c] -> match a single character not in the given range
|
||||
// # (these are currently still handled by calling libc fnmatch())
|
||||
// Anchored patterns:
|
||||
// /pattern
|
||||
// /dir/pattern
|
||||
// /dir/subdir/pattern
|
||||
// # In both rsync and gitignore, anchored patterns are relative to the
|
||||
// # directory under consideration. In ncdu they are instead anchored to
|
||||
// # the filesystem root (i.e. matched against the absolute path).
|
||||
// Non-anchored patterns:
|
||||
// somefile
|
||||
// subdir/foo
|
||||
// sub*/bar
|
||||
// # In .gitignore, non-anchored patterns with a slash are implicitely anchored,
|
||||
// # in rsync they can match anywhere in a path. We follow rsync here.
|
||||
// Dir patterns (trailing '/' matches only dirs):
|
||||
// /pattern/
|
||||
// somedir/
|
||||
// subdir/pattern/
|
||||
//
|
||||
// BREAKING CHANGE:
|
||||
// ncdu < 2.2 single-component matches may cross directory boundary, e.g.
|
||||
// 'a*b' matches 'a/b'. This is an old bug, the fix breaks compatibility with
|
||||
// old exlude patterns.
|
||||
|
||||
const Pattern = struct {
|
||||
isdir: bool = undefined,
|
||||
isliteral: bool = undefined,
|
||||
pattern: [:0]const u8,
|
||||
sub: ?*const Pattern = undefined,
|
||||
|
||||
fn isLiteral(str: []const u8) bool {
|
||||
for (str) |chr| switch (chr) {
|
||||
'[', '*', '?', '\\' => return false,
|
||||
else => {},
|
||||
};
|
||||
return true;
|
||||
}
|
||||
|
||||
fn parse(pat_: []const u8) *const Pattern {
|
||||
var pat = std.mem.trimLeft(u8, pat_, "/");
|
||||
const top = main.allocator.create(Pattern) catch unreachable;
|
||||
var tail = top;
|
||||
tail.sub = null;
|
||||
while (std.mem.indexOfScalar(u8, pat, '/')) |idx| {
|
||||
tail.pattern = main.allocator.dupeZ(u8, pat[0..idx]) catch unreachable;
|
||||
tail.isdir = true;
|
||||
tail.isliteral = isLiteral(tail.pattern);
|
||||
pat = pat[idx+1..];
|
||||
if (std.mem.allEqual(u8, pat, '/')) return top;
|
||||
|
||||
const next = main.allocator.create(Pattern) catch unreachable;
|
||||
tail.sub = next;
|
||||
tail = next;
|
||||
tail.sub = null;
|
||||
}
|
||||
tail.pattern = main.allocator.dupeZ(u8, pat) catch unreachable;
|
||||
tail.isdir = false;
|
||||
tail.isliteral = isLiteral(tail.pattern);
|
||||
return top;
|
||||
}
|
||||
};
|
||||
|
||||
test "parse" {
|
||||
const t1 = Pattern.parse("");
|
||||
try std.testing.expectEqualStrings(t1.pattern, "");
|
||||
try std.testing.expectEqual(t1.isdir, false);
|
||||
try std.testing.expectEqual(t1.isliteral, true);
|
||||
try std.testing.expectEqual(t1.sub, null);
|
||||
|
||||
const t2 = Pattern.parse("//a//");
|
||||
try std.testing.expectEqualStrings(t2.pattern, "a");
|
||||
try std.testing.expectEqual(t2.isdir, true);
|
||||
try std.testing.expectEqual(t2.isliteral, true);
|
||||
try std.testing.expectEqual(t2.sub, null);
|
||||
|
||||
const t3 = Pattern.parse("foo*/bar.zig");
|
||||
try std.testing.expectEqualStrings(t3.pattern, "foo*");
|
||||
try std.testing.expectEqual(t3.isdir, true);
|
||||
try std.testing.expectEqual(t3.isliteral, false);
|
||||
try std.testing.expectEqualStrings(t3.sub.?.pattern, "bar.zig");
|
||||
try std.testing.expectEqual(t3.sub.?.isdir, false);
|
||||
try std.testing.expectEqual(t3.sub.?.isliteral, true);
|
||||
try std.testing.expectEqual(t3.sub.?.sub, null);
|
||||
|
||||
const t4 = Pattern.parse("/?/sub/dir/");
|
||||
try std.testing.expectEqualStrings(t4.pattern, "?");
|
||||
try std.testing.expectEqual(t4.isdir, true);
|
||||
try std.testing.expectEqual(t4.isliteral, false);
|
||||
try std.testing.expectEqualStrings(t4.sub.?.pattern, "sub");
|
||||
try std.testing.expectEqual(t4.sub.?.isdir, true);
|
||||
try std.testing.expectEqual(t4.sub.?.isliteral, true);
|
||||
try std.testing.expectEqualStrings(t4.sub.?.sub.?.pattern, "dir");
|
||||
try std.testing.expectEqual(t4.sub.?.sub.?.isdir, true);
|
||||
try std.testing.expectEqual(t4.sub.?.sub.?.isliteral, true);
|
||||
try std.testing.expectEqual(t4.sub.?.sub.?.sub, null);
|
||||
}
|
||||
|
||||
|
||||
// List of patterns to be matched at one particular level.
|
||||
// There are 2 different types of lists: those where all patterns have a
|
||||
// sub-pointer (where the pattern only matches directories at this level, and
|
||||
// the match result is only used to construct the PatternList of the
|
||||
// subdirectory) and patterns without a sub-pointer (where the match result
|
||||
// determines whether the file/dir at this level should be included or not).
|
||||
fn PatternList(comptime withsub: bool) type {
|
||||
return struct {
|
||||
literals: std.HashMapUnmanaged(*const Pattern, Val, Ctx, 80) = .{},
|
||||
wild: std.ArrayListUnmanaged(*const Pattern) = .empty,
|
||||
|
||||
// Not a fan of the map-of-arrays approach in the 'withsub' case, it
|
||||
// has a lot of extra allocations. Linking the Patterns together in a
|
||||
// list would be nicer, but that involves mutable Patterns, which in
|
||||
// turn prevents multithreaded scanning. An alternative would be a
|
||||
// sorted array + binary search, but that slows down lookups. Perhaps a
|
||||
// custom hashmap with support for duplicate keys?
|
||||
const Val = if (withsub) std.ArrayListUnmanaged(*const Pattern) else void;
|
||||
|
||||
const Ctx = struct {
|
||||
pub fn hash(_: Ctx, p: *const Pattern) u64 {
|
||||
return std.hash.Wyhash.hash(0, p.pattern);
|
||||
}
|
||||
pub fn eql(_: Ctx, a: *const Pattern, b: *const Pattern) bool {
|
||||
return std.mem.eql(u8, a.pattern, b.pattern);
|
||||
}
|
||||
};
|
||||
|
||||
const Self = @This();
|
||||
|
||||
fn append(self: *Self, pat: *const Pattern) void {
|
||||
std.debug.assert((pat.sub != null) == withsub);
|
||||
if (pat.isliteral) {
|
||||
const e = self.literals.getOrPut(main.allocator, pat) catch unreachable;
|
||||
if (!e.found_existing) {
|
||||
e.key_ptr.* = pat;
|
||||
e.value_ptr.* = if (withsub) .{} else {};
|
||||
}
|
||||
if (!withsub and !pat.isdir and e.key_ptr.*.isdir) e.key_ptr.* = pat;
|
||||
if (withsub) {
|
||||
if (pat.sub) |s| e.value_ptr.*.append(main.allocator, s) catch unreachable;
|
||||
}
|
||||
|
||||
} else self.wild.append(main.allocator, pat) catch unreachable;
|
||||
}
|
||||
|
||||
fn match(self: *const Self, name: [:0]const u8) ?bool {
|
||||
var ret: ?bool = null;
|
||||
if (self.literals.getKey(&.{ .pattern = name })) |p| ret = p.isdir;
|
||||
for (self.wild.items) |p| {
|
||||
if (ret == false) return ret;
|
||||
if (c.fnmatch(p.pattern.ptr, name.ptr, 0) == 0) ret = p.isdir;
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
fn enter(self: *const Self, out: *Patterns, name: [:0]const u8) void {
|
||||
if (self.literals.get(&.{ .pattern = name })) |lst| for (lst.items) |sub| out.append(sub);
|
||||
for (self.wild.items) |p| if (c.fnmatch(p.pattern.ptr, name.ptr, 0) == 0) out.append(p.sub.?);
|
||||
}
|
||||
|
||||
fn deinit(self: *Self) void {
|
||||
if (withsub) {
|
||||
var it = self.literals.valueIterator();
|
||||
while (it.next()) |e| e.deinit(main.allocator);
|
||||
}
|
||||
self.literals.deinit(main.allocator);
|
||||
self.wild.deinit(main.allocator);
|
||||
self.* = undefined;
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
// List of all patterns that should be matched at one level.
|
||||
pub const Patterns = struct {
|
||||
nonsub: PatternList(false) = .{},
|
||||
sub: PatternList(true) = .{},
|
||||
isroot: bool = false,
|
||||
|
||||
fn append(self: *Patterns, pat: *const Pattern) void {
|
||||
if (pat.sub == null) self.nonsub.append(pat)
|
||||
else self.sub.append(pat);
|
||||
}
|
||||
|
||||
// Matches patterns in this level plus unanchored patterns.
|
||||
// Returns null if nothing matches, otherwise whether the given item should
|
||||
// only be exluced if it's a directory.
|
||||
// (Should not be called on root_unanchored)
|
||||
pub fn match(self: *const Patterns, name: [:0]const u8) ?bool {
|
||||
const a = self.nonsub.match(name);
|
||||
if (a == false) return false;
|
||||
const b = root_unanchored.nonsub.match(name);
|
||||
if (b == false) return false;
|
||||
return a orelse b;
|
||||
}
|
||||
|
||||
// Construct the list of patterns for a subdirectory.
|
||||
pub fn enter(self: *const Patterns, name: [:0]const u8) Patterns {
|
||||
var ret = Patterns{};
|
||||
self.sub.enter(&ret, name);
|
||||
root_unanchored.sub.enter(&ret, name);
|
||||
return ret;
|
||||
}
|
||||
|
||||
pub fn deinit(self: *Patterns) void {
|
||||
// getPatterns() result should be deinit()ed, except when it returns the root,
|
||||
// let's simplify that and simply don't deinit root.
|
||||
if (self.isroot) return;
|
||||
self.nonsub.deinit();
|
||||
self.sub.deinit();
|
||||
self.* = undefined;
|
||||
}
|
||||
};
|
||||
|
||||
// Unanchored patterns that should be checked at every level
|
||||
var root_unanchored: Patterns = .{};
|
||||
|
||||
// Patterns anchored at the root
|
||||
var root: Patterns = .{ .isroot = true };
|
||||
|
||||
pub fn addPattern(pattern: []const u8) void {
|
||||
if (pattern.len == 0) return;
|
||||
const p = Pattern.parse(pattern);
|
||||
if (pattern[0] == '/') root.append(p)
|
||||
else root_unanchored.append(p);
|
||||
}
|
||||
|
||||
// Get the patterns for the given (absolute) path, assuming the given path
|
||||
// itself hasn't been excluded. This function is slow, directory walking code
|
||||
// should use Patterns.enter() instead.
|
||||
pub fn getPatterns(path_: []const u8) Patterns {
|
||||
var path = std.mem.trim(u8, path_, "/");
|
||||
if (path.len == 0) return root;
|
||||
var pat = root;
|
||||
defer pat.deinit();
|
||||
while (std.mem.indexOfScalar(u8, path, '/')) |idx| {
|
||||
const name = main.allocator.dupeZ(u8, path[0..idx]) catch unreachable;
|
||||
defer main.allocator.free(name);
|
||||
path = path[idx+1..];
|
||||
|
||||
const sub = pat.enter(name);
|
||||
pat.deinit();
|
||||
pat = sub;
|
||||
}
|
||||
|
||||
const name = main.allocator.dupeZ(u8, path) catch unreachable;
|
||||
defer main.allocator.free(name);
|
||||
return pat.enter(name);
|
||||
}
|
||||
|
||||
|
||||
fn testfoo(p: *const Patterns) !void {
|
||||
try std.testing.expectEqual(p.match("root"), null);
|
||||
try std.testing.expectEqual(p.match("bar"), false);
|
||||
try std.testing.expectEqual(p.match("qoo"), false);
|
||||
try std.testing.expectEqual(p.match("xyz"), false);
|
||||
try std.testing.expectEqual(p.match("okay"), null);
|
||||
try std.testing.expectEqual(p.match("somefile"), false);
|
||||
var s = p.enter("okay");
|
||||
try std.testing.expectEqual(s.match("bar"), null);
|
||||
try std.testing.expectEqual(s.match("xyz"), null);
|
||||
try std.testing.expectEqual(s.match("notokay"), false);
|
||||
s.deinit();
|
||||
}
|
||||
|
||||
test "Matching" {
|
||||
addPattern("/foo/bar");
|
||||
addPattern("/foo/qoo/");
|
||||
addPattern("/foo/qoo");
|
||||
addPattern("/foo/qoo/");
|
||||
addPattern("/f??/xyz");
|
||||
addPattern("/f??/xyz/");
|
||||
addPattern("/*o/somefile");
|
||||
addPattern("/a??/okay");
|
||||
addPattern("/roo?");
|
||||
addPattern("/root/");
|
||||
addPattern("excluded");
|
||||
addPattern("somefile/");
|
||||
addPattern("o*y/not[o]kay");
|
||||
|
||||
var a0 = getPatterns("/");
|
||||
try std.testing.expectEqual(a0.match("a"), null);
|
||||
try std.testing.expectEqual(a0.match("excluded"), false);
|
||||
try std.testing.expectEqual(a0.match("somefile"), true);
|
||||
try std.testing.expectEqual(a0.match("root"), false);
|
||||
var a1 = a0.enter("foo");
|
||||
a0.deinit();
|
||||
try testfoo(&a1);
|
||||
a1.deinit();
|
||||
|
||||
var b0 = getPatterns("/somedir/somewhere");
|
||||
try std.testing.expectEqual(b0.match("a"), null);
|
||||
try std.testing.expectEqual(b0.match("excluded"), false);
|
||||
try std.testing.expectEqual(b0.match("root"), null);
|
||||
try std.testing.expectEqual(b0.match("okay"), null);
|
||||
var b1 = b0.enter("okay");
|
||||
b0.deinit();
|
||||
try std.testing.expectEqual(b1.match("excluded"), false);
|
||||
try std.testing.expectEqual(b1.match("okay"), null);
|
||||
try std.testing.expectEqual(b1.match("notokay"), false);
|
||||
b1.deinit();
|
||||
|
||||
var c0 = getPatterns("/foo/");
|
||||
try testfoo(&c0);
|
||||
c0.deinit();
|
||||
}
|
||||
270
src/json_export.zig
Normal file
270
src/json_export.zig
Normal file
|
|
@ -0,0 +1,270 @@
|
|||
// SPDX-FileCopyrightText: Yorhel <projects@yorhel.nl>
|
||||
// SPDX-License-Identifier: MIT
|
||||
|
||||
const std = @import("std");
|
||||
const main = @import("main.zig");
|
||||
const model = @import("model.zig");
|
||||
const sink = @import("sink.zig");
|
||||
const util = @import("util.zig");
|
||||
const ui = @import("ui.zig");
|
||||
const c = @import("c.zig").c;
|
||||
|
||||
// JSON output is necessarily single-threaded and items MUST be added depth-first.
|
||||
|
||||
pub const global = struct {
|
||||
var writer: *Writer = undefined;
|
||||
};
|
||||
|
||||
|
||||
const ZstdWriter = struct {
|
||||
ctx: ?*c.ZSTD_CStream,
|
||||
out: c.ZSTD_outBuffer,
|
||||
outbuf: [c.ZSTD_BLOCKSIZE_MAX + 64]u8,
|
||||
|
||||
fn create() *ZstdWriter {
|
||||
const w = main.allocator.create(ZstdWriter) catch unreachable;
|
||||
w.out = .{
|
||||
.dst = &w.outbuf,
|
||||
.size = w.outbuf.len,
|
||||
.pos = 0,
|
||||
};
|
||||
while (true) {
|
||||
w.ctx = c.ZSTD_createCStream();
|
||||
if (w.ctx != null) break;
|
||||
ui.oom();
|
||||
}
|
||||
_ = c.ZSTD_CCtx_setParameter(w.ctx, c.ZSTD_c_compressionLevel, main.config.complevel);
|
||||
return w;
|
||||
}
|
||||
|
||||
fn destroy(w: *ZstdWriter) void {
|
||||
_ = c.ZSTD_freeCStream(w.ctx);
|
||||
main.allocator.destroy(w);
|
||||
}
|
||||
|
||||
fn write(w: *ZstdWriter, f: std.fs.File, in: []const u8, flush: bool) !void {
|
||||
var arg = c.ZSTD_inBuffer{
|
||||
.src = in.ptr,
|
||||
.size = in.len,
|
||||
.pos = 0,
|
||||
};
|
||||
while (true) {
|
||||
const v = c.ZSTD_compressStream2(w.ctx, &w.out, &arg, if (flush) c.ZSTD_e_end else c.ZSTD_e_continue);
|
||||
if (c.ZSTD_isError(v) != 0) return error.ZstdCompressError;
|
||||
if (flush or w.out.pos > w.outbuf.len / 2) {
|
||||
try f.writeAll(w.outbuf[0..w.out.pos]);
|
||||
w.out.pos = 0;
|
||||
}
|
||||
if (!flush and arg.pos == arg.size) break;
|
||||
if (flush and v == 0) break;
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
pub const Writer = struct {
|
||||
fd: std.fs.File,
|
||||
zstd: ?*ZstdWriter = null,
|
||||
// Must be large enough to hold PATH_MAX*6 plus some overhead.
|
||||
// (The 6 is because, in the worst case, every byte expands to a "\u####"
|
||||
// escape, and we do pessimistic estimates here in order to avoid checking
|
||||
// buffer lengths for each and every write operation)
|
||||
buf: [64*1024]u8 = undefined,
|
||||
off: usize = 0,
|
||||
dir_entry_open: bool = false,
|
||||
|
||||
fn flush(ctx: *Writer, bytes: usize) void {
|
||||
@branchHint(.unlikely);
|
||||
// This can only really happen when the root path exceeds PATH_MAX,
|
||||
// in which case we would probably have error'ed out earlier anyway.
|
||||
if (bytes > ctx.buf.len) ui.die("Error writing JSON export: path too long.\n", .{});
|
||||
const buf = ctx.buf[0..ctx.off];
|
||||
(if (ctx.zstd) |z| z.write(ctx.fd, buf, bytes == 0) else ctx.fd.writeAll(buf)) catch |e|
|
||||
ui.die("Error writing to file: {s}.\n", .{ ui.errorString(e) });
|
||||
ctx.off = 0;
|
||||
}
|
||||
|
||||
fn ensureSpace(ctx: *Writer, bytes: usize) void {
|
||||
if (bytes > ctx.buf.len - ctx.off) ctx.flush(bytes);
|
||||
}
|
||||
|
||||
fn write(ctx: *Writer, s: []const u8) void {
|
||||
@memcpy(ctx.buf[ctx.off..][0..s.len], s);
|
||||
ctx.off += s.len;
|
||||
}
|
||||
|
||||
fn writeByte(ctx: *Writer, b: u8) void {
|
||||
ctx.buf[ctx.off] = b;
|
||||
ctx.off += 1;
|
||||
}
|
||||
|
||||
// Write escaped string contents, excluding the quotes.
|
||||
fn writeStr(ctx: *Writer, s: []const u8) void {
|
||||
for (s) |b| {
|
||||
if (b >= 0x20 and b != '"' and b != '\\' and b != 127) ctx.writeByte(b)
|
||||
else switch (b) {
|
||||
'\n' => ctx.write("\\n"),
|
||||
'\r' => ctx.write("\\r"),
|
||||
0x8 => ctx.write("\\b"),
|
||||
'\t' => ctx.write("\\t"),
|
||||
0xC => ctx.write("\\f"),
|
||||
'\\' => ctx.write("\\\\"),
|
||||
'"' => ctx.write("\\\""),
|
||||
else => {
|
||||
ctx.write("\\u00");
|
||||
const hexdig = "0123456789abcdef";
|
||||
ctx.writeByte(hexdig[b>>4]);
|
||||
ctx.writeByte(hexdig[b&0xf]);
|
||||
},
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn writeUint(ctx: *Writer, n: u64) void {
|
||||
// Based on std.fmt.formatInt
|
||||
var a = n;
|
||||
var buf: [24]u8 = undefined;
|
||||
var index: usize = buf.len;
|
||||
while (a >= 100) : (a = @divTrunc(a, 100)) {
|
||||
index -= 2;
|
||||
buf[index..][0..2].* = std.fmt.digits2(@as(u8, @intCast(a % 100)));
|
||||
}
|
||||
if (a < 10) {
|
||||
index -= 1;
|
||||
buf[index] = '0' + @as(u8, @intCast(a));
|
||||
} else {
|
||||
index -= 2;
|
||||
buf[index..][0..2].* = std.fmt.digits2(@as(u8, @intCast(a)));
|
||||
}
|
||||
ctx.write(buf[index..]);
|
||||
}
|
||||
|
||||
fn init(out: std.fs.File) *Writer {
|
||||
var ctx = main.allocator.create(Writer) catch unreachable;
|
||||
ctx.* = .{ .fd = out };
|
||||
if (main.config.compress) ctx.zstd = ZstdWriter.create();
|
||||
ctx.write("[1,2,{\"progname\":\"ncdu\",\"progver\":\"" ++ main.program_version ++ "\",\"timestamp\":");
|
||||
ctx.writeUint(@intCast(@max(0, std.time.timestamp())));
|
||||
ctx.writeByte('}');
|
||||
return ctx;
|
||||
}
|
||||
|
||||
// A newly written directory entry is left "open", i.e. the '}' to close
|
||||
// the item object is not written, to allow for a setReadError() to be
|
||||
// caught if one happens before the first sub entry.
|
||||
// Any read errors after the first sub entry are thrown away, but that's
|
||||
// just a limitation of the JSON format.
|
||||
fn closeDirEntry(ctx: *Writer, rderr: bool) void {
|
||||
if (ctx.dir_entry_open) {
|
||||
ctx.dir_entry_open = false;
|
||||
if (rderr) ctx.write(",\"read_error\":true");
|
||||
ctx.writeByte('}');
|
||||
}
|
||||
}
|
||||
|
||||
fn writeSpecial(ctx: *Writer, name: []const u8, t: model.EType) void {
|
||||
ctx.closeDirEntry(false);
|
||||
ctx.ensureSpace(name.len*6 + 1000);
|
||||
ctx.write(if (t.isDirectory()) ",\n[{\"name\":\"" else ",\n{\"name\":\"");
|
||||
ctx.writeStr(name);
|
||||
ctx.write(switch (t) {
|
||||
.err => "\",\"read_error\":true}",
|
||||
.otherfs => "\",\"excluded\":\"otherfs\"}",
|
||||
.kernfs => "\",\"excluded\":\"kernfs\"}",
|
||||
.pattern => "\",\"excluded\":\"pattern\"}",
|
||||
else => unreachable,
|
||||
});
|
||||
if (t.isDirectory()) ctx.writeByte(']');
|
||||
}
|
||||
|
||||
fn writeStat(ctx: *Writer, name: []const u8, stat: *const sink.Stat, parent_dev: u64) void {
|
||||
ctx.ensureSpace(name.len*6 + 1000);
|
||||
ctx.write(if (stat.etype == .dir) ",\n[{\"name\":\"" else ",\n{\"name\":\"");
|
||||
ctx.writeStr(name);
|
||||
ctx.writeByte('"');
|
||||
if (stat.size > 0) {
|
||||
ctx.write(",\"asize\":");
|
||||
ctx.writeUint(stat.size);
|
||||
}
|
||||
if (stat.blocks > 0) {
|
||||
ctx.write(",\"dsize\":");
|
||||
ctx.writeUint(util.blocksToSize(stat.blocks));
|
||||
}
|
||||
if (stat.etype == .dir and stat.dev != parent_dev) {
|
||||
ctx.write(",\"dev\":");
|
||||
ctx.writeUint(stat.dev);
|
||||
}
|
||||
if (stat.etype == .link) {
|
||||
ctx.write(",\"ino\":");
|
||||
ctx.writeUint(stat.ino);
|
||||
ctx.write(",\"hlnkc\":true,\"nlink\":");
|
||||
ctx.writeUint(stat.nlink);
|
||||
}
|
||||
if (stat.etype == .nonreg) ctx.write(",\"notreg\":true");
|
||||
if (main.config.extended) {
|
||||
if (stat.ext.pack.hasuid) {
|
||||
ctx.write(",\"uid\":");
|
||||
ctx.writeUint(stat.ext.uid);
|
||||
}
|
||||
if (stat.ext.pack.hasgid) {
|
||||
ctx.write(",\"gid\":");
|
||||
ctx.writeUint(stat.ext.gid);
|
||||
}
|
||||
if (stat.ext.pack.hasmode) {
|
||||
ctx.write(",\"mode\":");
|
||||
ctx.writeUint(stat.ext.mode);
|
||||
}
|
||||
if (stat.ext.pack.hasmtime) {
|
||||
ctx.write(",\"mtime\":");
|
||||
ctx.writeUint(stat.ext.mtime);
|
||||
}
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
pub const Dir = struct {
|
||||
dev: u64,
|
||||
|
||||
pub fn addSpecial(_: *Dir, name: []const u8, sp: model.EType) void {
|
||||
global.writer.writeSpecial(name, sp);
|
||||
}
|
||||
|
||||
pub fn addStat(_: *Dir, name: []const u8, stat: *const sink.Stat) void {
|
||||
global.writer.closeDirEntry(false);
|
||||
global.writer.writeStat(name, stat, undefined);
|
||||
global.writer.writeByte('}');
|
||||
}
|
||||
|
||||
pub fn addDir(d: *Dir, name: []const u8, stat: *const sink.Stat) Dir {
|
||||
global.writer.closeDirEntry(false);
|
||||
global.writer.writeStat(name, stat, d.dev);
|
||||
global.writer.dir_entry_open = true;
|
||||
return .{ .dev = stat.dev };
|
||||
}
|
||||
|
||||
pub fn setReadError(_: *Dir) void {
|
||||
global.writer.closeDirEntry(true);
|
||||
}
|
||||
|
||||
pub fn final(_: *Dir) void {
|
||||
global.writer.ensureSpace(1000);
|
||||
global.writer.closeDirEntry(false);
|
||||
global.writer.writeByte(']');
|
||||
}
|
||||
};
|
||||
|
||||
pub fn createRoot(path: []const u8, stat: *const sink.Stat) Dir {
|
||||
var root = Dir{.dev=0};
|
||||
return root.addDir(path, stat);
|
||||
}
|
||||
|
||||
pub fn done() void {
|
||||
global.writer.write("]\n");
|
||||
global.writer.flush(0);
|
||||
if (global.writer.zstd) |z| z.destroy();
|
||||
global.writer.fd.close();
|
||||
main.allocator.destroy(global.writer);
|
||||
}
|
||||
|
||||
pub fn setupOutput(out: std.fs.File) void {
|
||||
global.writer = Writer.init(out);
|
||||
}
|
||||
562
src/json_import.zig
Normal file
562
src/json_import.zig
Normal file
|
|
@ -0,0 +1,562 @@
|
|||
// SPDX-FileCopyrightText: Yorhel <projects@yorhel.nl>
|
||||
// SPDX-License-Identifier: MIT
|
||||
|
||||
const std = @import("std");
|
||||
const main = @import("main.zig");
|
||||
const util = @import("util.zig");
|
||||
const model = @import("model.zig");
|
||||
const sink = @import("sink.zig");
|
||||
const ui = @import("ui.zig");
|
||||
const c = @import("c.zig").c;
|
||||
|
||||
|
||||
const ZstdReader = struct {
|
||||
ctx: ?*c.ZSTD_DStream,
|
||||
in: c.ZSTD_inBuffer,
|
||||
lastret: usize = 0,
|
||||
inbuf: [c.ZSTD_BLOCKSIZE_MAX + 16]u8, // This ZSTD_DStreamInSize() + a little bit extra
|
||||
|
||||
fn create(head: []const u8) *ZstdReader {
|
||||
const r = main.allocator.create(ZstdReader) catch unreachable;
|
||||
@memcpy(r.inbuf[0..head.len], head);
|
||||
r.in = .{
|
||||
.src = &r.inbuf,
|
||||
.size = head.len,
|
||||
.pos = 0,
|
||||
};
|
||||
while (true) {
|
||||
r.ctx = c.ZSTD_createDStream();
|
||||
if (r.ctx != null) break;
|
||||
ui.oom();
|
||||
}
|
||||
return r;
|
||||
}
|
||||
|
||||
fn destroy(r: *ZstdReader) void {
|
||||
_ = c.ZSTD_freeDStream(r.ctx);
|
||||
main.allocator.destroy(r);
|
||||
}
|
||||
|
||||
fn read(r: *ZstdReader, f: std.fs.File, out: []u8) !usize {
|
||||
while (true) {
|
||||
if (r.in.size == r.in.pos) {
|
||||
r.in.pos = 0;
|
||||
r.in.size = try f.read(&r.inbuf);
|
||||
if (r.in.size == 0) {
|
||||
if (r.lastret == 0) return 0;
|
||||
return error.ZstdDecompressError; // Early EOF
|
||||
}
|
||||
}
|
||||
|
||||
var arg = c.ZSTD_outBuffer{ .dst = out.ptr, .size = out.len, .pos = 0 };
|
||||
r.lastret = c.ZSTD_decompressStream(r.ctx, &arg, &r.in);
|
||||
if (c.ZSTD_isError(r.lastret) != 0) return error.ZstdDecompressError;
|
||||
if (arg.pos > 0) return arg.pos;
|
||||
}
|
||||
}
|
||||
};


// Using a custom JSON parser here because, while std.json is great, it does
// perform strict UTF-8 validation. Which is correct, of course, but ncdu dumps
// are not always correct JSON as they may contain non-UTF-8 paths encoded as
// strings.

const Parser = struct {
    rd: std.fs.File,
    zstd: ?*ZstdReader = null,
    rdoff: usize = 0,
    rdsize: usize = 0,
    byte: u64 = 1,
    line: u64 = 1,
    buf: [129*1024]u8 = undefined,

    fn die(p: *Parser, str: []const u8) noreturn {
        ui.die("Error importing file on line {}:{}: {s}.\n", .{ p.line, p.byte, str });
    }

    // Feed back a byte that has just been returned by nextByte()
    fn undoNextByte(p: *Parser, b: u8) void {
        p.byte -= 1;
        p.rdoff -= 1;
        p.buf[p.rdoff] = b;
    }

    fn fill(p: *Parser) void {
        p.rdoff = 0;
        p.rdsize = (if (p.zstd) |z| z.read(p.rd, &p.buf) else p.rd.read(&p.buf)) catch |e| switch (e) {
            error.IsDir => p.die("not a file"), // should be detected at open() time, but no flag for that...
            error.SystemResources => p.die("out of memory"),
            error.ZstdDecompressError => p.die("decompression error"),
            else => p.die("I/O error"),
        };
    }

    // Returns 0 on EOF.
    // (or if the file contains a 0 byte, but that's invalid anyway)
    // (Returning a '?u8' here is nicer but kills performance by about +30%)
    fn nextByte(p: *Parser) u8 {
        if (p.rdoff == p.rdsize) {
            @branchHint(.unlikely);
            p.fill();
            if (p.rdsize == 0) return 0;
        }
        p.byte += 1;
        defer p.rdoff += 1;
        return (&p.buf)[p.rdoff];
    }

    // next non-whitespace byte
    fn nextChr(p: *Parser) u8 {
        while (true) switch (p.nextByte()) {
            '\n' => {
                p.line += 1;
                p.byte = 1;
            },
            ' ', '\t', '\r' => {},
            else => |b| return b,
        };
    }

    fn expectLit(p: *Parser, lit: []const u8) void {
        for (lit) |b| if (b != p.nextByte()) p.die("invalid JSON");
    }

    fn hexdig(p: *Parser) u16 {
        const b = p.nextByte();
        return switch (b) {
            '0'...'9' => b - '0',
            'a'...'f' => b - 'a' + 10,
            'A'...'F' => b - 'A' + 10,
            else => p.die("invalid hex digit"),
        };
    }

    fn stringContentSlow(p: *Parser, buf: []u8, head: u8, off: usize) []u8 {
        @branchHint(.unlikely);
        var b = head;
        var n = off;
        while (true) {
            switch (b) {
                '"' => break,
                '\\' => switch (p.nextByte()) {
                    '"' => if (n < buf.len) { buf[n] = '"'; n += 1; },
                    '\\' => if (n < buf.len) { buf[n] = '\\'; n += 1; },
                    '/' => if (n < buf.len) { buf[n] = '/'; n += 1; },
                    'b' => if (n < buf.len) { buf[n] = 0x8; n += 1; },
                    'f' => if (n < buf.len) { buf[n] = 0xc; n += 1; },
                    'n' => if (n < buf.len) { buf[n] = 0xa; n += 1; },
                    'r' => if (n < buf.len) { buf[n] = 0xd; n += 1; },
                    't' => if (n < buf.len) { buf[n] = 0x9; n += 1; },
                    'u' => {
                        const first = (p.hexdig()<<12) + (p.hexdig()<<8) + (p.hexdig()<<4) + p.hexdig();
                        var unit = @as(u21, first);
                        if (std.unicode.utf16IsLowSurrogate(first)) p.die("Unexpected low surrogate");
                        if (std.unicode.utf16IsHighSurrogate(first)) {
                            p.expectLit("\\u");
                            const second = (p.hexdig()<<12) + (p.hexdig()<<8) + (p.hexdig()<<4) + p.hexdig();
                            unit = std.unicode.utf16DecodeSurrogatePair(&.{first, second}) catch p.die("Invalid low surrogate");
                        }
                        if (n + 6 < buf.len)
                            n += std.unicode.utf8Encode(unit, buf[n..n+5]) catch unreachable;
                    },
                    else => p.die("invalid escape sequence"),
                },
                0x20, 0x21, 0x23...0x5b, 0x5d...0xff => if (n < buf.len) { buf[n] = b; n += 1; },
                else => p.die("invalid character in string"),
            }
            b = p.nextByte();
        }
        return buf[0..n];
    }

    // Read a string (after the ") into buf.
    // Any characters beyond the size of the buffer are consumed but otherwise discarded.
    fn stringContent(p: *Parser, buf: []u8) []u8 {
        // The common case (for ncdu dumps): string fits in the given buffer and does not contain any escapes.
        var n: usize = 0;
        var b = p.nextByte();
        while (n < buf.len and b >= 0x20 and b != '"' and b != '\\') {
            buf[n] = b;
            n += 1;
            b = p.nextByte();
        }
        if (b == '"') return buf[0..n];
        return p.stringContentSlow(buf, b, n);
    }

    fn string(p: *Parser, buf: []u8) []u8 {
        if (p.nextChr() != '"') p.die("expected string");
        return p.stringContent(buf);
    }

    fn uintTail(p: *Parser, head: u8, T: anytype) T {
        if (head == '0') return 0;
        var v: T = head - '0'; // Assumption: T >= u8
        // Assumption: we don't parse JSON "documents" that are a bare uint.
        while (true) switch (p.nextByte()) {
            '0'...'9' => |b| {
                const newv = v *% 10 +% (b - '0');
                if (newv < v) p.die("integer out of range");
                v = newv;
            },
            else => |b| break p.undoNextByte(b),
        };
        if (v == 0) p.die("expected number");
        return v;
    }

    fn uint(p: *Parser, T: anytype) T {
        switch (p.nextChr()) {
            '0'...'9' => |b| return p.uintTail(b, T),
            else => p.die("expected number"),
        }
    }

    fn boolean(p: *Parser) bool {
        switch (p.nextChr()) {
            't' => { p.expectLit("rue"); return true; },
            'f' => { p.expectLit("alse"); return false; },
            else => p.die("expected boolean"),
        }
    }

    fn obj(p: *Parser) void {
        if (p.nextChr() != '{') p.die("expected object");
    }

    fn key(p: *Parser, first: bool, buf: []u8) ?[]u8 {
        const k = switch (p.nextChr()) {
            ',' => blk: {
                if (first) p.die("invalid JSON");
                break :blk p.string(buf);
            },
            '"' => blk: {
                if (!first) p.die("invalid JSON");
                break :blk p.stringContent(buf);
            },
            '}' => return null,
            else => p.die("invalid JSON"),
        };
        if (p.nextChr() != ':') p.die("invalid JSON");
        return k;
    }

    fn array(p: *Parser) void {
        if (p.nextChr() != '[') p.die("expected array");
    }

    fn elem(p: *Parser, first: bool) bool {
        switch (p.nextChr()) {
            ',' => if (first) p.die("invalid JSON") else return true,
            ']' => return false,
            else => |b| {
                if (!first) p.die("invalid JSON");
                p.undoNextByte(b);
                return true;
            },
        }
    }

    fn skipContent(p: *Parser, head: u8) void {
        switch (head) {
            't' => p.expectLit("rue"),
            'f' => p.expectLit("alse"),
            'n' => p.expectLit("ull"),
            '-', '0'...'9' =>
                // Numbers are kind of annoying, this "parsing" is invalid and ultra-lazy.
                while (true) switch (p.nextByte()) {
                    '-', '+', 'e', 'E', '.', '0'...'9' => {},
                    else => |b| return p.undoNextByte(b),
                },
            '"' => _ = p.stringContent(&[0]u8{}),
            '[' => {
                var first = true;
                while (p.elem(first)) {
                    first = false;
                    p.skip();
                }
            },
            '{' => {
                var first = true;
                while (p.key(first, &[0]u8{})) |_| {
                    first = false;
                    p.skip();
                }
            },
            else => p.die("invalid JSON"),
        }
    }

    fn skip(p: *Parser) void {
        p.skipContent(p.nextChr());
    }

    fn eof(p: *Parser) void {
        if (p.nextChr() != 0) p.die("trailing garbage");
    }
};
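
The non-UTF-8 tolerance described above is easy to exercise; a minimal sketch (not part of this commit) that feeds the parser a string containing raw invalid bytes, which std.json would reject:

    test "non-UTF-8 string passthrough (sketch)" {
        const json = "\"a\xff\xfeb\""; // 0xff 0xfe is not valid UTF-8
        var p = Parser{ .rd = undefined, .rdsize = json.len };
        @memcpy(p.buf[0..json.len], json);
        var buf: [16]u8 = undefined;
        // The raw bytes come back unchanged.
        try std.testing.expectEqualStrings("a\xff\xfeb", p.string(&buf));
    }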


// Should really add some invalid JSON test cases as well, but I'd first like
// to benchmark the performance impact of using error returns instead of
// calling ui.die().
test "JSON parser" {
    const json =
        \\{
        \\ "null": null,
        \\ "true": true,
        \\ "false": false,
        \\ "zero":0 ,"uint": 123,
        \\ "emptyObj": {},
        \\ "emptyArray": [],
        \\ "emptyString": "",
        \\ "encString": "\"\\\/\b\f\n\uBe3F",
        \\ "numbers": [0,1,20,-300, 3.4 ,0e-10 , -100.023e+13 ]
        \\}
    ;
    var p = Parser{ .rd = undefined, .rdsize = json.len };
    @memcpy(p.buf[0..json.len], json);
    p.skip();

    p = Parser{ .rd = undefined, .rdsize = json.len };
    @memcpy(p.buf[0..json.len], json);
    var buf: [128]u8 = undefined;
    p.obj();

    try std.testing.expectEqualStrings(p.key(true, &buf).?, "null");
    p.skip();

    try std.testing.expectEqualStrings(p.key(false, &buf).?, "true");
    try std.testing.expect(p.boolean());

    try std.testing.expectEqualStrings(p.key(false, &buf).?, "false");
    try std.testing.expect(!p.boolean());

    try std.testing.expectEqualStrings(p.key(false, &buf).?, "zero");
    try std.testing.expectEqual(0, p.uint(u8));

    try std.testing.expectEqualStrings(p.key(false, &buf).?, "uint");
    try std.testing.expectEqual(123, p.uint(u8));

    try std.testing.expectEqualStrings(p.key(false, &buf).?, "emptyObj");
    p.obj();
    try std.testing.expect(p.key(true, &buf) == null);

    try std.testing.expectEqualStrings(p.key(false, &buf).?, "emptyArray");
    p.array();
    try std.testing.expect(!p.elem(true));

    try std.testing.expectEqualStrings(p.key(false, &buf).?, "emptyString");
    try std.testing.expectEqualStrings(p.string(&buf), "");

    try std.testing.expectEqualStrings(p.key(false, &buf).?, "encString");
    try std.testing.expectEqualStrings(p.string(&buf), "\"\\/\x08\x0c\n\u{be3f}");

    try std.testing.expectEqualStrings(p.key(false, &buf).?, "numbers");
    p.skip();

    try std.testing.expect(p.key(true, &buf) == null);
}


const Ctx = struct {
    p: *Parser,
    sink: *sink.Thread,
    stat: sink.Stat = .{},
    rderr: bool = false,
    namelen: usize = 0,
    namebuf: [32*1024]u8 = undefined,
};


fn itemkey(ctx: *Ctx, key: []const u8) void {
    const eq = std.mem.eql;
    switch (if (key.len > 0) key[0] else @as(u8,0)) {
        'a' => {
            if (eq(u8, key, "asize")) {
                ctx.stat.size = ctx.p.uint(u64);
                return;
            }
        },
        'd' => {
            if (eq(u8, key, "dsize")) {
                ctx.stat.blocks = @intCast(ctx.p.uint(u64)>>9);
                return;
            }
            if (eq(u8, key, "dev")) {
                ctx.stat.dev = ctx.p.uint(u64);
                return;
            }
        },
        'e' => {
            if (eq(u8, key, "excluded")) {
                var buf: [32]u8 = undefined;
                const typ = ctx.p.string(&buf);
                // "frmlnk" is also possible, but currently considered equivalent to "pattern".
                ctx.stat.etype =
                    if (eq(u8, typ, "otherfs") or eq(u8, typ, "othfs")) .otherfs
                    else if (eq(u8, typ, "kernfs")) .kernfs
                    else .pattern;
                return;
            }
        },
        'g' => {
            if (eq(u8, key, "gid")) {
                ctx.stat.ext.gid = ctx.p.uint(u32);
                ctx.stat.ext.pack.hasgid = true;
                return;
            }
        },
        'h' => {
            if (eq(u8, key, "hlnkc")) {
                if (ctx.p.boolean()) ctx.stat.etype = .link;
                return;
            }
        },
        'i' => {
            if (eq(u8, key, "ino")) {
                ctx.stat.ino = ctx.p.uint(u64);
                return;
            }
        },
        'm' => {
            if (eq(u8, key, "mode")) {
                ctx.stat.ext.mode = ctx.p.uint(u16);
                ctx.stat.ext.pack.hasmode = true;
                return;
            }
            if (eq(u8, key, "mtime")) {
                ctx.stat.ext.mtime = ctx.p.uint(u64);
                ctx.stat.ext.pack.hasmtime = true;
                // Accept decimal numbers, but discard the fractional part because our data model doesn't support it.
                switch (ctx.p.nextByte()) {
                    '.' =>
                        while (true) switch (ctx.p.nextByte()) {
                            '0'...'9' => {},
                            else => |b| return ctx.p.undoNextByte(b),
                        },
                    else => |b| return ctx.p.undoNextByte(b),
                }
            }
        },
        'n' => {
            if (eq(u8, key, "name")) {
                if (ctx.namelen != 0) ctx.p.die("duplicate key");
                ctx.namelen = ctx.p.string(&ctx.namebuf).len;
                if (ctx.namelen > ctx.namebuf.len-5) ctx.p.die("too long file name");
                return;
            }
            if (eq(u8, key, "nlink")) {
                ctx.stat.nlink = ctx.p.uint(u31);
                if (ctx.stat.etype != .dir and ctx.stat.nlink > 1)
                    ctx.stat.etype = .link;
                return;
            }
            if (eq(u8, key, "notreg")) {
                if (ctx.p.boolean()) ctx.stat.etype = .nonreg;
                return;
            }
        },
        'r' => {
            if (eq(u8, key, "read_error")) {
                if (ctx.p.boolean()) {
                    if (ctx.stat.etype == .dir) ctx.rderr = true
                    else ctx.stat.etype = .err;
                }
                return;
            }
        },
        'u' => {
            if (eq(u8, key, "uid")) {
                ctx.stat.ext.uid = ctx.p.uint(u32);
                ctx.stat.ext.pack.hasuid = true;
                return;
            }
        },
        else => {},
    }
    ctx.p.skip();
}


fn item(ctx: *Ctx, parent: ?*sink.Dir, dev: u64) void {
    ctx.stat = .{ .dev = dev };
    ctx.namelen = 0;
    ctx.rderr = false;
    const isdir = switch (ctx.p.nextChr()) {
        '[' => blk: {
            ctx.p.obj();
            break :blk true;
        },
        '{' => false,
        else => ctx.p.die("expected object or array"),
    };
    if (parent == null and !isdir) ctx.p.die("parent item must be a directory");
    ctx.stat.etype = if (isdir) .dir else .reg;

    var keybuf: [32]u8 = undefined;
    var first = true;
    while (ctx.p.key(first, &keybuf)) |k| {
        first = false;
        itemkey(ctx, k);
    }
    if (ctx.namelen == 0) ctx.p.die("missing \"name\" field");
    const name = (&ctx.namebuf)[0..ctx.namelen];

    if (ctx.stat.etype == .dir) {
        const ndev = ctx.stat.dev;
        const dir =
            if (parent) |d| d.addDir(ctx.sink, name, &ctx.stat)
            else sink.createRoot(name, &ctx.stat);
        ctx.sink.setDir(dir);
        if (ctx.rderr) dir.setReadError(ctx.sink);
        while (ctx.p.elem(false)) item(ctx, dir, ndev);
        ctx.sink.setDir(parent);
        dir.unref(ctx.sink);

    } else {
        if (@intFromEnum(ctx.stat.etype) < 0)
            parent.?.addSpecial(ctx.sink, name, ctx.stat.etype)
        else
            parent.?.addStat(ctx.sink, name, &ctx.stat);
        if (isdir and ctx.p.elem(false)) ctx.p.die("unexpected contents in an excluded directory");
    }

    if ((ctx.sink.files_seen.load(.monotonic) & 65) == 0)
        main.handleEvent(false, false);
}


pub fn import(fd: std.fs.File, head: []const u8) void {
    const sink_threads = sink.createThreads(1);
    defer sink.done();

    var p = Parser{.rd = fd};
    defer if (p.zstd) |z| z.destroy();

    if (head.len >= 4 and std.mem.eql(u8, head[0..4], "\x28\xb5\x2f\xfd")) {
        p.zstd = ZstdReader.create(head);
    } else {
        p.rdsize = head.len;
        @memcpy(p.buf[0..head.len], head);
    }
    p.array();
    if (p.uint(u16) != 1) p.die("incompatible major format version");
    if (!p.elem(false)) p.die("expected array element");
    _ = p.uint(u16); // minor version, ignored for now
    if (!p.elem(false)) p.die("expected array element");

    // metadata object
    p.obj();
    p.skipContent('{');

    // Items
    if (!p.elem(false)) p.die("expected array element");
    var ctx = Ctx{.p = &p, .sink = &sink_threads[0]};
    item(&ctx, null, 0);

    // accept more trailing elements
    while (p.elem(false)) p.skip();
    p.eof();
}
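
For reference, the input shape this function accepts is the ncdu dump format: a top-level array holding the major and minor version, a metadata object, and a root item, where a directory is itself an array whose first element describes the directory. A small hand-written example (all field values are illustrative, not from the source):

    [1, 2, {"progname": "ncdu"},
      [{"name": "/tmp/scan", "asize": 4096, "dsize": 4096, "dev": 2049},
        {"name": "file.txt", "asize": 13, "dsize": 4096, "ino": 123, "mtime": 1700000000},
        [{"name": "subdir"},
          {"name": "nested.dat", "asize": 100, "dsize": 4096}]]]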

src/main.zig (459 lines changed)

@@ -1,26 +1,51 @@
-// SPDX-FileCopyrightText: 2021-2022 Yoran Heling <projects@yorhel.nl>
+// SPDX-FileCopyrightText: Yorhel <projects@yorhel.nl>
 // SPDX-License-Identifier: MIT

-pub const program_version = "2.1";
+pub const program_version = "2.9.2";

 const std = @import("std");
 const model = @import("model.zig");
 const scan = @import("scan.zig");
+const json_import = @import("json_import.zig");
+const json_export = @import("json_export.zig");
+const bin_export = @import("bin_export.zig");
+const bin_reader = @import("bin_reader.zig");
+const sink = @import("sink.zig");
+const mem_src = @import("mem_src.zig");
+const mem_sink = @import("mem_sink.zig");
 const ui = @import("ui.zig");
 const browser = @import("browser.zig");
 const delete = @import("delete.zig");
 const util = @import("util.zig");
-const c = @cImport(@cInclude("locale.h"));
+const exclude = @import("exclude.zig");
+const c = @import("c.zig").c;

 test "imports" {
     _ = model;
     _ = scan;
+    _ = json_import;
+    _ = json_export;
+    _ = bin_export;
+    _ = bin_reader;
+    _ = sink;
+    _ = mem_src;
+    _ = mem_sink;
     _ = ui;
     _ = browser;
     _ = delete;
     _ = util;
+    _ = exclude;
 }

 // "Custom" allocator that wraps the libc allocator and calls ui.oom() on error.
 // This allocator never returns an error, it either succeeds or causes ncdu to quit.
 // (Which means you'll find a lot of "catch unreachable" sprinkled through the code,
 // they look scarier than they are)
-fn wrapAlloc(_: *anyopaque, len: usize, alignment: u29, len_align: u29, return_address: usize) error{OutOfMemory}![]u8 {
+fn wrapAlloc(_: *anyopaque, len: usize, ptr_alignment: std.mem.Alignment, return_address: usize) ?[*]u8 {
     while (true) {
-        if (std.heap.c_allocator.vtable.alloc(undefined, len, alignment, len_align, return_address)) |r|
+        if (std.heap.c_allocator.vtable.alloc(undefined, len, ptr_alignment, return_address)) |r|
             return r
-        else |_| {}
+        else {}
         ui.oom();
     }
 }
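
Because the loop above only returns once an allocation has succeeded (or ncdu has exited via ui.oom()), allocation failure can never propagate to call sites, which is what makes the "catch unreachable" pattern mentioned in the comment safe; an illustrative line, not from the diff, with `some_path` as a hypothetical input:

    const copy = allocator.dupeZ(u8, some_path) catch unreachable; // wrapAlloc never fails, so this is truly unreachable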

@@ -31,11 +56,21 @@ pub const allocator = std.mem.Allocator{
         .alloc = wrapAlloc,
         // AFAIK, all uses of resize() to grow an allocation will fall back to alloc() on failure.
         .resize = std.heap.c_allocator.vtable.resize,
+        .remap = std.heap.c_allocator.vtable.remap,
         .free = std.heap.c_allocator.vtable.free,
     },
 };

+// Custom panic impl to reset the terminal before spewing out an error message.
+pub const panic = std.debug.FullPanic(struct {
+    pub fn panicFn(msg: []const u8, first_trace_addr: ?usize) noreturn {
+        @branchHint(.cold);
+        ui.deinit();
+        std.debug.defaultPanic(msg, first_trace_addr);
+    }
+}.panicFn);
+
 pub const config = struct {
     pub const SortCol = enum { name, blocks, size, items, mtime };
     pub const SortOrder = enum { asc, desc };

@@ -45,7 +80,10 @@ pub const config = struct {
     pub var follow_symlinks: bool = false;
     pub var exclude_caches: bool = false;
     pub var exclude_kernfs: bool = false;
-    pub var exclude_patterns: std.ArrayList([:0]const u8) = std.ArrayList([:0]const u8).init(allocator);
+    pub var threads: usize = 1;
+    pub var complevel: u8 = 4;
+    pub var compress: bool = false;
+    pub var export_block_size: ?usize = null;

     pub var update_delay: u64 = 100*std.time.ns_per_ms;
     pub var scan_ui: ?enum { none, line, full } = null;

@@ -61,22 +99,28 @@ pub const config = struct {
     pub var show_mtime: bool = false;
     pub var show_graph: bool = true;
     pub var show_percent: bool = false;
-    pub var graph_style: enum { hash, half, eigth } = .half;
+    pub var graph_style: enum { hash, half, eighth } = .hash;
     pub var sort_col: SortCol = .blocks;
     pub var sort_order: SortOrder = .desc;
     pub var sort_dirsfirst: bool = false;
+    pub var sort_natural: bool = true;

     pub var imported: bool = false;
+    pub var binreader: bool = false;
     pub var can_delete: ?bool = null;
     pub var can_shell: ?bool = null;
     pub var can_refresh: ?bool = null;
     pub var confirm_quit: bool = false;
+    pub var confirm_delete: bool = true;
+    pub var ignore_delete_errors: bool = false;
+    pub var delete_command: [:0]const u8 = "";
 };

 pub var state: enum { scan, browse, refresh, shell, delete } = .scan;

+const stdin = if (@hasDecl(std.io, "getStdIn")) std.io.getStdIn() else std.fs.File.stdin();
+const stdout = if (@hasDecl(std.io, "getStdOut")) std.io.getStdOut() else std.fs.File.stdout();
+
 // Simple generic argument parser, supports getopt_long() style arguments.
 const Args = struct {
     lst: []const [:0]const u8,

@@ -85,6 +129,7 @@ const Args = struct {
     last_arg: ?[:0]const u8 = null, // In the case of --option=<arg>
     shortbuf: [2]u8 = undefined,
     argsep: bool = false,
+    ignerror: bool = false,

     const Self = @This();
     const Option = struct {

@@ -114,22 +159,27 @@ const Args = struct {
         return .{ .opt = true, .val = &self.shortbuf };
     }

+    pub fn die(self: *const Self, comptime msg: []const u8, args: anytype) !noreturn {
+        if (self.ignerror) return error.InvalidArg;
+        ui.die(msg, args);
+    }
+
     /// Return the next option or positional argument.
     /// 'opt' indicates whether it's an option or positional argument,
     /// 'val' will be either -x, --something or the argument.
-    pub fn next(self: *Self) ?Option {
-        if (self.last_arg != null) ui.die("Option '{s}' does not expect an argument.\n", .{ self.last.? });
+    pub fn next(self: *Self) !?Option {
+        if (self.last_arg != null) try self.die("Option '{s}' does not expect an argument.\n", .{ self.last.? });
         if (self.short) |s| return self.shortopt(s);
         const val = self.pop() orelse return null;
         if (self.argsep or val.len == 0 or val[0] != '-') return Option{ .opt = false, .val = val };
-        if (val.len == 1) ui.die("Invalid option '-'.\n", .{});
+        if (val.len == 1) try self.die("Invalid option '-'.\n", .{});
         if (val.len == 2 and val[1] == '-') {
             self.argsep = true;
             return self.next();
         }
         if (val[1] == '-') {
             if (std.mem.indexOfScalar(u8, val, '=')) |sep| {
-                if (sep == 2) ui.die("Invalid option '{s}'.\n", .{val});
+                if (sep == 2) try self.die("Invalid option '{s}'.\n", .{val});
                 self.last_arg = val[sep+1.. :0];
                 self.last = val[0..sep];
                 return Option{ .opt = true, .val = self.last.? };

@@ -141,7 +191,7 @@ const Args = struct {
     }

     /// Returns the argument given to the last returned option. Dies with an error if no argument is provided.
-    pub fn arg(self: *Self) [:0]const u8 {
+    pub fn arg(self: *Self) ![:0]const u8 {
         if (self.short) |a| {
             defer self.short = null;
             return a;

@@ -151,11 +201,11 @@ const Args = struct {
             return a;
         }
         if (self.pop()) |o| return o;
-        ui.die("Option '{s}' requires an argument.\n", .{ self.last.? });
+        try self.die("Option '{s}' requires an argument.\n", .{ self.last.? });
     }
 };

-fn argConfig(args: *Args, opt: Args.Option) bool {
+fn argConfig(args: *Args, opt: Args.Option, infile: bool) !void {
     if (opt.is("-q") or opt.is("--slow-ui-updates")) config.update_delay = 2*std.time.ns_per_s
     else if (opt.is("--fast-ui-updates")) config.update_delay = 100*std.time.ns_per_ms
     else if (opt.is("-x") or opt.is("--one-file-system")) config.same_fs = true

@@ -182,14 +232,16 @@ fn argConfig(args: *Args, opt: Args.Option) bool {
     else if (opt.is("--hide-percent")) config.show_percent = false
     else if (opt.is("--group-directories-first")) config.sort_dirsfirst = true
     else if (opt.is("--no-group-directories-first")) config.sort_dirsfirst = false
+    else if (opt.is("--enable-natsort")) config.sort_natural = true
+    else if (opt.is("--disable-natsort")) config.sort_natural = false
     else if (opt.is("--graph-style")) {
-        const val = args.arg();
+        const val = try args.arg();
         if (std.mem.eql(u8, val, "hash")) config.graph_style = .hash
         else if (std.mem.eql(u8, val, "half-block")) config.graph_style = .half
-        else if (std.mem.eql(u8, val, "eigth-block")) config.graph_style = .eigth
-        else ui.die("Unknown --graph-style option: {s}.\n", .{val});
+        else if (std.mem.eql(u8, val, "eighth-block") or std.mem.eql(u8, val, "eigth-block")) config.graph_style = .eighth
+        else try args.die("Unknown --graph-style option: {s}.\n", .{val});
     } else if (opt.is("--sort")) {
-        var val: []const u8 = args.arg();
+        var val: []const u8 = try args.arg();
         var ord: ?config.SortOrder = null;
         if (std.mem.endsWith(u8, val, "-asc")) {
             val = val[0..val.len-4];

@@ -213,13 +265,13 @@ fn argConfig(args: *Args, opt: Args.Option) bool {
         } else if (std.mem.eql(u8, val, "mtime")) {
             config.sort_col = .mtime;
             config.sort_order = ord orelse .asc;
-        } else ui.die("Unknown --sort option: {s}.\n", .{val});
+        } else try args.die("Unknown --sort option: {s}.\n", .{val});
     } else if (opt.is("--shared-column")) {
-        const val = args.arg();
+        const val = try args.arg();
         if (std.mem.eql(u8, val, "off")) config.show_shared = .off
         else if (std.mem.eql(u8, val, "shared")) config.show_shared = .shared
         else if (std.mem.eql(u8, val, "unique")) config.show_shared = .unique
-        else ui.die("Unknown --shared-column option: {s}.\n", .{val});
+        else try args.die("Unknown --shared-column option: {s}.\n", .{val});
     } else if (opt.is("--apparent-size")) config.show_blocks = false
     else if (opt.is("--disk-usage")) config.show_blocks = true
     else if (opt.is("-0")) config.scan_ui = .none

@@ -229,26 +281,45 @@ fn argConfig(args: *Args, opt: Args.Option) bool {
     else if (opt.is("--no-si")) config.si = false
     else if (opt.is("-L") or opt.is("--follow-symlinks")) config.follow_symlinks = true
     else if (opt.is("--no-follow-symlinks")) config.follow_symlinks = false
-    else if (opt.is("--exclude")) config.exclude_patterns.append(allocator.dupeZ(u8, args.arg()) catch unreachable) catch unreachable
-    else if (opt.is("-X") or opt.is("--exclude-from")) {
-        const arg = args.arg();
-        readExcludeFile(arg) catch |e| ui.die("Error reading excludes from {s}: {s}.\n", .{ arg, ui.errorString(e) });
+    else if (opt.is("--exclude")) {
+        const arg = if (infile) (util.expanduser(try args.arg(), allocator) catch unreachable) else try args.arg();
+        defer if (infile) allocator.free(arg);
+        exclude.addPattern(arg);
+    } else if (opt.is("-X") or opt.is("--exclude-from")) {
+        const arg = if (infile) (util.expanduser(try args.arg(), allocator) catch unreachable) else try args.arg();
+        defer if (infile) allocator.free(arg);
+        readExcludeFile(arg) catch |e| try args.die("Error reading excludes from {s}: {s}.\n", .{ arg, ui.errorString(e) });
     } else if (opt.is("--exclude-caches")) config.exclude_caches = true
     else if (opt.is("--include-caches")) config.exclude_caches = false
     else if (opt.is("--exclude-kernfs")) config.exclude_kernfs = true
     else if (opt.is("--include-kernfs")) config.exclude_kernfs = false
-    else if (opt.is("--confirm-quit")) config.confirm_quit = true
+    else if (opt.is("-c") or opt.is("--compress")) config.compress = true
+    else if (opt.is("--no-compress")) config.compress = false
+    else if (opt.is("--compress-level")) {
+        const val = try args.arg();
+        const num = std.fmt.parseInt(u8, val, 10) catch try args.die("Invalid number for --compress-level: {s}.\n", .{val});
+        if (num <= 0 or num > 20) try args.die("Invalid number for --compress-level: {s}.\n", .{val});
+        config.complevel = num;
+    } else if (opt.is("--export-block-size")) {
+        const val = try args.arg();
+        const num = std.fmt.parseInt(u14, val, 10) catch try args.die("Invalid number for --export-block-size: {s}.\n", .{val});
+        if (num < 4 or num > 16000) try args.die("Invalid number for --export-block-size: {s}.\n", .{val});
+        config.export_block_size = @as(usize, num) * 1024;
+    } else if (opt.is("--confirm-quit")) config.confirm_quit = true
+    else if (opt.is("--no-confirm-quit")) config.confirm_quit = false
+    else if (opt.is("--confirm-delete")) config.confirm_delete = true
+    else if (opt.is("--no-confirm-delete")) config.confirm_delete = false
+    else if (opt.is("--delete-command")) config.delete_command = allocator.dupeZ(u8, try args.arg()) catch unreachable
     else if (opt.is("--color")) {
-        const val = args.arg();
+        const val = try args.arg();
         if (std.mem.eql(u8, val, "off")) config.ui_color = .off
         else if (std.mem.eql(u8, val, "dark")) config.ui_color = .dark
         else if (std.mem.eql(u8, val, "dark-bg")) config.ui_color = .darkbg
-        else ui.die("Unknown --color option: {s}.\n", .{val});
-    } else return false;
-    return true;
+        else try args.die("Unknown --color option: {s}.\n", .{val});
+    } else if (opt.is("-t") or opt.is("--threads")) {
+        const val = try args.arg();
+        config.threads = std.fmt.parseInt(u8, val, 10) catch try args.die("Invalid number of --threads: {s}.\n", .{val});
+    } else return error.UnknownOption;
 }

@@ -259,137 +330,142 @@ fn tryReadArgsFile(path: [:0]const u8) void {
     };
     defer f.close();

-    var arglist = std.ArrayList([:0]const u8).init(allocator);
-    var rd = std.io.bufferedReader(f.reader()).reader();
-    var linebuf: [4096]u8 = undefined;
+    var line_buf: [4096]u8 = undefined;
+    var line_rd = util.LineReader.init(f, &line_buf);

-    while (
-        rd.readUntilDelimiterOrEof(&linebuf, '\n')
-        catch |e| ui.die("Error reading from {s}: {s}\nRun with --ignore-config to skip reading config files.\n", .{ path, ui.errorString(e) })
-    ) |line_| {
-        var line = std.mem.trim(u8, line_, &std.ascii.spaces);
+    while (true) {
+        const line_ = (line_rd.read() catch |e|
+            ui.die("Error reading from {s}: {s}\nRun with --ignore-config to skip reading config files.\n", .{ path, ui.errorString(e) })
+        ) orelse break;
+
+        var argc: usize = 0;
+        var ignerror = false;
+        var arglist: [2][:0]const u8 = .{ "", "" };
+
+        var line = std.mem.trim(u8, line_, &std.ascii.whitespace);
+        if (line.len > 0 and line[0] == '@') {
+            ignerror = true;
+            line = line[1..];
+        }
         if (line.len == 0 or line[0] == '#') continue;
         if (std.mem.indexOfAny(u8, line, " \t=")) |i| {
-            arglist.append(allocator.dupeZ(u8, line[0..i]) catch unreachable) catch unreachable;
-            line = std.mem.trimLeft(u8, line[i+1..], &std.ascii.spaces);
-        }
-        arglist.append(allocator.dupeZ(u8, line) catch unreachable) catch unreachable;
+            arglist[argc] = allocator.dupeZ(u8, line[0..i]) catch unreachable;
+            argc += 1;
+            line = std.mem.trimLeft(u8, line[i+1..], &std.ascii.whitespace);
+        }
+        arglist[argc] = allocator.dupeZ(u8, line) catch unreachable;
+        argc += 1;

-        var args = Args.init(arglist.items);
-        while (args.next()) |opt| {
-            if (!argConfig(&args, opt))
+        var args = Args.init(arglist[0..argc]);
+        args.ignerror = ignerror;
+        while (args.next() catch null) |opt| {
+            if (argConfig(&args, opt, true)) |_| {}
+            else |_| {
+                if (ignerror) break;
                 ui.die("Unrecognized option in config file '{s}': {s}.\nRun with --ignore-config to skip reading config files.\n", .{path, opt.val});
+            }
         }
-        for (arglist.items) |i| allocator.free(i);
-        arglist.deinit();
+        allocator.free(arglist[0]);
+        if (argc == 2) allocator.free(arglist[1]);
     }
 }

 fn version() noreturn {
-    std.io.getStdOut().writer().writeAll("ncdu " ++ program_version ++ "\n") catch {};
+    stdout.writeAll("ncdu " ++ program_version ++ "\n") catch {};
     std.process.exit(0);
 }

 fn help() noreturn {
-    std.io.getStdOut().writer().writeAll(
+    stdout.writeAll(
         \\ncdu <options> <directory>
         \\
         \\Options:
-        \\  -h,--help                  This help message
-        \\  -q                         Quiet mode, refresh interval 2 seconds
-        \\  -v,-V,--version            Print version
-        \\  -x                         Same filesystem
-        \\  -e                         Enable extended information
-        \\  -r                         Read only
-        \\  -o FILE                    Export scanned directory to FILE
+        \\Mode selection:
+        \\  -h, --help                 This help message
+        \\  -v, -V, --version          Print version
         \\  -f FILE                    Import scanned directory from FILE
-        \\  -0,-1,-2                   UI to use when scanning (0=none,2=full ncurses)
-        \\  --si                       Use base 10 (SI) prefixes instead of base 2
-        \\  --exclude PATTERN          Exclude files that match PATTERN
-        \\  -X, --exclude-from FILE    Exclude files that match any pattern in FILE
-        \\  -L, --follow-symlinks      Follow symbolic links (excluding directories)
-        \\  --exclude-caches           Exclude directories containing CACHEDIR.TAG
-        \\  --exclude-kernfs           Exclude Linux pseudo filesystems (procfs,sysfs,cgroup,...)
-        \\  --confirm-quit             Confirm quitting ncdu
-        \\  --color SCHEME             Set color scheme (off/dark/dark-bg)
+        \\  -o FILE                    Export scanned directory to FILE in JSON format
+        \\  -O FILE                    Export scanned directory to FILE in binary format
+        \\  -e, --extended             Enable extended information
        \\  --ignore-config            Don't load config files
        \\
-        \\Refer to `man ncdu` for the full list of options.
+        \\Scan options:
+        \\  -x, --one-file-system      Stay on the same filesystem
+        \\  --exclude PATTERN          Exclude files that match PATTERN
+        \\  -X, --exclude-from FILE    Exclude files that match any pattern in FILE
+        \\  --exclude-caches           Exclude directories containing CACHEDIR.TAG
+        \\  -L, --follow-symlinks      Follow symbolic links (excluding directories)
+        \\  --exclude-kernfs           Exclude Linux pseudo filesystems (procfs,sysfs,cgroup,...)
+        \\  -t NUM                     Scan with NUM threads
+        \\
+        \\Export options:
+        \\  -c, --compress             Use Zstandard compression with `-o`
+        \\  --compress-level NUM       Set compression level
+        \\  --export-block-size KIB    Set export block size with `-O`
+        \\
+        \\Interface options:
+        \\  -0, -1, -2                 UI to use when scanning (0=none,2=full ncurses)
+        \\  -q, --slow-ui-updates      "Quiet" mode, refresh interval 2 seconds
+        \\  --enable-shell             Enable/disable shell spawning feature
+        \\  --enable-delete            Enable/disable file deletion feature
+        \\  --enable-refresh           Enable/disable directory refresh feature
+        \\  -r                         Read only (--disable-delete)
+        \\  -rr                        Read only++ (--disable-delete & --disable-shell)
+        \\  --si                       Use base 10 (SI) prefixes instead of base 2
+        \\  --apparent-size            Show apparent size instead of disk usage by default
+        \\  --hide-hidden              Hide "hidden" or excluded files by default
+        \\  --show-itemcount           Show item count column by default
+        \\  --show-mtime               Show mtime column by default (requires `-e`)
+        \\  --show-graph               Show graph column by default
+        \\  --show-percent             Show percent column by default
+        \\  --graph-style STYLE        hash / half-block / eighth-block
+        \\  --shared-column            off / shared / unique
+        \\  --sort COLUMN-(asc/desc)   disk-usage / name / apparent-size / itemcount / mtime
+        \\  --enable-natsort           Use natural order when sorting by name
+        \\  --group-directories-first  Sort directories before files
+        \\  --confirm-quit             Ask confirmation before quitting ncdu
+        \\  --no-confirm-delete        Don't ask confirmation before deletion
+        \\  --delete-command CMD       Command to run for file deletion
+        \\  --color SCHEME             off / dark / dark-bg
+        \\
+        \\Refer to `man ncdu` for more information.
         \\
     ) catch {};
     std.process.exit(0);
 }

-fn spawnShell() void {
-    ui.deinit();
-    defer ui.init();
-
-    var path = std.ArrayList(u8).init(allocator);
-    defer path.deinit();
-    browser.dir_parent.fmtPath(true, &path);
-
-    var env = std.process.getEnvMap(allocator) catch unreachable;
-    defer env.deinit();
-    // NCDU_LEVEL can only count to 9, keeps the implementation simple.
-    if (env.get("NCDU_LEVEL")) |l|
-        env.put("NCDU_LEVEL", if (l.len == 0) "1" else switch (l[0]) {
-            '0'...'8' => @as([]const u8, &.{l[0]+1}),
-            '9' => "9",
-            else => "1"
-        }) catch unreachable
-    else
-        env.put("NCDU_LEVEL", "1") catch unreachable;
-
-    const shell = std.os.getenvZ("NCDU_SHELL") orelse std.os.getenvZ("SHELL") orelse "/bin/sh";
-    var child = std.ChildProcess.init(&.{shell}, allocator) catch unreachable;
-    defer child.deinit();
-    child.cwd = path.items;
-    child.env_map = &env;
-
-    const term = child.spawnAndWait() catch |e| blk: {
-        _ = std.io.getStdErr().writer().print(
-            "Error spawning shell: {s}\n\nPress enter to continue.\n",
-            .{ ui.errorString(e) }
-        ) catch {};
-        _ = std.io.getStdIn().reader().skipUntilDelimiterOrEof('\n') catch unreachable;
-        break :blk std.ChildProcess.Term{ .Exited = 0 };
-    };
-    if (term != .Exited) {
-        const n = switch (term) {
-            .Exited => "status",
-            .Signal => "signal",
-            .Stopped => "stopped",
-            .Unknown => "unknown",
-        };
-        const v = switch (term) {
-            .Exited => |v| v,
-            .Signal => |v| v,
-            .Stopped => |v| v,
-            .Unknown => |v| v,
-        };
-        _ = std.io.getStdErr().writer().print(
-            "Shell returned with {s} code {}.\n\nPress enter to continue.\n", .{ n, v }
-        ) catch {};
-        _ = std.io.getStdIn().reader().skipUntilDelimiterOrEof('\n') catch unreachable;
-    }
-}

 fn readExcludeFile(path: [:0]const u8) !void {
     const f = try std.fs.cwd().openFileZ(path, .{});
     defer f.close();
-    var rd = std.io.bufferedReader(f.reader()).reader();
-    var buf = std.ArrayList(u8).init(allocator);
-    while (true) {
-        rd.readUntilDelimiterArrayList(&buf, '\n', 4096)
-            catch |e| if (e != error.EndOfStream) return e else if (buf.items.len == 0) break;
-        if (buf.items.len > 0)
-            config.exclude_patterns.append(buf.toOwnedSliceSentinel(0) catch unreachable) catch unreachable;
+
+    var line_buf: [4096]u8 = undefined;
+    var line_rd = util.LineReader.init(f, &line_buf);
+    while (try line_rd.read()) |line| {
+        if (line.len > 0)
+            exclude.addPattern(line);
     }
 }

+fn readImport(path: [:0]const u8) !void {
+    const fd =
+        if (std.mem.eql(u8, "-", path)) stdin
+        else try std.fs.cwd().openFileZ(path, .{});
+    errdefer fd.close();
+
+    var buf: [8]u8 = undefined;
+    if (8 != try fd.readAll(&buf)) return error.EndOfStream;
+    if (std.mem.eql(u8, &buf, bin_export.SIGNATURE)) {
+        try bin_reader.open(fd);
+        config.binreader = true;
+    } else {
+        json_import.import(fd, &buf);
+        fd.close();
+    }
+}

 pub fn main() void {
     ui.main_thread = std.Thread.getCurrentId();

     // Grab thousands_sep from the current C locale.
     _ = c.setlocale(c.LC_ALL, "");
     if (c.localeconv()) |locale| {

@@ -399,7 +475,6 @@ pub fn main() void {
             config.thousands_sep = span;
         }
     }
-    if (std.os.getenvZ("NO_COLOR") == null) config.ui_color = .darkbg;

     const loadConf = blk: {
         var args = std.process.ArgIteratorPosix.init();

@@ -412,26 +487,28 @@ pub fn main() void {
     if (loadConf) {
         tryReadArgsFile("/etc/ncdu.conf");

-        if (std.os.getenvZ("XDG_CONFIG_HOME")) |p| {
-            var path = std.fs.path.joinZ(allocator, &.{p, "ncdu", "config"}) catch unreachable;
+        if (std.posix.getenvZ("XDG_CONFIG_HOME")) |p| {
+            const path = std.fs.path.joinZ(allocator, &.{p, "ncdu", "config"}) catch unreachable;
             defer allocator.free(path);
             tryReadArgsFile(path);
-        } else if (std.os.getenvZ("HOME")) |p| {
-            var path = std.fs.path.joinZ(allocator, &.{p, ".config", "ncdu", "config"}) catch unreachable;
+        } else if (std.posix.getenvZ("HOME")) |p| {
+            const path = std.fs.path.joinZ(allocator, &.{p, ".config", "ncdu", "config"}) catch unreachable;
             defer allocator.free(path);
             tryReadArgsFile(path);
         }
     }

-    var scan_dir: ?[]const u8 = null;
+    var scan_dir: ?[:0]const u8 = null;
     var import_file: ?[:0]const u8 = null;
-    var export_file: ?[:0]const u8 = null;
+    var export_json: ?[:0]const u8 = null;
+    var export_bin: ?[:0]const u8 = null;
+    var quit_after_scan = false;
     {
-        var arglist = std.process.argsAlloc(allocator) catch unreachable;
+        const arglist = std.process.argsAlloc(allocator) catch unreachable;
         defer std.process.argsFree(allocator, arglist);
         var args = Args.init(arglist);
-        _ = args.next(); // program name
-        while (args.next()) |opt| {
+        _ = args.next() catch unreachable; // program name
+        while (args.next() catch unreachable) |opt| {
             if (!opt.opt) {
                 // XXX: ncdu 1.x doesn't error, it just silently ignores all but the last argument.
                 if (scan_dir != null) ui.die("Multiple directories given, see ncdu -h for help.\n", .{});

@@ -440,46 +517,68 @@ pub fn main() void {
             }
             if (opt.is("-h") or opt.is("-?") or opt.is("--help")) help()
             else if (opt.is("-v") or opt.is("-V") or opt.is("--version")) version()
-            else if (opt.is("-o") and export_file != null) ui.die("The -o flag can only be given once.\n", .{})
-            else if (opt.is("-o")) export_file = allocator.dupeZ(u8, args.arg()) catch unreachable
+            else if (opt.is("-o") and (export_json != null or export_bin != null)) ui.die("The -o flag can only be given once.\n", .{})
+            else if (opt.is("-o")) export_json = allocator.dupeZ(u8, args.arg() catch unreachable) catch unreachable
+            else if (opt.is("-O") and (export_json != null or export_bin != null)) ui.die("The -O flag can only be given once.\n", .{})
+            else if (opt.is("-O")) export_bin = allocator.dupeZ(u8, args.arg() catch unreachable) catch unreachable
             else if (opt.is("-f") and import_file != null) ui.die("The -f flag can only be given once.\n", .{})
-            else if (opt.is("-f")) import_file = allocator.dupeZ(u8, args.arg()) catch unreachable
+            else if (opt.is("-f")) import_file = allocator.dupeZ(u8, args.arg() catch unreachable) catch unreachable
             else if (opt.is("--ignore-config")) {}
-            else if (argConfig(&args, opt)) {}
-            else ui.die("Unrecognized option '{s}'.\n", .{opt.val});
+            else if (opt.is("--quit-after-scan")) quit_after_scan = true // undocumented feature to help with benchmarking scan/import
+            else if (argConfig(&args, opt, false)) |_| {}
+            else |_| ui.die("Unrecognized option '{s}'.\n", .{opt.val});
         }
     }

+    if (config.threads == 0) config.threads = std.Thread.getCpuCount() catch 1;
+
     if (@import("builtin").os.tag != .linux and config.exclude_kernfs)
-        ui.die("The --exclude-kernfs tag is currently only supported on Linux.\n", .{});
+        ui.die("The --exclude-kernfs flag is currently only supported on Linux.\n", .{});

-    const out_tty = std.io.getStdOut().isTty();
-    const in_tty = std.io.getStdIn().isTty();
+    const out_tty = stdout.isTty();
+    const in_tty = stdin.isTty();
     if (config.scan_ui == null) {
-        if (export_file) |f| {
+        if (export_json orelse export_bin) |f| {
             if (!out_tty or std.mem.eql(u8, f, "-")) config.scan_ui = .none
             else config.scan_ui = .line;
         } else config.scan_ui = .full;
     }
-    if (!in_tty and import_file == null and export_file == null)
+    if (!in_tty and import_file == null and export_json == null and export_bin == null and !quit_after_scan)
         ui.die("Standard input is not a TTY. Did you mean to import a file using '-f -'?\n", .{});
-    config.nc_tty = !in_tty or (if (export_file) |f| std.mem.eql(u8, f, "-") else false);
+    config.nc_tty = !in_tty or (if (export_json orelse export_bin) |f| std.mem.eql(u8, f, "-") else false);

     event_delay_timer = std.time.Timer.start() catch unreachable;
     defer ui.deinit();

-    var out_file = if (export_file) |f| (
-        if (std.mem.eql(u8, f, "-")) std.io.getStdOut()
+    if (export_json) |f| {
+        const file =
+            if (std.mem.eql(u8, f, "-")) stdout
             else std.fs.cwd().createFileZ(f, .{})
-            catch |e| ui.die("Error opening export file: {s}.\n", .{ui.errorString(e)})
-    ) else null;
+            catch |e| ui.die("Error opening export file: {s}.\n", .{ui.errorString(e)});
+        json_export.setupOutput(file);
+        sink.global.sink = .json;
+    } else if (export_bin) |f| {
+        const file =
+            if (std.mem.eql(u8, f, "-")) stdout
+            else std.fs.cwd().createFileZ(f, .{})
+            catch |e| ui.die("Error opening export file: {s}.\n", .{ui.errorString(e)});
+        bin_export.setupOutput(file);
+        sink.global.sink = .bin;
+    }

     if (import_file) |f| {
-        scan.importRoot(f, out_file);
+        readImport(f) catch |e| ui.die("Error reading file '{s}': {s}.\n", .{f, ui.errorString(e)});
         config.imported = true;
-    } else scan.scanRoot(scan_dir orelse ".", out_file)
-        catch |e| ui.die("Error opening directory: {s}.\n", .{ui.errorString(e)});
-    if (out_file != null) return;
+        if (config.binreader and (export_json != null or export_bin != null))
+            bin_reader.import();
+    } else {
+        var buf: [std.fs.max_path_bytes+1]u8 = @splat(0);
+        const path =
+            if (std.posix.realpathZ(scan_dir orelse ".", buf[0..buf.len-1])) |p| buf[0..p.len:0]
+            else |_| (scan_dir orelse ".");
+        scan.scan(path) catch |e| ui.die("Error opening directory: {s}.\n", .{ui.errorString(e)});
+    }
+    if (quit_after_scan or export_json != null or export_bin != null) return;

     config.can_shell = config.can_shell orelse !config.imported;
     config.can_delete = config.can_delete orelse !config.imported;

@@ -488,44 +587,57 @@ pub fn main() void {
     config.scan_ui = .full; // in case we're refreshing from the UI, always in full mode.
     ui.init();
     state = .browse;
-    browser.dir_parent = model.root;
-    browser.loadDir(null);
+    browser.initRoot();

     while (true) {
         switch (state) {
             .refresh => {
-                scan.scan();
+                var full_path: std.ArrayListUnmanaged(u8) = .empty;
+                defer full_path.deinit(allocator);
+                mem_sink.global.root.?.fmtPath(allocator, true, &full_path);
+                scan.scan(util.arrayListBufZ(&full_path, allocator)) catch {
+                    sink.global.last_error = allocator.dupeZ(u8, full_path.items) catch unreachable;
+                    sink.global.state = .err;
+                    while (state == .refresh) handleEvent(true, true);
+                };
                 state = .browse;
-                browser.loadDir(null);
+                browser.loadDir(0);
             },
             .shell => {
-                spawnShell();
+                const shell = std.posix.getenvZ("NCDU_SHELL") orelse std.posix.getenvZ("SHELL") orelse "/bin/sh";
+                var env = std.process.getEnvMap(allocator) catch unreachable;
+                defer env.deinit();
+                ui.runCmd(&.{shell}, browser.dir_path, &env, false);
                 state = .browse;
             },
             .delete => {
                 const next = delete.delete();
                 if (state != .refresh) {
                     state = .browse;
-                    browser.loadDir(next);
+                    browser.loadDir(if (next) |n| n.nameHash() else 0);
                 }
             },
             else => handleEvent(true, false)
         }
     }
 }

-var event_delay_timer: std.time.Timer = undefined;
+pub var event_delay_timer: std.time.Timer = undefined;

 // Draw the screen and handle the next input event.
 // In non-blocking mode, screen drawing is rate-limited to keep this function fast.
 pub fn handleEvent(block: bool, force_draw: bool) void {
+    while (ui.oom_threads.load(.monotonic) > 0) ui.oom();
+
     if (block or force_draw or event_delay_timer.read() > config.update_delay) {
-        if (ui.inited) _ = ui.c.erase();
+        if (ui.inited) _ = c.erase();
         switch (state) {
-            .scan, .refresh => scan.draw(),
+            .scan, .refresh => sink.draw(),
             .browse => browser.draw(),
             .delete => delete.draw(),
             .shell => unreachable,
         }
-        if (ui.inited) _ = ui.c.refresh();
+        if (ui.inited) _ = c.refresh();
         event_delay_timer.reset();
     }
     if (!ui.inited) {

@@ -535,11 +647,11 @@ pub fn handleEvent(block: bool, force_draw: bool) void {

     var firstblock = block;
     while (true) {
-        var ch = ui.getch(firstblock);
+        const ch = ui.getch(firstblock);
         if (ch == 0) return;
         if (ch == -1) return handleEvent(firstblock, true);
         switch (state) {
-            .scan, .refresh => scan.keyInput(ch),
+            .scan, .refresh => sink.keyInput(ch),
             .browse => browser.keyInput(ch),
             .delete => delete.keyInput(ch),
             .shell => unreachable,

@@ -548,19 +660,18 @@ pub fn handleEvent(block: bool, force_draw: bool) void {
         }
     }


 test "argument parser" {
     const lst = [_][:0]const u8{ "a", "-abcd=e", "--opt1=arg1", "--opt2", "arg2", "-x", "foo", "", "--", "--arg", "", "-", };
     const T = struct {
         a: Args,
         fn opt(self: *@This(), isopt: bool, val: []const u8) !void {
-            const o = self.a.next().?;
+            const o = (self.a.next() catch unreachable).?;
             try std.testing.expectEqual(isopt, o.opt);
             try std.testing.expectEqualStrings(val, o.val);
             try std.testing.expectEqual(o.is(val), isopt);
         }
         fn arg(self: *@This(), val: []const u8) !void {
-            try std.testing.expectEqualStrings(val, self.a.arg());
+            try std.testing.expectEqualStrings(val, self.a.arg() catch unreachable);
         }
     };
     var t = T{ .a = Args.init(&lst) };

src/mem_sink.zig (new file, 212 lines)

@@ -0,0 +1,212 @@
// SPDX-FileCopyrightText: Yorhel <projects@yorhel.nl>
// SPDX-License-Identifier: MIT

const std = @import("std");
const main = @import("main.zig");
const model = @import("model.zig");
const sink = @import("sink.zig");


pub const global = struct {
    pub var root: ?*model.Dir = null;
    pub var stats: bool = true; // calculate aggregate directory stats
};

pub const Thread = struct {
    // Arena allocator for model.Entry structs, these are never freed.
    arena: std.heap.ArenaAllocator = std.heap.ArenaAllocator.init(std.heap.page_allocator),
};

pub fn statToEntry(stat: *const sink.Stat, e: *model.Entry, parent: *model.Dir) void {
    e.pack.blocks = stat.blocks;
    e.size = stat.size;
    if (e.dir()) |d| {
        d.parent = parent;
        d.pack.dev = model.devices.getId(stat.dev);
    }
    if (e.link()) |l| {
        l.parent = parent;
        l.ino = stat.ino;
        l.pack.nlink = stat.nlink;
        model.inodes.lock.lock();
        defer model.inodes.lock.unlock();
        l.addLink();
    }
    if (e.ext()) |ext| ext.* = stat.ext;
}

pub const Dir = struct {
    dir: *model.Dir,
    entries: Map,

    own_blocks: model.Blocks,
    own_bytes: u64,

    // Additional counts collected from subdirectories. Subdirs may run final()
    // from separate threads so these need to be protected.
    blocks: model.Blocks = 0,
    bytes: u64 = 0,
    items: u32 = 0,
    mtime: u64 = 0,
    suberr: bool = false,
    lock: std.Thread.Mutex = .{},

    const Map = std.HashMap(*model.Entry, void, HashContext, 80);

    const HashContext = struct {
        pub fn hash(_: @This(), e: *model.Entry) u64 {
            return std.hash.Wyhash.hash(0, e.name());
        }
        pub fn eql(_: @This(), a: *model.Entry, b: *model.Entry) bool {
            return a == b or std.mem.eql(u8, a.name(), b.name());
        }
    };

    const HashContextAdapted = struct {
        pub fn hash(_: @This(), v: []const u8) u64 {
            return std.hash.Wyhash.hash(0, v);
        }
        pub fn eql(_: @This(), a: []const u8, b: *model.Entry) bool {
            return std.mem.eql(u8, a, b.name());
        }
    };

    fn init(dir: *model.Dir) Dir {
        var self = Dir{
            .dir = dir,
            .entries = Map.initContext(main.allocator, HashContext{}),
            .own_blocks = dir.entry.pack.blocks,
            .own_bytes = dir.entry.size,
        };

        var count: Map.Size = 0;
        var it = dir.sub.ptr;
        while (it) |e| : (it = e.next.ptr) count += 1;
        self.entries.ensureUnusedCapacity(count) catch unreachable;

        it = dir.sub.ptr;
        while (it) |e| : (it = e.next.ptr)
            self.entries.putAssumeCapacity(e, {});
        return self;
    }

    fn getEntry(self: *Dir, t: *Thread, etype: model.EType, isext: bool, name: []const u8) *model.Entry {
        if (self.entries.getKeyAdapted(name, HashContextAdapted{})) |e| {
            // XXX: In-place conversion may be possible in some cases.
            if (e.pack.etype.base() == etype.base() and (!isext or e.pack.isext)) {
                e.pack.etype = etype;
                e.pack.isext = isext;
                _ = self.entries.removeAdapted(name, HashContextAdapted{});
                return e;
            }
        }
        const e = model.Entry.create(t.arena.allocator(), etype, isext, name);
        e.next.ptr = self.dir.sub.ptr;
        self.dir.sub.ptr = e;
        return e;
    }

    pub fn addSpecial(self: *Dir, t: *Thread, name: []const u8, st: model.EType) void {
        self.dir.items += 1;
        if (st == .err) self.dir.pack.suberr = true;
        _ = self.getEntry(t, st, false, name);
    }

    pub fn addStat(self: *Dir, t: *Thread, name: []const u8, stat: *const sink.Stat) *model.Entry {
        if (global.stats) {
            self.dir.items +|= 1;
            if (stat.etype != .link) {
                self.dir.entry.pack.blocks +|= stat.blocks;
                self.dir.entry.size +|= stat.size;
            }
            if (self.dir.entry.ext()) |e| {
                if (stat.ext.mtime > e.mtime) e.mtime = stat.ext.mtime;
            }
        }

        const e = self.getEntry(t, stat.etype, main.config.extended and !stat.ext.isEmpty(), name);
        statToEntry(stat, e, self.dir);
        return e;
    }

    pub fn addDir(self: *Dir, t: *Thread, name: []const u8, stat: *const sink.Stat) Dir {
        return init(self.addStat(t, name, stat).dir().?);
    }

    pub fn setReadError(self: *Dir) void {
        self.dir.pack.err = true;
    }

    pub fn final(self: *Dir, parent: ?*Dir) void {
        // Remove entries we've not seen
        if (self.entries.count() > 0) {
            var it = &self.dir.sub.ptr;
            while (it.*) |e| {
                if (self.entries.getKey(e) == e) it.* = e.next.ptr
                else it = &e.next.ptr;
            }
        }
        self.entries.deinit();

        if (!global.stats) return;

        // Grab counts collected from subdirectories
        self.dir.entry.pack.blocks +|= self.blocks;
        self.dir.entry.size +|= self.bytes;
        self.dir.items +|= self.items;
        if (self.suberr) self.dir.pack.suberr = true;
        if (self.dir.entry.ext()) |e| {
            if (self.mtime > e.mtime) e.mtime = self.mtime;
        }

        // Add own counts to parent
        if (parent) |p| {
            p.lock.lock();
            defer p.lock.unlock();
            p.blocks +|= self.dir.entry.pack.blocks - self.own_blocks;
            p.bytes +|= self.dir.entry.size - self.own_bytes;
            p.items +|= self.dir.items;
            if (self.dir.entry.ext()) |e| {
                if (e.mtime > p.mtime) p.mtime = e.mtime;
            }
            if (self.suberr or self.dir.pack.suberr or self.dir.pack.err) p.suberr = true;
        }
    }
};

pub fn createRoot(path: []const u8, stat: *const sink.Stat) Dir {
    const p = global.root orelse blk: {
        model.root = model.Entry.create(main.allocator, .dir, main.config.extended and !stat.ext.isEmpty(), path).dir().?;
        break :blk model.root;
    };
    sink.global.state = .zeroing;
    if (p.items > 10_000) main.handleEvent(false, true);
    // Do the zeroStats() here, after the "root" entry has been
    // stat'ed and opened, so that a fatal error on refresh won't
    // zero-out the requested directory.
    p.entry.zeroStats(p.parent);
    sink.global.state = .running;
    p.entry.pack.blocks = stat.blocks;
    p.entry.size = stat.size;
    p.pack.dev = model.devices.getId(stat.dev);
    if (p.entry.ext()) |e| e.* = stat.ext;
    return Dir.init(p);
}

pub fn done() void {
    if (!global.stats) return;

    sink.global.state = .hlcnt;
    main.handleEvent(false, true);
    const dir = global.root orelse model.root;
    var it: ?*model.Dir = dir;
    while (it) |p| : (it = p.parent) {
        p.updateSubErr();
        if (p != dir) {
            p.entry.pack.blocks +|= dir.entry.pack.blocks;
            p.entry.size +|= dir.entry.size;
            p.items +|= dir.items + 1;
        }
    }
    model.inodes.addAllStats();
}
73 src/mem_src.zig Normal file
@@ -0,0 +1,73 @@
// SPDX-FileCopyrightText: Yorhel <projects@yorhel.nl>
// SPDX-License-Identifier: MIT

const std = @import("std");
const main = @import("main.zig");
const model = @import("model.zig");
const sink = @import("sink.zig");

// Emit the memory tree to the sink in depth-first order from a single thread,
// suitable for JSON export.

fn toStat(e: *model.Entry) sink.Stat {
    const el = e.link();
    return sink.Stat{
        .etype = e.pack.etype,
        .blocks = e.pack.blocks,
        .size = e.size,
        .dev =
            if (e.dir()) |d| model.devices.list.items[d.pack.dev]
            else if (el) |l| model.devices.list.items[l.parent.pack.dev]
            else undefined,
        .ino = if (el) |l| l.ino else undefined,
        .nlink = if (el) |l| l.pack.nlink else 1,
        .ext = if (e.ext()) |x| x.* else .{},
    };
}

const Ctx = struct {
    sink: *sink.Thread,
    stat: sink.Stat,
};


fn rec(ctx: *Ctx, dir: *sink.Dir, entry: *model.Entry) void {
    if ((ctx.sink.files_seen.load(.monotonic) & 65) == 0)
        main.handleEvent(false, false);

    ctx.stat = toStat(entry);
    switch (entry.pack.etype) {
        .dir => {
            const d = entry.dir().?;
            var ndir = dir.addDir(ctx.sink, entry.name(), &ctx.stat);
            ctx.sink.setDir(ndir);
            if (d.pack.err) ndir.setReadError(ctx.sink);
            var it = d.sub.ptr;
            while (it) |e| : (it = e.next.ptr) rec(ctx, ndir, e);
            ctx.sink.setDir(dir);
            ndir.unref(ctx.sink);
        },
        .reg, .nonreg, .link => dir.addStat(ctx.sink, entry.name(), &ctx.stat),
        else => dir.addSpecial(ctx.sink, entry.name(), entry.pack.etype),
    }
}


pub fn run(d: *model.Dir) void {
    const sink_threads = sink.createThreads(1);

    var ctx: Ctx = .{
        .sink = &sink_threads[0],
        .stat = toStat(&d.entry),
    };
    var buf: std.ArrayListUnmanaged(u8) = .empty;
    d.fmtPath(main.allocator, true, &buf);
    const root = sink.createRoot(buf.items, &ctx.stat);
    buf.deinit(main.allocator);

    var it = d.sub.ptr;
    while (it) |e| : (it = e.next.ptr) rec(&ctx, root, e);

    root.unref(ctx.sink);
    sink.done();
}
571 src/model.zig
@@ -1,4 +1,4 @@
// SPDX-FileCopyrightText: 2021-2022 Yoran Heling <projects@yorhel.nl>
// SPDX-FileCopyrightText: Yorhel <projects@yorhel.nl>
// SPDX-License-Identifier: MIT

const std = @import("std");
@@ -6,20 +6,51 @@ const main = @import("main.zig");
const ui = @import("ui.zig");
const util = @import("util.zig");

// While an arena allocator is optimal for almost all scenarios in which ncdu
// is used, it doesn't allow for re-using deleted nodes after doing a delete or
// refresh operation, so a long-running ncdu session with regular refreshes
// will leak memory, but I'd say that's worth the efficiency gains.
// TODO: Can still implement a simple bucketed free list on top of this arena
// allocator to reuse nodes, if necessary.
var allocator_state = std.heap.ArenaAllocator.init(std.heap.page_allocator);
const allocator = allocator_state.allocator();
// Numbers are used in the binfmt export, so must be stable.
pub const EType = enum(i3) {
    dir = 0,
    reg = 1,
    nonreg = 2,
    link = 3,
    err = -1,
    pattern = -2,
    otherfs = -3,
    kernfs = -4,

pub const EType = enum(u2) { dir, link, file };
    pub fn base(t: EType) EType {
        return switch (t) {
            .dir, .link => t,
            else => .reg,
        };
    }

// Type for the Entry.blocks field. Smaller than a u64 to make room for flags.
    // Whether this entry should be displayed as a "directory".
    // Some dirs are actually represented in this data model as a File for efficiency.
    pub fn isDirectory(t: EType) bool {
        return switch (t) {
            .dir, .otherfs, .kernfs => true,
            else => false,
        };
    }
};
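
// Illustrative addition (my own sketch, not part of the commit): with the
// signed tag, the "special" kinds are exactly the negative values, which
// sink.zig's addSpecial() asserts on, while the non-negative values stay
// stable for the binfmt export.
comptime {
    std.debug.assert(@intFromEnum(EType.err) < 0);
    std.debug.assert(@intFromEnum(EType.dir) == 0);
}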

// Type for the Entry.Packed.blocks field. Smaller than a u64 to make room for flags.
pub const Blocks = u60;

// Entries read from bin_reader may refer to other entries by itemref rather than pointer.
// This is a hack that allows browser.zig to use the same types for in-memory
// and bin_reader-backed directory trees. Most code can only deal with
// in-memory trees and accesses the .ptr field directly.
pub const Ref = extern union {
    ptr: ?*Entry align(1),
    ref: u64 align(1),

    pub fn isNull(r: Ref) bool {
        if (main.config.binreader) return r.ref == std.math.maxInt(u64)
        else return r.ptr == null;
    }
};
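
// Illustrative usage of the two sides of the union (my own sketch, not part of
// the commit; "some_dir" is a hypothetical *Dir):
//
//   var it = some_dir.sub.ptr;                 // in-memory tree: use .ptr directly
//   while (it) |e| : (it = e.next.ptr) { ... }
//
//   if (some_dir.sub.isNull()) { ... }         // safe for both in-memory and
//                                              // bin_reader-backed trees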

// Memory layout:
//      (Ext +) Dir + name
// or:  (Ext +) Link + name
@@ -31,301 +62,308 @@ pub const Blocks = u60;
// These are all packed structs and hence do not have any alignment, which is
// great for saving memory but perhaps not very great for code size or
// performance.
// (TODO: What are the aliasing rules for Zig? There is a 'noalias' keyword,
// but does that mean all unmarked pointers are allowed to alias?)
pub const Entry = packed struct {
pub const Entry = extern struct {
    pack: Packed align(1),
    size: u64 align(1) = 0,
    next: Ref = .{ .ptr = null },

    pub const Packed = packed struct(u64) {
        etype: EType,
        isext: bool,
        // Whether or not this entry's size has been counted in its parents.
        // Counting of Link entries is deferred until the scan/delete operation has
        // completed, so for those entries this flag indicates an intention to be
        // counted.
        counted: bool,
    blocks: Blocks, // 512-byte blocks
    size: u64,
    next: ?*Entry,
        blocks: Blocks = 0, // 512-byte blocks
    };

    const Self = @This();

    pub fn dir(self: *Self) ?*Dir {
        return if (self.etype == .dir) @ptrCast(*Dir, self) else null;
        return if (self.pack.etype == .dir) @ptrCast(self) else null;
    }

    pub fn link(self: *Self) ?*Link {
        return if (self.etype == .link) @ptrCast(*Link, self) else null;
        return if (self.pack.etype == .link) @ptrCast(self) else null;
    }

    pub fn file(self: *Self) ?*File {
        return if (self.etype == .file) @ptrCast(*File, self) else null;
    }

    // Whether this entry should be displayed as a "directory".
    // Some dirs are actually represented in this data model as a File for efficiency.
    pub fn isDirectory(self: *Self) bool {
        return if (self.file()) |f| f.other_fs or f.kernfs else self.etype == .dir;
    }

    fn nameOffset(etype: EType) usize {
        return switch (etype) {
            .dir => @offsetOf(Dir, "name"),
            .link => @offsetOf(Link, "name"),
            .file => @offsetOf(File, "name"),
        };
        return if (self.pack.etype != .dir and self.pack.etype != .link) @ptrCast(self) else null;
    }

    pub fn name(self: *const Self) [:0]const u8 {
        const ptr = @ptrCast([*:0]const u8, self) + nameOffset(self.etype);
        return std.mem.sliceTo(ptr, 0);
        const self_name = switch (self.pack.etype) {
            .dir => &@as(*const Dir, @ptrCast(self)).name,
            .link => &@as(*const Link, @ptrCast(self)).name,
            else => &@as(*const File, @ptrCast(self)).name,
        };
        const name_ptr: [*:0]const u8 = @ptrCast(self_name);
        return std.mem.sliceTo(name_ptr, 0);
    }

    pub fn nameHash(self: *const Self) u64 {
        return std.hash.Wyhash.hash(0, self.name());
    }

    pub fn ext(self: *Self) ?*Ext {
        if (!self.isext) return null;
        return @ptrCast(*Ext, @ptrCast([*]Ext, self) - 1);
        if (!self.pack.isext) return null;
        return @ptrCast(@as([*]Ext, @ptrCast(self)) - 1);
    }

    pub fn create(etype: EType, isext: bool, ename: []const u8) *Entry {
        const extsize = if (isext) @as(usize, @sizeOf(Ext)) else 0;
        const size = nameOffset(etype) + ename.len + 1 + extsize;
        var ptr = blk: {
            while (true) {
                if (allocator.allocWithOptions(u8, size, std.math.max(@alignOf(Ext), @alignOf(Entry)), null)) |p|
                    break :blk p
    fn alloc(comptime T: type, allocator: std.mem.Allocator, etype: EType, isext: bool, ename: []const u8) *Entry {
        const size = (if (isext) @as(usize, @sizeOf(Ext)) else 0) + @sizeOf(T) + ename.len + 1;
        var ptr = blk: while (true) {
            const alignment = if (@typeInfo(@TypeOf(std.mem.Allocator.allocWithOptions)).@"fn".params[3].type == ?u29) 1 else std.mem.Alignment.@"1";
            if (allocator.allocWithOptions(u8, size, alignment, null)) |p| break :blk p
            else |_| {}
            ui.oom();
        }
        };
        std.mem.set(u8, ptr, 0); // kind of ugly, but does the trick
        var e = @ptrCast(*Entry, ptr.ptr + extsize);
        e.etype = etype;
        e.isext = isext;
        var name_ptr = @ptrCast([*]u8, e) + nameOffset(etype);
        std.mem.copy(u8, name_ptr[0..ename.len], ename);
        return e;
        if (isext) {
            @as(*Ext, @ptrCast(ptr)).* = .{};
            ptr = ptr[@sizeOf(Ext)..];
        }
        const e: *T = @ptrCast(ptr);
        e.* = .{ .entry = .{ .pack = .{ .etype = etype, .isext = isext } } };
        const n = @as([*]u8, @ptrCast(&e.name))[0..ename.len+1];
        @memcpy(n[0..ename.len], ename);
        n[ename.len] = 0;
        return &e.entry;
    }
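
    // A worked picture of what alloc() lays out, for a File named "abc" with
    // isext == true (my own illustration, not part of the commit):
    //
    //   [ Ext ][ File { entry } ][ 'a' 'b' 'c' 0 ]
    //   ^ptr    ^e                ^&e.name
    //
    //   size = @sizeOf(Ext) + @sizeOf(File) + 3 + 1
    //
    // ext() later recovers the Ext by stepping one @sizeOf(Ext) back from the
    // Entry pointer, which is why the Ext record sits in front of the struct.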

    // Set the 'err' flag on Dirs and Files, propagating 'suberr' to parents.
    pub fn setErr(self: *Self, parent: *Dir) void {
        if (self.dir()) |d| d.err = true
        else if (self.file()) |f| f.err = true
        else unreachable;
        var it: ?*Dir = if (&parent.entry == self) parent.parent else parent;
        while (it) |p| : (it = p.parent) {
            if (p.suberr) break;
            p.suberr = true;
        }
    pub fn create(allocator: std.mem.Allocator, etype: EType, isext: bool, ename: []const u8) *Entry {
        return switch (etype) {
            .dir => alloc(Dir, allocator, etype, isext, ename),
            .link => alloc(Link, allocator, etype, isext, ename),
            else => alloc(File, allocator, etype, isext, ename),
        };
    }

    pub fn addStats(self: *Entry, parent: *Dir, nlink: u31) void {
        if (self.counted) return;
        self.counted = true;

        // Add link to the inode map, but don't count its size (yet).
        if (self.link()) |l| {
            l.parent = parent;
            var d = inodes.map.getOrPut(l) catch unreachable;
            if (!d.found_existing) {
                d.value_ptr.* = .{ .counted = false, .nlink = nlink };
                inodes.total_blocks +|= self.blocks;
                l.next = l;
            } else {
                inodes.setStats(.{ .key_ptr = d.key_ptr, .value_ptr = d.value_ptr }, false);
                // If the nlink counts are not consistent, reset to 0 so we calculate with what we have instead.
                if (d.value_ptr.nlink != nlink)
                    d.value_ptr.nlink = 0;
                l.next = d.key_ptr.*.next;
                d.key_ptr.*.next = l;
            }
            inodes.addUncounted(l);
    pub fn destroy(self: *Self, allocator: std.mem.Allocator) void {
        const ptr: [*]u8 = if (self.ext()) |e| @ptrCast(e) else @ptrCast(self);
        const esize: usize = switch (self.pack.etype) {
            .dir => @sizeOf(Dir),
            .link => @sizeOf(Link),
            else => @sizeOf(File),
        };
        const size = (if (self.pack.isext) @as(usize, @sizeOf(Ext)) else 0) + esize + self.name().len + 1;
        allocator.free(ptr[0..size]);
    }

        var it: ?*Dir = parent;
        while(it) |p| : (it = p.parent) {
            if (self.ext()) |e|
                if (p.entry.ext()) |pe|
                    if (e.mtime > pe.mtime) { pe.mtime = e.mtime; };
            p.items +|= 1;
            if (self.etype != .link) {
                p.entry.size +|= self.size;
                p.entry.blocks +|= self.blocks;
            }
        }
    fn hasErr(self: *Self) bool {
        return
            if(self.dir()) |d| d.pack.err or d.pack.suberr
            else self.pack.etype == .err;
    }

    // Opposite of addStats(), but has some limitations:
    // - If addStats() saturated adding sizes, then the sizes after delStats()
    //   will be incorrect.
    // - mtime of parents is not adjusted (but that's a feature, possibly?)
    //
    // This function assumes that, for directories, all sub-entries have
    // already been un-counted.
    //
    // When removing a Link, the entry's nlink counter is reset to zero, so
    // that it will be recalculated based on our view of the tree. This means
    // that links outside of the scanned directory will not be considered
    // anymore, meaning that delStats() followed by addStats() with the same
    // data may cause information to be lost.
    pub fn delStats(self: *Entry, parent: *Dir) void {
        if (!self.counted) return;
        defer self.counted = false; // defer, to make sure inodes.setStats() still sees it as counted.

        if (self.link()) |l| {
            var d = inodes.map.getEntry(l).?;
            inodes.setStats(d, false);
            d.value_ptr.nlink = 0;
            if (l.next == l) {
                _ = inodes.map.remove(l);
                _ = inodes.uncounted.remove(l);
                inodes.total_blocks -|= self.blocks;
            } else {
                if (d.key_ptr.* == l)
                    d.key_ptr.* = l.next;
                inodes.addUncounted(l.next);
                // This is O(n), which in this context has the potential to
                // slow ncdu down to a crawl. But this function is only called
                // on refresh/delete operations and even then it's not common
                // to have very long lists, so this blowing up should be very
                // rare. This removal can also be deferred to setStats() to
                // amortize the costs, if necessary.
                var it = l.next;
                while (it.next != l) it = it.next;
                it.next = l.next;
            }
        }

        var it: ?*Dir = parent;
        while(it) |p| : (it = p.parent) {
            p.items -|= 1;
            if (self.etype != .link) {
                p.entry.size -|= self.size;
                p.entry.blocks -|= self.blocks;
            }
        }
    }
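
    // Illustrative addition (my own sketch, not part of the commit): the size
    // bookkeeping above relies on Zig's saturating operators, which clamp at
    // the type's bounds instead of overflowing; that clamping is the
    // saturation the delStats() comment warns about.
    test "saturating add clamps" {
        var blocks: Blocks = std.math.maxInt(Blocks);
        blocks +|= 512; // stays at maxInt(u60) rather than wrapping around
        try std.testing.expectEqual(@as(Blocks, std.math.maxInt(Blocks)), blocks);
    }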

    pub fn delStatsRec(self: *Entry, parent: *Dir) void {
    fn removeLinks(self: *Entry) void {
        if (self.dir()) |d| {
            var it = d.sub;
            while (it) |e| : (it = e.next)
                e.delStatsRec(d);
            var it = d.sub.ptr;
            while (it) |e| : (it = e.next.ptr) e.removeLinks();
        }
        self.delStats(parent);
        if (self.link()) |l| l.removeLink();
    }

    fn zeroStatsRec(self: *Entry) void {
        self.pack.blocks = 0;
        self.size = 0;
        if (self.dir()) |d| {
            d.items = 0;
            d.pack.err = false;
            d.pack.suberr = false;
            var it = d.sub.ptr;
            while (it) |e| : (it = e.next.ptr) e.zeroStatsRec();
        }
    }

    // Recursively set stats and those of sub-items to zero and removes counts
    // from parent directories; as if this item does not exist in the tree.
    // XXX: Does not update the 'suberr' flag of parent directories, make sure
    // to call updateSubErr() afterwards.
    pub fn zeroStats(self: *Entry, parent: ?*Dir) void {
        self.removeLinks();

        var it = parent;
        while (it) |p| : (it = p.parent) {
            p.entry.pack.blocks -|= self.pack.blocks;
            p.entry.size -|= self.size;
            p.items -|= 1 + (if (self.dir()) |d| d.items else 0);
        }
        self.zeroStatsRec();
    }
};

const DevId = u30; // Can be reduced to make room for more flags in Dir.
const DevId = u30; // Can be reduced to make room for more flags in Dir.Packed.

pub const Dir = packed struct {
pub const Dir = extern struct {
    entry: Entry,

    sub: ?*Entry,
    parent: ?*Dir,
    sub: Ref = .{ .ptr = null },
    parent: ?*Dir align(1) = null,

    // entry.{blocks,size}: Total size of all unique files + dirs. Non-shared hardlinks are counted only once.
    // (i.e. the space you'll need if you created a filesystem with only this dir)
    // shared_*: Unique hardlinks that still have references outside of this directory.
    // (i.e. the space you won't reclaim by deleting this dir)
    // (space reclaimed by deleting a dir =~ entry. - shared_)
    shared_blocks: u64,
    shared_size: u64,
    items: u32,
    shared_blocks: u64 align(1) = 0,
    shared_size: u64 align(1) = 0,
    items: u32 align(1) = 0,

    // Indexes into the global 'devices.list' array
    dev: DevId,

    err: bool,
    suberr: bool,
    pack: Packed align(1) = .{},

    // Only used to find the @offsetOf, the name is written at this point as a 0-terminated string.
    // (Old C habits die hard)
    name: u8,
    name: [0]u8 = undefined,

    pub fn fmtPath(self: *const @This(), withRoot: bool, out: *std.ArrayList(u8)) void {
    pub const Packed = packed struct {
        // Indexes into the global 'devices.list' array
        dev: DevId = 0,
        err: bool = false,
        suberr: bool = false,
    };

    pub fn fmtPath(self: *const @This(), alloc: std.mem.Allocator, withRoot: bool, out: *std.ArrayListUnmanaged(u8)) void {
        if (!withRoot and self.parent == null) return;
        var components = std.ArrayList([:0]const u8).init(main.allocator);
        defer components.deinit();
        var components: std.ArrayListUnmanaged([:0]const u8) = .empty;
        defer components.deinit(main.allocator);
        var it: ?*const @This() = self;
        while (it) |e| : (it = e.parent)
            if (withRoot or e.parent != null)
                components.append(e.entry.name()) catch unreachable;
                components.append(main.allocator, e.entry.name()) catch unreachable;

        var i: usize = components.items.len-1;
        while (true) {
            if (i != components.items.len-1 and !(out.items.len != 0 and out.items[out.items.len-1] == '/')) out.append('/') catch unreachable;
            out.appendSlice(components.items[i]) catch unreachable;
            if (i != components.items.len-1 and !(out.items.len != 0 and out.items[out.items.len-1] == '/'))
                out.append(main.allocator, '/') catch unreachable;
            out.appendSlice(alloc, components.items[i]) catch unreachable;
            if (i == 0) break;
            i -= 1;
        }
    }

    // Only updates the suberr of this Dir, assumes child dirs have already
    // been updated and does not propagate to parents.
    pub fn updateSubErr(self: *@This()) void {
        self.pack.suberr = false;
        var sub = self.sub.ptr;
        while (sub) |e| : (sub = e.next.ptr) {
            if (e.hasErr()) {
                self.pack.suberr = true;
                break;
            }
        }
    }
};

// File that's been hardlinked (i.e. nlink > 1)
pub const Link = packed struct {
pub const Link = extern struct {
    entry: Entry,
    parent: *Dir,
    next: *Link, // Singly circular linked list of all *Link nodes with the same dev,ino.
    parent: *Dir align(1) = undefined,
    next: *Link align(1) = undefined, // circular linked list of all *Link nodes with the same dev,ino.
    prev: *Link align(1) = undefined,
    // dev is inherited from the parent Dir
    ino: u64,
    name: u8,
    ino: u64 align(1) = undefined,
    pack: Pack align(1) = .{},
    name: [0]u8 = undefined,

    const Pack = packed struct(u32) {
        // Whether this Inode is counted towards the parent directories.
        // Is kept synchronized between all Link nodes with the same dev/ino.
        counted: bool = false,
        // Number of links for this inode. When set to '0', we don't know the
        // actual nlink count; which happens for old JSON dumps.
        nlink: u31 = undefined,
    };

    // Return value should be freed with main.allocator.
    pub fn path(self: @This(), withRoot: bool) [:0]const u8 {
        var out = std.ArrayList(u8).init(main.allocator);
        self.parent.fmtPath(withRoot, &out);
        out.append('/') catch unreachable;
        out.appendSlice(self.entry.name()) catch unreachable;
        return out.toOwnedSliceSentinel(0) catch unreachable;
    pub fn path(self: *const @This(), withRoot: bool) [:0]const u8 {
        var out: std.ArrayListUnmanaged(u8) = .empty;
        self.parent.fmtPath(main.allocator, withRoot, &out);
        out.append(main.allocator, '/') catch unreachable;
        out.appendSlice(main.allocator, self.entry.name()) catch unreachable;
        return out.toOwnedSliceSentinel(main.allocator, 0) catch unreachable;
    }

    // Add this link to the inodes map and mark it as 'uncounted'.
    pub fn addLink(l: *@This()) void {
        const d = inodes.map.getOrPut(l) catch unreachable;
        if (!d.found_existing) {
            l.next = l;
            l.prev = l;
        } else {
            inodes.setStats(d.key_ptr.*, false);
            l.next = d.key_ptr.*;
            l.prev = d.key_ptr.*.prev;
            l.next.prev = l;
            l.prev.next = l;
        }
        inodes.addUncounted(l);
    }

    // Remove this link from the inodes map and remove its stats from parent directories.
    fn removeLink(l: *@This()) void {
        inodes.setStats(l, false);
        const entry = inodes.map.getEntry(l) orelse return;
        if (l.next == l) {
            _ = inodes.map.remove(l);
            _ = inodes.uncounted.remove(l);
        } else {
            // XXX: If this link is actually removed from the filesystem, then
            // the nlink count of the existing links should be updated to
            // reflect that. But we can't do that here, because this function
            // is also called before doing a filesystem refresh - in which case
            // the nlink count likely won't change. Best we can hope for is
            // that a refresh will encounter another link to the same inode and
            // trigger an nlink change.
            if (entry.key_ptr.* == l)
                entry.key_ptr.* = l.next;
            inodes.addUncounted(l.next);
            l.next.prev = l.prev;
            l.prev.next = l.next;
        }
    }
};

// Anything that's not an (indexed) directory or hardlink. Excluded directories are also "Files".
pub const File = packed struct {
pub const File = extern struct {
    entry: Entry,
    name: [0]u8 = undefined,
};

    err: bool,
    excluded: bool,
    other_fs: bool,
    kernfs: bool,
    notreg: bool,
    _pad: u3,
pub const Ext = extern struct {
    pack: Pack = .{},
    mtime: u64 align(1) = 0,
    uid: u32 align(1) = 0,
    gid: u32 align(1) = 0,
    mode: u16 align(1) = 0,

    name: u8,
    pub const Pack = packed struct(u8) {
        hasmtime: bool = false,
        hasuid: bool = false,
        hasgid: bool = false,
        hasmode: bool = false,
        _pad: u4 = 0,
    };

    pub fn resetFlags(f: *@This()) void {
        f.err = false;
        f.excluded = false;
        f.other_fs = false;
        f.kernfs = false;
        f.notreg = false;
    pub fn isEmpty(e: *const Ext) bool {
        return !e.pack.hasmtime and !e.pack.hasuid and !e.pack.hasgid and !e.pack.hasmode;
    }
};

pub const Ext = packed struct {
    mtime: u64 = 0,
    uid: u32 = 0,
    gid: u32 = 0,
    mode: u16 = 0,
};

comptime {
    std.debug.assert(@bitOffsetOf(Dir, "name") % 8 == 0);
    std.debug.assert(@bitOffsetOf(Link, "name") % 8 == 0);
    std.debug.assert(@bitOffsetOf(File, "name") % 8 == 0);
}


// List of st_dev entries. Those are typically 64bits, but that's quite a waste
// of space when a typical scan won't cover many unique devices.
pub const devices = struct {
    var lock = std.Thread.Mutex{};
    // id -> dev
    pub var list = std.ArrayList(u64).init(main.allocator);
    pub var list: std.ArrayListUnmanaged(u64) = .empty;
    // dev -> id
    var lookup = std.AutoHashMap(u64, DevId).init(main.allocator);

    pub fn getId(dev: u64) DevId {
        var d = lookup.getOrPut(dev) catch unreachable;
        lock.lock();
        defer lock.unlock();
        const d = lookup.getOrPut(dev) catch unreachable;
        if (!d.found_existing) {
            d.value_ptr.* = @intCast(DevId, list.items.len);
            list.append(dev) catch unreachable;
            if (list.items.len >= std.math.maxInt(DevId)) ui.die("Maximum number of device identifiers exceeded.\n", .{});
            d.value_ptr.* = @as(DevId, @intCast(list.items.len));
            list.append(main.allocator, dev) catch unreachable;
        }
        return d.value_ptr.*;
    }
@@ -338,14 +376,9 @@ pub const inodes = struct {
    // node in the list. Link entries with the same dev/ino are part of a
    // circular linked list, so you can iterate through all of them with this
    // single pointer.
    const Map = std.HashMap(*Link, Inode, HashContext, 80);
    const Map = std.HashMap(*Link, void, HashContext, 80);
    pub var map = Map.init(main.allocator);

    // Cumulative size of all unique hard links in the map. This is a somewhat
    // ugly workaround to provide accurate sizes during the initial scan, when
    // the hard links are not counted as part of the parent directories yet.
    pub var total_blocks: Blocks = 0;

    // List of nodes in 'map' with !counted, to speed up addAllStats().
    // If this list grows large relative to the number of nodes in 'map', then
    // this list is cleared and uncounted_full is set instead, so that
@@ -353,27 +386,18 @@ pub const inodes = struct {
    var uncounted = std.HashMap(*Link, void, HashContext, 80).init(main.allocator);
    var uncounted_full = true; // start with true for the initial scan

    const Inode = packed struct {
        // Whether this Inode is counted towards the parent directories.
        counted: bool,
        // Number of links for this inode. When set to '0', we don't know the
        // actual nlink count, either because it wasn't part of the imported
        // JSON data or because we read inconsistent values from the
        // filesystem. The count will then be updated by the actual number of
        // links in our in-memory tree.
        nlink: u31,
    };
    pub var lock = std.Thread.Mutex{};

    const HashContext = struct {
        pub fn hash(_: @This(), l: *Link) u64 {
            var h = std.hash.Wyhash.init(0);
            h.update(std.mem.asBytes(&@as(u32, l.parent.dev)));
            h.update(std.mem.asBytes(&@as(u32, l.parent.pack.dev)));
            h.update(std.mem.asBytes(&l.ino));
            return h.final();
        }

        pub fn eql(_: @This(), a: *Link, b: *Link) bool {
            return a.ino == b.ino and a.parent.dev == b.parent.dev;
            return a.ino == b.ino and a.parent.pack.dev == b.parent.pack.dev;
        }
    };

@@ -390,61 +414,85 @@ pub const inodes = struct {
    // the list of *Links and their sizes and counts must be in the exact same
    // state as when the stats were added. Hence, any modification to the Link
    // state should be preceded by a setStats(.., false).
    fn setStats(entry: Map.Entry, add: bool) void {
        if (entry.value_ptr.counted == add) return;
        entry.value_ptr.counted = add;
    fn setStats(l: *Link, add: bool) void {
        if (l.pack.counted == add) return;

        var nlink: u31 = 0;
        var inconsistent = false;
        var dirs = std.AutoHashMap(*Dir, u32).init(main.allocator);
        defer dirs.deinit();
        var it = entry.key_ptr.*;
        var it = l;
        while (true) {
            if (it.entry.counted) {
            it.pack.counted = add;
            nlink += 1;
            if (it.pack.nlink != l.pack.nlink) inconsistent = true;
            var parent: ?*Dir = it.parent;
            while (parent) |p| : (parent = p.parent) {
                var de = dirs.getOrPut(p) catch unreachable;
                const de = dirs.getOrPut(p) catch unreachable;
                if (de.found_existing) de.value_ptr.* += 1
                else de.value_ptr.* = 1;
            }
            }
            it = it.next;
            if (it == entry.key_ptr.*)
            if (it == l)
                break;
        }

        if (entry.value_ptr.nlink < nlink) entry.value_ptr.nlink = nlink
        else nlink = entry.value_ptr.nlink;
        // There's not many sensible things we can do when we encounter
        // inconsistent nlink counts. Current approach is to use the number of
        // times we've seen this link in our tree as fallback for when the
        // nlink counts aren't matching. May want to add a warning of some
        // sorts to the UI at some point.
        if (!inconsistent and l.pack.nlink >= nlink) nlink = l.pack.nlink;

        // XXX: We're also not testing for inconsistent entry sizes, instead
        // using the given 'l' size for all Links. Might warrant a warning as
        // well.

        var dir_iter = dirs.iterator();
        if (add) {
            while (dir_iter.next()) |de| {
                de.key_ptr.*.entry.blocks +|= entry.key_ptr.*.entry.blocks;
                de.key_ptr.*.entry.size +|= entry.key_ptr.*.entry.size;
                de.key_ptr.*.entry.pack.blocks +|= l.entry.pack.blocks;
                de.key_ptr.*.entry.size +|= l.entry.size;
                if (de.value_ptr.* < nlink) {
                    de.key_ptr.*.shared_blocks +|= entry.key_ptr.*.entry.blocks;
                    de.key_ptr.*.shared_size +|= entry.key_ptr.*.entry.size;
                    de.key_ptr.*.shared_blocks +|= l.entry.pack.blocks;
                    de.key_ptr.*.shared_size +|= l.entry.size;
                }
            }
        } else {
            while (dir_iter.next()) |de| {
                de.key_ptr.*.entry.blocks -|= entry.key_ptr.*.entry.blocks;
                de.key_ptr.*.entry.size -|= entry.key_ptr.*.entry.size;
                de.key_ptr.*.entry.pack.blocks -|= l.entry.pack.blocks;
                de.key_ptr.*.entry.size -|= l.entry.size;
                if (de.value_ptr.* < nlink) {
                    de.key_ptr.*.shared_blocks -|= entry.key_ptr.*.entry.blocks;
                    de.key_ptr.*.shared_size -|= entry.key_ptr.*.entry.size;
                    de.key_ptr.*.shared_blocks -|= l.entry.pack.blocks;
                    de.key_ptr.*.shared_size -|= l.entry.size;
                }
            }
        }
    }
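
    // Worked example of the `de.value_ptr.* < nlink` test above (my own
    // illustration, not part of the commit): an inode with nlink = 3 has links
    // a and b under dir d and link c elsewhere. The walk visits d twice, and
    // 2 < 3, so the inode is also added to d.shared_blocks/shared_size:
    // deleting d reclaims none of its space, since link c survives. A dir
    // containing all three links gets count 3, fails the test, and only
    // receives the regular entry.blocks/entry.size counts.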

    // counters to track progress for addAllStats()
    pub var add_total: usize = 0;
    pub var add_done: usize = 0;

    pub fn addAllStats() void {
        if (uncounted_full) {
            var it = map.iterator();
            while (it.next()) |e| setStats(e, true);
            add_total = map.count();
            add_done = 0;
            var it = map.keyIterator();
            while (it.next()) |e| {
                setStats(e.*, true);
                add_done += 1;
                if ((add_done & 65) == 0) main.handleEvent(false, false);
            }
        } else {
            var it = uncounted.iterator();
            while (it.next()) |u| if (map.getEntry(u.key_ptr.*)) |e| setStats(e, true);
            add_total = uncounted.count();
            add_done = 0;
            var it = uncounted.keyIterator();
            while (it.next()) |u| {
                if (map.getKey(u.*)) |e| setStats(e, true);
                add_done += 1;
                if ((add_done & 65) == 0) main.handleEvent(false, false);
            }
        }
        uncounted_full = false;
        if (uncounted.count() > 0)
@@ -457,8 +505,9 @@ pub var root: *Dir = undefined;


test "entry" {
    var e = Entry.create(.file, false, "hello") catch unreachable;
    std.debug.assert(e.etype == .file);
    std.debug.assert(!e.isext);
    std.testing.expectEqualStrings(e.name(), "hello");
    var e = Entry.create(std.testing.allocator, .reg, false, "hello");
    defer e.destroy(std.testing.allocator);
    try std.testing.expectEqual(e.pack.etype, .reg);
    try std.testing.expect(!e.pack.isext);
    try std.testing.expectEqualStrings(e.name(), "hello");
}
30 src/ncurses_refs.c
@@ -1,30 +0,0 @@
/* SPDX-FileCopyrightText: 2021-2022 Yoran Heling <projects@yorhel.nl>
 * SPDX-License-Identifier: MIT
 */

#include <curses.h>

/* Zig @cImport() has problems with the ACS_* macros. Two, in fact:
 *
 * 1. Naively using the ACS_* macros results in:
 *
 *      error: cannot store runtime value in compile time variable
 *              return acs_map[NCURSES_CAST(u8, c)];
 *                             ^
 *    That error doesn't make much sense to me, but it might be
 *    related to https://github.com/ziglang/zig/issues/5344?
 *
 * 2. The 'acs_map' extern variable isn't being linked correctly?
 *    Haven't investigated this one deeply enough yet, but attempting
 *    to dereference acs_map from within Zig leads to a segfault;
 *    its pointer value doesn't make any sense.
 */
chtype ncdu_acs_ulcorner() { return ACS_ULCORNER; }
chtype ncdu_acs_llcorner() { return ACS_LLCORNER; }
chtype ncdu_acs_urcorner() { return ACS_URCORNER; }
chtype ncdu_acs_lrcorner() { return ACS_LRCORNER; }
chtype ncdu_acs_hline()    { return ACS_HLINE; }
chtype ncdu_acs_vline()    { return ACS_VLINE; }

/* https://github.com/ziglang/zig/issues/8947 */
void ncdu_init_pair(short idx, short fg, short bg) { init_pair(idx, fg, bg); }
1277 src/scan.zig
File diff suppressed because it is too large
498 src/sink.zig Normal file
@@ -0,0 +1,498 @@
// SPDX-FileCopyrightText: Yorhel <projects@yorhel.nl>
// SPDX-License-Identifier: MIT

const std = @import("std");
const main = @import("main.zig");
const model = @import("model.zig");
const mem_src = @import("mem_src.zig");
const mem_sink = @import("mem_sink.zig");
const json_export = @import("json_export.zig");
const bin_export = @import("bin_export.zig");
const ui = @import("ui.zig");
const util = @import("util.zig");

// Terminology note:
// "source" is where scan results come from, these are scan.zig, mem_src.zig
// and json_import.zig.
// "sink" is where scan results go to. This file provides a generic sink API
// for sources to use. The API forwards the results to specific sink
// implementations (mem_sink.zig or json_export.zig) and provides progress
// updates.

// API for sources:
//
// Single-threaded:
//
//   createThreads(1)
//   dir = createRoot(name, stat)
//   dir.addSpecial(name, opt)
//   dir.addFile(name, stat)
//   sub = dir.addDir(name, stat)
//   (no dir.stuff here)
//   sub.addstuff();
//   sub.unref();
//   dir.unref();
//   done()
//
// Multi-threaded interleaving:
//
//   createThreads(n)
//   dir = createRoot(name, stat)
//   dir.addSpecial(name, opt)
//   dir.addFile(name, stat)
//   sub = dir.addDir(...)
//   sub.addstuff();
//   sub2 = dir.addDir(..);
//   sub.unref();
//   dir.unref(); // <- no more direct descendants for x, but subdirs could still be active
//   sub2.addStuff();
//   sub2.unref(); // <- this is where 'dir' is really done.
//   done()
//
// Rule:
// No concurrent method calls on a single Dir object, but objects may be passed between threads.

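// A concrete sketch of the single-threaded flow (my own illustration, not part
// of the commit; root_stat and file_stat are hypothetical Stat values, and
// addStat() is the concrete method behind the "addFile" shorthand above):
//
//   const threads = createThreads(1);
//   const root = createRoot("/some/path", &root_stat);
//   root.addStat(&threads[0], "file.txt", &file_stat);
//   root.unref(&threads[0]);
//   done();
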
// Concise stat struct for fields we're interested in, with the types used by the model.
pub const Stat = struct {
    etype: model.EType = .reg,
    blocks: model.Blocks = 0,
    size: u64 = 0,
    dev: u64 = 0,
    ino: u64 = 0,
    nlink: u31 = 0,
    ext: model.Ext = .{},
};


pub const Dir = struct {
    refcnt: std.atomic.Value(usize) = std.atomic.Value(usize).init(1),
    name: []const u8,
    parent: ?*Dir,
    out: Out,

    const Out = union(enum) {
        mem: mem_sink.Dir,
        json: json_export.Dir,
        bin: bin_export.Dir,
    };

    pub fn addSpecial(d: *Dir, t: *Thread, name: []const u8, sp: model.EType) void {
        std.debug.assert(@intFromEnum(sp) < 0); // >=0 aren't "special"
        _ = t.files_seen.fetchAdd(1, .monotonic);
        switch (d.out) {
            .mem => |*m| m.addSpecial(&t.sink.mem, name, sp),
            .json => |*j| j.addSpecial(name, sp),
            .bin => |*b| b.addSpecial(&t.sink.bin, name, sp),
        }
        if (sp == .err) {
            global.last_error_lock.lock();
            defer global.last_error_lock.unlock();
            if (global.last_error) |p| main.allocator.free(p);
            const p = d.path();
            global.last_error = std.fs.path.joinZ(main.allocator, &.{ p, name }) catch unreachable;
            main.allocator.free(p);
        }
    }

    pub fn addStat(d: *Dir, t: *Thread, name: []const u8, stat: *const Stat) void {
        _ = t.files_seen.fetchAdd(1, .monotonic);
        _ = t.addBytes((stat.blocks *| 512) / @max(1, stat.nlink));
        std.debug.assert(stat.etype != .dir);
        switch (d.out) {
            .mem => |*m| _ = m.addStat(&t.sink.mem, name, stat),
            .json => |*j| j.addStat(name, stat),
            .bin => |*b| b.addStat(&t.sink.bin, name, stat),
        }
    }

    pub fn addDir(d: *Dir, t: *Thread, name: []const u8, stat: *const Stat) *Dir {
        _ = t.files_seen.fetchAdd(1, .monotonic);
        _ = t.addBytes(stat.blocks *| 512);
        std.debug.assert(stat.etype == .dir);
        std.debug.assert(d.out != .json or d.refcnt.load(.monotonic) == 1);

        const s = main.allocator.create(Dir) catch unreachable;
        s.* = .{
            .name = main.allocator.dupe(u8, name) catch unreachable,
            .parent = d,
            .out = switch (d.out) {
                .mem => |*m| .{ .mem = m.addDir(&t.sink.mem, name, stat) },
                .json => |*j| .{ .json = j.addDir(name, stat) },
                .bin => |*b| .{ .bin = b.addDir(stat) },
            },
        };
        d.ref();
        return s;
    }

    pub fn setReadError(d: *Dir, t: *Thread) void {
        _ = t;
        switch (d.out) {
            .mem => |*m| m.setReadError(),
            .json => |*j| j.setReadError(),
            .bin => |*b| b.setReadError(),
        }
        global.last_error_lock.lock();
        defer global.last_error_lock.unlock();
        if (global.last_error) |p| main.allocator.free(p);
        global.last_error = d.path();
    }

    fn path(d: *Dir) [:0]u8 {
        var components: std.ArrayListUnmanaged([]const u8) = .empty;
        defer components.deinit(main.allocator);
        var it: ?*Dir = d;
        while (it) |e| : (it = e.parent) components.append(main.allocator, e.name) catch unreachable;

        var out: std.ArrayListUnmanaged(u8) = .empty;
        var i: usize = components.items.len-1;
        while (true) {
            if (i != components.items.len-1 and !(out.items.len != 0 and out.items[out.items.len-1] == '/'))
                out.append(main.allocator, '/') catch unreachable;
            out.appendSlice(main.allocator, components.items[i]) catch unreachable;
            if (i == 0) break;
            i -= 1;
        }
        return out.toOwnedSliceSentinel(main.allocator, 0) catch unreachable;
    }

    fn ref(d: *Dir) void {
        _ = d.refcnt.fetchAdd(1, .monotonic);
    }

    pub fn unref(d: *Dir, t: *Thread) void {
        if (d.refcnt.fetchSub(1, .release) != 1) return;
        _ = d.refcnt.load(.acquire);

        switch (d.out) {
            .mem => |*m| m.final(if (d.parent) |p| &p.out.mem else null),
            .json => |*j| j.final(),
            .bin => |*b| b.final(&t.sink.bin, d.name, if (d.parent) |p| &p.out.bin else null),
        }

        if (d.parent) |p| p.unref(t);
        if (d.name.len > 0) main.allocator.free(d.name);
        main.allocator.destroy(d);
    }
};


pub const Thread = struct {
    current_dir: ?*Dir = null,
    lock: std.Thread.Mutex = .{},
    // On 32-bit architectures, bytes_seen is protected by the above mutex instead.
    bytes_seen: std.atomic.Value(u64) = std.atomic.Value(u64).init(0),
    files_seen: std.atomic.Value(u32) = std.atomic.Value(u32).init(0),

    sink: union {
        mem: mem_sink.Thread,
        json: void,
        bin: bin_export.Thread,
    } = .{.mem = .{}},

    fn addBytes(t: *Thread, bytes: u64) void {
        if (@bitSizeOf(usize) >= 64) _ = t.bytes_seen.fetchAdd(bytes, .monotonic)
        else {
            t.lock.lock();
            defer t.lock.unlock();
            t.bytes_seen.raw += bytes;
        }
    }

    fn getBytes(t: *Thread) u64 {
        if (@bitSizeOf(usize) >= 64) return t.bytes_seen.load(.monotonic)
        else {
            t.lock.lock();
            defer t.lock.unlock();
            return t.bytes_seen.raw;
        }
    }

    pub fn setDir(t: *Thread, d: ?*Dir) void {
        t.lock.lock();
        defer t.lock.unlock();
        t.current_dir = d;
    }
};


pub const global = struct {
    pub var state: enum { done, err, zeroing, hlcnt, running } = .running;
    pub var threads: []Thread = undefined;
    pub var sink: enum { json, mem, bin } = .mem;

    pub var last_error: ?[:0]u8 = null;
    var last_error_lock = std.Thread.Mutex{};
    var need_confirm_quit = false;
};


// Must be the first thing to call from a source; initializes global state.
pub fn createThreads(num: usize) []Thread {
    // JSON export does not support multiple threads, scan into memory first.
    if (global.sink == .json and num > 1) {
        global.sink = .mem;
        mem_sink.global.stats = false;
    }

    global.state = .running;
    if (global.last_error) |p| main.allocator.free(p);
    global.last_error = null;
    global.threads = main.allocator.alloc(Thread, num) catch unreachable;
    for (global.threads) |*t| t.* = .{
        .sink = switch (global.sink) {
            .mem => .{ .mem = .{} },
            .json => .{ .json = {} },
            .bin => .{ .bin = .{} },
        },
    };
    return global.threads;
}


// Must be the last thing to call from a source.
pub fn done() void {
    switch (global.sink) {
        .mem => mem_sink.done(),
        .json => json_export.done(),
        .bin => bin_export.done(global.threads),
    }
    global.state = .done;
    main.allocator.free(global.threads);

    // We scanned into memory, now we need to scan from memory to JSON
    if (global.sink == .mem and !mem_sink.global.stats) {
        global.sink = .json;
        mem_src.run(model.root);
    }

    // Clear the screen when done.
    if (main.config.scan_ui == .line) main.handleEvent(false, true);
}


pub fn createRoot(path: []const u8, stat: *const Stat) *Dir {
    const d = main.allocator.create(Dir) catch unreachable;
    d.* = .{
        .name = main.allocator.dupe(u8, path) catch unreachable,
        .parent = null,
        .out = switch (global.sink) {
            .mem => .{ .mem = mem_sink.createRoot(path, stat) },
            .json => .{ .json = json_export.createRoot(path, stat) },
            .bin => .{ .bin = bin_export.createRoot(stat, global.threads) },
        },
    };
    return d;
}


fn drawConsole() void {
    const st = struct {
        var ansi: ?bool = null;
        var lines_written: usize = 0;
    };
    const stderr = if (@hasDecl(std.io, "getStdErr")) std.io.getStdErr() else std.fs.File.stderr();
    const ansi = st.ansi orelse blk: {
        const t = stderr.supportsAnsiEscapeCodes();
        st.ansi = t;
        break :blk t;
    };

    var buf: [4096]u8 = undefined;
    var strm = std.io.fixedBufferStream(buf[0..]);
    var wr = strm.writer();
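    // "\x1b[1F" moves the cursor to the start of the previous line and
    // "\x1b[2K" erases that entire line; together they clear the progress
    // lines written by the previous drawConsole() call.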
    while (ansi and st.lines_written > 0) {
        wr.writeAll("\x1b[1F\x1b[2K") catch {};
        st.lines_written -= 1;
    }

    if (global.state == .hlcnt) {
        wr.writeAll("Counting hardlinks...") catch {};
        if (model.inodes.add_total > 0)
            wr.print(" {} / {}", .{ model.inodes.add_done, model.inodes.add_total }) catch {};
        wr.writeByte('\n') catch {};
        st.lines_written += 1;

    } else if (global.state == .running) {
        var bytes: u64 = 0;
        var files: u64 = 0;
        for (global.threads) |*t| {
            bytes +|= t.getBytes();
            files += t.files_seen.load(.monotonic);
        }
        const r = ui.FmtSize.fmt(bytes);
        wr.print("{} files / {s}{s}\n", .{files, r.num(), r.unit}) catch {};
        st.lines_written += 1;

        for (global.threads, 0..) |*t, i| {
            const dir = blk: {
                t.lock.lock();
                defer t.lock.unlock();
                break :blk if (t.current_dir) |d| d.path() else null;
            };
            wr.print(" #{}: {s}\n", .{i+1, ui.shorten(ui.toUtf8(dir orelse "(waiting)"), 73)}) catch {};
            st.lines_written += 1;
            if (dir) |p| main.allocator.free(p);
        }
    }

    stderr.writeAll(strm.getWritten()) catch {};
}


fn drawProgress() void {
    const st = struct { var animation_pos: usize = 0; };

    var bytes: u64 = 0;
    var files: u64 = 0;
    for (global.threads) |*t| {
        bytes +|= t.getBytes();
        files += t.files_seen.load(.monotonic);
    }

    ui.init();
    const width = ui.cols -| 5;
    const numthreads: u32 = @intCast(@min(global.threads.len, @max(1, ui.rows -| 10)));
    const box = ui.Box.create(8 + numthreads, width, "Scanning...");
    box.move(2, 2);
    ui.addstr("Total items: ");
    ui.addnum(.default, files);

    if (width > 48) {
        box.move(2, 30);
        ui.addstr("size: ");
        ui.addsize(.default, bytes);
    }

    for (0..numthreads) |i| {
        box.move(3+@as(u32, @intCast(i)), 4);
        const dir = blk: {
            const t = &global.threads[i];
            t.lock.lock();
            defer t.lock.unlock();
            break :blk if (t.current_dir) |d| d.path() else null;
        };
        ui.addstr(ui.shorten(ui.toUtf8(dir orelse "(waiting)"), width -| 6));
        if (dir) |p| main.allocator.free(p);
    }

    blk: {
        global.last_error_lock.lock();
        defer global.last_error_lock.unlock();
        const err = global.last_error orelse break :blk;
        box.move(4 + numthreads, 2);
        ui.style(.bold);
        ui.addstr("Warning: ");
        ui.style(.default);
        ui.addstr("error scanning ");
        ui.addstr(ui.shorten(ui.toUtf8(err), width -| 28));
        box.move(5 + numthreads, 3);
        ui.addstr("some directory sizes may not be correct.");
    }

    if (global.need_confirm_quit) {
        box.move(6 + numthreads, width -| 20);
        ui.addstr("Press ");
        ui.style(.key);
        ui.addch('y');
        ui.style(.default);
        ui.addstr(" to confirm");
    } else {
        box.move(6 + numthreads, width -| 18);
        ui.addstr("Press ");
        ui.style(.key);
        ui.addch('q');
        ui.style(.default);
        ui.addstr(" to abort");
    }

    if (main.config.update_delay < std.time.ns_per_s and width > 40) {
        const txt = "Scanning...";
        st.animation_pos += 1;
        if (st.animation_pos >= txt.len*2) st.animation_pos = 0;
        if (st.animation_pos < txt.len) {
            box.move(6 + numthreads, 2);
            for (txt[0..st.animation_pos + 1]) |t| ui.addch(t);
        } else {
            var i: u32 = txt.len-1;
            while (i > st.animation_pos-txt.len) : (i -= 1) {
                box.move(6 + numthreads, 2+i);
                ui.addch(txt[i]);
            }
        }
    }
}


fn drawError() void {
    const width = ui.cols -| 5;
    const box = ui.Box.create(6, width, "Scan error");

    box.move(2, 2);
    ui.addstr("Unable to open directory:");
    box.move(3, 4);
    ui.addstr(ui.shorten(ui.toUtf8(global.last_error.?), width -| 10));

    box.move(4, width -| 27);
    ui.addstr("Press any key to continue");
}


fn drawMessage(msg: []const u8) void {
    const width = ui.cols -| 5;
    const box = ui.Box.create(4, width, "Scan error");
    box.move(2, 2);
    ui.addstr(msg);
}


pub fn draw() void {
    switch (main.config.scan_ui.?) {
        .none => {},
        .line => drawConsole(),
        .full => {
            ui.init();
            switch (global.state) {
                .done => {},
                .err => drawError(),
                .zeroing => {
                    const box = ui.Box.create(4, ui.cols -| 5, "Initializing");
                    box.move(2, 2);
                    ui.addstr("Clearing directory counts...");
                },
                .hlcnt => {
                    const box = ui.Box.create(4, ui.cols -| 5, "Finalizing");
                    box.move(2, 2);
                    ui.addstr("Counting hardlinks... ");
                    if (model.inodes.add_total > 0) {
                        ui.addnum(.default, model.inodes.add_done);
                        ui.addstr(" / ");
                        ui.addnum(.default, model.inodes.add_total);
                    }
                },
                .running => drawProgress(),
            }
        },
    }
}


pub fn keyInput(ch: i32) void {
    switch (global.state) {
        .done => {},
        .err => main.state = .browse,
        .zeroing => {},
        .hlcnt => {},
        .running => {
            switch (ch) {
                'q' => {
                    if (main.config.confirm_quit) global.need_confirm_quit = !global.need_confirm_quit
                    else ui.quit();
                },
                'y', 'Y' => if (global.need_confirm_quit) ui.quit(),
                else => global.need_confirm_quit = false,
            }
        },
    }
}
303 src/ui.zig
@@ -1,4 +1,4 @@
// SPDX-FileCopyrightText: 2021-2022 Yoran Heling <projects@yorhel.nl>
// SPDX-FileCopyrightText: Yorhel <projects@yorhel.nl>
// SPDX-License-Identifier: MIT

// Ncurses wrappers and TUI helper functions.
@@ -6,25 +6,18 @@
const std = @import("std");
const main = @import("main.zig");
const util = @import("util.zig");

pub const c = @cImport({
    @cDefine("_XOPEN_SOURCE", "1");
    @cInclude("stdio.h");
    @cInclude("string.h");
    @cInclude("curses.h");
    @cInclude("time.h");
    @cInclude("wchar.h");
    @cInclude("locale.h");
});
const c = @import("c.zig").c;

pub var inited: bool = false;
pub var main_thread: std.Thread.Id = undefined;
pub var oom_threads = std.atomic.Value(usize).init(0);

pub var rows: u32 = undefined;
pub var cols: u32 = undefined;

pub fn die(comptime fmt: []const u8, args: anytype) noreturn {
    deinit();
    _ = std.io.getStdErr().writer().print(fmt, args) catch {};
    std.debug.print(fmt, args);
    std.process.exit(1);
}

@@ -33,6 +26,8 @@ pub fn quit() noreturn {
    std.process.exit(0);
}

const sleep = if (@hasDecl(std.time, "sleep")) std.time.sleep else std.Thread.sleep;

// Should be called when malloc fails. Will show a message to the user, wait
// for a second and return to give it another try.
// Glitch: this function may be called while we're in the process of drawing
@@ -43,12 +38,19 @@ pub fn quit() noreturn {
// no clue if ncurses will consistently report OOM, but we're not handling that
// right now.
pub fn oom() void {
    @branchHint(.cold);
    if (main_thread == std.Thread.getCurrentId()) {
        const haveui = inited;
        deinit();
        _ = std.io.getStdErr().writer().writeAll("\x1b7\x1b[JOut of memory, trying again in 1 second. Hit Ctrl-C to abort.\x1b8") catch {};
        std.time.sleep(std.time.ns_per_s);
        std.debug.print("\x1b7\x1b[JOut of memory, trying again in 1 second. Hit Ctrl-C to abort.\x1b8", .{});
        sleep(std.time.ns_per_s);
        if (haveui)
            init();
    } else {
        _ = oom_threads.fetchAdd(1, .monotonic);
        sleep(std.time.ns_per_s);
        _ = oom_threads.fetchSub(1, .monotonic);
    }
}
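
// A typical call site retries the allocation after oom() returns; a sketch of
// that pattern (my own illustration, mirroring the retry loop in model.zig's
// alloc()):
//
//   const buf = blk: while (true) {
//       if (main.allocator.alloc(u8, 4096)) |p| break :blk p
//       else |_| {}
//       ui.oom();
//   };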
|
||||
// Dumb strerror() alternative for Zig file I/O, not complete.
|
||||
|
|
@ -73,11 +75,12 @@ pub fn errorString(e: anyerror) [:0]const u8 {
|
|||
error.ReadOnlyFilesystem => "Read-only filesystem",
|
||||
error.SymlinkLoop => "Symlink loop",
|
||||
error.SystemFdQuotaExceeded => "System file descriptor limit exceeded",
|
||||
error.EndOfStream => "Unexpected end of file",
|
||||
else => @errorName(e),
|
||||
};
|
||||
}
|
||||
|
||||
var to_utf8_buf = std.ArrayList(u8).init(main.allocator);
|
||||
var to_utf8_buf: std.ArrayListUnmanaged(u8) = .empty;
|
||||
|
||||
fn toUtf8BadChar(ch: u8) bool {
|
||||
return switch (ch) {
|
||||
|
|
@ -104,19 +107,19 @@ pub fn toUtf8(in: [:0]const u8) [:0]const u8 {
|
|||
if (std.unicode.utf8ByteSequenceLength(in[i])) |cp_len| {
|
||||
if (!toUtf8BadChar(in[i]) and i + cp_len <= in.len) {
|
||||
if (std.unicode.utf8Decode(in[i .. i + cp_len])) |_| {
|
||||
to_utf8_buf.appendSlice(in[i .. i + cp_len]) catch unreachable;
|
||||
to_utf8_buf.appendSlice(main.allocator, in[i .. i + cp_len]) catch unreachable;
|
||||
i += cp_len;
|
||||
continue;
|
||||
} else |_| {}
|
||||
}
|
||||
} else |_| {}
|
||||
to_utf8_buf.writer().print("\\x{X:0>2}", .{in[i]}) catch unreachable;
|
||||
to_utf8_buf.writer(main.allocator).print("\\x{X:0>2}", .{in[i]}) catch unreachable;
|
||||
i += 1;
|
||||
}
|
||||
return util.arrayListBufZ(&to_utf8_buf);
|
||||
return util.arrayListBufZ(&to_utf8_buf, main.allocator);
|
||||
}
|
||||
|
||||
var shorten_buf = std.ArrayList(u8).init(main.allocator);
|
||||
var shorten_buf: std.ArrayListUnmanaged(u8) = .empty;
|
||||
|
||||
// Shorten the given string to fit in the given number of columns.
|
||||
// If the string is too long, only the prefix and suffix will be printed, with '...' in between.
|
||||
|
|
@@ -128,42 +131,43 @@ pub fn shorten(in: [:0]const u8, max_width: u32) [:0]const u8 {
     var total_width: u32 = 0;
     var prefix_width: u32 = 0;
     var prefix_end: u32 = 0;
+    var prefix_done = false;
     var it = std.unicode.Utf8View.initUnchecked(in).iterator();
     while (it.nextCodepoint()) |cp| {
         // XXX: libc assumption: wchar_t is a Unicode point. True for most modern libcs?
         // (The "proper" way is to use mbtowc(), but I'd rather port the musl wcwidth implementation to Zig so that I *know* it'll be Unicode.
         // On the other hand, ncurses also use wcwidth() so that would cause duplicated code. Ugh)
         const cp_width_ = c.wcwidth(cp);
-        const cp_width = @intCast(u32, if (cp_width_ < 0) 1 else cp_width_);
+        const cp_width: u32 = @intCast(if (cp_width_ < 0) 0 else cp_width_);
         const cp_len = std.unicode.utf8CodepointSequenceLength(cp) catch unreachable;
         total_width += cp_width;
-        if (prefix_width + cp_width <= @divFloor(max_width-1, 2)-1) {
+        if (!prefix_done and prefix_width + cp_width <= @divFloor(max_width-1, 2)-1) {
             prefix_width += cp_width;
             prefix_end += cp_len;
-            continue;
-        }
+        } else
+            prefix_done = true;
     }
     if (total_width <= max_width) return in;
 
     shorten_buf.shrinkRetainingCapacity(0);
-    shorten_buf.appendSlice(in[0..prefix_end]) catch unreachable;
-    shorten_buf.appendSlice("...") catch unreachable;
+    shorten_buf.appendSlice(main.allocator, in[0..prefix_end]) catch unreachable;
+    shorten_buf.appendSlice(main.allocator, "...") catch unreachable;
 
     var start_width: u32 = prefix_width;
     var start_len: u32 = prefix_end;
     it = std.unicode.Utf8View.initUnchecked(in[prefix_end..]).iterator();
     while (it.nextCodepoint()) |cp| {
         const cp_width_ = c.wcwidth(cp);
-        const cp_width = @intCast(u32, if (cp_width_ < 0) 1 else cp_width_);
+        const cp_width: u32 = @intCast(if (cp_width_ < 0) 0 else cp_width_);
         const cp_len = std.unicode.utf8CodepointSequenceLength(cp) catch unreachable;
         start_width += cp_width;
         start_len += cp_len;
         if (total_width - start_width <= max_width - prefix_width - 3) {
-            shorten_buf.appendSlice(in[start_len..]) catch unreachable;
+            shorten_buf.appendSlice(main.allocator, in[start_len..]) catch unreachable;
             break;
         }
     }
-    return util.arrayListBufZ(&shorten_buf);
+    return util.arrayListBufZ(&shorten_buf, main.allocator);
 }
 
 fn shortenTest(in: [:0]const u8, max_width: u32, out: [:0]const u8) !void {
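
Two behavioral fixes ride along with the allocator change in shorten(): codepoints with a negative wcwidth() (unprintable) now count as 0 columns instead of 1, and the new prefix_done flag keeps the prefix from growing again after a codepoint has overflowed the prefix budget, which could previously misplace the '...' cut point. The width accounting in isolation (a sketch, not ncdu's code; assumes wcwidth() is reachable through the same c binding):

    const std = @import("std");
    const c = @import("c.zig").c; // assumed binding exposing wcwidth()

    // Counts terminal columns the way the loops above do: unprintable
    // codepoints (negative wcwidth) contribute 0 columns.
    fn displayWidth(utf8: []const u8) u32 {
        var w: u32 = 0;
        // caller must guarantee valid UTF-8, as toUtf8() does in ncdu
        var it = std.unicode.Utf8View.initUnchecked(utf8).iterator();
        while (it.nextCodepoint()) |cp| {
            const cw = c.wcwidth(cp);
            w += @intCast(if (cw < 0) 0 else cw);
        }
        return w;
    }
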
@@ -185,20 +189,13 @@ test "shorten" {
     try t("AaBCDEFGH", 8, "A...H"); // could optimize this, but w/e
     try t("ABCDEFGaH", 8, "A...aH");
     try t("ABCDEFGH", 15, "ABC...FGH");
+    try t("❤︎a❤︎a❤︎a", 5, "❤︎...︎a"); // Variation selectors; not great, there's an additional U+FE0E before 'a'.
+    try t("ą́ą́ą́ą́ą́ą́", 5, "ą́...̨́ą́"); // Combining marks, similarly bad.
 }
 
-// ncurses_refs.c
-extern fn ncdu_acs_ulcorner() c.chtype;
-extern fn ncdu_acs_llcorner() c.chtype;
-extern fn ncdu_acs_urcorner() c.chtype;
-extern fn ncdu_acs_lrcorner() c.chtype;
-extern fn ncdu_acs_hline() c.chtype;
-extern fn ncdu_acs_vline() c.chtype;
-extern fn ncdu_init_pair(idx: c_short, fg: c_short, bg: c_short) void;
-
 const StyleAttr = struct { fg: i16, bg: i16, attr: u32 };
 const StyleDef = struct {
-    name: []const u8,
+    name: [:0]const u8,
     off: StyleAttr,
     dark: StyleAttr,
     darkbg: StyleAttr,
@@ -283,20 +280,18 @@ const styles = [_]StyleDef{
 };
 
 pub const Style = lbl: {
-    var fields: [styles.len]std.builtin.TypeInfo.EnumField = undefined;
-    var decls = [_]std.builtin.TypeInfo.Declaration{};
-    inline for (styles) |s, i| {
-        fields[i] = .{
+    var fields: [styles.len]std.builtin.Type.EnumField = undefined;
+    for (&fields, styles, 0..) |*field, s, i| {
+        field.* = .{
             .name = s.name,
             .value = i,
         };
     }
     break :lbl @Type(.{
-        .Enum = .{
-            .layout = .Auto,
+        .@"enum" = .{
             .tag_type = u8,
             .fields = &fields,
-            .decls = &decls,
+            .decls = &[_]std.builtin.Type.Declaration{},
             .is_exhaustive = true,
         }
     });
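
The block above builds the Style enum from the styles table at comptime; the new spelling follows Zig's rename of std.builtin.TypeInfo to std.builtin.Type, the lower-case .@"enum" field, and multi-object for loops replacing the old |s, i| capture. The same reification trick in isolation (illustrative names, not ncdu code):

    const std = @import("std");

    // Builds `enum(u8) { default, dir }` from a comptime list of names.
    fn NamedEnum(comptime names: []const [:0]const u8) type {
        var fields: [names.len]std.builtin.Type.EnumField = undefined;
        for (&fields, names, 0..) |*f, n, i|
            f.* = .{ .name = n, .value = i };
        return @Type(.{ .@"enum" = .{
            .tag_type = u8,
            .fields = &fields,
            .decls = &[_]std.builtin.Type.Declaration{},
            .is_exhaustive = true,
        } });
    }

    test "NamedEnum" {
        const E = NamedEnum(&.{ "default", "dir" });
        try std.testing.expect(@intFromEnum(E.dir) == 1);
    }
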
@@ -333,23 +328,23 @@ pub const Bg = enum {
 
 fn updateSize() void {
     // getmax[yx] macros are marked as "legacy", but Zig can't deal with the "proper" getmaxyx macro.
-    rows = @intCast(u32, c.getmaxy(c.stdscr));
-    cols = @intCast(u32, c.getmaxx(c.stdscr));
+    rows = @intCast(c.getmaxy(c.stdscr));
+    cols = @intCast(c.getmaxx(c.stdscr));
 }
 
 fn clearScr() void {
     // Send a "clear from cursor to end of screen" instruction, to clear a
     // potential line left behind from scanning in -1 mode.
-    _ = std.io.getStdErr().write("\x1b[J") catch {};
+    std.debug.print("\x1b[J", .{});
 }
 
 pub fn init() void {
     if (inited) return;
     clearScr();
     if (main.config.nc_tty) {
-        var tty = c.fopen("/dev/tty", "r+");
-        if (tty == null) die("Error opening /dev/tty: {s}.\n", .{ c.strerror(@enumToInt(std.c.getErrno(-1))) });
-        var term = c.newterm(null, tty, tty);
+        const tty = c.fopen("/dev/tty", "r+");
+        if (tty == null) die("Error opening /dev/tty: {s}.\n", .{ c.strerror(@intFromEnum(std.posix.errno(-1))) });
+        const term = c.newterm(null, tty, tty);
         if (term == null) die("Error initializing ncurses.\n", .{});
         _ = c.set_term(term);
     } else {
@@ -363,8 +358,8 @@ pub fn init() void {
 
     _ = c.start_color();
     _ = c.use_default_colors();
-    for (styles) |s, i| _ = ncdu_init_pair(@intCast(i16, i+1), s.style().fg, s.style().bg);
-
+    for (styles, 0..) |s, i| _ = c.init_pair(@as(i16, @intCast(i+1)), s.style().fg, s.style().bg);
+    _ = c.bkgd(@intCast(c.COLOR_PAIR(@intFromEnum(Style.default)+1)));
     inited = true;
 }
 
@@ -380,18 +375,18 @@ pub fn deinit() void {
 }
 
 pub fn style(s: Style) void {
-    _ = c.attr_set(styles[@enumToInt(s)].style().attr, @enumToInt(s)+1, null);
+    _ = c.attr_set(styles[@intFromEnum(s)].style().attr, @intFromEnum(s)+1, null);
 }
 
 pub fn move(y: u32, x: u32) void {
-    _ = c.move(@intCast(i32, y), @intCast(i32, x));
+    _ = c.move(@as(i32, @intCast(y)), @as(i32, @intCast(x)));
 }
 
 // Wraps to the next line if the text overflows, not sure how to disable that.
 // (Well, addchstr() does that, but not entirely sure I want to go that way.
 // Does that even work with UTF-8? Or do I really need to go wchar madness?)
 pub fn addstr(s: [:0]const u8) void {
-    _ = c.addstr(s);
+    _ = c.addstr(s.ptr);
 }
 
 // Not to be used for strings that may end up >256 bytes.
@@ -410,39 +405,86 @@ pub fn addch(ch: c.chtype) void {
 // unit = " XB" or " XiB"
 // Concatenated, these take 8 columns in SI mode or 9 otherwise.
 pub const FmtSize = struct {
-    buf: [8:0]u8,
+    buf: [5:0]u8,
     unit: [:0]const u8,
 
-    pub fn fmt(v: u64) @This() {
-        var r: @This() = undefined;
-        var f = @intToFloat(f32, v);
-        if (main.config.si) {
-            if(f < 1000.0)    { r.unit = "  B"; }
-            else if(f < 1e6)  { r.unit = " KB"; f /= 1e3; }
-            else if(f < 1e9)  { r.unit = " MB"; f /= 1e6; }
-            else if(f < 1e12) { r.unit = " GB"; f /= 1e9; }
-            else if(f < 1e15) { r.unit = " TB"; f /= 1e12; }
-            else if(f < 1e18) { r.unit = " PB"; f /= 1e15; }
-            else              { r.unit = " EB"; f /= 1e18; }
-        }
-        else {
-            if(f < 1000.0)       { r.unit = "   B"; }
-            else if(f < 1023e3)  { r.unit = " KiB"; f /= 1024.0; }
-            else if(f < 1023e6)  { r.unit = " MiB"; f /= 1048576.0; }
-            else if(f < 1023e9)  { r.unit = " GiB"; f /= 1073741824.0; }
-            else if(f < 1023e12) { r.unit = " TiB"; f /= 1099511627776.0; }
-            else if(f < 1023e15) { r.unit = " PiB"; f /= 1125899906842624.0; }
-            else                 { r.unit = " EiB"; f /= 1152921504606846976.0; }
-        }
-        _ = std.fmt.bufPrintZ(&r.buf, "{d:>5.1}", .{f}) catch unreachable;
-        return r;
+    fn init(u: [:0]const u8, n: u64, mul: u64, div: u64) FmtSize {
+        return .{
+            .unit = u,
+            .buf = util.fmt5dec(@intCast(((n*mul) +| (div / 2)) / div)),
+        };
     }
 
-    pub fn num(self: *const @This()) [:0]const u8 {
-        return std.mem.sliceTo(&self.buf, 0);
+    pub fn fmt(v: u64) FmtSize {
+        if (main.config.si) {
+            if (v < 1000) { return FmtSize.init("  B", v, 10, 1); }
+            else if (v < 999_950) { return FmtSize.init(" kB", v, 1, 100); }
+            else if (v < 999_950_000) { return FmtSize.init(" MB", v, 1, 100_000); }
+            else if (v < 999_950_000_000) { return FmtSize.init(" GB", v, 1, 100_000_000); }
+            else if (v < 999_950_000_000_000) { return FmtSize.init(" TB", v, 1, 100_000_000_000); }
+            else if (v < 999_950_000_000_000_000) { return FmtSize.init(" PB", v, 1, 100_000_000_000_000); }
+            else { return FmtSize.init(" EB", v, 1, 100_000_000_000_000_000); }
+        } else {
+            // Cutoff values are obtained by calculating 999.949999999999999999999999 * div with an infinite-precision calculator.
+            // (Admittedly, this precision is silly)
+            if (v < 1000) { return FmtSize.init("   B", v, 10, 1); }
+            else if (v < 1023949) { return FmtSize.init(" KiB", v, 10, 1<<10); }
+            else if (v < 1048523572) { return FmtSize.init(" MiB", v, 10, 1<<20); }
+            else if (v < 1073688136909) { return FmtSize.init(" GiB", v, 10, 1<<30); }
+            else if (v < 1099456652194612) { return FmtSize.init(" TiB", v, 10, 1<<40); }
+            else if (v < 1125843611847281869) { return FmtSize.init(" PiB", v, 10, 1<<50); }
+            else { return FmtSize.init(" EiB", v, 1, (1<<60)/10); }
+        }
     }
 
+    pub fn num(self: *const FmtSize) [:0]const u8 {
+        return &self.buf;
+    }
+
+    fn testEql(self: FmtSize, exp: []const u8) !void {
+        var buf: [10]u8 = undefined;
+        try std.testing.expectEqualStrings(exp, try std.fmt.bufPrint(&buf, "{s}{s}", .{ self.num(), self.unit }));
+    }
 };
 
+test "fmtsize" {
+    main.config.si = true;
+    try FmtSize.fmt(0).testEql("  0.0  B");
+    try FmtSize.fmt(999).testEql("999.0  B");
+    try FmtSize.fmt(1000).testEql("  1.0 kB");
+    try FmtSize.fmt(1049).testEql("  1.0 kB");
+    try FmtSize.fmt(1050).testEql("  1.1 kB");
+    try FmtSize.fmt(999_899).testEql("999.9 kB");
+    try FmtSize.fmt(999_949).testEql("999.9 kB");
+    try FmtSize.fmt(999_950).testEql("  1.0 MB");
+    try FmtSize.fmt(1000_000).testEql("  1.0 MB");
+    try FmtSize.fmt(999_850_009).testEql("999.9 MB");
+    try FmtSize.fmt(999_899_999).testEql("999.9 MB");
+    try FmtSize.fmt(999_900_000).testEql("999.9 MB");
+    try FmtSize.fmt(999_949_999).testEql("999.9 MB");
+    try FmtSize.fmt(999_950_000).testEql("  1.0 GB");
+    try FmtSize.fmt(999_999_999).testEql("  1.0 GB");
+    try FmtSize.fmt(std.math.maxInt(u64)).testEql(" 18.4 EB");
+
+    main.config.si = false;
+    try FmtSize.fmt(0).testEql("  0.0   B");
+    try FmtSize.fmt(999).testEql("999.0   B");
+    try FmtSize.fmt(1000).testEql("  1.0 KiB");
+    try FmtSize.fmt(1024).testEql("  1.0 KiB");
+    try FmtSize.fmt(102400).testEql("100.0 KiB");
+    try FmtSize.fmt(1023898).testEql("999.9 KiB");
+    try FmtSize.fmt(1023949).testEql("  1.0 MiB");
+    try FmtSize.fmt(1048523571).testEql("999.9 MiB");
+    try FmtSize.fmt(1048523572).testEql("  1.0 GiB");
+    try FmtSize.fmt(1073688136908).testEql("999.9 GiB");
+    try FmtSize.fmt(1073688136909).testEql("  1.0 TiB");
+    try FmtSize.fmt(1099456652194611).testEql("999.9 TiB");
+    try FmtSize.fmt(1099456652194612).testEql("  1.0 PiB");
+    try FmtSize.fmt(1125843611847281868).testEql("999.9 PiB");
+    try FmtSize.fmt(1125843611847281869).testEql("  1.0 EiB");
+    try FmtSize.fmt(std.math.maxInt(u64)).testEql(" 16.0 EiB");
+}
+
 // Print a formatted human-readable size string onto the given background.
 pub fn addsize(bg: Bg, v: u64) void {
     const r = FmtSize.fmt(v);
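
The FmtSize rewrite drops f32 formatting entirely: init() scales the value to tenths of a unit with half-up rounding and hands the result to util.fmt5dec() (added in src/util.zig below). For example, 1050 bytes in SI mode takes the kB branch: (1050*1 + 100/2) / 100 = 11 tenths, rendered as "  1.1", matching the "  1.1 kB" test case. The saturating add +| only matters at the very top of the u64 range, where adding the rounding bias would otherwise overflow; that is how maxInt(u64) still yields " 18.4 EB". The rounding step in isolation (a sketch, not the ncdu source):

    const std = @import("std");

    // Scale n to tenths-of-a-unit, rounding half-up; +| saturates so the
    // rounding bias cannot overflow near maxInt(u64).
    fn tenths(n: u64, mul: u64, div: u64) u64 {
        return ((n * mul) +| (div / 2)) / div;
    }

    test "tenths" {
        try std.testing.expectEqual(@as(u64, 11), tenths(1050, 1, 100)); // "  1.1 kB"
        try std.testing.expectEqual(@as(u64, 10), tenths(1049, 1, 100)); // "  1.0 kB"
    }
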
@@ -460,7 +502,7 @@ pub fn addnum(bg: Bg, v: u64) void {
     const s = std.fmt.bufPrint(&buf, "{d}", .{v}) catch unreachable;
     var f: [64:0]u8 = undefined;
     var i: usize = 0;
-    for (s) |digit, n| {
+    for (s, 0..) |digit, n| {
         if (n != 0 and (s.len - n) % 3 == 0) {
             for (main.config.thousands_sep) |ch| {
                 f[i] = ch;
@@ -478,14 +520,14 @@ pub fn addnum(bg: Bg, v: u64) void {
 
 // Print a file mode, takes 10 columns
 pub fn addmode(mode: u32) void {
-    addch(switch (mode & std.os.S.IFMT) {
-        std.os.S.IFDIR => 'd',
-        std.os.S.IFREG => '-',
-        std.os.S.IFLNK => 'l',
-        std.os.S.IFIFO => 'p',
-        std.os.S.IFSOCK => 's',
-        std.os.S.IFCHR => 'c',
-        std.os.S.IFBLK => 'b',
+    addch(switch (mode & std.posix.S.IFMT) {
+        std.posix.S.IFDIR => 'd',
+        std.posix.S.IFREG => '-',
+        std.posix.S.IFLNK => 'l',
+        std.posix.S.IFIFO => 'p',
+        std.posix.S.IFSOCK => 's',
+        std.posix.S.IFCHR => 'c',
+        std.posix.S.IFBLK => 'b',
         else => '?'
     });
     addch(if (mode & 0o400 > 0) 'r' else '-');
@@ -496,7 +538,7 @@ pub fn addmode(mode: u32) void {
     addch(if (mode & 0o2000 > 0) 's' else if (mode & 0o010 > 0) @as(u7, 'x') else '-');
     addch(if (mode & 0o004 > 0) 'r' else '-');
     addch(if (mode & 0o002 > 0) 'w' else '-');
-    addch(if (mode & 0o1000 > 0) (if (std.os.S.ISDIR(mode)) @as(u7, 't') else 'T') else if (mode & 0o001 > 0) @as(u7, 'x') else '-');
+    addch(if (mode & 0o1000 > 0) (if (std.posix.S.ISDIR(mode)) @as(u7, 't') else 'T') else if (mode & 0o001 > 0) @as(u7, 'x') else '-');
 }
 
 // Print a timestamp, takes 25 columns
@@ -514,7 +556,7 @@ pub fn addts(bg: Bg, ts: u64) void {
 }
 
 pub fn hline(ch: c.chtype, len: u32) void {
-    _ = c.hline(ch, @intCast(i32, len));
+    _ = c.hline(ch, @as(i32, @intCast(len)));
 }
 
 // Draws a bordered box in the center of the screen.
@@ -532,20 +574,21 @@ pub const Box = struct {
         style(.default);
         if (width < 6 or height < 3) return s;
 
-        const ulcorner = ncdu_acs_ulcorner();
-        const llcorner = ncdu_acs_llcorner();
-        const urcorner = ncdu_acs_urcorner();
-        const lrcorner = ncdu_acs_lrcorner();
-        const acs_hline = ncdu_acs_hline();
-        const acs_vline = ncdu_acs_vline();
+        const acs_map = @extern(*[128]c.chtype, .{ .name = "acs_map" });
+        const ulcorner = acs_map['l'];
+        const llcorner = acs_map['m'];
+        const urcorner = acs_map['k'];
+        const lrcorner = acs_map['j'];
+        const acs_hline = acs_map['q'];
+        const acs_vline = acs_map['x'];
 
         var i: u32 = 0;
         while (i < height) : (i += 1) {
             s.move(i, 0);
-            addch(if (i == 0) ulcorner else if (i == height-1) llcorner else acs_hline);
-            hline(if (i == 0 or i == height-1) acs_vline else ' ', width-2);
+            addch(if (i == 0) ulcorner else if (i == height-1) llcorner else acs_vline);
+            hline(if (i == 0 or i == height-1) acs_hline else ' ', width-2);
             s.move(i, width-1);
-            addch(if (i == 0) urcorner else if (i == height-1) lrcorner else acs_hline);
+            addch(if (i == 0) urcorner else if (i == height-1) lrcorner else acs_vline);
         }
 
         s.move(0, 3);
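
Reading acs_map directly removes the need for the ncurses_refs.c shims deleted earlier: the ACS_* names are C macros Zig cannot translate, and they expand to exactly these acs_map[] lookups (index letters per curs_addch(3x)). The hunk also straightens out the glyph choices so the names match their use, with the vertical-line glyph on the side edges and the horizontal-line glyph for the top and bottom fill. The lookup on its own (same idea as above, assuming the acs_map symbol exported by ncurses):

    const acs_map = @extern(*[128]c.chtype, .{ .name = "acs_map" });
    const hline_glyph = acs_map['q']; // what the ACS_HLINE macro expands to
    const vline_glyph = acs_map['x']; // what the ACS_VLINE macro expands to
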
@@ -582,20 +625,66 @@ pub fn getch(block: bool) i32 {
     // In non-blocking mode, we can only assume that ERR means "no input yet".
     // In blocking mode, give it 100 tries with a 10ms delay in between,
     // then just give up and die to avoid an infinite loop and unresponsive program.
-    var attempts: u8 = 0;
-    while (attempts < 100) : (attempts += 1) {
-        var ch = c.getch();
+    for (0..100) |_| {
+        const ch = c.getch();
         if (ch == c.KEY_RESIZE) {
             updateSize();
             return -1;
         }
         if (ch == c.ERR) {
             if (!block) return 0;
-            std.os.nanosleep(0, 10*std.time.ns_per_ms);
+            sleep(10*std.time.ns_per_ms);
             continue;
         }
         return ch;
     }
     die("Error reading keyboard input, assuming TTY has been lost.\n(Potentially nonsensical error message: {s})\n",
-        .{ c.strerror(@enumToInt(std.c.getErrno(-1))) });
+        .{ c.strerror(@intFromEnum(std.posix.errno(-1))) });
 }
 
+fn waitInput() void {
+    if (@hasDecl(std.io, "getStdIn")) {
+        std.io.getStdIn().reader().skipUntilDelimiterOrEof('\n') catch unreachable;
+    } else {
+        var buf: [512]u8 = undefined;
+        var rd = std.fs.File.stdin().reader(&buf);
+        _ = rd.interface.discardDelimiterExclusive('\n') catch unreachable;
+    }
+}
+
+pub fn runCmd(cmd: []const []const u8, cwd: ?[]const u8, env: *std.process.EnvMap, reporterr: bool) void {
+    deinit();
+    defer init();
+
+    // NCDU_LEVEL can only count to 9, keeps the implementation simple.
+    if (env.get("NCDU_LEVEL")) |l|
+        env.put("NCDU_LEVEL", if (l.len == 0) "1" else switch (l[0]) {
+            '0'...'8' => |d| &[1]u8{d+1},
+            '9' => "9",
+            else => "1"
+        }) catch unreachable
+    else
+        env.put("NCDU_LEVEL", "1") catch unreachable;
+
+    var child = std.process.Child.init(cmd, main.allocator);
+    child.cwd = cwd;
+    child.env_map = env;
+
+    const term = child.spawnAndWait() catch |e| blk: {
+        std.debug.print("Error running command: {s}\n\nPress enter to continue.\n", .{ ui.errorString(e) });
+        waitInput();
+        break :blk std.process.Child.Term{ .Exited = 0 };
+    };
+
+    const n = switch (term) {
+        .Exited => "error",
+        .Signal => "signal",
+        .Stopped => "stopped",
+        .Unknown => "unknown",
+    };
+    const v = switch (term) { inline else => |v| v };
+    if (term != .Exited or (reporterr and v != 0)) {
+        std.debug.print("\nCommand returned with {s} code {}.\nPress enter to continue.\n", .{ n, v });
+        waitInput();
+    }
+}
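
The waitInput()/runCmd() pair is added in this hunk. Two constructs are worth noting: @hasDecl(std.io, "getStdIn") selects between the Zig 0.14 and 0.15 stdin APIs at comptime (the same dual-version trick as util.LineReader below), and `const v = switch (term) { inline else => |v| v };` comptime-expands one prong per variant of std.process.Child.Term, peer-resolving the payloads to a single type. The latter in isolation (toy union, not the std type):

    const std = @import("std");

    const Term = union(enum) { Exited: u8, Signal: u32 };

    test "inline else payload extraction" {
        const t: Term = .{ .Signal = 9 };
        // one prong, instantiated per tag; u8 and u32 peer-resolve to u32
        const v = switch (t) { inline else => |x| x };
        try std.testing.expectEqual(@as(u32, 9), v);
    }
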

src/util.zig (139 changed lines)

@@ -1,7 +1,8 @@
-// SPDX-FileCopyrightText: 2021-2022 Yoran Heling <projects@yorhel.nl>
+// SPDX-FileCopyrightText: Yorhel <projects@yorhel.nl>
 // SPDX-License-Identifier: MIT
 
 const std = @import("std");
+const c = @import("c.zig").c;
 
 // Cast any integer type to the target type, clamping the value to the supported maximum if necessary.
 pub fn castClamp(comptime T: type, x: anytype) T {
@@ -11,32 +12,63 @@ pub fn castClamp(comptime T: type, x: anytype) T {
     } else if (std.math.minInt(@TypeOf(x)) < std.math.minInt(T) and x < std.math.minInt(T)) {
         return std.math.minInt(T);
     } else {
-        return @intCast(T, x);
+        return @intCast(x);
     }
 }
 
 // Cast any integer type to the target type, truncating if necessary.
 pub fn castTruncate(comptime T: type, x: anytype) T {
-    const Ti = @typeInfo(T).Int;
-    const Xi = @typeInfo(@TypeOf(x)).Int;
-    const nx = if (Xi.signedness != Ti.signedness) @bitCast(std.meta.Int(Ti.signedness, Xi.bits), x) else x;
-    return if (Xi.bits > Ti.bits) @truncate(T, nx) else nx;
+    const Ti = @typeInfo(T).int;
+    const Xi = @typeInfo(@TypeOf(x)).int;
+    const nx: std.meta.Int(Ti.signedness, Xi.bits) = @bitCast(x);
+    return if (Xi.bits > Ti.bits) @truncate(nx) else nx;
 }
 
 // Multiplies by 512, saturating.
 pub fn blocksToSize(b: u64) u64 {
-    return if (b & 0xFF80000000000000 > 0) std.math.maxInt(u64) else b << 9;
+    return b *| 512;
 }
 
 // Ensure the given arraylist buffer gets zero-terminated and returns a slice
 // into the buffer. The returned buffer is invalidated whenever the arraylist
 // is freed or written to.
-pub fn arrayListBufZ(buf: *std.ArrayList(u8)) [:0]const u8 {
-    buf.append(0) catch unreachable;
+pub fn arrayListBufZ(buf: *std.ArrayListUnmanaged(u8), alloc: std.mem.Allocator) [:0]const u8 {
+    buf.append(alloc, 0) catch unreachable;
     defer buf.items.len -= 1;
     return buf.items[0..buf.items.len-1:0];
 }
 
+// Format an integer as right-aligned '###.#'.
+// Pretty much equivalent to:
+//   std.fmt.bufPrintZ(.., "{d:>5.1}", @floatFromInt(n)/10.0);
+// Except this function doesn't pull in large float formatting tables.
+pub fn fmt5dec(n: u14) [5:0]u8 {
+    std.debug.assert(n <= 9999);
+    var buf: [5:0]u8 = "  0.0".*;
+    var v = n;
+    buf[4] += @intCast(v % 10);
+    v /= 10;
+    buf[2] += @intCast(v % 10);
+    v /= 10;
+    if (v == 0) return buf;
+    buf[1] = '0' + @as(u8, @intCast(v % 10));
+    v /= 10;
+    if (v == 0) return buf;
+    buf[0] = '0' + @as(u8, @intCast(v));
+    return buf;
+}
+
+test "fmt5dec" {
+    const eq = std.testing.expectEqualStrings;
+    try eq("  0.0", &fmt5dec(0));
+    try eq("  0.5", &fmt5dec(5));
+    try eq("  9.5", &fmt5dec(95));
+    try eq(" 12.5", &fmt5dec(125));
+    try eq("123.9", &fmt5dec(1239));
+    try eq("999.9", &fmt5dec(9999));
+}
+
 
 // Straightforward Zig port of strnatcmp() from https://github.com/sourcefrog/natsort/
 // (Requiring nul-terminated strings is ugly, but we've got them anyway and it does simplify the code)
 pub fn strnatcmp(a: [:0]const u8, b: [:0]const u8) std.math.Order {
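
fmt5dec() fills a fixed "  0.0" template from the right, so 1239 becomes: buf[4] += 1239 % 10 giving '9', then buf[2] gets '3', buf[1] '2' and buf[0] '1', producing "123.9" without linking any float formatting tables. A brute-force equivalence check against the float path it replaces (a sketch that assumes it sits next to fmt5dec() in util.zig and 0.14-era format syntax):

    const std = @import("std");

    test "fmt5dec matches float formatting" {
        var buf: [8]u8 = undefined;
        var n: u14 = 0;
        while (n <= 9999) : (n += 1) {
            const f = @as(f64, @floatFromInt(n)) / 10.0;
            const exp = try std.fmt.bufPrint(&buf, "{d:>5.1}", .{f});
            try std.testing.expectEqualStrings(exp, &fmt5dec(n));
        }
    }
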
@@ -44,8 +76,8 @@ pub fn strnatcmp(a: [:0]const u8, b: [:0]const u8) std.math.Order {
     var bi: usize = 0;
     const isDigit = std.ascii.isDigit;
     while (true) {
-        while (std.ascii.isSpace(a[ai])) ai += 1;
-        while (std.ascii.isSpace(b[bi])) bi += 1;
+        while (std.ascii.isWhitespace(a[ai])) ai += 1;
+        while (std.ascii.isWhitespace(b[bi])) bi += 1;
 
         if (isDigit(a[ai]) and isDigit(b[bi])) {
             if (a[ai] == '0' or b[bi] == '0') { // compare_left
@@ -133,12 +165,85 @@ test "strnatcmp" {
     };
     // Test each string against each other string, simple and thorough.
     const eq = std.testing.expectEqual;
-    var i: usize = 0;
-    while (i < w.len) : (i += 1) {
-        var j: usize = 0;
+    for (0..w.len) |i| {
         try eq(strnatcmp(w[i], w[i]), .eq);
-        while (j < i) : (j += 1) try eq(strnatcmp(w[i], w[j]), .gt);
-        j += 1;
-        while (j < w.len) : (j += 1) try eq(strnatcmp(w[i], w[j]), .lt);
+        for (0..i) |j| try eq(strnatcmp(w[i], w[j]), .gt);
+        for (i+1..w.len) |j| try eq(strnatcmp(w[i], w[j]), .lt);
     }
 }
 
+
+pub fn expanduser(path: []const u8, alloc: std.mem.Allocator) ![:0]u8 {
+    if (path.len == 0 or path[0] != '~') return alloc.dupeZ(u8, path);
+
+    const len = std.mem.indexOfScalar(u8, path, '/') orelse path.len;
+    const home_raw = blk: {
+        const pwd = pwd: {
+            if (len == 1) {
+                if (std.posix.getenvZ("HOME")) |p| break :blk p;
+                break :pwd c.getpwuid(c.getuid());
+            } else {
+                const name = try alloc.dupeZ(u8, path[1..len]);
+                defer alloc.free(name);
+                break :pwd c.getpwnam(name.ptr);
+            }
+        };
+        if (pwd != null)
+            if (@as(*c.struct_passwd, pwd).pw_dir) |p|
+                break :blk std.mem.span(p);
+        return alloc.dupeZ(u8, path);
+    };
+    const home = std.mem.trimRight(u8, home_raw, "/");
+
+    if (home.len == 0 and path.len == len) return alloc.dupeZ(u8, "/");
+    return try std.mem.concatWithSentinel(alloc, u8, &.{ home, path[len..] }, 0);
+}
+
+
+// Silly abstraction to read a file one line at a time. Only exists to help
+// with supporting both Zig 0.14 and 0.15, can be removed once 0.14 support is
+// dropped.
+pub const LineReader = if (@hasDecl(std.io, "bufferedReader")) struct {
+    rd: std.io.BufferedReader(4096, std.fs.File.Reader),
+    fbs: std.io.FixedBufferStream([]u8),
+
+    pub fn init(f: std.fs.File, buf: []u8) @This() {
+        return .{
+            .rd = std.io.bufferedReader(f.reader()),
+            .fbs = std.io.fixedBufferStream(buf),
+        };
+    }
+
+    pub fn read(s: *@This()) !?[]u8 {
+        s.fbs.reset();
+        s.rd.reader().streamUntilDelimiter(s.fbs.writer(), '\n', s.fbs.buffer.len) catch |err| switch (err) {
+            error.EndOfStream => if (s.fbs.getPos() catch unreachable == 0) return null,
+            else => |e| return e,
+        };
+        return s.fbs.getWritten();
+    }
+
+} else struct {
+    rd: std.fs.File.Reader,
+
+    pub fn init(f: std.fs.File, buf: []u8) @This() {
+        return .{ .rd = f.readerStreaming(buf) };
+    }
+
+    pub fn read(s: *@This()) !?[]u8 {
+        // Can't use takeDelimiter() because that's not available in 0.15.1,
+        // Can't use takeDelimiterExclusive() because that changed behavior in 0.15.2.
+        const r = &s.rd.interface;
+        const result = r.peekDelimiterInclusive('\n') catch |err| switch (err) {
+            error.EndOfStream => {
+                const remaining = r.buffer[r.seek..r.end];
+                if (remaining.len == 0) return null;
+                r.toss(remaining.len);
+                return remaining;
+            },
+            else => |e| return e,
+        };
+        r.toss(result.len);
+        return result[0 .. result.len - 1];
+    }
+};
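
A sketch of how the LineReader shim is meant to be driven (hypothetical call site; the file name and buffer size are illustrative). init() takes a File plus a line buffer, and read() yields one line at a time, without the trailing '\n', until it returns null at EOF:

    const std = @import("std");
    const util = @import("util.zig");

    fn printLines() !void {
        const f = try std.fs.cwd().openFile("exclude-list.txt", .{}); // hypothetical file
        defer f.close();
        var buf: [4096]u8 = undefined;
        var lr = util.LineReader.init(f, &buf);
        while (try lr.read()) |line| // `line` points into buf; copy it to keep it
            std.debug.print("{s}\n", .{line});
    }
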