mirror of
https://code.blicky.net/yorhel/ncdu.git
synced 2026-01-13 09:18:40 -09:00
Compare commits
No commits in common. "zig" and "v2.6" have entirely different histories.
22 changed files with 671 additions and 866 deletions
40
ChangeLog
40
ChangeLog
|
|
@ -1,46 +1,6 @@
|
||||||
# SPDX-FileCopyrightText: Yorhel <projects@yorhel.nl>
|
# SPDX-FileCopyrightText: Yorhel <projects@yorhel.nl>
|
||||||
# SPDX-License-Identifier: MIT
|
# SPDX-License-Identifier: MIT
|
||||||
|
|
||||||
2.9.2 - 2025-10-24
|
|
||||||
- Still requires Zig 0.14 or 0.15
|
|
||||||
- Fix hang on loading config file when compiled with Zig 0.15.2
|
|
||||||
|
|
||||||
2.9.1 - 2025-08-21
|
|
||||||
- Add support for building with Zig 0.15
|
|
||||||
- Zig 0.14 is still supported
|
|
||||||
|
|
||||||
2.9 - 2025-08-16
|
|
||||||
- Still requires Zig 0.14
|
|
||||||
- Add --delete-command option to replace the built-in file deletion
|
|
||||||
- Move term cursor to selected option in delete confirmation window
|
|
||||||
- Support binary import on older Linux kernels lacking statx() (may break
|
|
||||||
again in the future, Zig does not officially support such old kernels)
|
|
||||||
|
|
||||||
2.8.2 - 2025-05-01
|
|
||||||
- Still requires Zig 0.14
|
|
||||||
- Fix a build error on MacOS
|
|
||||||
|
|
||||||
2.8.1 - 2025-04-28
|
|
||||||
- Still requires Zig 0.14
|
|
||||||
- Fix integer overflow in binary export
|
|
||||||
- Fix crash when `fstatat()` returns EINVAL
|
|
||||||
- Minor build system improvements
|
|
||||||
|
|
||||||
2.8 - 2025-03-05
|
|
||||||
- Now requires Zig 0.14
|
|
||||||
- Add support for @-prefixed lines to ignore errors in config file
|
|
||||||
- List all supported options in `--help`
|
|
||||||
- Use `kB` instead of `KB` in `--si` mode
|
|
||||||
|
|
||||||
2.7 - 2024-11-19
|
|
||||||
- Still requires Zig 0.12 or 0.13
|
|
||||||
- Support transparent reading/writing of zstandard-compressed JSON
|
|
||||||
- Add `--compress` and `--export-block-size` options
|
|
||||||
- Perform tilde expansion on paths in the config file
|
|
||||||
- Fix JSON import of escaped UTF-16 surrogate pairs
|
|
||||||
- Fix incorrect field in root item when exporting to the binary format
|
|
||||||
- Add -Dstrip build flag
|
|
||||||
|
|
||||||
2.6 - 2024-09-27
|
2.6 - 2024-09-27
|
||||||
- Still requires Zig 0.12 or 0.13
|
- Still requires Zig 0.12 or 0.13
|
||||||
- Add dependency on libzstd
|
- Add dependency on libzstd
|
||||||
|
|
|
||||||
13
Makefile
13
Makefile
|
|
@ -9,7 +9,7 @@ ZIG ?= zig
|
||||||
PREFIX ?= /usr/local
|
PREFIX ?= /usr/local
|
||||||
BINDIR ?= ${PREFIX}/bin
|
BINDIR ?= ${PREFIX}/bin
|
||||||
MANDIR ?= ${PREFIX}/share/man/man1
|
MANDIR ?= ${PREFIX}/share/man/man1
|
||||||
ZIG_FLAGS ?= --release=fast -Dstrip
|
ZIG_FLAGS ?= --release=fast
|
||||||
|
|
||||||
NCDU_VERSION=$(shell grep 'program_version = "' src/main.zig | sed -e 's/^.*"\(.\+\)".*$$/\1/')
|
NCDU_VERSION=$(shell grep 'program_version = "' src/main.zig | sed -e 's/^.*"\(.\+\)".*$$/\1/')
|
||||||
|
|
||||||
|
|
@ -68,25 +68,24 @@ static-%.tar.gz:
|
||||||
LD="${ZIG} cc --target=$*"\
|
LD="${ZIG} cc --target=$*"\
|
||||||
AR="${ZIG} ar" RANLIB="${ZIG} ranlib"
|
AR="${ZIG} ar" RANLIB="${ZIG} ranlib"
|
||||||
cd static-$*/nc && ../../ncurses/configure --prefix="`pwd`/../inst"\
|
cd static-$*/nc && ../../ncurses/configure --prefix="`pwd`/../inst"\
|
||||||
|
--with-pkg-config-libdir="`pwd`/../inst/pkg"\
|
||||||
--without-cxx --without-cxx-binding --without-ada --without-manpages --without-progs\
|
--without-cxx --without-cxx-binding --without-ada --without-manpages --without-progs\
|
||||||
--without-tests --disable-pc-files --without-pkg-config --without-shared --without-debug\
|
--without-tests --enable-pc-files --without-pkg-config --without-shared --without-debug\
|
||||||
--without-gpm --without-sysmouse --enable-widec --with-default-terminfo-dir=/usr/share/terminfo\
|
--without-gpm --without-sysmouse --enable-widec --with-default-terminfo-dir=/usr/share/terminfo\
|
||||||
--with-terminfo-dirs=/usr/share/terminfo:/lib/terminfo:/usr/local/share/terminfo\
|
--with-terminfo-dirs=/usr/share/terminfo:/lib/terminfo:/usr/local/share/terminfo\
|
||||||
--with-fallbacks="screen linux vt100 xterm xterm-256color" --host=$*\
|
--with-fallbacks="screen linux vt100 xterm xterm-256color" --host=$*\
|
||||||
CC="${ZIG} cc --target=$*"\
|
CC="${ZIG} cc --target=$*"\
|
||||||
LD="${ZIG} cc --target=$*"\
|
LD="${ZIG} cc --target=$*"\
|
||||||
AR="${ZIG} ar" RANLIB="${ZIG} ranlib"\
|
AR="${ZIG} ar" RANLIB="${ZIG} ranlib"\
|
||||||
CPPFLAGS=-D_GNU_SOURCE && make -j8
|
CPPFLAGS=-D_GNU_SOURCE && make -j8 && make install.libs
|
||||||
@# zig-build - cleaner approach but doesn't work, results in a dynamically linked binary.
|
@# zig-build - cleaner approach but doesn't work, results in a dynamically linked binary.
|
||||||
@#cd static-$* && PKG_CONFIG_LIBDIR="`pwd`/inst/pkg" zig build -Dtarget=$*
|
@#cd static-$* && PKG_CONFIG_LIBDIR="`pwd`/inst/pkg" zig build -Dtarget=$*
|
||||||
@# --build-file ../build.zig --search-prefix inst/ --cache-dir zig -Drelease-fast=true
|
@# --build-file ../build.zig --search-prefix inst/ --cache-dir zig -Drelease-fast=true
|
||||||
@# Alternative approach, bypassing zig-build
|
@# Alternative approach, bypassing zig-build
|
||||||
cd static-$* && ${ZIG} build-exe -target $*\
|
cd static-$* && ${ZIG} build-exe -target $*\
|
||||||
-Inc/include -Izstd -lc nc/lib/libncursesw.a zstd/libzstd.a\
|
-Iinst/include -Iinst/include/ncursesw -Izstd -lc inst/lib/libncursesw.a zstd/libzstd.a\
|
||||||
--cache-dir zig-cache -static -fstrip -O ReleaseFast ../src/main.zig
|
--cache-dir zig-cache -static -fstrip -O ReleaseFast ../src/main.zig
|
||||||
@# My system's strip can't deal with arm binaries and zig doesn't wrap a strip alternative.
|
strip -R .eh_frame -R .eh_frame_hdr static-$*/main
|
||||||
@# Whatever, just let it error for those.
|
|
||||||
strip -R .eh_frame -R .eh_frame_hdr static-$*/main || true
|
|
||||||
cd static-$* && mv main ncdu && tar -czf ../static-$*.tar.gz ncdu
|
cd static-$* && mv main ncdu && tar -czf ../static-$*.tar.gz ncdu
|
||||||
rm -rf static-$*
|
rm -rf static-$*
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -19,7 +19,7 @@ C version (1.x).
|
||||||
|
|
||||||
## Requirements
|
## Requirements
|
||||||
|
|
||||||
- Zig 0.14 or 0.15
|
- Zig 0.12 or 0.13.
|
||||||
- Some sort of POSIX-like OS
|
- Some sort of POSIX-like OS
|
||||||
- ncurses
|
- ncurses
|
||||||
- libzstd
|
- libzstd
|
||||||
|
|
|
||||||
31
build.zig
31
build.zig
|
|
@ -7,26 +7,21 @@ pub fn build(b: *std.Build) void {
|
||||||
const target = b.standardTargetOptions(.{});
|
const target = b.standardTargetOptions(.{});
|
||||||
const optimize = b.standardOptimizeOption(.{});
|
const optimize = b.standardOptimizeOption(.{});
|
||||||
|
|
||||||
const pie = b.option(bool, "pie", "Build with PIE support (by default: target-dependant)");
|
const pie = b.option(bool, "pie", "Build with PIE support (by default false)") orelse false;
|
||||||
const strip = b.option(bool, "strip", "Strip debugging info (by default false)") orelse false;
|
|
||||||
|
|
||||||
const main_mod = b.createModule(.{
|
|
||||||
.root_source_file = b.path("src/main.zig"),
|
|
||||||
.target = target,
|
|
||||||
.optimize = optimize,
|
|
||||||
.strip = strip,
|
|
||||||
.link_libc = true,
|
|
||||||
});
|
|
||||||
main_mod.linkSystemLibrary("ncursesw", .{});
|
|
||||||
main_mod.linkSystemLibrary("zstd", .{});
|
|
||||||
|
|
||||||
const exe = b.addExecutable(.{
|
const exe = b.addExecutable(.{
|
||||||
.name = "ncdu",
|
.name = "ncdu",
|
||||||
.root_module = main_mod,
|
.root_source_file = b.path("src/main.zig"),
|
||||||
|
.target = target,
|
||||||
|
.optimize = optimize,
|
||||||
|
.link_libc = true,
|
||||||
});
|
});
|
||||||
|
|
||||||
exe.pie = pie;
|
exe.pie = pie;
|
||||||
// https://github.com/ziglang/zig/blob/faccd79ca5debbe22fe168193b8de54393257604/build.zig#L745-L748
|
exe.root_module.linkSystemLibrary("ncursesw", .{});
|
||||||
if (target.result.os.tag.isDarwin()) {
|
exe.root_module.linkSystemLibrary("libzstd", .{});
|
||||||
|
// https://github.com/ziglang/zig/blob/b52be973dfb7d1408218b8e75800a2da3dc69108/build.zig#L551-L554
|
||||||
|
if (target.result.isDarwin()) {
|
||||||
// useful for package maintainers
|
// useful for package maintainers
|
||||||
exe.headerpad_max_install_names = true;
|
exe.headerpad_max_install_names = true;
|
||||||
}
|
}
|
||||||
|
|
@ -42,9 +37,13 @@ pub fn build(b: *std.Build) void {
|
||||||
run_step.dependOn(&run_cmd.step);
|
run_step.dependOn(&run_cmd.step);
|
||||||
|
|
||||||
const unit_tests = b.addTest(.{
|
const unit_tests = b.addTest(.{
|
||||||
.root_module = main_mod,
|
.root_source_file = b.path("src/main.zig"),
|
||||||
|
.target = target,
|
||||||
|
.optimize = optimize,
|
||||||
|
.link_libc = true,
|
||||||
});
|
});
|
||||||
unit_tests.pie = pie;
|
unit_tests.pie = pie;
|
||||||
|
unit_tests.root_module.linkSystemLibrary("ncursesw", .{});
|
||||||
|
|
||||||
const run_unit_tests = b.addRunArtifact(unit_tests);
|
const run_unit_tests = b.addRunArtifact(unit_tests);
|
||||||
|
|
||||||
|
|
|
||||||
85
ncdu.1
85
ncdu.1
|
|
@ -1,6 +1,6 @@
|
||||||
.\" SPDX-FileCopyrightText: Yorhel <projects@yorhel.nl>
|
.\" SPDX-FileCopyrightText: Yorhel <projects@yorhel.nl>
|
||||||
.\" SPDX-License-Identifier: MIT
|
.\" SPDX-License-Identifier: MIT
|
||||||
.Dd August 16, 2025
|
.Dd September 27, 2024
|
||||||
.Dt NCDU 1
|
.Dt NCDU 1
|
||||||
.Os
|
.Os
|
||||||
.Sh NAME
|
.Sh NAME
|
||||||
|
|
@ -21,9 +21,7 @@
|
||||||
.Op Fl L , \-follow\-symlinks , \-no\-follow\-symlinks
|
.Op Fl L , \-follow\-symlinks , \-no\-follow\-symlinks
|
||||||
.Op Fl \-include\-kernfs , \-exclude\-kernfs
|
.Op Fl \-include\-kernfs , \-exclude\-kernfs
|
||||||
.Op Fl t , \-threads Ar num
|
.Op Fl t , \-threads Ar num
|
||||||
.Op Fl c , \-compress , \-no\-compress
|
|
||||||
.Op Fl \-compress\-level Ar num
|
.Op Fl \-compress\-level Ar num
|
||||||
.Op Fl \-export\-block\-size Ar num
|
|
||||||
.Op Fl 0 , 1 , 2
|
.Op Fl 0 , 1 , 2
|
||||||
.Op Fl q , \-slow\-ui\-updates , \-fast\-ui\-updates
|
.Op Fl q , \-slow\-ui\-updates , \-fast\-ui\-updates
|
||||||
.Op Fl \-enable\-shell , \-disable\-shell
|
.Op Fl \-enable\-shell , \-disable\-shell
|
||||||
|
|
@ -44,7 +42,6 @@
|
||||||
.Op Fl \-group\-directories\-first , \-no\-group\-directories\-first
|
.Op Fl \-group\-directories\-first , \-no\-group\-directories\-first
|
||||||
.Op Fl \-confirm\-quit , \-no\-confirm\-quit
|
.Op Fl \-confirm\-quit , \-no\-confirm\-quit
|
||||||
.Op Fl \-confirm\-delete , \-no\-confirm\-delete
|
.Op Fl \-confirm\-delete , \-no\-confirm\-delete
|
||||||
.Op Fl \-delete\-command Ar command
|
|
||||||
.Op Fl \-color Ar off | dark | dark-bg
|
.Op Fl \-color Ar off | dark | dark-bg
|
||||||
.Op Ar path
|
.Op Ar path
|
||||||
.Nm
|
.Nm
|
||||||
|
|
@ -100,11 +97,6 @@ uncompressed, or a little over 100 KiB when compressed with gzip.
|
||||||
This scales linearly, so be prepared to handle a few tens of megabytes when
|
This scales linearly, so be prepared to handle a few tens of megabytes when
|
||||||
dealing with millions of files.
|
dealing with millions of files.
|
||||||
.Pp
|
.Pp
|
||||||
Consider enabling
|
|
||||||
.Fl c
|
|
||||||
to output Zstandard-compressed JSON, which can significantly reduce size of the
|
|
||||||
exported data.
|
|
||||||
.Pp
|
|
||||||
When running a multi-threaded scan or when scanning a directory tree that may
|
When running a multi-threaded scan or when scanning a directory tree that may
|
||||||
not fit in memory, consider using
|
not fit in memory, consider using
|
||||||
.Fl O
|
.Fl O
|
||||||
|
|
@ -195,36 +187,12 @@ The binary format (see
|
||||||
.Fl O )
|
.Fl O )
|
||||||
does not have this problem and supports efficient exporting with any number of
|
does not have this problem and supports efficient exporting with any number of
|
||||||
threads.
|
threads.
|
||||||
.El
|
|
||||||
.
|
|
||||||
.Ss Export Options
|
|
||||||
These options affect behavior when exporting to file with the
|
|
||||||
.Fl o
|
|
||||||
or
|
|
||||||
.Fl O
|
|
||||||
options.
|
|
||||||
.Bl -tag -width Ds
|
|
||||||
.It Fl c , \-compress , \-no\-compress
|
|
||||||
Enable or disable Zstandard compression when exporting to JSON (see
|
|
||||||
.Fl o ) .
|
|
||||||
.It Fl \-compress\-level Ar num
|
.It Fl \-compress\-level Ar num
|
||||||
Set the Zstandard compression level when using
|
Set the Zstandard compression level when using
|
||||||
.Fl O
|
.Fl O
|
||||||
or
|
to create a binary export.
|
||||||
.Fl c .
|
|
||||||
Valid values are 1 (fastest) to 19 (slowest).
|
Valid values are 1 (fastest) to 19 (slowest).
|
||||||
Defaults to 4.
|
Defaults to 4.
|
||||||
.It Fl \-export\-block\-size Ar num
|
|
||||||
Set the block size, in kibibytes, for the binary export format (see
|
|
||||||
.Fl O ) .
|
|
||||||
Larger blocks require more memory but result in better compression efficiency.
|
|
||||||
This option can be combined with a higher
|
|
||||||
.Fl \-compress\-level
|
|
||||||
for even better compression.
|
|
||||||
.Pp
|
|
||||||
Accepted values are between 4 and 16000.
|
|
||||||
The defaults is to start at 64 KiB and then gradually increase the block size
|
|
||||||
for large exports.
|
|
||||||
.El
|
.El
|
||||||
.
|
.
|
||||||
.Ss Interface Options
|
.Ss Interface Options
|
||||||
|
|
@ -287,7 +255,7 @@ when given twice it will also add
|
||||||
thus ensuring that there is no way to modify the file system from within
|
thus ensuring that there is no way to modify the file system from within
|
||||||
.Nm .
|
.Nm .
|
||||||
.It Fl \-si , \-no\-si
|
.It Fl \-si , \-no\-si
|
||||||
List sizes using base 10 prefixes, that is, powers of 1000 (kB, MB, etc), as
|
List sizes using base 10 prefixes, that is, powers of 1000 (KB, MB, etc), as
|
||||||
defined in the International System of Units (SI), instead of the usual base 2
|
defined in the International System of Units (SI), instead of the usual base 2
|
||||||
prefixes (KiB, MiB, etc).
|
prefixes (KiB, MiB, etc).
|
||||||
.It Fl \-disk\-usage , \-apparent\-size
|
.It Fl \-disk\-usage , \-apparent\-size
|
||||||
|
|
@ -360,31 +328,6 @@ Can be helpful when you accidentally press 'q' during or after a very long scan.
|
||||||
Require a confirmation before deleting a file or directory.
|
Require a confirmation before deleting a file or directory.
|
||||||
Enabled by default, but can be disabled if you're absolutely sure you won't
|
Enabled by default, but can be disabled if you're absolutely sure you won't
|
||||||
accidentally press 'd'.
|
accidentally press 'd'.
|
||||||
.It Fl \-delete\-command Ar command
|
|
||||||
When set to a non-empty string, replace the built-in file deletion feature with
|
|
||||||
a custom shell command.
|
|
||||||
.Pp
|
|
||||||
The absolute path of the item to be deleted is appended to the given command
|
|
||||||
and the result is evaluated in a shell.
|
|
||||||
The command is run from the same directory that ncdu itself was started in.
|
|
||||||
The
|
|
||||||
.Ev NCDU_DELETE_PATH
|
|
||||||
environment variable is set to the absolute path of the item to be deleted and
|
|
||||||
.Ev NCDU_LEVEL
|
|
||||||
is set in the same fashion as when spawning a shell from within ncdu.
|
|
||||||
.Pp
|
|
||||||
After command completion, the in-memory view of the selected item is refreshed
|
|
||||||
and directory sizes are adjusted as necessary.
|
|
||||||
This is not a full refresh of the complete directory tree, so if the item has
|
|
||||||
been renamed or moved to another directory, it's new location is not
|
|
||||||
automatically picked up.
|
|
||||||
.Pp
|
|
||||||
For example, to use
|
|
||||||
.Xr rm 1
|
|
||||||
interactive mode to prompt before each deletion:
|
|
||||||
.Dl ncdu --no-confirm-delete --delete-command \[aq]rm -ri --\[aq]
|
|
||||||
Or to move files to trash:
|
|
||||||
.Dl ncdu --delete-command \[aq]gio trash --\[aq]
|
|
||||||
.It Fl \-color Ar off | dark | dark-bg
|
.It Fl \-color Ar off | dark | dark-bg
|
||||||
Set the color scheme.
|
Set the color scheme.
|
||||||
The following schemes are recognized:
|
The following schemes are recognized:
|
||||||
|
|
@ -418,7 +361,6 @@ is given on the command line.
|
||||||
.Pp
|
.Pp
|
||||||
The configuration file format is simply one command line option per line.
|
The configuration file format is simply one command line option per line.
|
||||||
Lines starting with '#' are ignored.
|
Lines starting with '#' are ignored.
|
||||||
A line can be prefixed with '@' to suppress errors while parsing the option.
|
|
||||||
Example configuration file:
|
Example configuration file:
|
||||||
.Bd -literal -offset indent
|
.Bd -literal -offset indent
|
||||||
# Always enable extended mode
|
# Always enable extended mode
|
||||||
|
|
@ -429,9 +371,6 @@ Example configuration file:
|
||||||
|
|
||||||
# Exclude .git directories
|
# Exclude .git directories
|
||||||
\-\-exclude .git
|
\-\-exclude .git
|
||||||
|
|
||||||
# Read excludes from ~/.ncduexcludes, ignore error if the file does not exist
|
|
||||||
@--exclude-from ~/.ncduexcludes
|
|
||||||
.Ed
|
.Ed
|
||||||
.
|
.
|
||||||
.Sh KEYS
|
.Sh KEYS
|
||||||
|
|
@ -548,28 +487,34 @@ Empty directory.
|
||||||
.Sh EXAMPLES
|
.Sh EXAMPLES
|
||||||
To scan and browse the directory you're currently in, all you need is a simple:
|
To scan and browse the directory you're currently in, all you need is a simple:
|
||||||
.Dl ncdu
|
.Dl ncdu
|
||||||
To scan a full filesystem, for example your root filesystem, you'll want to use
|
If you want to scan a full filesystem, for example your root filesystem, then
|
||||||
|
you'll want to use
|
||||||
.Fl x :
|
.Fl x :
|
||||||
.Dl ncdu \-x /
|
.Dl ncdu \-x /
|
||||||
.Pp
|
.Pp
|
||||||
Since scanning a large directory may take a while, you can scan a directory and
|
Since scanning a large directory may take a while, you can scan a directory and
|
||||||
export the results for later viewing:
|
export the results for later viewing:
|
||||||
.Bd -literal -offset indent
|
.Bd -literal -offset indent
|
||||||
ncdu \-1xO export.ncdu /
|
ncdu \-1xo\- / | gzip >export.gz
|
||||||
# ...some time later:
|
# ...some time later:
|
||||||
ncdu \-f export.ncdu
|
zcat export.gz | ncdu \-f\-
|
||||||
.Ed
|
.Ed
|
||||||
To export from a cron job, make sure to replace
|
To export from a cron job, make sure to replace
|
||||||
.Fl 1
|
.Fl 1
|
||||||
with
|
with
|
||||||
.Fl 0
|
.Fl 0
|
||||||
to suppress unnecessary progress output.
|
to suppress any unnecessary output.
|
||||||
.Pp
|
.Pp
|
||||||
You can also export a directory and browse it once scanning is done:
|
You can also export a directory and browse it once scanning is done:
|
||||||
.Dl ncdu \-co\- | tee export.json.zst | ./ncdu \-f\-
|
.Dl ncdu \-o\- | tee export.file | ./ncdu \-f\-
|
||||||
|
The same is possible with gzip compression, but is a bit kludgey:
|
||||||
|
.Dl ncdu \-o\- | gzip | tee export.gz | gunzip | ./ncdu \-f\-
|
||||||
.Pp
|
.Pp
|
||||||
To scan a system remotely, but browse through the files locally:
|
To scan a system remotely, but browse through the files locally:
|
||||||
.Dl ssh user@system ncdu \-co\- / | ./ncdu \-f\-
|
.Dl ssh \-C user@system ncdu \-o\- / | ./ncdu \-f\-
|
||||||
|
The
|
||||||
|
.Fl C
|
||||||
|
option to ssh enables compression, which will be very useful over slow links.
|
||||||
Remote scanning and local viewing has two major advantages when
|
Remote scanning and local viewing has two major advantages when
|
||||||
compared to running
|
compared to running
|
||||||
.Nm
|
.Nm
|
||||||
|
|
|
||||||
242
ncdubinexp.pl
Executable file
242
ncdubinexp.pl
Executable file
|
|
@ -0,0 +1,242 @@
|
||||||
|
#!/usr/bin/perl
|
||||||
|
# SPDX-FileCopyrightText: Yorhel <projects@yorhel.nl>
|
||||||
|
# SPDX-License-Identifier: MIT
|
||||||
|
|
||||||
|
|
||||||
|
# Usage: ncdubinexp.pl [options] <export.ncdu
|
||||||
|
# Or: ncdu -O- | ncdubinexp.pl [options]
|
||||||
|
#
|
||||||
|
# Reads and validates a binary ncdu export file and optionally prints out
|
||||||
|
# various diagnostic data and statistics.
|
||||||
|
#
|
||||||
|
# Options:
|
||||||
|
# blocks - print a listing of all blocks as they are read
|
||||||
|
# items - print a listing of all items as they are read
|
||||||
|
# dirs - print out dir listing stats
|
||||||
|
# stats - print some overview stats
|
||||||
|
#
|
||||||
|
# This script is highly inefficient in both RAM and CPU, not suitable for large
|
||||||
|
# exports.
|
||||||
|
# This script does not permit unknown blocks or item keys, although that is
|
||||||
|
# technically valid.
|
||||||
|
|
||||||
|
|
||||||
|
use v5.36;
|
||||||
|
use autodie;
|
||||||
|
use bytes;
|
||||||
|
no warnings 'portable';
|
||||||
|
use List::Util 'min', 'max';
|
||||||
|
use CBOR::XS; # Does not officially support recent perl versions, but it's the only CPAN module that supports streaming.
|
||||||
|
use Compress::Zstd;
|
||||||
|
|
||||||
|
my $printblocks = grep $_ eq 'blocks', @ARGV;
|
||||||
|
my $printitems = grep $_ eq 'items', @ARGV;
|
||||||
|
my $printdirs = grep $_ eq 'dirs', @ARGV;
|
||||||
|
my $printstats = grep $_ eq 'stats', @ARGV;
|
||||||
|
|
||||||
|
my %datablocks;
|
||||||
|
my %items;
|
||||||
|
my $root_itemref;
|
||||||
|
my $datablock_len = 0;
|
||||||
|
my $rawdata_len = 0;
|
||||||
|
my $minitemsperblock = 1e10;
|
||||||
|
my $maxitemsperblock = 0;
|
||||||
|
|
||||||
|
{
|
||||||
|
die "Input too short\n" if 8 != read STDIN, my $sig, 8;
|
||||||
|
die "Invalid file signature\n" if $sig ne "\xbfncduEX1";
|
||||||
|
}
|
||||||
|
|
||||||
|
my @itemkeys = qw/
|
||||||
|
type
|
||||||
|
name
|
||||||
|
prev
|
||||||
|
asize
|
||||||
|
dsize
|
||||||
|
dev
|
||||||
|
rderr
|
||||||
|
cumasize
|
||||||
|
cumdsize
|
||||||
|
shrasize
|
||||||
|
shrdsize
|
||||||
|
items
|
||||||
|
sub
|
||||||
|
ino
|
||||||
|
nlink
|
||||||
|
uid
|
||||||
|
gid
|
||||||
|
mode
|
||||||
|
mtime
|
||||||
|
/;
|
||||||
|
|
||||||
|
|
||||||
|
sub datablock($prefix, $off, $blklen, $content) {
|
||||||
|
die "$prefix: Data block too small\n" if length $content < 8;
|
||||||
|
die "$prefix: Data block too large\n" if length $content >= (1<<24);
|
||||||
|
|
||||||
|
my $num = unpack 'N', $content;
|
||||||
|
die sprintf "%s: Duplicate block id %d (first at %010x)", $prefix, $num, $datablocks{$num}>>24 if $datablocks{$num};
|
||||||
|
$datablocks{$num} = ($off << 24) | $blklen;
|
||||||
|
|
||||||
|
my $compressed = substr $content, 4;
|
||||||
|
my $rawdata = decompress($compressed);
|
||||||
|
die "$prefix: Block id $num failed decompression\n" if !defined $rawdata;
|
||||||
|
die "$prefix: Uncompressed data block size too large\n" if length $rawdata >= (1<<24);
|
||||||
|
|
||||||
|
$printblocks && printf "%s: data block %d rawlen %d (%.2f)\n", $prefix, $num, length($rawdata), length($compressed)/length($rawdata)*100;
|
||||||
|
|
||||||
|
$datablock_len += length($compressed);
|
||||||
|
$rawdata_len += length($rawdata);
|
||||||
|
|
||||||
|
cbordata($num, $rawdata);
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
sub fmtitem($val) {
|
||||||
|
join ' ', map "$_:$val->{$_}", grep exists $val->{$_}, @itemkeys;
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
sub cbordata($blknum, $data) {
|
||||||
|
my $cbor = CBOR::XS->new_safe;
|
||||||
|
my $off = 0;
|
||||||
|
my $nitems = 0;
|
||||||
|
while ($off < length $data) { # This substr madness is prolly quite slow
|
||||||
|
my($val, $len) = $cbor->decode_prefix(substr $data, $off);
|
||||||
|
my $itemref = ($blknum << 24) | $off;
|
||||||
|
$off += $len;
|
||||||
|
$nitems++;
|
||||||
|
|
||||||
|
# Basic validation of the CBOR data. Doesn't validate that every value
|
||||||
|
# has the correct CBOR type or that integers are within range.
|
||||||
|
$val = { _itemref => $itemref, map {
|
||||||
|
die sprintf "#%010x: Invalid CBOR key '%s'\n", $itemref, $_ if !/^[0-9]+$/ || !$itemkeys[$_];
|
||||||
|
my($k, $v) = ($itemkeys[$_], $val->{$_});
|
||||||
|
die sprintf "#%010x: Invalid value for key '%s': '%s'\n", $itemref, $k, $v
|
||||||
|
if ref $v eq 'ARRAY' || ref $v eq 'HASH' || !defined $v || !(
|
||||||
|
$k eq 'type' ? ($v =~ /^(-[1-4]|[0-3])$/) :
|
||||||
|
$k eq 'prev' || $k eq 'sub' || $k eq 'prevlnk' ? 1 : # itemrefs are validated separately
|
||||||
|
$k eq 'name' ? length $v :
|
||||||
|
$k eq 'rderr' ? Types::Serialiser::is_bool($v) :
|
||||||
|
/^[0-9]+$/
|
||||||
|
);
|
||||||
|
($k,$v)
|
||||||
|
} keys %$val };
|
||||||
|
|
||||||
|
$printitems && printf "#%010x: %s\n", $itemref, fmtitem $val;
|
||||||
|
$items{$itemref} = $val;
|
||||||
|
}
|
||||||
|
$minitemsperblock = $nitems if $minitemsperblock > $nitems;
|
||||||
|
$maxitemsperblock = $nitems if $maxitemsperblock < $nitems;
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
sub indexblock($prefix, $content) {
|
||||||
|
$printblocks && print "$prefix: index block\n";
|
||||||
|
|
||||||
|
my $maxnum = max keys %datablocks;
|
||||||
|
die "$prefix: index block size incorrect for $maxnum+1 data blocks\n" if length($content) != 8*($maxnum+1) + 8;
|
||||||
|
|
||||||
|
my @ints = unpack 'Q>*', $content;
|
||||||
|
$root_itemref = pop @ints;
|
||||||
|
|
||||||
|
for my $i (0..$#ints-1) {
|
||||||
|
if (!$datablocks{$i}) {
|
||||||
|
die "$prefix: index entry for missing block (#$i) must be 0\n" if $ints[$i] != 0;
|
||||||
|
} else {
|
||||||
|
die sprintf "%s: invalid index entry for block #%d (got %016x expected %016x)\n",
|
||||||
|
$prefix, $i, $ints[$i], $datablocks{$i}
|
||||||
|
if $ints[$i] != $datablocks{$i};
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
while (1) {
|
||||||
|
my $off = tell STDIN;
|
||||||
|
my $prefix = sprintf '%010x', $off;
|
||||||
|
die "$prefix Input too short, expected block header\n" if 4 != read STDIN, my $blkhead, 4;
|
||||||
|
$blkhead = unpack 'N', $blkhead;
|
||||||
|
my $blkid = $blkhead >> 28;
|
||||||
|
my $blklen = $blkhead & 0x0fffffff;
|
||||||
|
|
||||||
|
$prefix .= "[$blklen]";
|
||||||
|
die "$prefix: Short read on block content\n" if $blklen - 8 != read STDIN, my $content, $blklen - 8;
|
||||||
|
die "$prefix: Input too short, expected block footer\n" if 4 != read STDIN, my $blkfoot, 4;
|
||||||
|
die "$prefix: Block footer does not match header\n" if $blkhead != unpack 'N', $blkfoot;
|
||||||
|
|
||||||
|
if ($blkid == 0) {
|
||||||
|
datablock($prefix, $off, $blklen, $content);
|
||||||
|
} elsif ($blkid == 1) {
|
||||||
|
indexblock($prefix, $content);
|
||||||
|
last;
|
||||||
|
} else {
|
||||||
|
die "$prefix Unknown block id $blkid\n";
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
{
|
||||||
|
die sprintf "0x%08x: Data after index block\n", tell(STDIN) if 0 != read STDIN, my $x, 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
# Each item must be referenced exactly once from either a 'prev' or 'sub' key,
|
||||||
|
# $nodup verifies the "at most once" part.
|
||||||
|
sub resolve($cur, $key, $nodup) {
|
||||||
|
my $ref = exists $cur->{$key} ? $cur->{$key} : return;
|
||||||
|
my $item = $ref < 0
|
||||||
|
? ($items{ $cur->{_itemref} + $ref } || die sprintf "#%010x: Invalid relative itemref %s: %d\n", $cur->{_itemref}, $key, $ref)
|
||||||
|
: ($items{$ref} || die sprintf "#%010x: Invalid reference %s to #%010x\n", $cur->{_itemref}, $key, $ref);
|
||||||
|
die sprintf "Item #%010x referenced more than once, from #%010x and #%010x\n", $item->{_itemref}, $item->{_lastseen}, $cur->{_itemref}
|
||||||
|
if $nodup && defined $item->{_lastseen};
|
||||||
|
$item->{_lastseen} = $cur->{_itemref} if $nodup;
|
||||||
|
return $item;
|
||||||
|
}
|
||||||
|
|
||||||
|
my @dirblocks; # [ path, nitems, nblocks ]
|
||||||
|
my %dirblocks; # nblocks => ndirs
|
||||||
|
|
||||||
|
sub traverse($parent, $path) {
|
||||||
|
my $sub = resolve($parent, 'sub', 1);
|
||||||
|
my %blocks;
|
||||||
|
my $items = 0;
|
||||||
|
while ($sub) {
|
||||||
|
$items++;
|
||||||
|
$blocks{ $sub->{_itemref} >> 24 }++;
|
||||||
|
traverse($sub, "$path/$sub->{name}") if $sub->{type} == 0;
|
||||||
|
$sub = resolve($sub, 'prev', 1);
|
||||||
|
}
|
||||||
|
push @dirblocks, [ $path, $items, scalar keys %blocks ] if scalar keys %blocks > 1;
|
||||||
|
$dirblocks{ keys %blocks }++ if $items > 0;
|
||||||
|
$items && $printdirs && printf "#%010x: %d items in %d blocks (%d .. %d) %s\n",
|
||||||
|
$parent->{_itemref}, $items, scalar keys %blocks,
|
||||||
|
min(values %blocks), max(values %blocks), $path;
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
{
|
||||||
|
my $root = $items{$root_itemref} || die sprintf "Invalid root itemref: %010x\n", $root_itemref;
|
||||||
|
$root->{_lastseen} = 0xffffffffff;
|
||||||
|
traverse($root, $root->{name});
|
||||||
|
|
||||||
|
my($noref) = grep !$_->{_lastseen}, values %items;
|
||||||
|
die sprintf "No reference found to #%010x\n", $noref->{_itemref} if $noref;
|
||||||
|
}
|
||||||
|
|
||||||
|
if ($printstats) {
|
||||||
|
my $nblocks = keys %datablocks;
|
||||||
|
my $nitems = keys %items;
|
||||||
|
printf " Total items: %d\n", $nitems;
|
||||||
|
printf " Total blocks: %d\n", $nblocks;
|
||||||
|
printf " Items per block: %.1f (%d .. %d)\n", $nitems / $nblocks, $minitemsperblock, $maxitemsperblock;
|
||||||
|
printf " Avg block size: %d compressed, %d raw (%.1f)\n", $datablock_len/$nblocks, $rawdata_len/$nblocks, $datablock_len/$rawdata_len*100;
|
||||||
|
printf " Avg item size: %.1f compressed, %.1f raw\n", $datablock_len/$nitems, $rawdata_len/$nitems;
|
||||||
|
|
||||||
|
@dirblocks = sort { $b->[2] <=> $a->[2] } @dirblocks;
|
||||||
|
print "\nBlocks per directory listing histogram\n";
|
||||||
|
printf " %5d %6d\n", $_, $dirblocks{$_} for sort { $a <=> $b } keys %dirblocks;
|
||||||
|
print "\nMost blocks per directory listing\n";
|
||||||
|
print " items blks path\n";
|
||||||
|
printf "%10d %4d %s\n", @{$dirblocks[$_]}[1,2,0] for (0..min 9, $#dirblocks);
|
||||||
|
}
|
||||||
|
|
@ -7,16 +7,20 @@ const model = @import("model.zig");
|
||||||
const sink = @import("sink.zig");
|
const sink = @import("sink.zig");
|
||||||
const util = @import("util.zig");
|
const util = @import("util.zig");
|
||||||
const ui = @import("ui.zig");
|
const ui = @import("ui.zig");
|
||||||
const c = @import("c.zig").c;
|
|
||||||
|
extern fn ZSTD_compress(dst: ?*anyopaque, dstCapacity: usize, src: ?*const anyopaque, srcSize: usize, compressionLevel: c_int) usize;
|
||||||
|
extern fn ZSTD_isError(code: usize) c_uint;
|
||||||
|
|
||||||
pub const global = struct {
|
pub const global = struct {
|
||||||
var fd: std.fs.File = undefined;
|
var fd: std.fs.File = undefined;
|
||||||
var index: std.ArrayListUnmanaged(u8) = .empty;
|
var index = std.ArrayList(u8).init(main.allocator);
|
||||||
var file_off: u64 = 0;
|
var file_off: u64 = 0;
|
||||||
var lock: std.Thread.Mutex = .{};
|
var lock: std.Thread.Mutex = .{};
|
||||||
var root_itemref: u64 = 0;
|
var root_itemref: u64 = 0;
|
||||||
};
|
};
|
||||||
|
|
||||||
|
const BLOCK_SIZE: usize = 64*1024;
|
||||||
|
|
||||||
pub const SIGNATURE = "\xbfncduEX1";
|
pub const SIGNATURE = "\xbfncduEX1";
|
||||||
|
|
||||||
pub const ItemKey = enum(u5) {
|
pub const ItemKey = enum(u5) {
|
||||||
|
|
@ -49,7 +53,7 @@ pub const ItemKey = enum(u5) {
|
||||||
|
|
||||||
// Pessimistic upper bound on the encoded size of an item, excluding the name field.
|
// Pessimistic upper bound on the encoded size of an item, excluding the name field.
|
||||||
// 2 bytes for map start/end, 11 per field (2 for the key, 9 for a full u64).
|
// 2 bytes for map start/end, 11 per field (2 for the key, 9 for a full u64).
|
||||||
const MAX_ITEM_LEN = 2 + 11 * @typeInfo(ItemKey).@"enum".fields.len;
|
const MAX_ITEM_LEN = 2 + 11 * @typeInfo(ItemKey).Enum.fields.len;
|
||||||
|
|
||||||
pub const CborMajor = enum(u3) { pos, neg, bytes, text, array, map, tag, simple };
|
pub const CborMajor = enum(u3) { pos, neg, bytes, text, array, map, tag, simple };
|
||||||
|
|
||||||
|
|
@ -61,6 +65,9 @@ inline fn blockHeader(id: u4, len: u28) [4]u8 { return bigu32((@as(u32, id) << 2
|
||||||
|
|
||||||
inline fn cborByte(major: CborMajor, arg: u5) u8 { return (@as(u8, @intFromEnum(major)) << 5) | arg; }
|
inline fn cborByte(major: CborMajor, arg: u5) u8 { return (@as(u8, @intFromEnum(major)) << 5) | arg; }
|
||||||
|
|
||||||
|
// ZSTD_COMPRESSBOUND(), assuming input does not exceed ZSTD_MAX_INPUT_SIZE
|
||||||
|
fn compressBound(size: usize) usize { return size + (size>>8) + (if (size < (128<<10)) ((128<<10) - size) >> 11 else 0); }
|
||||||
|
|
||||||
|
|
||||||
// (Uncompressed) data block size.
|
// (Uncompressed) data block size.
|
||||||
// Start with 64k, then use increasingly larger block sizes as the export file
|
// Start with 64k, then use increasingly larger block sizes as the export file
|
||||||
|
|
@ -70,8 +77,7 @@ inline fn cborByte(major: CborMajor, arg: u5) u8 { return (@as(u8, @intFromEnum(
|
||||||
fn blockSize(num: u32) usize {
|
fn blockSize(num: u32) usize {
|
||||||
// block size uncompressed data in this num range
|
// block size uncompressed data in this num range
|
||||||
// # mil # KiB # GiB
|
// # mil # KiB # GiB
|
||||||
return main.config.export_block_size
|
return if (num < ( 1<<20)) 64<<10 // 64
|
||||||
orelse if (num < ( 1<<20)) 64<<10 // 64
|
|
||||||
else if (num < ( 2<<20)) 128<<10 // 128
|
else if (num < ( 2<<20)) 128<<10 // 128
|
||||||
else if (num < ( 4<<20)) 256<<10 // 512
|
else if (num < ( 4<<20)) 256<<10 // 512
|
||||||
else if (num < ( 8<<20)) 512<<10 // 2048
|
else if (num < ( 8<<20)) 512<<10 // 2048
|
||||||
|
|
@ -79,15 +85,10 @@ fn blockSize(num: u32) usize {
|
||||||
else 2048<<10; // 32768
|
else 2048<<10; // 32768
|
||||||
}
|
}
|
||||||
|
|
||||||
// Upper bound on the return value of blockSize()
|
|
||||||
// (config.export_block_size may be larger than the sizes listed above, let's
|
|
||||||
// stick with the maximum block size supported by the file format to be safe)
|
|
||||||
const MAX_BLOCK_SIZE: usize = 1<<28;
|
|
||||||
|
|
||||||
|
|
||||||
pub const Thread = struct {
|
pub const Thread = struct {
|
||||||
buf: []u8 = undefined,
|
buf: []u8 = undefined,
|
||||||
off: usize = MAX_BLOCK_SIZE, // pretend we have a full block to trigger a flush() for the first write
|
off: usize = std.math.maxInt(usize) - (1<<10), // large number to trigger a flush() for the first write
|
||||||
block_num: u32 = std.math.maxInt(u32),
|
block_num: u32 = std.math.maxInt(u32),
|
||||||
itemref: u64 = 0, // ref of item currently being written
|
itemref: u64 = 0, // ref of item currently being written
|
||||||
|
|
||||||
|
|
@ -99,17 +100,17 @@ pub const Thread = struct {
|
||||||
|
|
||||||
fn compressZstd(in: []const u8, out: []u8) usize {
|
fn compressZstd(in: []const u8, out: []u8) usize {
|
||||||
while (true) {
|
while (true) {
|
||||||
const r = c.ZSTD_compress(out.ptr, out.len, in.ptr, in.len, main.config.complevel);
|
const r = ZSTD_compress(out.ptr, out.len, in.ptr, in.len, main.config.complevel);
|
||||||
if (c.ZSTD_isError(r) == 0) return r;
|
if (ZSTD_isError(r) == 0) return r;
|
||||||
ui.oom(); // That *ought* to be the only reason the above call can fail.
|
ui.oom(); // That *ought* to be the only reason the above call can fail.
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
fn createBlock(t: *Thread) std.ArrayListUnmanaged(u8) {
|
fn createBlock(t: *Thread) std.ArrayList(u8) {
|
||||||
var out: std.ArrayListUnmanaged(u8) = .empty;
|
var out = std.ArrayList(u8).init(main.allocator);
|
||||||
if (t.block_num == std.math.maxInt(u32) or t.off == 0) return out;
|
if (t.block_num == std.math.maxInt(u32) or t.off == 0) return out;
|
||||||
|
|
||||||
out.ensureTotalCapacityPrecise(main.allocator, 12 + @as(usize, @intCast(c.ZSTD_COMPRESSBOUND(@as(c_int, @intCast(t.off)))))) catch unreachable;
|
out.ensureTotalCapacityPrecise(12 + compressBound(t.off)) catch unreachable;
|
||||||
out.items.len = out.capacity;
|
out.items.len = out.capacity;
|
||||||
const bodylen = compressZstd(t.buf[0..t.off], out.items[8..]);
|
const bodylen = compressZstd(t.buf[0..t.off], out.items[8..]);
|
||||||
out.items.len = 12 + bodylen;
|
out.items.len = 12 + bodylen;
|
||||||
|
|
@ -121,13 +122,13 @@ pub const Thread = struct {
|
||||||
}
|
}
|
||||||
|
|
||||||
fn flush(t: *Thread, expected_len: usize) void {
|
fn flush(t: *Thread, expected_len: usize) void {
|
||||||
@branchHint(.unlikely);
|
@setCold(true);
|
||||||
var block = createBlock(t);
|
const block = createBlock(t);
|
||||||
defer block.deinit(main.allocator);
|
defer block.deinit();
|
||||||
|
|
||||||
global.lock.lock();
|
global.lock.lock();
|
||||||
defer global.lock.unlock();
|
defer global.lock.unlock();
|
||||||
// This can only really happen when the root path exceeds our block size,
|
// This can only really happen when the root path exceeds BLOCK_SIZE,
|
||||||
// in which case we would probably have error'ed out earlier anyway.
|
// in which case we would probably have error'ed out earlier anyway.
|
||||||
if (expected_len > t.buf.len) ui.die("Error writing data: path too long.\n", .{});
|
if (expected_len > t.buf.len) ui.die("Error writing data: path too long.\n", .{});
|
||||||
|
|
||||||
|
|
@ -141,7 +142,7 @@ pub const Thread = struct {
|
||||||
|
|
||||||
t.off = 0;
|
t.off = 0;
|
||||||
t.block_num = @intCast((global.index.items.len - 4) / 8);
|
t.block_num = @intCast((global.index.items.len - 4) / 8);
|
||||||
global.index.appendSlice(main.allocator, &[1]u8{0}**8) catch unreachable;
|
global.index.appendSlice(&[1]u8{0}**8) catch unreachable;
|
||||||
if (global.index.items.len + 12 >= (1<<28)) ui.die("Too many data blocks, please report a bug.\n", .{});
|
if (global.index.items.len + 12 >= (1<<28)) ui.die("Too many data blocks, please report a bug.\n", .{});
|
||||||
|
|
||||||
const newsize = blockSize(t.block_num);
|
const newsize = blockSize(t.block_num);
|
||||||
|
|
@ -394,7 +395,7 @@ pub const Dir = struct {
|
||||||
p.last_sub = t.itemStart(.dir, p.last_sub, name);
|
p.last_sub = t.itemStart(.dir, p.last_sub, name);
|
||||||
} else {
|
} else {
|
||||||
d.countLinks(null);
|
d.countLinks(null);
|
||||||
global.root_itemref = t.itemStart(.dir, null, name);
|
global.root_itemref = t.itemStart(.dir, 0, name);
|
||||||
}
|
}
|
||||||
d.inodes.deinit();
|
d.inodes.deinit();
|
||||||
|
|
||||||
|
|
@ -433,7 +434,7 @@ pub const Dir = struct {
|
||||||
|
|
||||||
pub fn createRoot(stat: *const sink.Stat, threads: []sink.Thread) Dir {
|
pub fn createRoot(stat: *const sink.Stat, threads: []sink.Thread) Dir {
|
||||||
for (threads) |*t| {
|
for (threads) |*t| {
|
||||||
t.sink.bin.buf = main.allocator.alloc(u8, blockSize(0)) catch unreachable;
|
t.sink.bin.buf = main.allocator.alloc(u8, BLOCK_SIZE) catch unreachable;
|
||||||
}
|
}
|
||||||
|
|
||||||
return .{ .stat = stat.* };
|
return .{ .stat = stat.* };
|
||||||
|
|
@ -447,12 +448,12 @@ pub fn done(threads: []sink.Thread) void {
|
||||||
|
|
||||||
while (std.mem.endsWith(u8, global.index.items, &[1]u8{0}**8))
|
while (std.mem.endsWith(u8, global.index.items, &[1]u8{0}**8))
|
||||||
global.index.shrinkRetainingCapacity(global.index.items.len - 8);
|
global.index.shrinkRetainingCapacity(global.index.items.len - 8);
|
||||||
global.index.appendSlice(main.allocator, &bigu64(global.root_itemref)) catch unreachable;
|
global.index.appendSlice(&bigu64(global.root_itemref)) catch unreachable;
|
||||||
global.index.appendSlice(main.allocator, &blockHeader(1, @intCast(global.index.items.len + 4))) catch unreachable;
|
global.index.appendSlice(&blockHeader(1, @intCast(global.index.items.len + 4))) catch unreachable;
|
||||||
global.index.items[0..4].* = blockHeader(1, @intCast(global.index.items.len));
|
global.index.items[0..4].* = blockHeader(1, @intCast(global.index.items.len));
|
||||||
global.fd.writeAll(global.index.items) catch |e|
|
global.fd.writeAll(global.index.items) catch |e|
|
||||||
ui.die("Error writing to file: {s}.\n", .{ ui.errorString(e) });
|
ui.die("Error writing to file: {s}.\n", .{ ui.errorString(e) });
|
||||||
global.index.clearAndFree(main.allocator);
|
global.index.clearAndFree();
|
||||||
|
|
||||||
global.fd.close();
|
global.fd.close();
|
||||||
}
|
}
|
||||||
|
|
@ -464,5 +465,5 @@ pub fn setupOutput(fd: std.fs.File) void {
|
||||||
global.file_off = 8;
|
global.file_off = 8;
|
||||||
|
|
||||||
// Placeholder for the index block header.
|
// Placeholder for the index block header.
|
||||||
global.index.appendSlice(main.allocator, "aaaa") catch unreachable;
|
global.index.appendSlice("aaaa") catch unreachable;
|
||||||
}
|
}
|
||||||
|
|
|
||||||
|
|
@ -8,7 +8,9 @@ const util = @import("util.zig");
|
||||||
const sink = @import("sink.zig");
|
const sink = @import("sink.zig");
|
||||||
const ui = @import("ui.zig");
|
const ui = @import("ui.zig");
|
||||||
const bin_export = @import("bin_export.zig");
|
const bin_export = @import("bin_export.zig");
|
||||||
const c = @import("c.zig").c;
|
|
||||||
|
extern fn ZSTD_decompress(dst: ?*anyopaque, dstCapacity: usize, src: ?*const anyopaque, compressedSize: usize) usize;
|
||||||
|
extern fn ZSTD_getFrameContentSize(src: ?*const anyopaque, srcSize: usize) c_ulonglong;
|
||||||
|
|
||||||
|
|
||||||
const CborMajor = bin_export.CborMajor;
|
const CborMajor = bin_export.CborMajor;
|
||||||
|
|
@ -63,7 +65,7 @@ inline fn bigu32(v: [4]u8) u32 { return std.mem.bigToNative(u32, @bitCast(v)); }
|
||||||
inline fn bigu64(v: [8]u8) u64 { return std.mem.bigToNative(u64, @bitCast(v)); }
|
inline fn bigu64(v: [8]u8) u64 { return std.mem.bigToNative(u64, @bitCast(v)); }
|
||||||
|
|
||||||
fn die() noreturn {
|
fn die() noreturn {
|
||||||
@branchHint(.cold);
|
@setCold(true);
|
||||||
if (global.lastitem) |e| ui.die("Error reading item {x} from file\n", .{e})
|
if (global.lastitem) |e| ui.die("Error reading item {x} from file\n", .{e})
|
||||||
else ui.die("Error reading from file\n", .{});
|
else ui.die("Error reading from file\n", .{});
|
||||||
}
|
}
|
||||||
|
|
@ -101,11 +103,11 @@ fn readBlock(num: u32) []const u8 {
|
||||||
catch |e| ui.die("Error reading from file: {s}\n", .{ui.errorString(e)});
|
catch |e| ui.die("Error reading from file: {s}\n", .{ui.errorString(e)});
|
||||||
if (rdlen != buf.len) die();
|
if (rdlen != buf.len) die();
|
||||||
|
|
||||||
const rawlen = c.ZSTD_getFrameContentSize(buf.ptr, buf.len);
|
const rawlen = ZSTD_getFrameContentSize(buf.ptr, buf.len);
|
||||||
if (rawlen <= 0 or rawlen >= (1<<24)) die();
|
if (rawlen <= 0 or rawlen >= (1<<24)) die();
|
||||||
block.data = main.allocator.alloc(u8, @intCast(rawlen)) catch unreachable;
|
block.data = main.allocator.alloc(u8, @intCast(rawlen)) catch unreachable;
|
||||||
|
|
||||||
const res = c.ZSTD_decompress(block.data.ptr, block.data.len, buf.ptr, buf.len);
|
const res = ZSTD_decompress(block.data.ptr, block.data.len, buf.ptr, buf.len);
|
||||||
if (res != block.data.len) ui.die("Error decompressing block {} (expected {} got {})\n", .{ num, block.data.len, res });
|
if (res != block.data.len) ui.die("Error decompressing block {} (expected {} got {})\n", .{ num, block.data.len, res });
|
||||||
|
|
||||||
return block.data;
|
return block.data;
|
||||||
|
|
@ -338,7 +340,7 @@ const ItemParser = struct {
|
||||||
// Skips over any fields that don't fit into an ItemKey.
|
// Skips over any fields that don't fit into an ItemKey.
|
||||||
fn next(r: *ItemParser) ?Field {
|
fn next(r: *ItemParser) ?Field {
|
||||||
while (r.key()) |k| {
|
while (r.key()) |k| {
|
||||||
if (k.major == .pos and k.arg <= std.math.maxInt(@typeInfo(ItemKey).@"enum".tag_type)) return .{
|
if (k.major == .pos and k.arg <= std.math.maxInt(@typeInfo(ItemKey).Enum.tag_type)) return .{
|
||||||
.key = @enumFromInt(k.arg),
|
.key = @enumFromInt(k.arg),
|
||||||
.val = r.r.next(),
|
.val = r.r.next(),
|
||||||
} else {
|
} else {
|
||||||
|
|
@ -504,9 +506,7 @@ pub fn import() void {
|
||||||
pub fn open(fd: std.fs.File) !void {
|
pub fn open(fd: std.fs.File) !void {
|
||||||
global.fd = fd;
|
global.fd = fd;
|
||||||
|
|
||||||
// Do not use fd.getEndPos() because that requires newer kernels supporting statx() #261.
|
const size = try fd.getEndPos();
|
||||||
try fd.seekFromEnd(0);
|
|
||||||
const size = try fd.getPos();
|
|
||||||
if (size < 16) return error.EndOfStream;
|
if (size < 16) return error.EndOfStream;
|
||||||
|
|
||||||
// Read index block
|
// Read index block
|
||||||
|
|
|
||||||
|
|
@ -9,22 +9,22 @@ const mem_sink = @import("mem_sink.zig");
|
||||||
const bin_reader = @import("bin_reader.zig");
|
const bin_reader = @import("bin_reader.zig");
|
||||||
const delete = @import("delete.zig");
|
const delete = @import("delete.zig");
|
||||||
const ui = @import("ui.zig");
|
const ui = @import("ui.zig");
|
||||||
const c = @import("c.zig").c;
|
const c = @cImport(@cInclude("time.h"));
|
||||||
const util = @import("util.zig");
|
const util = @import("util.zig");
|
||||||
|
|
||||||
// Currently opened directory.
|
// Currently opened directory.
|
||||||
pub var dir_parent: *model.Dir = undefined;
|
pub var dir_parent: *model.Dir = undefined;
|
||||||
pub var dir_path: [:0]u8 = undefined;
|
pub var dir_path: [:0]u8 = undefined;
|
||||||
var dir_parents: std.ArrayListUnmanaged(model.Ref) = .empty;
|
var dir_parents = std.ArrayList(model.Ref).init(main.allocator);
|
||||||
var dir_alloc = std.heap.ArenaAllocator.init(main.allocator);
|
var dir_alloc = std.heap.ArenaAllocator.init(main.allocator);
|
||||||
|
|
||||||
// Used to keep track of which dir is which ref, so we can enter it.
|
// Used to keep track of which dir is which ref, so we can enter it.
|
||||||
// Only used for binreader browsing.
|
// Only used for binreader browsing.
|
||||||
var dir_refs: std.ArrayListUnmanaged(struct { ptr: *model.Dir, ref: u64 }) = .empty;
|
var dir_refs = std.ArrayList(struct { ptr: *model.Dir, ref: u64 }).init(main.allocator);
|
||||||
|
|
||||||
// Sorted list of all items in the currently opened directory.
|
// Sorted list of all items in the currently opened directory.
|
||||||
// (first item may be null to indicate the "parent directory" item)
|
// (first item may be null to indicate the "parent directory" item)
|
||||||
var dir_items: std.ArrayListUnmanaged(?*model.Entry) = .empty;
|
var dir_items = std.ArrayList(?*model.Entry).init(main.allocator);
|
||||||
|
|
||||||
var dir_max_blocks: u64 = 0;
|
var dir_max_blocks: u64 = 0;
|
||||||
var dir_max_size: u64 = 0;
|
var dir_max_size: u64 = 0;
|
||||||
|
|
@ -146,7 +146,7 @@ pub fn loadDir(next_sel: u64) void {
|
||||||
dir_has_shared = false;
|
dir_has_shared = false;
|
||||||
|
|
||||||
if (dir_parents.items.len > 1)
|
if (dir_parents.items.len > 1)
|
||||||
dir_items.append(main.allocator, null) catch unreachable;
|
dir_items.append(null) catch unreachable;
|
||||||
var ref = dir_parent.sub;
|
var ref = dir_parent.sub;
|
||||||
while (!ref.isNull()) {
|
while (!ref.isNull()) {
|
||||||
const e =
|
const e =
|
||||||
|
|
@ -164,10 +164,10 @@ pub fn loadDir(next_sel: u64) void {
|
||||||
break :blk !excl and name[0] != '.' and name[name.len-1] != '~';
|
break :blk !excl and name[0] != '.' and name[name.len-1] != '~';
|
||||||
};
|
};
|
||||||
if (shown) {
|
if (shown) {
|
||||||
dir_items.append(main.allocator, e) catch unreachable;
|
dir_items.append(e) catch unreachable;
|
||||||
if (e.dir()) |d| {
|
if (e.dir()) |d| {
|
||||||
if (d.shared_blocks > 0 or d.shared_size > 0) dir_has_shared = true;
|
if (d.shared_blocks > 0 or d.shared_size > 0) dir_has_shared = true;
|
||||||
if (main.config.binreader) dir_refs.append(main.allocator, .{ .ptr = d, .ref = ref.ref }) catch unreachable;
|
if (main.config.binreader) dir_refs.append(.{ .ptr = d, .ref = ref.ref }) catch unreachable;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
@ -185,10 +185,10 @@ pub fn initRoot() void {
|
||||||
if (main.config.binreader) {
|
if (main.config.binreader) {
|
||||||
const ref = bin_reader.getRoot();
|
const ref = bin_reader.getRoot();
|
||||||
dir_parent = bin_reader.get(ref, main.allocator).dir() orelse ui.die("Invalid import\n", .{});
|
dir_parent = bin_reader.get(ref, main.allocator).dir() orelse ui.die("Invalid import\n", .{});
|
||||||
dir_parents.append(main.allocator, .{ .ref = ref }) catch unreachable;
|
dir_parents.append(.{ .ref = ref }) catch unreachable;
|
||||||
} else {
|
} else {
|
||||||
dir_parent = model.root;
|
dir_parent = model.root;
|
||||||
dir_parents.append(main.allocator, .{ .ptr = &dir_parent.entry }) catch unreachable;
|
dir_parents.append(.{ .ptr = &dir_parent.entry }) catch unreachable;
|
||||||
}
|
}
|
||||||
dir_path = main.allocator.dupeZ(u8, dir_parent.entry.name()) catch unreachable;
|
dir_path = main.allocator.dupeZ(u8, dir_parent.entry.name()) catch unreachable;
|
||||||
loadDir(0);
|
loadDir(0);
|
||||||
|
|
@ -202,10 +202,10 @@ fn enterSub(e: *model.Dir) void {
|
||||||
};
|
};
|
||||||
dir_parent.entry.destroy(main.allocator);
|
dir_parent.entry.destroy(main.allocator);
|
||||||
dir_parent = bin_reader.get(ref, main.allocator).dir() orelse unreachable;
|
dir_parent = bin_reader.get(ref, main.allocator).dir() orelse unreachable;
|
||||||
dir_parents.append(main.allocator, .{ .ref = ref }) catch unreachable;
|
dir_parents.append(.{ .ref = ref }) catch unreachable;
|
||||||
} else {
|
} else {
|
||||||
dir_parent = e;
|
dir_parent = e;
|
||||||
dir_parents.append(main.allocator, .{ .ptr = &e.entry }) catch unreachable;
|
dir_parents.append(.{ .ptr = &e.entry }) catch unreachable;
|
||||||
}
|
}
|
||||||
|
|
||||||
const newpath = std.fs.path.joinZ(main.allocator, &[_][]const u8{ dir_path, e.entry.name() }) catch unreachable;
|
const newpath = std.fs.path.joinZ(main.allocator, &[_][]const u8{ dir_path, e.entry.name() }) catch unreachable;
|
||||||
|
|
@ -430,7 +430,7 @@ const info = struct {
|
||||||
|
|
||||||
var tab: Tab = .info;
|
var tab: Tab = .info;
|
||||||
var entry: ?*model.Entry = null;
|
var entry: ?*model.Entry = null;
|
||||||
var links: ?std.ArrayListUnmanaged(*model.Link) = null;
|
var links: ?std.ArrayList(*model.Link) = null;
|
||||||
var links_top: usize = 0;
|
var links_top: usize = 0;
|
||||||
var links_idx: usize = 0;
|
var links_idx: usize = 0;
|
||||||
|
|
||||||
|
|
@ -445,7 +445,7 @@ const info = struct {
|
||||||
// Set the displayed entry to the currently selected item and open the tab.
|
// Set the displayed entry to the currently selected item and open the tab.
|
||||||
fn set(e: ?*model.Entry, t: Tab) void {
|
fn set(e: ?*model.Entry, t: Tab) void {
|
||||||
if (e != entry) {
|
if (e != entry) {
|
||||||
if (links) |*l| l.deinit(main.allocator);
|
if (links) |*l| l.deinit();
|
||||||
links = null;
|
links = null;
|
||||||
links_top = 0;
|
links_top = 0;
|
||||||
links_idx = 0;
|
links_idx = 0;
|
||||||
|
|
@ -458,10 +458,10 @@ const info = struct {
|
||||||
state = .info;
|
state = .info;
|
||||||
tab = t;
|
tab = t;
|
||||||
if (tab == .links and links == null and !main.config.binreader) {
|
if (tab == .links and links == null and !main.config.binreader) {
|
||||||
var list: std.ArrayListUnmanaged(*model.Link) = .empty;
|
var list = std.ArrayList(*model.Link).init(main.allocator);
|
||||||
var l = e.?.link().?;
|
var l = e.?.link().?;
|
||||||
while (true) {
|
while (true) {
|
||||||
list.append(main.allocator, l) catch unreachable;
|
list.append(l) catch unreachable;
|
||||||
l = l.next;
|
l = l.next;
|
||||||
if (&l.entry == e)
|
if (&l.entry == e)
|
||||||
break;
|
break;
|
||||||
|
|
@ -644,8 +644,8 @@ const info = struct {
|
||||||
fn keyInput(ch: i32) bool {
|
fn keyInput(ch: i32) bool {
|
||||||
if (entry.?.pack.etype == .link) {
|
if (entry.?.pack.etype == .link) {
|
||||||
switch (ch) {
|
switch (ch) {
|
||||||
'1', 'h', c.KEY_LEFT => { set(entry, .info); return true; },
|
'1', 'h', ui.c.KEY_LEFT => { set(entry, .info); return true; },
|
||||||
'2', 'l', c.KEY_RIGHT => { set(entry, .links); return true; },
|
'2', 'l', ui.c.KEY_RIGHT => { set(entry, .links); return true; },
|
||||||
else => {},
|
else => {},
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
@ -802,9 +802,9 @@ const help = struct {
|
||||||
'1' => tab = .keys,
|
'1' => tab = .keys,
|
||||||
'2' => tab = .flags,
|
'2' => tab = .flags,
|
||||||
'3' => tab = .about,
|
'3' => tab = .about,
|
||||||
'h', c.KEY_LEFT => tab = if (tab == .about) .flags else .keys,
|
'h', ui.c.KEY_LEFT => tab = if (tab == .about) .flags else .keys,
|
||||||
'l', c.KEY_RIGHT => tab = if (tab == .keys) .flags else .about,
|
'l', ui.c.KEY_RIGHT => tab = if (tab == .keys) .flags else .about,
|
||||||
'j', ' ', c.KEY_DOWN, c.KEY_NPAGE => {
|
'j', ' ', ui.c.KEY_DOWN, ui.c.KEY_NPAGE => {
|
||||||
const max = switch (tab) {
|
const max = switch (tab) {
|
||||||
.keys => keys.len/2 - keylines,
|
.keys => keys.len/2 - keylines,
|
||||||
else => @as(u32, 0),
|
else => @as(u32, 0),
|
||||||
|
|
@ -812,7 +812,7 @@ const help = struct {
|
||||||
if (offset < max)
|
if (offset < max)
|
||||||
offset += 1;
|
offset += 1;
|
||||||
},
|
},
|
||||||
'k', c.KEY_UP, c.KEY_PPAGE => { if (offset > 0) offset -= 1; },
|
'k', ui.c.KEY_UP, ui.c.KEY_PPAGE => { if (offset > 0) offset -= 1; },
|
||||||
else => state = .main,
|
else => state = .main,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
@ -917,16 +917,16 @@ fn sortToggle(col: main.config.SortCol, default_order: main.config.SortOrder) vo
|
||||||
|
|
||||||
fn keyInputSelection(ch: i32, idx: *usize, len: usize, page: u32) bool {
|
fn keyInputSelection(ch: i32, idx: *usize, len: usize, page: u32) bool {
|
||||||
switch (ch) {
|
switch (ch) {
|
||||||
'j', c.KEY_DOWN => {
|
'j', ui.c.KEY_DOWN => {
|
||||||
if (idx.*+1 < len) idx.* += 1;
|
if (idx.*+1 < len) idx.* += 1;
|
||||||
},
|
},
|
||||||
'k', c.KEY_UP => {
|
'k', ui.c.KEY_UP => {
|
||||||
if (idx.* > 0) idx.* -= 1;
|
if (idx.* > 0) idx.* -= 1;
|
||||||
},
|
},
|
||||||
c.KEY_HOME => idx.* = 0,
|
ui.c.KEY_HOME => idx.* = 0,
|
||||||
c.KEY_END, c.KEY_LL => idx.* = len -| 1,
|
ui.c.KEY_END, ui.c.KEY_LL => idx.* = len -| 1,
|
||||||
c.KEY_PPAGE => idx.* = idx.* -| page,
|
ui.c.KEY_PPAGE => idx.* = idx.* -| page,
|
||||||
c.KEY_NPAGE => idx.* = @min(len -| 1, idx.* + page),
|
ui.c.KEY_NPAGE => idx.* = @min(len -| 1, idx.* + page),
|
||||||
else => return false,
|
else => return false,
|
||||||
}
|
}
|
||||||
return true;
|
return true;
|
||||||
|
|
@ -1017,7 +1017,7 @@ pub fn keyInput(ch: i32) void {
|
||||||
},
|
},
|
||||||
|
|
||||||
// Navigation
|
// Navigation
|
||||||
10, 'l', c.KEY_RIGHT => {
|
10, 'l', ui.c.KEY_RIGHT => {
|
||||||
if (dir_items.items.len == 0) {
|
if (dir_items.items.len == 0) {
|
||||||
} else if (dir_items.items[cursor_idx]) |e| {
|
} else if (dir_items.items[cursor_idx]) |e| {
|
||||||
if (e.dir()) |d| {
|
if (e.dir()) |d| {
|
||||||
|
|
@ -1032,7 +1032,7 @@ pub fn keyInput(ch: i32) void {
|
||||||
state = .main;
|
state = .main;
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
'h', '<', c.KEY_BACKSPACE, c.KEY_LEFT => {
|
'h', '<', ui.c.KEY_BACKSPACE, ui.c.KEY_LEFT => {
|
||||||
if (dir_parents.items.len > 1) {
|
if (dir_parents.items.len > 1) {
|
||||||
//const h = dir_parent.entry.nameHash();
|
//const h = dir_parent.entry.nameHash();
|
||||||
enterParent();
|
enterParent();
|
||||||
|
|
|
||||||
20
src/c.zig
20
src/c.zig
|
|
@ -1,20 +0,0 @@
|
||||||
// SPDX-FileCopyrightText: Yorhel <projects@yorhel.nl>
|
|
||||||
// SPDX-License-Identifier: MIT
|
|
||||||
|
|
||||||
pub const c = @cImport({
|
|
||||||
@cDefine("_XOPEN_SOURCE", "1"); // for wcwidth()
|
|
||||||
@cInclude("stdio.h"); // fopen(), used to initialize ncurses
|
|
||||||
@cInclude("string.h"); // strerror()
|
|
||||||
@cInclude("time.h"); // strftime()
|
|
||||||
@cInclude("wchar.h"); // wcwidth()
|
|
||||||
@cInclude("locale.h"); // setlocale() and localeconv()
|
|
||||||
@cInclude("fnmatch.h"); // fnmatch()
|
|
||||||
@cInclude("unistd.h"); // getuid()
|
|
||||||
@cInclude("sys/types.h"); // struct passwd
|
|
||||||
@cInclude("pwd.h"); // getpwnam(), getpwuid()
|
|
||||||
if (@import("builtin").os.tag == .linux) {
|
|
||||||
@cInclude("sys/vfs.h"); // statfs()
|
|
||||||
}
|
|
||||||
@cInclude("curses.h");
|
|
||||||
@cInclude("zstd.h");
|
|
||||||
});
|
|
||||||
140  src/delete.zig
@@ -6,11 +6,7 @@ const main = @import("main.zig");
 const model = @import("model.zig");
 const ui = @import("ui.zig");
 const browser = @import("browser.zig");
-const scan = @import("scan.zig");
-const sink = @import("sink.zig");
-const mem_sink = @import("mem_sink.zig");
 const util = @import("util.zig");
-const c = @import("c.zig").c;

 var parent: *model.Dir = undefined;
 var entry: *model.Entry = undefined;
@@ -71,57 +67,6 @@ fn deleteItem(dir: std.fs.Dir, path: [:0]const u8, ptr: *align(1) ?*model.Entry)
     return false;
 }

-// Returns true if the item has been deleted successfully.
-fn deleteCmd(path: [:0]const u8, ptr: *align(1) ?*model.Entry) bool {
-    {
-        var env = std.process.getEnvMap(main.allocator) catch unreachable;
-        defer env.deinit();
-        env.put("NCDU_DELETE_PATH", path) catch unreachable;
-
-        // Since we're passing the path as an environment variable and go through
-        // the shell anyway, we can refer to the variable and avoid error-prone
-        // shell escaping.
-        const cmd = std.fmt.allocPrint(main.allocator, "{s} \"$NCDU_DELETE_PATH\"", .{main.config.delete_command}) catch unreachable;
-        defer main.allocator.free(cmd);
-        ui.runCmd(&.{"/bin/sh", "-c", cmd}, null, &env, true);
-    }
-
-    const stat = scan.statAt(std.fs.cwd(), path, false, null) catch {
-        // Stat failed. Would be nice to display an error if it's not
-        // 'FileNotFound', but w/e, let's just assume the item has been
-        // deleted as expected.
-        ptr.*.?.zeroStats(parent);
-        ptr.* = ptr.*.?.next.ptr;
-        return true;
-    };
-
-    // If either old or new entry is not a dir, remove & re-add entry in the in-memory tree.
-    if (ptr.*.?.pack.etype != .dir or stat.etype != .dir) {
-        ptr.*.?.zeroStats(parent);
-        const e = model.Entry.create(main.allocator, stat.etype, main.config.extended and !stat.ext.isEmpty(), ptr.*.?.name());
-        e.next.ptr = ptr.*.?.next.ptr;
-        mem_sink.statToEntry(&stat, e, parent);
-        ptr.* = e;
-
-        var it : ?*model.Dir = parent;
-        while (it) |p| : (it = p.parent) {
-            if (stat.etype != .link) {
-                p.entry.pack.blocks +|= e.pack.blocks;
-                p.entry.size +|= e.size;
-            }
-            p.items +|= 1;
-        }
-    }
-
-    // If new entry is a dir, recursively scan.
-    if (ptr.*.?.dir()) |d| {
-        main.state = .refresh;
-        sink.global.sink = .mem;
-        mem_sink.global.root = d;
-    }
-    return false;
-}
-
 // Returns the item that should be selected in the browser.
 pub fn delete() ?*model.Entry {
     while (main.state == .delete and state == .confirm)
@@ -136,46 +81,30 @@ pub fn delete() ?*model.Entry {
         if (it.* == entry)
             break;

-    var path: std.ArrayListUnmanaged(u8) = .empty;
-    defer path.deinit(main.allocator);
-    parent.fmtPath(main.allocator, true, &path);
+    var path = std.ArrayList(u8).init(main.allocator);
+    defer path.deinit();
+    parent.fmtPath(true, &path);
     if (path.items.len == 0 or path.items[path.items.len-1] != '/')
-        path.append(main.allocator, '/') catch unreachable;
-    path.appendSlice(main.allocator, entry.name()) catch unreachable;
+        path.append('/') catch unreachable;
+    path.appendSlice(entry.name()) catch unreachable;

-    if (main.config.delete_command.len == 0) {
-        _ = deleteItem(std.fs.cwd(), util.arrayListBufZ(&path, main.allocator), it);
-        model.inodes.addAllStats();
-        return if (it.* == e) e else next_sel;
-    } else {
-        const isdel = deleteCmd(util.arrayListBufZ(&path, main.allocator), it);
-        model.inodes.addAllStats();
-        return if (isdel) next_sel else it.*;
-    }
+    _ = deleteItem(std.fs.cwd(), util.arrayListBufZ(&path), it);
+    model.inodes.addAllStats();
+    return if (it.* == e) e else next_sel;
 }

 fn drawConfirm() void {
     browser.draw();
     const box = ui.Box.create(6, 60, "Confirm delete");
     box.move(1, 2);
-    if (main.config.delete_command.len == 0) {
-        ui.addstr("Are you sure you want to delete \"");
-        ui.addstr(ui.shorten(ui.toUtf8(entry.name()), 21));
-        ui.addch('"');
-        if (entry.pack.etype != .dir)
-            ui.addch('?')
-        else {
-            box.move(2, 18);
-            ui.addstr("and all of its contents?");
-        }
-    } else {
-        ui.addstr("Are you sure you want to run \"");
-        ui.addstr(ui.shorten(ui.toUtf8(main.config.delete_command), 25));
-        ui.addch('"');
-        box.move(2, 4);
-        ui.addstr("on \"");
-        ui.addstr(ui.shorten(ui.toUtf8(entry.name()), 50));
-        ui.addch('"');
-    }
+    ui.addstr("Are you sure you want to delete \"");
+    ui.addstr(ui.shorten(ui.toUtf8(entry.name()), 21));
+    ui.addch('"');
+    if (entry.pack.etype != .dir)
+        ui.addch('?')
+    else {
+        box.move(2, 18);
+        ui.addstr("and all of its contents?");
     }

     box.move(4, 15);
@@ -189,25 +118,20 @@ fn drawConfirm() void {
     box.move(4, 31);
     ui.style(if (confirm == .ignore) .sel else .default);
     ui.addstr("don't ask me again");
-    box.move(4, switch (confirm) {
-        .yes => 15,
-        .no => 25,
-        .ignore => 31
-    });
 }

 fn drawProgress() void {
-    var path: std.ArrayListUnmanaged(u8) = .empty;
-    defer path.deinit(main.allocator);
-    parent.fmtPath(main.allocator, false, &path);
-    path.append(main.allocator, '/') catch unreachable;
-    path.appendSlice(main.allocator, entry.name()) catch unreachable;
+    var path = std.ArrayList(u8).init(main.allocator);
+    defer path.deinit();
+    parent.fmtPath(false, &path);
+    path.append('/') catch unreachable;
+    path.appendSlice(entry.name()) catch unreachable;

     // TODO: Item counts and progress bar would be nice.

     const box = ui.Box.create(6, 60, "Deleting...");
     box.move(2, 2);
-    ui.addstr(ui.shorten(ui.toUtf8(util.arrayListBufZ(&path, main.allocator)), 56));
+    ui.addstr(ui.shorten(ui.toUtf8(util.arrayListBufZ(&path)), 56));
     box.move(4, 41);
     ui.addstr("Press ");
     ui.style(.key);
@@ -217,16 +141,16 @@ fn drawProgress() void {
 }

 fn drawErr() void {
-    var path: std.ArrayListUnmanaged(u8) = .empty;
-    defer path.deinit(main.allocator);
-    parent.fmtPath(main.allocator, false, &path);
-    path.append(main.allocator, '/') catch unreachable;
-    path.appendSlice(main.allocator, entry.name()) catch unreachable;
+    var path = std.ArrayList(u8).init(main.allocator);
+    defer path.deinit();
+    parent.fmtPath(false, &path);
+    path.append('/') catch unreachable;
+    path.appendSlice(entry.name()) catch unreachable;

     const box = ui.Box.create(6, 60, "Error");
     box.move(1, 2);
     ui.addstr("Error deleting ");
-    ui.addstr(ui.shorten(ui.toUtf8(util.arrayListBufZ(&path, main.allocator)), 41));
+    ui.addstr(ui.shorten(ui.toUtf8(util.arrayListBufZ(&path)), 41));
     box.move(2, 4);
     ui.addstr(ui.errorString(error_code));

@@ -254,11 +178,11 @@ pub fn draw() void {
 pub fn keyInput(ch: i32) void {
     switch (state) {
         .confirm => switch (ch) {
-            'h', c.KEY_LEFT => confirm = switch (confirm) {
+            'h', ui.c.KEY_LEFT => confirm = switch (confirm) {
                 .ignore => .no,
                 else => .yes,
             },
-            'l', c.KEY_RIGHT => confirm = switch (confirm) {
+            'l', ui.c.KEY_RIGHT => confirm = switch (confirm) {
                 .yes => .no,
                 else => .ignore,
             },
@@ -278,11 +202,11 @@ pub fn keyInput(ch: i32) void {
             main.state = .browse;
         },
         .err => switch (ch) {
-            'h', c.KEY_LEFT => error_option = switch (error_option) {
+            'h', ui.c.KEY_LEFT => error_option = switch (error_option) {
                 .all => .ignore,
                 else => .abort,
             },
-            'l', c.KEY_RIGHT => error_option = switch (error_option) {
+            'l', ui.c.KEY_RIGHT => error_option = switch (error_option) {
                 .abort => .ignore,
                 else => .all,
             },
@@ -3,7 +3,7 @@

 const std = @import("std");
 const main = @import("main.zig");
-const c = @import("c.zig").c;
+const c = @cImport(@cInclude("fnmatch.h"));

 // Reference:
 // https://manned.org/glob.7
@@ -123,7 +123,7 @@ test "parse" {
 fn PatternList(comptime withsub: bool) type {
     return struct {
         literals: std.HashMapUnmanaged(*const Pattern, Val, Ctx, 80) = .{},
-        wild: std.ArrayListUnmanaged(*const Pattern) = .empty,
+        wild: std.ArrayListUnmanaged(*const Pattern) = .{},

         // Not a fan of the map-of-arrays approach in the 'withsub' case, it
         // has a lot of extra allocations. Linking the Patterns together in a
@@ -7,7 +7,6 @@ const model = @import("model.zig");
 const sink = @import("sink.zig");
 const util = @import("util.zig");
 const ui = @import("ui.zig");
-const c = @import("c.zig").c;

 // JSON output is necessarily single-threaded and items MUST be added depth-first.

@@ -15,55 +14,8 @@ pub const global = struct {
     var writer: *Writer = undefined;
 };

-
-const ZstdWriter = struct {
-    ctx: ?*c.ZSTD_CStream,
-    out: c.ZSTD_outBuffer,
-    outbuf: [c.ZSTD_BLOCKSIZE_MAX + 64]u8,
-
-    fn create() *ZstdWriter {
-        const w = main.allocator.create(ZstdWriter) catch unreachable;
-        w.out = .{
-            .dst = &w.outbuf,
-            .size = w.outbuf.len,
-            .pos = 0,
-        };
-        while (true) {
-            w.ctx = c.ZSTD_createCStream();
-            if (w.ctx != null) break;
-            ui.oom();
-        }
-        _ = c.ZSTD_CCtx_setParameter(w.ctx, c.ZSTD_c_compressionLevel, main.config.complevel);
-        return w;
-    }
-
-    fn destroy(w: *ZstdWriter) void {
-        _ = c.ZSTD_freeCStream(w.ctx);
-        main.allocator.destroy(w);
-    }
-
-    fn write(w: *ZstdWriter, f: std.fs.File, in: []const u8, flush: bool) !void {
-        var arg = c.ZSTD_inBuffer{
-            .src = in.ptr,
-            .size = in.len,
-            .pos = 0,
-        };
-        while (true) {
-            const v = c.ZSTD_compressStream2(w.ctx, &w.out, &arg, if (flush) c.ZSTD_e_end else c.ZSTD_e_continue);
-            if (c.ZSTD_isError(v) != 0) return error.ZstdCompressError;
-            if (flush or w.out.pos > w.outbuf.len / 2) {
-                try f.writeAll(w.outbuf[0..w.out.pos]);
-                w.out.pos = 0;
-            }
-            if (!flush and arg.pos == arg.size) break;
-            if (flush and v == 0) break;
-        }
-    }
-};
-
 pub const Writer = struct {
     fd: std.fs.File,
-    zstd: ?*ZstdWriter = null,
     // Must be large enough to hold PATH_MAX*6 plus some overhead.
     // (The 6 is because, in the worst case, every byte expands to a "\u####"
     // escape, and we do pessimistic estimates here in order to avoid checking
@@ -73,12 +25,11 @@ pub const Writer = struct {
     dir_entry_open: bool = false,

     fn flush(ctx: *Writer, bytes: usize) void {
-        @branchHint(.unlikely);
+        @setCold(true);
         // This can only really happen when the root path exceeds PATH_MAX,
         // in which case we would probably have error'ed out earlier anyway.
         if (bytes > ctx.buf.len) ui.die("Error writing JSON export: path too long.\n", .{});
-        const buf = ctx.buf[0..ctx.off];
-        (if (ctx.zstd) |z| z.write(ctx.fd, buf, bytes == 0) else ctx.fd.writeAll(buf)) catch |e|
+        ctx.fd.writeAll(ctx.buf[0..ctx.off]) catch |e|
             ui.die("Error writing to file: {s}.\n", .{ ui.errorString(e) });
         ctx.off = 0;
     }
@@ -126,14 +77,14 @@
         var index: usize = buf.len;
         while (a >= 100) : (a = @divTrunc(a, 100)) {
             index -= 2;
-            buf[index..][0..2].* = std.fmt.digits2(@as(u8, @intCast(a % 100)));
+            buf[index..][0..2].* = std.fmt.digits2(@as(usize, @intCast(a % 100)));
         }
         if (a < 10) {
             index -= 1;
             buf[index] = '0' + @as(u8, @intCast(a));
         } else {
             index -= 2;
-            buf[index..][0..2].* = std.fmt.digits2(@as(u8, @intCast(a)));
+            buf[index..][0..2].* = std.fmt.digits2(@as(usize, @intCast(a)));
         }
         ctx.write(buf[index..]);
     }
@@ -141,7 +92,6 @@
     fn init(out: std.fs.File) *Writer {
         var ctx = main.allocator.create(Writer) catch unreachable;
         ctx.* = .{ .fd = out };
-        if (main.config.compress) ctx.zstd = ZstdWriter.create();
         ctx.write("[1,2,{\"progname\":\"ncdu\",\"progver\":\"" ++ main.program_version ++ "\",\"timestamp\":");
         ctx.writeUint(@intCast(@max(0, std.time.timestamp())));
         ctx.writeByte('}');
@@ -260,7 +210,6 @@ pub fn createRoot(path: []const u8, stat: *const sink.Stat) Dir {
 pub fn done() void {
     global.writer.write("]\n");
     global.writer.flush(0);
-    if (global.writer.zstd) |z| z.destroy();
     global.writer.fd.close();
     main.allocator.destroy(global.writer);
 }
@@ -7,54 +7,6 @@ const util = @import("util.zig");
 const model = @import("model.zig");
 const sink = @import("sink.zig");
 const ui = @import("ui.zig");
-const c = @import("c.zig").c;
-
-
-const ZstdReader = struct {
-    ctx: ?*c.ZSTD_DStream,
-    in: c.ZSTD_inBuffer,
-    lastret: usize = 0,
-    inbuf: [c.ZSTD_BLOCKSIZE_MAX + 16]u8, // This ZSTD_DStreamInSize() + a little bit extra
-
-    fn create(head: []const u8) *ZstdReader {
-        const r = main.allocator.create(ZstdReader) catch unreachable;
-        @memcpy(r.inbuf[0..head.len], head);
-        r.in = .{
-            .src = &r.inbuf,
-            .size = head.len,
-            .pos = 0,
-        };
-        while (true) {
-            r.ctx = c.ZSTD_createDStream();
-            if (r.ctx != null) break;
-            ui.oom();
-        }
-        return r;
-    }
-
-    fn destroy(r: *ZstdReader) void {
-        _ = c.ZSTD_freeDStream(r.ctx);
-        main.allocator.destroy(r);
-    }
-
-    fn read(r: *ZstdReader, f: std.fs.File, out: []u8) !usize {
-        while (true) {
-            if (r.in.size == r.in.pos) {
-                r.in.pos = 0;
-                r.in.size = try f.read(&r.inbuf);
-                if (r.in.size == 0) {
-                    if (r.lastret == 0) return 0;
-                    return error.ZstdDecompressError; // Early EOF
-                }
-            }
-
-            var arg = c.ZSTD_outBuffer{ .dst = out.ptr, .size = out.len, .pos = 0 };
-            r.lastret = c.ZSTD_decompressStream(r.ctx, &arg, &r.in);
-            if (c.ZSTD_isError(r.lastret) != 0) return error.ZstdDecompressError;
-            if (arg.pos > 0) return arg.pos;
-        }
-    }
-};
-

 // Using a custom JSON parser here because, while std.json is great, it does
@@ -64,12 +16,11 @@ const ZstdReader = struct {

 const Parser = struct {
     rd: std.fs.File,
-    zstd: ?*ZstdReader = null,
     rdoff: usize = 0,
     rdsize: usize = 0,
     byte: u64 = 1,
     line: u64 = 1,
-    buf: [129*1024]u8 = undefined,
+    buf: [16*1024]u8 = undefined,

     fn die(p: *Parser, str: []const u8) noreturn {
         ui.die("Error importing file on line {}:{}: {s}.\n", .{ p.line, p.byte, str });
@@ -83,11 +34,11 @@ const Parser = struct {
     }

     fn fill(p: *Parser) void {
+        @setCold(true);
         p.rdoff = 0;
-        p.rdsize = (if (p.zstd) |z| z.read(p.rd, &p.buf) else p.rd.read(&p.buf)) catch |e| switch (e) {
+        p.rdsize = p.rd.read(&p.buf) catch |e| switch (e) {
             error.IsDir => p.die("not a file"), // should be detected at open() time, but no flag for that...
             error.SystemResources => p.die("out of memory"),
-            error.ZstdDecompressError => p.die("decompression error"),
             else => p.die("I/O error"),
         };
     }
@@ -97,7 +48,6 @@ const Parser = struct {
     // (Returning a '?u8' here is nicer but kills performance by about +30%)
     fn nextByte(p: *Parser) u8 {
         if (p.rdoff == p.rdsize) {
-            @branchHint(.unlikely);
             p.fill();
             if (p.rdsize == 0) return 0;
         }
@@ -133,7 +83,7 @@ const Parser = struct {
     }

     fn stringContentSlow(p: *Parser, buf: []u8, head: u8, off: usize) []u8 {
-        @branchHint(.unlikely);
+        @setCold(true);
         var b = head;
         var n = off;
         while (true) {
@@ -149,16 +99,9 @@ const Parser = struct {
                 'r' => if (n < buf.len) { buf[n] = 0xd; n += 1; },
                 't' => if (n < buf.len) { buf[n] = 0x9; n += 1; },
                 'u' => {
-                    const first = (p.hexdig()<<12) + (p.hexdig()<<8) + (p.hexdig()<<4) + p.hexdig();
-                    var unit = @as(u21, first);
-                    if (std.unicode.utf16IsLowSurrogate(first)) p.die("Unexpected low surrogate");
-                    if (std.unicode.utf16IsHighSurrogate(first)) {
-                        p.expectLit("\\u");
-                        const second = (p.hexdig()<<12) + (p.hexdig()<<8) + (p.hexdig()<<4) + p.hexdig();
-                        unit = std.unicode.utf16DecodeSurrogatePair(&.{first, second}) catch p.die("Invalid low surrogate");
-                    }
+                    const char = (p.hexdig()<<12) + (p.hexdig()<<8) + (p.hexdig()<<4) + p.hexdig();
                     if (n + 6 < buf.len)
-                        n += std.unicode.utf8Encode(unit, buf[n..n+5]) catch unreachable;
+                        n += std.unicode.utf8Encode(char, buf[n..n+5]) catch unreachable;
                 },
                 else => p.die("invalid escape sequence"),
             },
@@ -532,15 +475,8 @@ pub fn import(fd: std.fs.File, head: []const u8) void {
     const sink_threads = sink.createThreads(1);
     defer sink.done();

-    var p = Parser{.rd = fd};
-    defer if (p.zstd) |z| z.destroy();
-
-    if (head.len >= 4 and std.mem.eql(u8, head[0..4], "\x28\xb5\x2f\xfd")) {
-        p.zstd = ZstdReader.create(head);
-    } else {
-        p.rdsize = head.len;
-        @memcpy(p.buf[0..head.len], head);
-    }
+    var p = Parser{.rd = fd, .rdsize = head.len};
+    @memcpy(p.buf[0..head.len], head);

     p.array();
     if (p.uint(u16) != 1) p.die("incompatible major format version");
     if (!p.elem(false)) p.die("expected array element");
326  src/main.zig
@@ -1,7 +1,7 @@
 // SPDX-FileCopyrightText: Yorhel <projects@yorhel.nl>
 // SPDX-License-Identifier: MIT

-pub const program_version = "2.9.2";
+pub const program_version = "2.6";

 const std = @import("std");
 const model = @import("model.zig");
@@ -18,7 +18,7 @@ const browser = @import("browser.zig");
 const delete = @import("delete.zig");
 const util = @import("util.zig");
 const exclude = @import("exclude.zig");
-const c = @import("c.zig").c;
+const c = @cImport(@cInclude("locale.h"));

 test "imports" {
     _ = model;
@@ -41,7 +41,7 @@ test "imports" {
 // This allocator never returns an error, it either succeeds or causes ncdu to quit.
 // (Which means you'll find a lot of "catch unreachable" sprinkled through the code,
 // they look scarier than they are)
-fn wrapAlloc(_: *anyopaque, len: usize, ptr_alignment: std.mem.Alignment, return_address: usize) ?[*]u8 {
+fn wrapAlloc(_: *anyopaque, len: usize, ptr_alignment: u8, return_address: usize) ?[*]u8 {
     while (true) {
         if (std.heap.c_allocator.vtable.alloc(undefined, len, ptr_alignment, return_address)) |r|
             return r
@@ -56,20 +56,18 @@ pub const allocator = std.mem.Allocator{
         .alloc = wrapAlloc,
         // AFAIK, all uses of resize() to grow an allocation will fall back to alloc() on failure.
         .resize = std.heap.c_allocator.vtable.resize,
-        .remap = std.heap.c_allocator.vtable.remap,
         .free = std.heap.c_allocator.vtable.free,
     },
 };


 // Custom panic impl to reset the terminal before spewing out an error message.
-pub const panic = std.debug.FullPanic(struct {
-    pub fn panicFn(msg: []const u8, first_trace_addr: ?usize) noreturn {
-        @branchHint(.cold);
-        ui.deinit();
-        std.debug.defaultPanic(msg, first_trace_addr);
-    }
-}.panicFn);
+pub fn panic(msg: []const u8, error_return_trace: ?*std.builtin.StackTrace, ret_addr: ?usize) noreturn {
+    @setCold(true);
+    ui.deinit();
+    std.debug.panicImpl(error_return_trace, ret_addr orelse @returnAddress(), msg);
+}

 pub const config = struct {
     pub const SortCol = enum { name, blocks, size, items, mtime };
@@ -80,10 +78,9 @@ pub const config = struct {
     pub var follow_symlinks: bool = false;
     pub var exclude_caches: bool = false;
     pub var exclude_kernfs: bool = false;
+    pub var exclude_patterns: std.ArrayList([:0]const u8) = std.ArrayList([:0]const u8).init(allocator);
     pub var threads: usize = 1;
     pub var complevel: u8 = 4;
-    pub var compress: bool = false;
-    pub var export_block_size: ?usize = null;

     pub var update_delay: u64 = 100*std.time.ns_per_ms;
     pub var scan_ui: ?enum { none, line, full } = null;
@@ -113,14 +110,10 @@ pub const config = struct {
     pub var confirm_quit: bool = false;
     pub var confirm_delete: bool = true;
     pub var ignore_delete_errors: bool = false;
-    pub var delete_command: [:0]const u8 = "";
 };

 pub var state: enum { scan, browse, refresh, shell, delete } = .scan;

-const stdin = if (@hasDecl(std.io, "getStdIn")) std.io.getStdIn() else std.fs.File.stdin();
-const stdout = if (@hasDecl(std.io, "getStdOut")) std.io.getStdOut() else std.fs.File.stdout();
-
 // Simple generic argument parser, supports getopt_long() style arguments.
 const Args = struct {
     lst: []const [:0]const u8,
@@ -129,7 +122,6 @@ const Args = struct {
     last_arg: ?[:0]const u8 = null, // In the case of --option=<arg>
     shortbuf: [2]u8 = undefined,
     argsep: bool = false,
-    ignerror: bool = false,

     const Self = @This();
     const Option = struct {
@@ -159,27 +151,22 @@ const Args = struct {
         return .{ .opt = true, .val = &self.shortbuf };
     }

-    pub fn die(self: *const Self, comptime msg: []const u8, args: anytype) !noreturn {
-        if (self.ignerror) return error.InvalidArg;
-        ui.die(msg, args);
-    }
-
     /// Return the next option or positional argument.
     /// 'opt' indicates whether it's an option or positional argument,
     /// 'val' will be either -x, --something or the argument.
-    pub fn next(self: *Self) !?Option {
-        if (self.last_arg != null) try self.die("Option '{s}' does not expect an argument.\n", .{ self.last.? });
+    pub fn next(self: *Self) ?Option {
+        if (self.last_arg != null) ui.die("Option '{s}' does not expect an argument.\n", .{ self.last.? });
         if (self.short) |s| return self.shortopt(s);
         const val = self.pop() orelse return null;
         if (self.argsep or val.len == 0 or val[0] != '-') return Option{ .opt = false, .val = val };
-        if (val.len == 1) try self.die("Invalid option '-'.\n", .{});
+        if (val.len == 1) ui.die("Invalid option '-'.\n", .{});
         if (val.len == 2 and val[1] == '-') {
             self.argsep = true;
             return self.next();
         }
         if (val[1] == '-') {
             if (std.mem.indexOfScalar(u8, val, '=')) |sep| {
-                if (sep == 2) try self.die("Invalid option '{s}'.\n", .{val});
+                if (sep == 2) ui.die("Invalid option '{s}'.\n", .{val});
                 self.last_arg = val[sep+1.. :0];
                 self.last = val[0..sep];
                 return Option{ .opt = true, .val = self.last.? };
@@ -191,7 +178,7 @@ const Args = struct {
     }

     /// Returns the argument given to the last returned option. Dies with an error if no argument is provided.
-    pub fn arg(self: *Self) ![:0]const u8 {
+    pub fn arg(self: *Self) [:0]const u8 {
         if (self.short) |a| {
             defer self.short = null;
             return a;
@@ -201,11 +188,11 @@ const Args = struct {
             return a;
         }
         if (self.pop()) |o| return o;
-        try self.die("Option '{s}' requires an argument.\n", .{ self.last.? });
+        ui.die("Option '{s}' requires an argument.\n", .{ self.last.? });
     }
 };

-fn argConfig(args: *Args, opt: Args.Option, infile: bool) !void {
+fn argConfig(args: *Args, opt: Args.Option) bool {
     if (opt.is("-q") or opt.is("--slow-ui-updates")) config.update_delay = 2*std.time.ns_per_s
     else if (opt.is("--fast-ui-updates")) config.update_delay = 100*std.time.ns_per_ms
     else if (opt.is("-x") or opt.is("--one-file-system")) config.same_fs = true
@@ -235,13 +222,13 @@ fn argConfig(args: *Args, opt: Args.Option, infile: bool) !void {
     else if (opt.is("--enable-natsort")) config.sort_natural = true
     else if (opt.is("--disable-natsort")) config.sort_natural = false
     else if (opt.is("--graph-style")) {
-        const val = try args.arg();
+        const val = args.arg();
         if (std.mem.eql(u8, val, "hash")) config.graph_style = .hash
         else if (std.mem.eql(u8, val, "half-block")) config.graph_style = .half
         else if (std.mem.eql(u8, val, "eighth-block") or std.mem.eql(u8, val, "eigth-block")) config.graph_style = .eighth
-        else try args.die("Unknown --graph-style option: {s}.\n", .{val});
+        else ui.die("Unknown --graph-style option: {s}.\n", .{val});
     } else if (opt.is("--sort")) {
-        var val: []const u8 = try args.arg();
+        var val: []const u8 = args.arg();
         var ord: ?config.SortOrder = null;
         if (std.mem.endsWith(u8, val, "-asc")) {
             val = val[0..val.len-4];
@@ -265,13 +252,13 @@ fn argConfig(args: *Args, opt: Args.Option, infile: bool) !void {
         } else if (std.mem.eql(u8, val, "mtime")) {
             config.sort_col = .mtime;
             config.sort_order = ord orelse .asc;
-        } else try args.die("Unknown --sort option: {s}.\n", .{val});
+        } else ui.die("Unknown --sort option: {s}.\n", .{val});
     } else if (opt.is("--shared-column")) {
-        const val = try args.arg();
+        const val = args.arg();
         if (std.mem.eql(u8, val, "off")) config.show_shared = .off
         else if (std.mem.eql(u8, val, "shared")) config.show_shared = .shared
         else if (std.mem.eql(u8, val, "unique")) config.show_shared = .unique
-        else try args.die("Unknown --shared-column option: {s}.\n", .{val});
+        else ui.die("Unknown --shared-column option: {s}.\n", .{val});
     } else if (opt.is("--apparent-size")) config.show_blocks = false
     else if (opt.is("--disk-usage")) config.show_blocks = true
     else if (opt.is("-0")) config.scan_ui = .none
@@ -281,45 +268,33 @@ fn argConfig(args: *Args, opt: Args.Option, infile: bool) !void {
     else if (opt.is("--no-si")) config.si = false
     else if (opt.is("-L") or opt.is("--follow-symlinks")) config.follow_symlinks = true
     else if (opt.is("--no-follow-symlinks")) config.follow_symlinks = false
-    else if (opt.is("--exclude")) {
-        const arg = if (infile) (util.expanduser(try args.arg(), allocator) catch unreachable) else try args.arg();
-        defer if (infile) allocator.free(arg);
-        exclude.addPattern(arg);
-    } else if (opt.is("-X") or opt.is("--exclude-from")) {
-        const arg = if (infile) (util.expanduser(try args.arg(), allocator) catch unreachable) else try args.arg();
-        defer if (infile) allocator.free(arg);
-        readExcludeFile(arg) catch |e| try args.die("Error reading excludes from {s}: {s}.\n", .{ arg, ui.errorString(e) });
+    else if (opt.is("--exclude")) exclude.addPattern(args.arg())
+    else if (opt.is("-X") or opt.is("--exclude-from")) {
+        const arg = args.arg();
+        readExcludeFile(arg) catch |e| ui.die("Error reading excludes from {s}: {s}.\n", .{ arg, ui.errorString(e) });
     } else if (opt.is("--exclude-caches")) config.exclude_caches = true
     else if (opt.is("--include-caches")) config.exclude_caches = false
     else if (opt.is("--exclude-kernfs")) config.exclude_kernfs = true
     else if (opt.is("--include-kernfs")) config.exclude_kernfs = false
-    else if (opt.is("-c") or opt.is("--compress")) config.compress = true
-    else if (opt.is("--no-compress")) config.compress = false
     else if (opt.is("--compress-level")) {
-        const val = try args.arg();
-        const num = std.fmt.parseInt(u8, val, 10) catch try args.die("Invalid number for --compress-level: {s}.\n", .{val});
-        if (num <= 0 or num > 20) try args.die("Invalid number for --compress-level: {s}.\n", .{val});
-        config.complevel = num;
-    } else if (opt.is("--export-block-size")) {
-        const val = try args.arg();
-        const num = std.fmt.parseInt(u14, val, 10) catch try args.die("Invalid number for --export-block-size: {s}.\n", .{val});
-        if (num < 4 or num > 16000) try args.die("Invalid number for --export-block-size: {s}.\n", .{val});
-        config.export_block_size = @as(usize, num) * 1024;
+        const val = args.arg();
+        config.complevel = std.fmt.parseInt(u8, val, 10) catch ui.die("Invalid number for --compress-level: {s}.\n", .{val});
+        if (config.complevel <= 0 or config.complevel > 20) ui.die("Invalid number for --compress-level: {s}.\n", .{val});
     } else if (opt.is("--confirm-quit")) config.confirm_quit = true
     else if (opt.is("--no-confirm-quit")) config.confirm_quit = false
     else if (opt.is("--confirm-delete")) config.confirm_delete = true
     else if (opt.is("--no-confirm-delete")) config.confirm_delete = false
-    else if (opt.is("--delete-command")) config.delete_command = allocator.dupeZ(u8, try args.arg()) catch unreachable
     else if (opt.is("--color")) {
-        const val = try args.arg();
+        const val = args.arg();
         if (std.mem.eql(u8, val, "off")) config.ui_color = .off
         else if (std.mem.eql(u8, val, "dark")) config.ui_color = .dark
         else if (std.mem.eql(u8, val, "dark-bg")) config.ui_color = .darkbg
-        else try args.die("Unknown --color option: {s}.\n", .{val});
+        else ui.die("Unknown --color option: {s}.\n", .{val});
     } else if (opt.is("-t") or opt.is("--threads")) {
-        const val = try args.arg();
-        config.threads = std.fmt.parseInt(u8, val, 10) catch try args.die("Invalid number of --threads: {s}.\n", .{val});
-    } else return error.UnknownOption;
+        const val = args.arg();
+        config.threads = std.fmt.parseInt(u8, val, 10) catch ui.die("Invalid number of --threads: {s}.\n", .{val});
+    } else return false;
+    return true;
 }

 fn tryReadArgsFile(path: [:0]const u8) void {
@@ -330,117 +305,148 @@ fn tryReadArgsFile(path: [:0]const u8) void {
     };
     defer f.close();

+    var arglist = std.ArrayList([:0]const u8).init(allocator);
+
+    var rd_ = std.io.bufferedReader(f.reader());
+    const rd = rd_.reader();
+
     var line_buf: [4096]u8 = undefined;
-    var line_rd = util.LineReader.init(f, &line_buf);
+    var line_fbs = std.io.fixedBufferStream(&line_buf);
+    const line_writer = line_fbs.writer();

-    while (true) {
-        const line_ = (line_rd.read() catch |e|
-            ui.die("Error reading from {s}: {s}\nRun with --ignore-config to skip reading config files.\n", .{ path, ui.errorString(e) })
-        ) orelse break;
-        var argc: usize = 0;
-        var ignerror = false;
-        var arglist: [2][:0]const u8 = .{ "", "" };
+    while (true) : (line_fbs.reset()) {
+        rd.streamUntilDelimiter(line_writer, '\n', line_buf.len) catch |err| switch (err) {
+            error.EndOfStream => if (line_fbs.getPos() catch unreachable == 0) break,
+            else => |e| ui.die("Error reading from {s}: {s}\nRun with --ignore-config to skip reading config files.\n", .{ path, ui.errorString(e) }),
+        };
+        const line_ = line_fbs.getWritten();

         var line = std.mem.trim(u8, line_, &std.ascii.whitespace);
-        if (line.len > 0 and line[0] == '@') {
-            ignerror = true;
-            line = line[1..];
-        }
         if (line.len == 0 or line[0] == '#') continue;
         if (std.mem.indexOfAny(u8, line, " \t=")) |i| {
-            arglist[argc] = allocator.dupeZ(u8, line[0..i]) catch unreachable;
-            argc += 1;
+            arglist.append(allocator.dupeZ(u8, line[0..i]) catch unreachable) catch unreachable;
             line = std.mem.trimLeft(u8, line[i+1..], &std.ascii.whitespace);
         }
-        arglist[argc] = allocator.dupeZ(u8, line) catch unreachable;
-        argc += 1;
-
-        var args = Args.init(arglist[0..argc]);
-        args.ignerror = ignerror;
-        while (args.next() catch null) |opt| {
-            if (argConfig(&args, opt, true)) |_| {}
-            else |_| {
-                if (ignerror) break;
-                ui.die("Unrecognized option in config file '{s}': {s}.\nRun with --ignore-config to skip reading config files.\n", .{path, opt.val});
-            }
-        }
-        allocator.free(arglist[0]);
-        if (argc == 2) allocator.free(arglist[1]);
+        arglist.append(allocator.dupeZ(u8, line) catch unreachable) catch unreachable;
     }

+    var args = Args.init(arglist.items);
+    while (args.next()) |opt| {
+        if (!argConfig(&args, opt))
+            ui.die("Unrecognized option in config file '{s}': {s}.\nRun with --ignore-config to skip reading config files.\n", .{path, opt.val});
+    }
+    for (arglist.items) |i| allocator.free(i);
+    arglist.deinit();
 }

 fn version() noreturn {
+    const stdout = std.io.getStdOut();
     stdout.writeAll("ncdu " ++ program_version ++ "\n") catch {};
     std.process.exit(0);
 }

 fn help() noreturn {
+    const stdout = std.io.getStdOut();
     stdout.writeAll(
     \\ncdu <options> <directory>
     \\
-    \\Mode selection:
-    \\ -h, --help This help message
-    \\ -v, -V, --version Print version
+    \\Options:
+    \\ -h,--help This help message
+    \\ -q Quiet mode, refresh interval 2 seconds
+    \\ -v,-V,--version Print version
+    \\ -x Same filesystem
+    \\ -e Enable extended information
+    \\ -t NUM Number of threads to use
+    \\ -r Read only
+    \\ -o FILE Export scanned directory to FILE
     \\ -f FILE Import scanned directory from FILE
-    \\ -o FILE Export scanned directory to FILE in JSON format
-    \\ -O FILE Export scanned directory to FILE in binary format
-    \\ -e, --extended Enable extended information
-    \\ --ignore-config Don't load config files
-    \\
-    \\Scan options:
-    \\ -x, --one-file-system Stay on the same filesystem
+    \\ -0,-1,-2 UI to use when scanning (0=none,2=full ncurses)
+    \\ --si Use base 10 (SI) prefixes instead of base 2
     \\ --exclude PATTERN Exclude files that match PATTERN
     \\ -X, --exclude-from FILE Exclude files that match any pattern in FILE
-    \\ --exclude-caches Exclude directories containing CACHEDIR.TAG
     \\ -L, --follow-symlinks Follow symbolic links (excluding directories)
+    \\ --exclude-caches Exclude directories containing CACHEDIR.TAG
     \\ --exclude-kernfs Exclude Linux pseudo filesystems (procfs,sysfs,cgroup,...)
-    \\ -t NUM Scan with NUM threads
+    \\ --confirm-quit Confirm quitting ncdu
+    \\ --color SCHEME Set color scheme (off/dark/dark-bg)
+    \\ --ignore-config Don't load config files
     \\
-    \\Export options:
-    \\ -c, --compress Use Zstandard compression with `-o`
-    \\ --compress-level NUM Set compression level
-    \\ --export-block-size KIB Set export block size with `-O`
-    \\
-    \\Interface options:
-    \\ -0, -1, -2 UI to use when scanning (0=none,2=full ncurses)
-    \\ -q, --slow-ui-updates "Quiet" mode, refresh interval 2 seconds
-    \\ --enable-shell Enable/disable shell spawning feature
-    \\ --enable-delete Enable/disable file deletion feature
-    \\ --enable-refresh Enable/disable directory refresh feature
-    \\ -r Read only (--disable-delete)
-    \\ -rr Read only++ (--disable-delete & --disable-shell)
-    \\ --si Use base 10 (SI) prefixes instead of base 2
-    \\ --apparent-size Show apparent size instead of disk usage by default
-    \\ --hide-hidden Hide "hidden" or excluded files by default
-    \\ --show-itemcount Show item count column by default
-    \\ --show-mtime Show mtime column by default (requires `-e`)
-    \\ --show-graph Show graph column by default
-    \\ --show-percent Show percent column by default
-    \\ --graph-style STYLE hash / half-block / eighth-block
-    \\ --shared-column off / shared / unique
-    \\ --sort COLUMN-(asc/desc) disk-usage / name / apparent-size / itemcount / mtime
-    \\ --enable-natsort Use natural order when sorting by name
-    \\ --group-directories-first Sort directories before files
-    \\ --confirm-quit Ask confirmation before quitting ncdu
-    \\ --no-confirm-delete Don't ask confirmation before deletion
-    \\ --delete-command CMD Command to run for file deletion
-    \\ --color SCHEME off / dark / dark-bg
-    \\
-    \\Refer to `man ncdu` for more information.
+    \\Refer to `man ncdu` for the full list of options.
     \\
     ) catch {};
     std.process.exit(0);
 }

+fn spawnShell() void {
+    ui.deinit();
+    defer ui.init();
+
+    var env = std.process.getEnvMap(allocator) catch unreachable;
+    defer env.deinit();
+    // NCDU_LEVEL can only count to 9, keeps the implementation simple.
+    if (env.get("NCDU_LEVEL")) |l|
+        env.put("NCDU_LEVEL", if (l.len == 0) "1" else switch (l[0]) {
+            '0'...'8' => |d| &[1] u8{d+1},
+            '9' => "9",
+            else => "1"
+        }) catch unreachable
+    else
+        env.put("NCDU_LEVEL", "1") catch unreachable;
+
+    const shell = std.posix.getenvZ("NCDU_SHELL") orelse std.posix.getenvZ("SHELL") orelse "/bin/sh";
+    var child = std.process.Child.init(&.{shell}, allocator);
+    child.cwd = browser.dir_path;
+    child.env_map = &env;
+
+    const stdin = std.io.getStdIn();
+    const stderr = std.io.getStdErr();
+    const term = child.spawnAndWait() catch |e| blk: {
+        stderr.writer().print(
+            "Error spawning shell: {s}\n\nPress enter to continue.\n",
+            .{ ui.errorString(e) }
+        ) catch {};
+        stdin.reader().skipUntilDelimiterOrEof('\n') catch unreachable;
+        break :blk std.process.Child.Term{ .Exited = 0 };
+    };
+    if (term != .Exited) {
+        const n = switch (term) {
+            .Exited => "status",
+            .Signal => "signal",
+            .Stopped => "stopped",
+            .Unknown => "unknown",
+        };
+        const v = switch (term) {
+            .Exited => |v| v,
+            .Signal => |v| v,
+            .Stopped => |v| v,
+            .Unknown => |v| v,
+        };
+        stderr.writer().print(
+            "Shell returned with {s} code {}.\n\nPress enter to continue.\n", .{ n, v }
+        ) catch {};
+        stdin.reader().skipUntilDelimiterOrEof('\n') catch unreachable;
+    }
+}
+
 fn readExcludeFile(path: [:0]const u8) !void {
     const f = try std.fs.cwd().openFileZ(path, .{});
     defer f.close();

+    var rd_ = std.io.bufferedReader(f.reader());
+    const rd = rd_.reader();
+
     var line_buf: [4096]u8 = undefined;
-    var line_rd = util.LineReader.init(f, &line_buf);
-    while (try line_rd.read()) |line| {
+    var line_fbs = std.io.fixedBufferStream(&line_buf);
+    const line_writer = line_fbs.writer();
+
+    while (true) : (line_fbs.reset()) {
+        rd.streamUntilDelimiter(line_writer, '\n', line_buf.len) catch |err| switch (err) {
+            error.EndOfStream => if (line_fbs.getPos() catch unreachable == 0) break,
+            else => |e| return e,
+        };
+        const line = line_fbs.getWritten();
         if (line.len > 0)
             exclude.addPattern(line);
     }
@@ -448,12 +454,13 @@ fn readExcludeFile(path: [:0]const u8) !void {

 fn readImport(path: [:0]const u8) !void {
     const fd =
-        if (std.mem.eql(u8, "-", path)) stdin
+        if (std.mem.eql(u8, "-", path)) std.io.getStdIn()
         else try std.fs.cwd().openFileZ(path, .{});
     errdefer fd.close();

+    // TODO: While we're at it, recognize and handle compressed JSON
     var buf: [8]u8 = undefined;
-    if (8 != try fd.readAll(&buf)) return error.EndOfStream;
+    try fd.reader().readNoEof(&buf);
     if (std.mem.eql(u8, &buf, bin_export.SIGNATURE)) {
         try bin_reader.open(fd);
         config.binreader = true;
@@ -507,8 +514,8 @@ pub fn main() void {
     const arglist = std.process.argsAlloc(allocator) catch unreachable;
     defer std.process.argsFree(allocator, arglist);
     var args = Args.init(arglist);
-    _ = args.next() catch unreachable; // program name
-    while (args.next() catch unreachable) |opt| {
+    _ = args.next(); // program name
+    while (args.next()) |opt| {
         if (!opt.opt) {
             // XXX: ncdu 1.x doesn't error, it just silently ignores all but the last argument.
             if (scan_dir != null) ui.die("Multiple directories given, see ncdu -h for help.\n", .{});
@@ -518,15 +525,15 @@ pub fn main() void {
             if (opt.is("-h") or opt.is("-?") or opt.is("--help")) help()
             else if (opt.is("-v") or opt.is("-V") or opt.is("--version")) version()
             else if (opt.is("-o") and (export_json != null or export_bin != null)) ui.die("The -o flag can only be given once.\n", .{})
-            else if (opt.is("-o")) export_json = allocator.dupeZ(u8, args.arg() catch unreachable) catch unreachable
+            else if (opt.is("-o")) export_json = allocator.dupeZ(u8, args.arg()) catch unreachable
             else if (opt.is("-O") and (export_json != null or export_bin != null)) ui.die("The -O flag can only be given once.\n", .{})
-            else if (opt.is("-O")) export_bin = allocator.dupeZ(u8, args.arg() catch unreachable) catch unreachable
+            else if (opt.is("-O")) export_bin = allocator.dupeZ(u8, args.arg()) catch unreachable
             else if (opt.is("-f") and import_file != null) ui.die("The -f flag can only be given once.\n", .{})
-            else if (opt.is("-f")) import_file = allocator.dupeZ(u8, args.arg() catch unreachable) catch unreachable
+            else if (opt.is("-f")) import_file = allocator.dupeZ(u8, args.arg()) catch unreachable
            else if (opt.is("--ignore-config")) {}
             else if (opt.is("--quit-after-scan")) quit_after_scan = true // undocumented feature to help with benchmarking scan/import
-            else if (argConfig(&args, opt, false)) |_| {}
-            else |_| ui.die("Unrecognized option '{s}'.\n", .{opt.val});
+            else if (argConfig(&args, opt)) {}
+            else ui.die("Unrecognized option '{s}'.\n", .{opt.val});
         }
     }

@@ -535,6 +542,8 @@ pub fn main() void {
     if (@import("builtin").os.tag != .linux and config.exclude_kernfs)
         ui.die("The --exclude-kernfs flag is currently only supported on Linux.\n", .{});

+    const stdin = std.io.getStdIn();
+    const stdout = std.io.getStdOut();
     const out_tty = stdout.isTty();
     const in_tty = stdin.isTty();
     if (config.scan_ui == null) {
@@ -572,7 +581,7 @@ pub fn main() void {
         if (config.binreader and (export_json != null or export_bin != null))
             bin_reader.import();
     } else {
-        var buf: [std.fs.max_path_bytes+1]u8 = @splat(0);
+        var buf = [_]u8{0} ** (std.fs.MAX_PATH_BYTES+1);
         const path =
             if (std.posix.realpathZ(scan_dir orelse ".", buf[0..buf.len-1])) |p| buf[0..p.len:0]
             else |_| (scan_dir orelse ".");
@@ -592,10 +601,10 @@ pub fn main() void {
     while (true) {
         switch (state) {
             .refresh => {
-                var full_path: std.ArrayListUnmanaged(u8) = .empty;
-                defer full_path.deinit(allocator);
-                mem_sink.global.root.?.fmtPath(allocator, true, &full_path);
-                scan.scan(util.arrayListBufZ(&full_path, allocator)) catch {
+                var full_path = std.ArrayList(u8).init(allocator);
+                defer full_path.deinit();
+                mem_sink.global.root.?.fmtPath(true, &full_path);
+                scan.scan(util.arrayListBufZ(&full_path)) catch {
                     sink.global.last_error = allocator.dupeZ(u8, full_path.items) catch unreachable;
                     sink.global.state = .err;
                     while (state == .refresh) handleEvent(true, true);
@@ -604,18 +613,13 @@ pub fn main() void {
                 browser.loadDir(0);
             },
             .shell => {
-                const shell = std.posix.getenvZ("NCDU_SHELL") orelse std.posix.getenvZ("SHELL") orelse "/bin/sh";
-                var env = std.process.getEnvMap(allocator) catch unreachable;
-                defer env.deinit();
-                ui.runCmd(&.{shell}, browser.dir_path, &env, false);
+                spawnShell();
                 state = .browse;
             },
             .delete => {
                 const next = delete.delete();
-                if (state != .refresh) {
-                    state = .browse;
-                    browser.loadDir(if (next) |n| n.nameHash() else 0);
-                }
+                state = .browse;
+                browser.loadDir(if (next) |n| n.nameHash() else 0);
             },
             else => handleEvent(true, false)
         }
@@ -630,14 +634,14 @@ pub fn handleEvent(block: bool, force_draw: bool) void {
     while (ui.oom_threads.load(.monotonic) > 0) ui.oom();

     if (block or force_draw or event_delay_timer.read() > config.update_delay) {
|
if (block or force_draw or event_delay_timer.read() > config.update_delay) {
|
||||||
if (ui.inited) _ = c.erase();
|
if (ui.inited) _ = ui.c.erase();
|
||||||
switch (state) {
|
switch (state) {
|
||||||
.scan, .refresh => sink.draw(),
|
.scan, .refresh => sink.draw(),
|
||||||
.browse => browser.draw(),
|
.browse => browser.draw(),
|
||||||
.delete => delete.draw(),
|
.delete => delete.draw(),
|
||||||
.shell => unreachable,
|
.shell => unreachable,
|
||||||
}
|
}
|
||||||
if (ui.inited) _ = c.refresh();
|
if (ui.inited) _ = ui.c.refresh();
|
||||||
event_delay_timer.reset();
|
event_delay_timer.reset();
|
||||||
}
|
}
|
||||||
if (!ui.inited) {
|
if (!ui.inited) {
|
||||||
|
|
@ -665,13 +669,13 @@ test "argument parser" {
|
||||||
const T = struct {
|
const T = struct {
|
||||||
a: Args,
|
a: Args,
|
||||||
fn opt(self: *@This(), isopt: bool, val: []const u8) !void {
|
fn opt(self: *@This(), isopt: bool, val: []const u8) !void {
|
||||||
const o = (self.a.next() catch unreachable).?;
|
const o = self.a.next().?;
|
||||||
try std.testing.expectEqual(isopt, o.opt);
|
try std.testing.expectEqual(isopt, o.opt);
|
||||||
try std.testing.expectEqualStrings(val, o.val);
|
try std.testing.expectEqualStrings(val, o.val);
|
||||||
try std.testing.expectEqual(o.is(val), isopt);
|
try std.testing.expectEqual(o.is(val), isopt);
|
||||||
}
|
}
|
||||||
fn arg(self: *@This(), val: []const u8) !void {
|
fn arg(self: *@This(), val: []const u8) !void {
|
||||||
try std.testing.expectEqualStrings(val, self.a.arg() catch unreachable);
|
try std.testing.expectEqualStrings(val, self.a.arg());
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
var t = T{ .a = Args.init(&lst) };
|
var t = T{ .a = Args.init(&lst) };
|
||||||
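A recurring change in the main.zig hunks above is std-library migration: managed std.ArrayList(T).init(allocator) buffers on the v2.6 side become std.ArrayListUnmanaged(T) on the zig side, with the allocator passed to each mutating call instead of being stored in the list. A minimal standalone sketch of the unmanaged pattern, assuming a Zig 0.14-era std (the program and names are illustrative only):

    const std = @import("std");

    pub fn main() !void {
        var gpa = std.heap.GeneralPurposeAllocator(.{}){};
        defer _ = gpa.deinit();
        const alloc = gpa.allocator();

        // Unmanaged list: the allocator is not stored in the struct, so every
        // growing call (and deinit) takes it explicitly.
        var path: std.ArrayListUnmanaged(u8) = .empty;
        defer path.deinit(alloc);

        try path.appendSlice(alloc, "/some/dir");
        try path.append(alloc, '/');
        try path.appendSlice(alloc, "file.txt");
        std.debug.print("{s}\n", .{path.items});
    }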
|
|
|
||||||
|
|
@ -17,24 +17,6 @@ pub const Thread = struct {
|
||||||
arena: std.heap.ArenaAllocator = std.heap.ArenaAllocator.init(std.heap.page_allocator),
|
arena: std.heap.ArenaAllocator = std.heap.ArenaAllocator.init(std.heap.page_allocator),
|
||||||
};
|
};
|
||||||
|
|
||||||
pub fn statToEntry(stat: *const sink.Stat, e: *model.Entry, parent: *model.Dir) void {
|
|
||||||
e.pack.blocks = stat.blocks;
|
|
||||||
e.size = stat.size;
|
|
||||||
if (e.dir()) |d| {
|
|
||||||
d.parent = parent;
|
|
||||||
d.pack.dev = model.devices.getId(stat.dev);
|
|
||||||
}
|
|
||||||
if (e.link()) |l| {
|
|
||||||
l.parent = parent;
|
|
||||||
l.ino = stat.ino;
|
|
||||||
l.pack.nlink = stat.nlink;
|
|
||||||
model.inodes.lock.lock();
|
|
||||||
defer model.inodes.lock.unlock();
|
|
||||||
l.addLink();
|
|
||||||
}
|
|
||||||
if (e.ext()) |ext| ext.* = stat.ext;
|
|
||||||
}
|
|
||||||
|
|
||||||
pub const Dir = struct {
|
pub const Dir = struct {
|
||||||
dir: *model.Dir,
|
dir: *model.Dir,
|
||||||
entries: Map,
|
entries: Map,
|
||||||
|
|
@ -125,7 +107,21 @@ pub const Dir = struct {
|
||||||
}
|
}
|
||||||
|
|
||||||
const e = self.getEntry(t, stat.etype, main.config.extended and !stat.ext.isEmpty(), name);
|
const e = self.getEntry(t, stat.etype, main.config.extended and !stat.ext.isEmpty(), name);
|
||||||
statToEntry(stat, e, self.dir);
|
e.pack.blocks = stat.blocks;
|
||||||
|
e.size = stat.size;
|
||||||
|
if (e.dir()) |d| {
|
||||||
|
d.parent = self.dir;
|
||||||
|
d.pack.dev = model.devices.getId(stat.dev);
|
||||||
|
}
|
||||||
|
if (e.link()) |l| {
|
||||||
|
l.parent = self.dir;
|
||||||
|
l.ino = stat.ino;
|
||||||
|
l.pack.nlink = stat.nlink;
|
||||||
|
model.inodes.lock.lock();
|
||||||
|
defer model.inodes.lock.unlock();
|
||||||
|
l.addLink();
|
||||||
|
}
|
||||||
|
if (e.ext()) |ext| ext.* = stat.ext;
|
||||||
return e;
|
return e;
|
||||||
}
|
}
|
||||||
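The hardlink handling above only touches the shared inode table while model.inodes.lock is held, on both sides of the diff. A small self-contained sketch of that lock-guarded update; the map layout and names here are invented for illustration:

    const std = @import("std");

    var inodes_lock: std.Thread.Mutex = .{};
    var inodes: std.AutoHashMapUnmanaged(u64, u32) = .{};

    // Record one extra link for `ino`; callers may run on any scan thread,
    // so the shared map is only touched while the mutex is held.
    fn addLink(alloc: std.mem.Allocator, ino: u64) !void {
        inodes_lock.lock();
        defer inodes_lock.unlock();
        const gop = try inodes.getOrPut(alloc, ino);
        if (!gop.found_existing) gop.value_ptr.* = 0;
        gop.value_ptr.* += 1;
    }

    test "count links" {
        defer inodes.deinit(std.testing.allocator);
        try addLink(std.testing.allocator, 42);
        try addLink(std.testing.allocator, 42);
        try std.testing.expectEqual(@as(u32, 2), inodes.get(42).?);
    }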
|
|
||||||
|
|
|
||||||
|
|
@ -56,14 +56,14 @@ fn rec(ctx: *Ctx, dir: *sink.Dir, entry: *model.Entry) void {
|
||||||
pub fn run(d: *model.Dir) void {
|
pub fn run(d: *model.Dir) void {
|
||||||
const sink_threads = sink.createThreads(1);
|
const sink_threads = sink.createThreads(1);
|
||||||
|
|
||||||
var ctx: Ctx = .{
|
var ctx = .{
|
||||||
.sink = &sink_threads[0],
|
.sink = &sink_threads[0],
|
||||||
.stat = toStat(&d.entry),
|
.stat = toStat(&d.entry),
|
||||||
};
|
};
|
||||||
var buf: std.ArrayListUnmanaged(u8) = .empty;
|
var buf = std.ArrayList(u8).init(main.allocator);
|
||||||
d.fmtPath(main.allocator, true, &buf);
|
d.fmtPath(true, &buf);
|
||||||
const root = sink.createRoot(buf.items, &ctx.stat);
|
const root = sink.createRoot(buf.items, &ctx.stat);
|
||||||
buf.deinit(main.allocator);
|
buf.deinit();
|
||||||
|
|
||||||
var it = d.sub.ptr;
|
var it = d.sub.ptr;
|
||||||
while (it) |e| : (it = e.next.ptr) rec(&ctx, root, e);
|
while (it) |e| : (it = e.next.ptr) rec(&ctx, root, e);
|
||||||
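run() above walks d.sub.ptr / e.next.ptr directly: entries form an intrusive singly linked list, so re-feeding the in-memory tree to the sink needs no iterator allocation. A self-contained sketch of that traversal style, with an invented node layout:

    const std = @import("std");

    // Intrusive singly linked list: the `next` pointer lives in the node itself.
    const Node = struct {
        value: u32,
        next: ?*Node = null,
    };

    fn sum(head: ?*Node) u32 {
        var total: u32 = 0;
        var it = head;
        while (it) |n| : (it = n.next) total += n.value;
        return total;
    }

    test "intrusive walk" {
        var c = Node{ .value = 3 };
        var b = Node{ .value = 2, .next = &c };
        var a = Node{ .value = 1, .next = &b };
        try std.testing.expectEqual(@as(u32, 6), sum(&a));
    }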
|
|
|
||||||
|
|
@ -109,8 +109,7 @@ pub const Entry = extern struct {
|
||||||
fn alloc(comptime T: type, allocator: std.mem.Allocator, etype: EType, isext: bool, ename: []const u8) *Entry {
|
fn alloc(comptime T: type, allocator: std.mem.Allocator, etype: EType, isext: bool, ename: []const u8) *Entry {
|
||||||
const size = (if (isext) @as(usize, @sizeOf(Ext)) else 0) + @sizeOf(T) + ename.len + 1;
|
const size = (if (isext) @as(usize, @sizeOf(Ext)) else 0) + @sizeOf(T) + ename.len + 1;
|
||||||
var ptr = blk: while (true) {
|
var ptr = blk: while (true) {
|
||||||
const alignment = if (@typeInfo(@TypeOf(std.mem.Allocator.allocWithOptions)).@"fn".params[3].type == ?u29) 1 else std.mem.Alignment.@"1";
|
if (allocator.allocWithOptions(u8, size, 1, null)) |p| break :blk p
|
||||||
if (allocator.allocWithOptions(u8, size, alignment, null)) |p| break :blk p
|
|
||||||
else |_| {}
|
else |_| {}
|
||||||
ui.oom();
|
ui.oom();
|
||||||
};
|
};
|
||||||
|
|
@ -218,20 +217,19 @@ pub const Dir = extern struct {
|
||||||
suberr: bool = false,
|
suberr: bool = false,
|
||||||
};
|
};
|
||||||
|
|
||||||
pub fn fmtPath(self: *const @This(), alloc: std.mem.Allocator, withRoot: bool, out: *std.ArrayListUnmanaged(u8)) void {
|
pub fn fmtPath(self: *const @This(), withRoot: bool, out: *std.ArrayList(u8)) void {
|
||||||
if (!withRoot and self.parent == null) return;
|
if (!withRoot and self.parent == null) return;
|
||||||
var components: std.ArrayListUnmanaged([:0]const u8) = .empty;
|
var components = std.ArrayList([:0]const u8).init(main.allocator);
|
||||||
defer components.deinit(main.allocator);
|
defer components.deinit();
|
||||||
var it: ?*const @This() = self;
|
var it: ?*const @This() = self;
|
||||||
while (it) |e| : (it = e.parent)
|
while (it) |e| : (it = e.parent)
|
||||||
if (withRoot or e.parent != null)
|
if (withRoot or e.parent != null)
|
||||||
components.append(main.allocator, e.entry.name()) catch unreachable;
|
components.append(e.entry.name()) catch unreachable;
|
||||||
|
|
||||||
var i: usize = components.items.len-1;
|
var i: usize = components.items.len-1;
|
||||||
while (true) {
|
while (true) {
|
||||||
if (i != components.items.len-1 and !(out.items.len != 0 and out.items[out.items.len-1] == '/'))
|
if (i != components.items.len-1 and !(out.items.len != 0 and out.items[out.items.len-1] == '/')) out.append('/') catch unreachable;
|
||||||
out.append(main.allocator, '/') catch unreachable;
|
out.appendSlice(components.items[i]) catch unreachable;
|
||||||
out.appendSlice(alloc, components.items[i]) catch unreachable;
|
|
||||||
if (i == 0) break;
|
if (i == 0) break;
|
||||||
i -= 1;
|
i -= 1;
|
||||||
}
|
}
|
||||||
|
|
@ -273,11 +271,11 @@ pub const Link = extern struct {
|
||||||
|
|
||||||
// Return value should be freed with main.allocator.
|
// Return value should be freed with main.allocator.
|
||||||
pub fn path(self: *const @This(), withRoot: bool) [:0]const u8 {
|
pub fn path(self: *const @This(), withRoot: bool) [:0]const u8 {
|
||||||
var out: std.ArrayListUnmanaged(u8) = .empty;
|
var out = std.ArrayList(u8).init(main.allocator);
|
||||||
self.parent.fmtPath(main.allocator, withRoot, &out);
|
self.parent.fmtPath(withRoot, &out);
|
||||||
out.append(main.allocator, '/') catch unreachable;
|
out.append('/') catch unreachable;
|
||||||
out.appendSlice(main.allocator, self.entry.name()) catch unreachable;
|
out.appendSlice(self.entry.name()) catch unreachable;
|
||||||
return out.toOwnedSliceSentinel(main.allocator, 0) catch unreachable;
|
return out.toOwnedSliceSentinel(0) catch unreachable;
|
||||||
}
|
}
|
||||||
|
|
||||||
// Add this link to the inodes map and mark it as 'uncounted'.
|
// Add this link to the inodes map and mark it as 'uncounted'.
|
||||||
|
|
@ -352,7 +350,7 @@ pub const Ext = extern struct {
|
||||||
pub const devices = struct {
|
pub const devices = struct {
|
||||||
var lock = std.Thread.Mutex{};
|
var lock = std.Thread.Mutex{};
|
||||||
// id -> dev
|
// id -> dev
|
||||||
pub var list: std.ArrayListUnmanaged(u64) = .empty;
|
pub var list = std.ArrayList(u64).init(main.allocator);
|
||||||
// dev -> id
|
// dev -> id
|
||||||
var lookup = std.AutoHashMap(u64, DevId).init(main.allocator);
|
var lookup = std.AutoHashMap(u64, DevId).init(main.allocator);
|
||||||
|
|
||||||
|
|
@ -363,7 +361,7 @@ pub const devices = struct {
|
||||||
if (!d.found_existing) {
|
if (!d.found_existing) {
|
||||||
if (list.items.len >= std.math.maxInt(DevId)) ui.die("Maximum number of device identifiers exceeded.\n", .{});
|
if (list.items.len >= std.math.maxInt(DevId)) ui.die("Maximum number of device identifiers exceeded.\n", .{});
|
||||||
d.value_ptr.* = @as(DevId, @intCast(list.items.len));
|
d.value_ptr.* = @as(DevId, @intCast(list.items.len));
|
||||||
list.append(main.allocator, dev) catch unreachable;
|
list.append(dev) catch unreachable;
|
||||||
}
|
}
|
||||||
return d.value_ptr.*;
|
return d.value_ptr.*;
|
||||||
}
|
}
|
||||||
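The alloc() hunk above keeps one source tree building on more than one Zig release by inspecting the allocWithOptions signature at comptime and passing whichever alignment argument that signature expects. The same reflection mechanism, reduced to a standalone example (0.14-style lowercase type-info tags assumed; generic parameters report no concrete type and show up as void here):

    const std = @import("std");

    // Parameter list of Allocator.allocWithOptions, obtained purely at comptime.
    const params = @typeInfo(@TypeOf(std.mem.Allocator.allocWithOptions)).@"fn".params;

    pub fn main() void {
        inline for (params, 0..) |p, i| {
            // Generic (comptime/anytype) parameters have no concrete type.
            std.debug.print("param {d}: {s}\n", .{ i, @typeName(p.type orelse void) });
        }
    }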
|
|
|
||||||
40
src/scan.zig
|
|
@ -8,13 +8,13 @@ const model = @import("model.zig");
|
||||||
const sink = @import("sink.zig");
|
const sink = @import("sink.zig");
|
||||||
const ui = @import("ui.zig");
|
const ui = @import("ui.zig");
|
||||||
const exclude = @import("exclude.zig");
|
const exclude = @import("exclude.zig");
|
||||||
const c = @import("c.zig").c;
|
const c_statfs = @cImport(@cInclude("sys/vfs.h"));
|
||||||
|
|
||||||
|
|
||||||
// This function only works on Linux
|
// This function only works on Linux
|
||||||
fn isKernfs(dir: std.fs.Dir) bool {
|
fn isKernfs(dir: std.fs.Dir) bool {
|
||||||
var buf: c.struct_statfs = undefined;
|
var buf: c_statfs.struct_statfs = undefined;
|
||||||
if (c.fstatfs(dir.fd, &buf) != 0) return false; // silently ignoring errors isn't too nice.
|
if (c_statfs.fstatfs(dir.fd, &buf) != 0) return false; // silently ignoring errors isn't too nice.
|
||||||
const iskern = switch (util.castTruncate(u32, buf.f_type)) {
|
const iskern = switch (util.castTruncate(u32, buf.f_type)) {
|
||||||
// These numbers are documented in the Linux 'statfs(2)' man page, so I assume they're stable.
|
// These numbers are documented in the Linux 'statfs(2)' man page, so I assume they're stable.
|
||||||
0x42494e4d, // BINFMTFS_MAGIC
|
0x42494e4d, // BINFMTFS_MAGIC
|
||||||
|
|
@ -46,24 +46,14 @@ fn truncate(comptime T: type, comptime field: anytype, x: anytype) std.meta.fiel
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
pub fn statAt(parent: std.fs.Dir, name: [:0]const u8, follow: bool, symlink: ?*bool) !sink.Stat {
|
fn statAt(parent: std.fs.Dir, name: [:0]const u8, follow: bool, symlink: *bool) !sink.Stat {
|
||||||
// std.posix.fstatatZ() in Zig 0.14 is not suitable due to https://github.com/ziglang/zig/issues/23463
|
const stat = try std.posix.fstatatZ(parent.fd, name, if (follow) 0 else std.posix.AT.SYMLINK_NOFOLLOW);
|
||||||
var stat: std.c.Stat = undefined;
|
symlink.* = std.posix.S.ISLNK(stat.mode);
|
||||||
if (std.c.fstatat(parent.fd, name, &stat, if (follow) 0 else std.c.AT.SYMLINK_NOFOLLOW) != 0) {
|
|
||||||
return switch (std.c._errno().*) {
|
|
||||||
@intFromEnum(std.c.E.NOENT) => error.FileNotFound,
|
|
||||||
@intFromEnum(std.c.E.NAMETOOLONG) => error.NameTooLong,
|
|
||||||
@intFromEnum(std.c.E.NOMEM) => error.OutOfMemory,
|
|
||||||
@intFromEnum(std.c.E.ACCES) => error.AccessDenied,
|
|
||||||
else => error.Unexpected,
|
|
||||||
};
|
|
||||||
}
|
|
||||||
if (symlink) |s| s.* = std.c.S.ISLNK(stat.mode);
|
|
||||||
return sink.Stat{
|
return sink.Stat{
|
||||||
.etype =
|
.etype =
|
||||||
if (std.c.S.ISDIR(stat.mode)) .dir
|
if (std.posix.S.ISDIR(stat.mode)) .dir
|
||||||
else if (stat.nlink > 1) .link
|
else if (stat.nlink > 1) .link
|
||||||
else if (!std.c.S.ISREG(stat.mode)) .nonreg
|
else if (!std.posix.S.ISREG(stat.mode)) .nonreg
|
||||||
else .reg,
|
else .reg,
|
||||||
.blocks = clamp(sink.Stat, .blocks, stat.blocks),
|
.blocks = clamp(sink.Stat, .blocks, stat.blocks),
|
||||||
.size = clamp(sink.Stat, .size, stat.size),
|
.size = clamp(sink.Stat, .size, stat.size),
|
||||||
|
|
@ -77,7 +67,7 @@ pub fn statAt(parent: std.fs.Dir, name: [:0]const u8, follow: bool, symlink: ?*b
|
||||||
.hasgid = true,
|
.hasgid = true,
|
||||||
.hasmode = true,
|
.hasmode = true,
|
||||||
},
|
},
|
||||||
.mtime = clamp(model.Ext, .mtime, stat.mtime().sec),
|
.mtime = clamp(model.Ext, .mtime, stat.mtime().tv_sec),
|
||||||
.uid = truncate(model.Ext, .uid, stat.uid),
|
.uid = truncate(model.Ext, .uid, stat.uid),
|
||||||
.gid = truncate(model.Ext, .gid, stat.gid),
|
.gid = truncate(model.Ext, .gid, stat.gid),
|
||||||
.mode = truncate(model.Ext, .mode, stat.mode),
|
.mode = truncate(model.Ext, .mode, stat.mode),
|
||||||
|
|
@ -91,7 +81,7 @@ fn isCacheDir(dir: std.fs.Dir) bool {
|
||||||
const f = dir.openFileZ("CACHEDIR.TAG", .{}) catch return false;
|
const f = dir.openFileZ("CACHEDIR.TAG", .{}) catch return false;
|
||||||
defer f.close();
|
defer f.close();
|
||||||
var buf: [sig.len]u8 = undefined;
|
var buf: [sig.len]u8 = undefined;
|
||||||
const len = f.readAll(&buf) catch return false;
|
const len = f.reader().readAll(&buf) catch return false;
|
||||||
return len == sig.len and std.mem.eql(u8, &buf, sig);
|
return len == sig.len and std.mem.eql(u8, &buf, sig);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
@ -184,7 +174,7 @@ const Thread = struct {
|
||||||
thread_num: usize,
|
thread_num: usize,
|
||||||
sink: *sink.Thread,
|
sink: *sink.Thread,
|
||||||
state: *State,
|
state: *State,
|
||||||
stack: std.ArrayListUnmanaged(*Dir) = .empty,
|
stack: std.ArrayList(*Dir) = std.ArrayList(*Dir).init(main.allocator),
|
||||||
thread: std.Thread = undefined,
|
thread: std.Thread = undefined,
|
||||||
namebuf: [4096]u8 = undefined,
|
namebuf: [4096]u8 = undefined,
|
||||||
|
|
||||||
|
|
@ -265,13 +255,13 @@ const Thread = struct {
|
||||||
const s = dir.sink.addDir(t.sink, name, &stat);
|
const s = dir.sink.addDir(t.sink, name, &stat);
|
||||||
const ndir = Dir.create(edir, stat.dev, dir.pat.enter(name), s);
|
const ndir = Dir.create(edir, stat.dev, dir.pat.enter(name), s);
|
||||||
if (main.config.threads == 1 or !t.state.tryPush(ndir))
|
if (main.config.threads == 1 or !t.state.tryPush(ndir))
|
||||||
t.stack.append(main.allocator, ndir) catch unreachable;
|
t.stack.append(ndir) catch unreachable;
|
||||||
}
|
}
|
||||||
|
|
||||||
fn run(t: *Thread) void {
|
fn run(t: *Thread) void {
|
||||||
defer t.stack.deinit(main.allocator);
|
defer t.stack.deinit();
|
||||||
while (t.state.waitPop()) |dir| {
|
while (t.state.waitPop()) |dir| {
|
||||||
t.stack.append(main.allocator, dir) catch unreachable;
|
t.stack.append(dir) catch unreachable;
|
||||||
|
|
||||||
while (t.stack.items.len > 0) {
|
while (t.stack.items.len > 0) {
|
||||||
const d = t.stack.items[t.stack.items.len - 1];
|
const d = t.stack.items[t.stack.items.len - 1];
|
||||||
|
|
@ -286,7 +276,7 @@ const Thread = struct {
|
||||||
if (entry) |e| t.scanOne(d, e.name)
|
if (entry) |e| t.scanOne(d, e.name)
|
||||||
else {
|
else {
|
||||||
t.sink.setDir(null);
|
t.sink.setDir(null);
|
||||||
t.stack.pop().?.destroy(t);
|
t.stack.pop().destroy(t);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
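statAt() above sidesteps std.posix.fstatatZ() (unsuitable in Zig 0.14 per the issue referenced in the hunk) and calls libc's fstatat() through std.c, translating errno by hand. A trimmed-down sketch of that approach, assuming libc is linked; the error mapping here is abbreviated, not ncdu's full set:

    const std = @import("std");

    fn statNoFollow(parent: std.fs.Dir, name: [:0]const u8) !std.c.Stat {
        var st: std.c.Stat = undefined;
        // Query the entry relative to `parent` without following symlinks.
        if (std.c.fstatat(parent.fd, name.ptr, &st, std.c.AT.SYMLINK_NOFOLLOW) != 0) {
            return switch (std.c._errno().*) {
                @intFromEnum(std.c.E.NOENT) => error.FileNotFound,
                @intFromEnum(std.c.E.ACCES) => error.AccessDenied,
                else => error.Unexpected,
            };
        }
        return st;
    }

    test "stat the current directory" {
        const st = try statNoFollow(std.fs.cwd(), ".");
        try std.testing.expect(std.c.S.ISDIR(st.mode));
    }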
|
|
|
||||||
60
src/sink.zig
|
|
@ -140,21 +140,20 @@ pub const Dir = struct {
|
||||||
}
|
}
|
||||||
|
|
||||||
fn path(d: *Dir) [:0]u8 {
|
fn path(d: *Dir) [:0]u8 {
|
||||||
var components: std.ArrayListUnmanaged([]const u8) = .empty;
|
var components = std.ArrayList([]const u8).init(main.allocator);
|
||||||
defer components.deinit(main.allocator);
|
defer components.deinit();
|
||||||
var it: ?*Dir = d;
|
var it: ?*Dir = d;
|
||||||
while (it) |e| : (it = e.parent) components.append(main.allocator, e.name) catch unreachable;
|
while (it) |e| : (it = e.parent) components.append(e.name) catch unreachable;
|
||||||
|
|
||||||
var out: std.ArrayListUnmanaged(u8) = .empty;
|
var out = std.ArrayList(u8).init(main.allocator);
|
||||||
var i: usize = components.items.len-1;
|
var i: usize = components.items.len-1;
|
||||||
while (true) {
|
while (true) {
|
||||||
if (i != components.items.len-1 and !(out.items.len != 0 and out.items[out.items.len-1] == '/'))
|
if (i != components.items.len-1 and !(out.items.len != 0 and out.items[out.items.len-1] == '/')) out.append('/') catch unreachable;
|
||||||
out.append(main.allocator, '/') catch unreachable;
|
out.appendSlice(components.items[i]) catch unreachable;
|
||||||
out.appendSlice(main.allocator, components.items[i]) catch unreachable;
|
|
||||||
if (i == 0) break;
|
if (i == 0) break;
|
||||||
i -= 1;
|
i -= 1;
|
||||||
}
|
}
|
||||||
return out.toOwnedSliceSentinel(main.allocator, 0) catch unreachable;
|
return out.toOwnedSliceSentinel(0) catch unreachable;
|
||||||
}
|
}
|
||||||
|
|
||||||
fn ref(d: *Dir) void {
|
fn ref(d: *Dir) void {
|
||||||
|
|
@ -163,7 +162,7 @@ pub const Dir = struct {
|
||||||
|
|
||||||
pub fn unref(d: *Dir, t: *Thread) void {
|
pub fn unref(d: *Dir, t: *Thread) void {
|
||||||
if (d.refcnt.fetchSub(1, .release) != 1) return;
|
if (d.refcnt.fetchSub(1, .release) != 1) return;
|
||||||
_ = d.refcnt.load(.acquire);
|
d.refcnt.fence(.acquire);
|
||||||
|
|
||||||
switch (d.out) {
|
switch (d.out) {
|
||||||
.mem => |*m| m.final(if (d.parent) |p| &p.out.mem else null),
|
.mem => |*m| m.final(if (d.parent) |p| &p.out.mem else null),
|
||||||
|
|
@ -292,7 +291,7 @@ fn drawConsole() void {
|
||||||
var ansi: ?bool = null;
|
var ansi: ?bool = null;
|
||||||
var lines_written: usize = 0;
|
var lines_written: usize = 0;
|
||||||
};
|
};
|
||||||
const stderr = if (@hasDecl(std.io, "getStdErr")) std.io.getStdErr() else std.fs.File.stderr();
|
const stderr = std.io.getStdErr();
|
||||||
const ansi = st.ansi orelse blk: {
|
const ansi = st.ansi orelse blk: {
|
||||||
const t = stderr.supportsAnsiEscapeCodes();
|
const t = stderr.supportsAnsiEscapeCodes();
|
||||||
st.ansi = t;
|
st.ansi = t;
|
||||||
|
|
@ -451,28 +450,25 @@ pub fn draw() void {
|
||||||
switch (main.config.scan_ui.?) {
|
switch (main.config.scan_ui.?) {
|
||||||
.none => {},
|
.none => {},
|
||||||
.line => drawConsole(),
|
.line => drawConsole(),
|
||||||
.full => {
|
.full => switch (global.state) {
|
||||||
ui.init();
|
.done => {},
|
||||||
switch (global.state) {
|
.err => drawError(),
|
||||||
.done => {},
|
.zeroing => {
|
||||||
.err => drawError(),
|
const box = ui.Box.create(4, ui.cols -| 5, "Initializing");
|
||||||
.zeroing => {
|
box.move(2, 2);
|
||||||
const box = ui.Box.create(4, ui.cols -| 5, "Initializing");
|
ui.addstr("Clearing directory counts...");
|
||||||
box.move(2, 2);
|
},
|
||||||
ui.addstr("Clearing directory counts...");
|
.hlcnt => {
|
||||||
},
|
const box = ui.Box.create(4, ui.cols -| 5, "Finalizing");
|
||||||
.hlcnt => {
|
box.move(2, 2);
|
||||||
const box = ui.Box.create(4, ui.cols -| 5, "Finalizing");
|
ui.addstr("Counting hardlinks... ");
|
||||||
box.move(2, 2);
|
if (model.inodes.add_total > 0) {
|
||||||
ui.addstr("Counting hardlinks... ");
|
ui.addnum(.default, model.inodes.add_done);
|
||||||
if (model.inodes.add_total > 0) {
|
ui.addstr(" / ");
|
||||||
ui.addnum(.default, model.inodes.add_done);
|
ui.addnum(.default, model.inodes.add_total);
|
||||||
ui.addstr(" / ");
|
}
|
||||||
ui.addnum(.default, model.inodes.add_total);
|
},
|
||||||
}
|
.running => drawProgress(),
|
||||||
},
|
|
||||||
.running => drawProgress(),
|
|
||||||
}
|
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
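In unref() above, the zig-branch side replaces refcnt.fence(.acquire) with a plain acquire load once the release-decrement reaches zero; that release/acquire pairing is what makes the subsequent teardown safe. A reduced sketch of the drop-to-zero pattern, with invented names:

    const std = @import("std");

    const Shared = struct {
        refcnt: std.atomic.Value(usize) = std.atomic.Value(usize).init(1),

        fn ref(s: *Shared) void {
            _ = s.refcnt.fetchAdd(1, .monotonic);
        }

        fn unref(s: *Shared) void {
            // Release on the way down; only the thread that sees the count hit
            // zero proceeds, after an acquire load to order earlier writes.
            if (s.refcnt.fetchSub(1, .release) != 1) return;
            _ = s.refcnt.load(.acquire);
            // Last reference dropped: finalize/free here.
        }
    };

    test "single-threaded ref/unref" {
        var s = Shared{};
        s.ref();
        s.unref();
        s.unref();
    }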
|
|
|
||||||
109
src/ui.zig
|
|
@ -6,7 +6,16 @@
|
||||||
const std = @import("std");
|
const std = @import("std");
|
||||||
const main = @import("main.zig");
|
const main = @import("main.zig");
|
||||||
const util = @import("util.zig");
|
const util = @import("util.zig");
|
||||||
const c = @import("c.zig").c;
|
|
||||||
|
pub const c = @cImport({
|
||||||
|
@cDefine("_XOPEN_SOURCE", "1");
|
||||||
|
@cInclude("stdio.h");
|
||||||
|
@cInclude("string.h");
|
||||||
|
@cInclude("curses.h");
|
||||||
|
@cInclude("time.h");
|
||||||
|
@cInclude("wchar.h");
|
||||||
|
@cInclude("locale.h");
|
||||||
|
});
|
||||||
|
|
||||||
pub var inited: bool = false;
|
pub var inited: bool = false;
|
||||||
pub var main_thread: std.Thread.Id = undefined;
|
pub var main_thread: std.Thread.Id = undefined;
|
||||||
|
|
@ -17,7 +26,8 @@ pub var cols: u32 = undefined;
|
||||||
|
|
||||||
pub fn die(comptime fmt: []const u8, args: anytype) noreturn {
|
pub fn die(comptime fmt: []const u8, args: anytype) noreturn {
|
||||||
deinit();
|
deinit();
|
||||||
std.debug.print(fmt, args);
|
const stderr = std.io.getStdErr();
|
||||||
|
stderr.writer().print(fmt, args) catch {};
|
||||||
std.process.exit(1);
|
std.process.exit(1);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
@ -26,8 +36,6 @@ pub fn quit() noreturn {
|
||||||
std.process.exit(0);
|
std.process.exit(0);
|
||||||
}
|
}
|
||||||
|
|
||||||
const sleep = if (@hasDecl(std.time, "sleep")) std.time.sleep else std.Thread.sleep;
|
|
||||||
|
|
||||||
// Should be called when malloc fails. Will show a message to the user, wait
|
// Should be called when malloc fails. Will show a message to the user, wait
|
||||||
// for a second and return to give it another try.
|
// for a second and return to give it another try.
|
||||||
// Glitch: this function may be called while we're in the process of drawing
|
// Glitch: this function may be called while we're in the process of drawing
|
||||||
|
|
@ -38,17 +46,18 @@ const sleep = if (@hasDecl(std.time, "sleep")) std.time.sleep else std.Thread.sl
|
||||||
// no clue if ncurses will consistently report OOM, but we're not handling that
|
// no clue if ncurses will consistently report OOM, but we're not handling that
|
||||||
// right now.
|
// right now.
|
||||||
pub fn oom() void {
|
pub fn oom() void {
|
||||||
@branchHint(.cold);
|
@setCold(true);
|
||||||
if (main_thread == std.Thread.getCurrentId()) {
|
if (main_thread == std.Thread.getCurrentId()) {
|
||||||
const haveui = inited;
|
const haveui = inited;
|
||||||
deinit();
|
deinit();
|
||||||
std.debug.print("\x1b7\x1b[JOut of memory, trying again in 1 second. Hit Ctrl-C to abort.\x1b8", .{});
|
const stderr = std.io.getStdErr();
|
||||||
sleep(std.time.ns_per_s);
|
stderr.writeAll("\x1b7\x1b[JOut of memory, trying again in 1 second. Hit Ctrl-C to abort.\x1b8") catch {};
|
||||||
|
std.time.sleep(std.time.ns_per_s);
|
||||||
if (haveui)
|
if (haveui)
|
||||||
init();
|
init();
|
||||||
} else {
|
} else {
|
||||||
_ = oom_threads.fetchAdd(1, .monotonic);
|
_ = oom_threads.fetchAdd(1, .monotonic);
|
||||||
sleep(std.time.ns_per_s);
|
std.time.sleep(std.time.ns_per_s);
|
||||||
_ = oom_threads.fetchSub(1, .monotonic);
|
_ = oom_threads.fetchSub(1, .monotonic);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
@ -80,7 +89,7 @@ pub fn errorString(e: anyerror) [:0]const u8 {
|
||||||
};
|
};
|
||||||
}
|
}
|
||||||
|
|
||||||
var to_utf8_buf: std.ArrayListUnmanaged(u8) = .empty;
|
var to_utf8_buf = std.ArrayList(u8).init(main.allocator);
|
||||||
|
|
||||||
fn toUtf8BadChar(ch: u8) bool {
|
fn toUtf8BadChar(ch: u8) bool {
|
||||||
return switch (ch) {
|
return switch (ch) {
|
||||||
|
|
@ -107,19 +116,19 @@ pub fn toUtf8(in: [:0]const u8) [:0]const u8 {
|
||||||
if (std.unicode.utf8ByteSequenceLength(in[i])) |cp_len| {
|
if (std.unicode.utf8ByteSequenceLength(in[i])) |cp_len| {
|
||||||
if (!toUtf8BadChar(in[i]) and i + cp_len <= in.len) {
|
if (!toUtf8BadChar(in[i]) and i + cp_len <= in.len) {
|
||||||
if (std.unicode.utf8Decode(in[i .. i + cp_len])) |_| {
|
if (std.unicode.utf8Decode(in[i .. i + cp_len])) |_| {
|
||||||
to_utf8_buf.appendSlice(main.allocator, in[i .. i + cp_len]) catch unreachable;
|
to_utf8_buf.appendSlice(in[i .. i + cp_len]) catch unreachable;
|
||||||
i += cp_len;
|
i += cp_len;
|
||||||
continue;
|
continue;
|
||||||
} else |_| {}
|
} else |_| {}
|
||||||
}
|
}
|
||||||
} else |_| {}
|
} else |_| {}
|
||||||
to_utf8_buf.writer(main.allocator).print("\\x{X:0>2}", .{in[i]}) catch unreachable;
|
to_utf8_buf.writer().print("\\x{X:0>2}", .{in[i]}) catch unreachable;
|
||||||
i += 1;
|
i += 1;
|
||||||
}
|
}
|
||||||
return util.arrayListBufZ(&to_utf8_buf, main.allocator);
|
return util.arrayListBufZ(&to_utf8_buf);
|
||||||
}
|
}
|
||||||
|
|
||||||
var shorten_buf: std.ArrayListUnmanaged(u8) = .empty;
|
var shorten_buf = std.ArrayList(u8).init(main.allocator);
|
||||||
|
|
||||||
// Shorten the given string to fit in the given number of columns.
|
// Shorten the given string to fit in the given number of columns.
|
||||||
// If the string is too long, only the prefix and suffix will be printed, with '...' in between.
|
// If the string is too long, only the prefix and suffix will be printed, with '...' in between.
|
||||||
|
|
@ -150,8 +159,8 @@ pub fn shorten(in: [:0]const u8, max_width: u32) [:0] const u8 {
|
||||||
if (total_width <= max_width) return in;
|
if (total_width <= max_width) return in;
|
||||||
|
|
||||||
shorten_buf.shrinkRetainingCapacity(0);
|
shorten_buf.shrinkRetainingCapacity(0);
|
||||||
shorten_buf.appendSlice(main.allocator, in[0..prefix_end]) catch unreachable;
|
shorten_buf.appendSlice(in[0..prefix_end]) catch unreachable;
|
||||||
shorten_buf.appendSlice(main.allocator, "...") catch unreachable;
|
shorten_buf.appendSlice("...") catch unreachable;
|
||||||
|
|
||||||
var start_width: u32 = prefix_width;
|
var start_width: u32 = prefix_width;
|
||||||
var start_len: u32 = prefix_end;
|
var start_len: u32 = prefix_end;
|
||||||
|
|
@ -163,11 +172,11 @@ pub fn shorten(in: [:0]const u8, max_width: u32) [:0] const u8 {
|
||||||
start_width += cp_width;
|
start_width += cp_width;
|
||||||
start_len += cp_len;
|
start_len += cp_len;
|
||||||
if (total_width - start_width <= max_width - prefix_width - 3) {
|
if (total_width - start_width <= max_width - prefix_width - 3) {
|
||||||
shorten_buf.appendSlice(main.allocator, in[start_len..]) catch unreachable;
|
shorten_buf.appendSlice(in[start_len..]) catch unreachable;
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return util.arrayListBufZ(&shorten_buf, main.allocator);
|
return util.arrayListBufZ(&shorten_buf);
|
||||||
}
|
}
|
||||||
|
|
||||||
fn shortenTest(in: [:0]const u8, max_width: u32, out: [:0]const u8) !void {
|
fn shortenTest(in: [:0]const u8, max_width: u32, out: [:0]const u8) !void {
|
||||||
|
|
@ -288,7 +297,7 @@ pub const Style = lbl: {
|
||||||
};
|
};
|
||||||
}
|
}
|
||||||
break :lbl @Type(.{
|
break :lbl @Type(.{
|
||||||
.@"enum" = .{
|
.Enum = .{
|
||||||
.tag_type = u8,
|
.tag_type = u8,
|
||||||
.fields = &fields,
|
.fields = &fields,
|
||||||
.decls = &[_]std.builtin.Type.Declaration{},
|
.decls = &[_]std.builtin.Type.Declaration{},
|
||||||
|
|
@ -335,7 +344,8 @@ fn updateSize() void {
|
||||||
fn clearScr() void {
|
fn clearScr() void {
|
||||||
// Send a "clear from cursor to end of screen" instruction, to clear a
|
// Send a "clear from cursor to end of screen" instruction, to clear a
|
||||||
// potential line left behind from scanning in -1 mode.
|
// potential line left behind from scanning in -1 mode.
|
||||||
std.debug.print("\x1b[J", .{});
|
const stderr = std.io.getStdErr();
|
||||||
|
stderr.writeAll("\x1b[J") catch {};
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn init() void {
|
pub fn init() void {
|
||||||
|
|
@ -418,7 +428,7 @@ pub const FmtSize = struct {
|
||||||
pub fn fmt(v: u64) FmtSize {
|
pub fn fmt(v: u64) FmtSize {
|
||||||
if (main.config.si) {
|
if (main.config.si) {
|
||||||
if (v < 1000) { return FmtSize.init(" B", v, 10, 1); }
|
if (v < 1000) { return FmtSize.init(" B", v, 10, 1); }
|
||||||
else if (v < 999_950) { return FmtSize.init(" kB", v, 1, 100); }
|
else if (v < 999_950) { return FmtSize.init(" KB", v, 1, 100); }
|
||||||
else if (v < 999_950_000) { return FmtSize.init(" MB", v, 1, 100_000); }
|
else if (v < 999_950_000) { return FmtSize.init(" MB", v, 1, 100_000); }
|
||||||
else if (v < 999_950_000_000) { return FmtSize.init(" GB", v, 1, 100_000_000); }
|
else if (v < 999_950_000_000) { return FmtSize.init(" GB", v, 1, 100_000_000); }
|
||||||
else if (v < 999_950_000_000_000) { return FmtSize.init(" TB", v, 1, 100_000_000_000); }
|
else if (v < 999_950_000_000_000) { return FmtSize.init(" TB", v, 1, 100_000_000_000); }
|
||||||
|
|
@ -451,11 +461,11 @@ test "fmtsize" {
|
||||||
main.config.si = true;
|
main.config.si = true;
|
||||||
try FmtSize.fmt( 0).testEql(" 0.0 B");
|
try FmtSize.fmt( 0).testEql(" 0.0 B");
|
||||||
try FmtSize.fmt( 999).testEql("999.0 B");
|
try FmtSize.fmt( 999).testEql("999.0 B");
|
||||||
try FmtSize.fmt( 1000).testEql(" 1.0 kB");
|
try FmtSize.fmt( 1000).testEql(" 1.0 KB");
|
||||||
try FmtSize.fmt( 1049).testEql(" 1.0 kB");
|
try FmtSize.fmt( 1049).testEql(" 1.0 KB");
|
||||||
try FmtSize.fmt( 1050).testEql(" 1.1 kB");
|
try FmtSize.fmt( 1050).testEql(" 1.1 KB");
|
||||||
try FmtSize.fmt( 999_899).testEql("999.9 kB");
|
try FmtSize.fmt( 999_899).testEql("999.9 KB");
|
||||||
try FmtSize.fmt( 999_949).testEql("999.9 kB");
|
try FmtSize.fmt( 999_949).testEql("999.9 KB");
|
||||||
try FmtSize.fmt( 999_950).testEql(" 1.0 MB");
|
try FmtSize.fmt( 999_950).testEql(" 1.0 MB");
|
||||||
try FmtSize.fmt( 1000_000).testEql(" 1.0 MB");
|
try FmtSize.fmt( 1000_000).testEql(" 1.0 MB");
|
||||||
try FmtSize.fmt( 999_850_009).testEql("999.9 MB");
|
try FmtSize.fmt( 999_850_009).testEql("999.9 MB");
|
||||||
|
|
@ -633,7 +643,7 @@ pub fn getch(block: bool) i32 {
|
||||||
}
|
}
|
||||||
if (ch == c.ERR) {
|
if (ch == c.ERR) {
|
||||||
if (!block) return 0;
|
if (!block) return 0;
|
||||||
sleep(10*std.time.ns_per_ms);
|
std.time.sleep(10*std.time.ns_per_ms);
|
||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
return ch;
|
return ch;
|
||||||
|
|
@ -641,50 +651,3 @@ pub fn getch(block: bool) i32 {
|
||||||
die("Error reading keyboard input, assuming TTY has been lost.\n(Potentially nonsensical error message: {s})\n",
|
die("Error reading keyboard input, assuming TTY has been lost.\n(Potentially nonsensical error message: {s})\n",
|
||||||
.{ c.strerror(@intFromEnum(std.posix.errno(-1))) });
|
.{ c.strerror(@intFromEnum(std.posix.errno(-1))) });
|
||||||
}
|
}
|
||||||
|
|
||||||
fn waitInput() void {
|
|
||||||
if (@hasDecl(std.io, "getStdIn")) {
|
|
||||||
std.io.getStdIn().reader().skipUntilDelimiterOrEof('\n') catch unreachable;
|
|
||||||
} else {
|
|
||||||
var buf: [512]u8 = undefined;
|
|
||||||
var rd = std.fs.File.stdin().reader(&buf);
|
|
||||||
_ = rd.interface.discardDelimiterExclusive('\n') catch unreachable;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn runCmd(cmd: []const []const u8, cwd: ?[]const u8, env: *std.process.EnvMap, reporterr: bool) void {
|
|
||||||
deinit();
|
|
||||||
defer init();
|
|
||||||
|
|
||||||
// NCDU_LEVEL can only count to 9, keeps the implementation simple.
|
|
||||||
if (env.get("NCDU_LEVEL")) |l|
|
|
||||||
env.put("NCDU_LEVEL", if (l.len == 0) "1" else switch (l[0]) {
|
|
||||||
'0'...'8' => |d| &[1] u8{d+1},
|
|
||||||
'9' => "9",
|
|
||||||
else => "1"
|
|
||||||
}) catch unreachable
|
|
||||||
else
|
|
||||||
env.put("NCDU_LEVEL", "1") catch unreachable;
|
|
||||||
|
|
||||||
var child = std.process.Child.init(cmd, main.allocator);
|
|
||||||
child.cwd = cwd;
|
|
||||||
child.env_map = env;
|
|
||||||
|
|
||||||
const term = child.spawnAndWait() catch |e| blk: {
|
|
||||||
std.debug.print("Error running command: {s}\n\nPress enter to continue.\n", .{ ui.errorString(e) });
|
|
||||||
waitInput();
|
|
||||||
break :blk std.process.Child.Term{ .Exited = 0 };
|
|
||||||
};
|
|
||||||
|
|
||||||
const n = switch (term) {
|
|
||||||
.Exited => "error",
|
|
||||||
.Signal => "signal",
|
|
||||||
.Stopped => "stopped",
|
|
||||||
.Unknown => "unknown",
|
|
||||||
};
|
|
||||||
const v = switch (term) { inline else => |v| v };
|
|
||||||
if (term != .Exited or (reporterr and v != 0)) {
|
|
||||||
std.debug.print("\nCommand returned with {s} code {}.\nPress enter to continue.\n", .{ n, v });
|
|
||||||
waitInput();
|
|
||||||
}
|
|
||||||
}
|
|
||||||
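runCmd() above exports NCDU_LEVEL so a shell spawned from inside ncdu can tell how deeply it is nested; the value is kept to a single digit and capped at 9. The same bookkeeping as a standalone helper, following the logic shown in the hunk:

    const std = @import("std");

    // Bump NCDU_LEVEL in `env` before spawning a child, mirroring runCmd() above.
    fn bumpLevel(env: *std.process.EnvMap) !void {
        if (env.get("NCDU_LEVEL")) |l| {
            try env.put("NCDU_LEVEL", if (l.len == 0) "1" else switch (l[0]) {
                '0'...'8' => |d| &[1]u8{d + 1}, // single digit, incremented
                '9' => "9", // capped
                else => "1",
            });
        } else {
            try env.put("NCDU_LEVEL", "1");
        }
    }

    test "nesting level is capped at 9" {
        var env = std.process.EnvMap.init(std.testing.allocator);
        defer env.deinit();
        try bumpLevel(&env);
        try std.testing.expectEqualStrings("1", env.get("NCDU_LEVEL").?);
        try env.put("NCDU_LEVEL", "9");
        try bumpLevel(&env);
        try std.testing.expectEqualStrings("9", env.get("NCDU_LEVEL").?);
    }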
|
|
|
||||||
85
src/util.zig
|
|
@ -2,7 +2,6 @@
|
||||||
// SPDX-License-Identifier: MIT
|
// SPDX-License-Identifier: MIT
|
||||||
|
|
||||||
const std = @import("std");
|
const std = @import("std");
|
||||||
const c = @import("c.zig").c;
|
|
||||||
|
|
||||||
// Cast any integer type to the target type, clamping the value to the supported maximum if necessary.
|
// Cast any integer type to the target type, clamping the value to the supported maximum if necessary.
|
||||||
pub fn castClamp(comptime T: type, x: anytype) T {
|
pub fn castClamp(comptime T: type, x: anytype) T {
|
||||||
|
|
@ -18,8 +17,8 @@ pub fn castClamp(comptime T: type, x: anytype) T {
|
||||||
|
|
||||||
// Cast any integer type to the target type, truncating if necessary.
|
// Cast any integer type to the target type, truncating if necessary.
|
||||||
pub fn castTruncate(comptime T: type, x: anytype) T {
|
pub fn castTruncate(comptime T: type, x: anytype) T {
|
||||||
const Ti = @typeInfo(T).int;
|
const Ti = @typeInfo(T).Int;
|
||||||
const Xi = @typeInfo(@TypeOf(x)).int;
|
const Xi = @typeInfo(@TypeOf(x)).Int;
|
||||||
const nx: std.meta.Int(Ti.signedness, Xi.bits) = @bitCast(x);
|
const nx: std.meta.Int(Ti.signedness, Xi.bits) = @bitCast(x);
|
||||||
return if (Xi.bits > Ti.bits) @truncate(nx) else nx;
|
return if (Xi.bits > Ti.bits) @truncate(nx) else nx;
|
||||||
}
|
}
|
||||||
|
|
@ -32,8 +31,8 @@ pub fn blocksToSize(b: u64) u64 {
|
||||||
// Ensure the given arraylist buffer gets zero-terminated and returns a slice
|
// Ensure the given arraylist buffer gets zero-terminated and returns a slice
|
||||||
// into the buffer. The returned buffer is invalidated whenever the arraylist
|
// into the buffer. The returned buffer is invalidated whenever the arraylist
|
||||||
// is freed or written to.
|
// is freed or written to.
|
||||||
pub fn arrayListBufZ(buf: *std.ArrayListUnmanaged(u8), alloc: std.mem.Allocator) [:0]const u8 {
|
pub fn arrayListBufZ(buf: *std.ArrayList(u8)) [:0]const u8 {
|
||||||
buf.append(alloc, 0) catch unreachable;
|
buf.append(0) catch unreachable;
|
||||||
defer buf.items.len -= 1;
|
defer buf.items.len -= 1;
|
||||||
return buf.items[0..buf.items.len-1:0];
|
return buf.items[0..buf.items.len-1:0];
|
||||||
}
|
}
|
||||||
|
|
@ -171,79 +170,3 @@ test "strnatcmp" {
|
||||||
for (i+1..w.len) |j| try eq(strnatcmp(w[i], w[j]), .lt);
|
for (i+1..w.len) |j| try eq(strnatcmp(w[i], w[j]), .lt);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
pub fn expanduser(path: []const u8, alloc: std.mem.Allocator) ![:0]u8 {
|
|
||||||
if (path.len == 0 or path[0] != '~') return alloc.dupeZ(u8, path);
|
|
||||||
|
|
||||||
const len = std.mem.indexOfScalar(u8, path, '/') orelse path.len;
|
|
||||||
const home_raw = blk: {
|
|
||||||
const pwd = pwd: {
|
|
||||||
if (len == 1) {
|
|
||||||
if (std.posix.getenvZ("HOME")) |p| break :blk p;
|
|
||||||
break :pwd c.getpwuid(c.getuid());
|
|
||||||
} else {
|
|
||||||
const name = try alloc.dupeZ(u8, path[1..len]);
|
|
||||||
defer alloc.free(name);
|
|
||||||
break :pwd c.getpwnam(name.ptr);
|
|
||||||
}
|
|
||||||
};
|
|
||||||
if (pwd != null)
|
|
||||||
if (@as(*c.struct_passwd, pwd).pw_dir) |p|
|
|
||||||
break :blk std.mem.span(p);
|
|
||||||
return alloc.dupeZ(u8, path);
|
|
||||||
};
|
|
||||||
const home = std.mem.trimRight(u8, home_raw, "/");
|
|
||||||
|
|
||||||
if (home.len == 0 and path.len == len) return alloc.dupeZ(u8, "/");
|
|
||||||
return try std.mem.concatWithSentinel(alloc, u8, &.{ home, path[len..] }, 0);
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
// Silly abstraction to read a file one line at a time. Only exists to help
|
|
||||||
// with supporting both Zig 0.14 and 0.15, can be removed once 0.14 support is
|
|
||||||
// dropped.
|
|
||||||
pub const LineReader = if (@hasDecl(std.io, "bufferedReader")) struct {
|
|
||||||
rd: std.io.BufferedReader(4096, std.fs.File.Reader),
|
|
||||||
fbs: std.io.FixedBufferStream([]u8),
|
|
||||||
|
|
||||||
pub fn init(f: std.fs.File, buf: []u8) @This() {
|
|
||||||
return .{
|
|
||||||
.rd = std.io.bufferedReader(f.reader()),
|
|
||||||
.fbs = std.io.fixedBufferStream(buf),
|
|
||||||
};
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn read(s: *@This()) !?[]u8 {
|
|
||||||
s.fbs.reset();
|
|
||||||
s.rd.reader().streamUntilDelimiter(s.fbs.writer(), '\n', s.fbs.buffer.len) catch |err| switch (err) {
|
|
||||||
error.EndOfStream => if (s.fbs.getPos() catch unreachable == 0) return null,
|
|
||||||
else => |e| return e,
|
|
||||||
};
|
|
||||||
return s.fbs.getWritten();
|
|
||||||
}
|
|
||||||
|
|
||||||
} else struct {
|
|
||||||
rd: std.fs.File.Reader,
|
|
||||||
|
|
||||||
pub fn init(f: std.fs.File, buf: []u8) @This() {
|
|
||||||
return .{ .rd = f.readerStreaming(buf) };
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn read(s: *@This()) !?[]u8 {
|
|
||||||
// Can't use takeDelimiter() because that's not available in 0.15.1,
|
|
||||||
// Can't use takeDelimiterExclusive() because that changed behavior in 0.15.2.
|
|
||||||
const r = &s.rd.interface;
|
|
||||||
const result = r.peekDelimiterInclusive('\n') catch |err| switch (err) {
|
|
||||||
error.EndOfStream => {
|
|
||||||
const remaining = r.buffer[r.seek..r.end];
|
|
||||||
if (remaining.len == 0) return null;
|
|
||||||
r.toss(remaining.len);
|
|
||||||
return remaining;
|
|
||||||
},
|
|
||||||
else => |e| return e,
|
|
||||||
};
|
|
||||||
r.toss(result.len);
|
|
||||||
return result[0 .. result.len - 1];
|
|
||||||
}
|
|
||||||
};
|
|
||||||
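util.arrayListBufZ() above temporarily appends a 0 so the caller can borrow the buffer as a NUL-terminated slice without an extra allocation, then shrinks the length back. A usage sketch against the zig-branch (allocator-passing) signature:

    const std = @import("std");

    // Borrow the list's contents as a sentinel-terminated slice; the slice is
    // invalidated by the next write to (or deinit of) the list.
    fn arrayListBufZ(buf: *std.ArrayListUnmanaged(u8), alloc: std.mem.Allocator) [:0]const u8 {
        buf.append(alloc, 0) catch unreachable;
        defer buf.items.len -= 1;
        return buf.items[0 .. buf.items.len - 1 :0];
    }

    test "borrowed sentinel slice" {
        const alloc = std.testing.allocator;
        var buf: std.ArrayListUnmanaged(u8) = .empty;
        defer buf.deinit(alloc);
        try buf.appendSlice(alloc, "/tmp/x");
        const z = arrayListBufZ(&buf, alloc);
        try std.testing.expectEqualStrings("/tmp/x", z);
    }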
|
|
|
||||||