mirror of
https://code.blicky.net/yorhel/ncdu.git
synced 2026-01-13 01:08:41 -09:00
Add (temporary) compression support for the new export format
This is mainly for testing and benchmarking; I plan to choose a single
block size and compression library before release, to avoid bloating the
ncdu binary too much.
Currently this links against the system-provided zstd, zlib and lz4.
ncdubinexp.pl doesn't support compressed files yet.
Early benchmarks of `ncdu -f firefox-128.0.json` (407k files) with
different block sizes and compression options:
bin8k bin16k bin32k bin64k bin128k bin256k bin512k json
algo size time size time size time size time size time size time size time size time
none 16800 128 16760 126 16739 125 16728 124 16724 125 16722 124 16721 124 24835 127
lz4 7844 143 7379 141 7033 140 6779 140 6689 138 6626 139 6597 139 5850 179
zlib-1 6017 377 5681 310 5471 273 5345 262 5289 259 5257 256 5242 255 4415 164
zlib-2 5843 386 5496 319 5273 284 5136 276 5072 271 5037 270 5020 268 4164 168
zlib-3 5718 396 5361 339 5130 316 4977 321 4903 318 4862 324 4842 319 3976 196
zlib-4 5536 424 5153 372 4903 341 4743 339 4665 338 4625 340 4606 336 3798 212
zlib-5 5393 464 4993 419 4731 406 4561 414 4478 422 4434 426 4414 420 3583 261
zlib-6 5322 516 4902 495 4628 507 4450 535 4364 558 4318 566 4297 564 3484 352
zlib-7 5311 552 4881 559 4599 601 4417 656 4329 679 4282 696 4260 685 3393 473
zlib-8 5305 588 4864 704 4568 1000 4374 1310 4280 1470 4230 1530 4206 1550 3315 1060
zlib-9 5305 589 4864 704 4568 1030 4374 1360 4280 1510 4230 1600 4206 1620 3312 1230
zstd-1 5845 177 5426 169 5215 165 5030 160 4921 156 4774 157 4788 153 3856 126
zstd-2 5830 178 5424 170 5152 164 4963 161 4837 160 4595 162 4614 158 3820 134
zstd-3 5683 187 5252 177 5017 172 4814 168 4674 169 4522 169 4446 170 3664 145
zstd-4 5492 235 5056 230 4966 173 4765 170 4628 169 4368 222 4437 170 3656 145
zstd-5 5430 270 4988 266 4815 234 4616 229 4485 224 4288 241 4258 223 3366 189
zstd-6 5375 323 4928 322 4694 282 4481 279 4334 276 4231 275 4125 271 3234 235
zstd-7 5322 400 4866 420 4678 319 4464 314 4315 312 4155 300 4078 295 3173 269
zstd-8 5314 454 4848 689 4636 344 4420 346 4270 345 4137 350 4060 342 3082 330
zstd-9 5320 567 4854 615 4596 392 4379 398 4228 401 4095 408 4060 345 3057 385
zstd-10 5319 588 4852 662 4568 458 4350 466 4198 478 4066 491 4024 395 3005 489
zstd-11 5310 975 4857 1040 4543 643 4318 688 4164 743 4030 803 3999 476 2967 627
zstd-12 5171 1300 4692 1390 4539 699 4313 765 4154 854 4018 939 3999 478 2967 655
zstd-13 5128 1760 4652 1880 4556 1070 4341 1130 4184 1230 3945 1490 3980 705 2932 1090
zstd-14 5118 2040 4641 2180 4366 1540 4141 1620 3977 1780 3854 1810 3961 805 2893 1330
mzstd-1 5845 206 5426 195 5215 188 5030 180 4921 176 4774 175 4788 172
mzstd-2 5830 207 5424 196 5152 186 4963 183 4837 181 4765 178 4614 176
mzstd-3 5830 207 5424 196 5150 187 4960 183 4831 180 4796 181 4626 180
mzstd-4 5830 206 5427 196 5161 188 4987 185 4879 182 4714 180 4622 179
mzstd-5 5430 347 4988 338 5161 189 4987 185 4879 181 4711 180 4620 180
mzstd-6 5384 366 4939 359 4694 390 4481 391 4334 383 4231 399 4125 394
mzstd-7 5328 413 4873 421 4694 390 4481 390 4334 385 4155 442 4078 435
mzstd-8 5319 447 4854 577 4649 417 4434 421 4286 419 4155 440 4078 436
mzstd-9 5349 386 4900 385 4606 469 4390 478 4241 478 4110 506 4078 436
mzstd-10 5319 448 4853 597 4576 539 4360 560 4210 563 4079 597 4039 502
mzstd-11 5430 349 4988 339 4606 468 4390 478 4241 478 4110 506 4013 590
mzstd-12 5384 366 4939 361 4576 540 4360 556 4210 559 4079 597 4013 589
mzstd-13 5349 387 4900 388 4694 390 4481 392 4334 386 4155 439 4078 436
mzstd-14 5328 414 4873 420 4649 417 4434 424 4286 420 4155 444 4039 500
I'll need to do benchmarks on other directories, with hardlink support
and in extended mode as well to get more varied samples.
Another consideration in choosing a compression library is the size of
its implementation:
zlib: 100k
lz4: 106k
zstd: 732k (regular), 165k (ZSTD_LIB_MINIFY, "mzstd" above)
This commit is contained in:
parent
f25bc5cbf4
commit
ebaa9b6a89
3 changed files with 59 additions and 9 deletions
|
|
@ -19,6 +19,9 @@ pub fn build(b: *std.Build) void {
|
|||
|
||||
exe.pie = pie;
|
||||
exe.root_module.linkSystemLibrary("ncursesw", .{});
|
||||
exe.root_module.linkSystemLibrary("zlib", .{});
|
||||
exe.root_module.linkSystemLibrary("libzstd", .{});
|
||||
exe.root_module.linkSystemLibrary("lz4", .{});
|
||||
// https://github.com/ziglang/zig/blob/b52be973dfb7d1408218b8e75800a2da3dc69108/build.zig#L551-L554
|
||||
if (target.result.isDarwin()) {
|
||||
// useful for package maintainers
|
||||
|
|
|
|||
|
|
@ -6,6 +6,11 @@ const main = @import("main.zig");
|
|||
const sink = @import("sink.zig");
|
||||
const util = @import("util.zig");
|
||||
const ui = @import("ui.zig");
|
||||
const c = @cImport({
|
||||
@cInclude("zlib.h");
|
||||
@cInclude("zstd.h");
|
||||
@cInclude("lz4.h");
|
||||
});
|
||||
|
||||
pub const global = struct {
|
||||
var fd: std.fs.File = undefined;
|
||||
|
|
@ -17,7 +22,7 @@ pub const global = struct {
|
|||
// var links: Map dev -> ino -> (last_offset, size, blocks, nlink)
|
||||
};
|
||||
|
||||
const BLOCK_SIZE: usize = 64*1024;
|
||||
const BLOCK_SIZE: usize = 512*1024; // XXX: Current maximum for benchmarking, should just stick with a fixed block size.
|
||||
|
||||
const ItemType = enum(i3) {
|
||||
dir = 0,
|
||||
|
|
@ -59,7 +64,7 @@ const ItemKey = enum(u5) {
|
|||
};
|
||||
|
||||
// Pessimistic upper bound on the encoded size of an item, excluding the name field.
|
||||
// 2 bytes for map start/end, 10 per field (2 for the key, 9 for a full u64).
|
||||
// 2 bytes for map start/end, 11 per field (2 for the key, 9 for a full u64).
|
||||
const MAX_ITEM_LEN = 2 + 11 * @typeInfo(ItemKey).Enum.fields.len;
|
||||
|
||||
const CborMajor = enum(u3) { pos, neg, bytes, text, array, map, tag, simple };
|
||||
|
|
@ -79,19 +84,48 @@ pub const Thread = struct {
|
|||
block_num: u32 = std.math.maxInt(u32),
|
||||
itemref: u64 = 0, // ref of item currently being written
|
||||
|
||||
// Temporary buffer for headers and compression
|
||||
// Temporary buffer for headers and compression.
|
||||
// TODO: check with compressBound()/ZSTD_compressBound()
|
||||
tmp: [BLOCK_SIZE+128]u8 = undefined,
|
||||
|
||||
fn compressNone(in: []const u8, out: []u8) usize {
|
||||
@memcpy(out[0..in.len], in);
|
||||
return in.len;
|
||||
}
|
||||
|
||||
fn compressZlib(in: []const u8, out: []u8) usize {
|
||||
var outlen: c.uLongf = out.len;
|
||||
const r = c.compress2(out.ptr, &outlen, in.ptr, in.len, main.config.complevel);
|
||||
std.debug.assert(r == c.Z_OK);
|
||||
return outlen;
|
||||
}
|
||||
|
||||
fn compressZstd(in: []const u8, out: []u8) usize {
|
||||
const r = c.ZSTD_compress(out.ptr, out.len, in.ptr, in.len, main.config.complevel);
|
||||
std.debug.assert(c.ZSTD_isError(r) == 0);
|
||||
return r;
|
||||
}
|
||||
|
||||
fn compressLZ4(in: []const u8, out: []u8) usize {
|
||||
const r = c.LZ4_compress_default(in.ptr, out.ptr, @intCast(in.len), @intCast(out.len));
|
||||
std.debug.assert(r > 0);
|
||||
return @intCast(r);
|
||||
}
|
||||
|
||||
fn createBlock(t: *Thread) []const u8 {
|
||||
if (t.block_num == std.math.maxInt(u32) or t.off <= 1) return "";
|
||||
|
||||
// TODO: Compression
|
||||
const blocklen: u32 = @intCast(t.off + 16);
|
||||
const bodylen = switch (main.config.compression) {
|
||||
.none => compressNone(t.buf[0..t.off], t.tmp[12..]),
|
||||
.zlib => compressZlib(t.buf[0..t.off], t.tmp[12..]),
|
||||
.zstd => compressZstd(t.buf[0..t.off], t.tmp[12..]),
|
||||
.lz4 => compressLZ4(t.buf[0..t.off], t.tmp[12..]),
|
||||
};
|
||||
const blocklen: u32 = @intCast(bodylen + 16);
|
||||
t.tmp[0..4].* = blockHeader(1, blocklen);
|
||||
t.tmp[4..8].* = bigu32(t.block_num);
|
||||
t.tmp[8..12].* = bigu32(@intCast(t.off));
|
||||
@memcpy(t.tmp[12..][0..t.off], t.buf[0..t.off]);
|
||||
t.tmp[12+t.off..][0..4].* = blockHeader(1, blocklen);
|
||||
t.tmp[12+bodylen..][0..4].* = blockHeader(1, blocklen);
|
||||
return t.tmp[0..blocklen];
|
||||
}
|
||||
|
||||
|
|
@ -164,7 +198,7 @@ pub const Thread = struct {
|
|||
// Reserve space for a new item, write out the type, prev and name fields and return the itemref.
|
||||
fn itemStart(t: *Thread, itype: ItemType, prev_item: u64, name: []const u8) u64 {
|
||||
const min_len = name.len + MAX_ITEM_LEN;
|
||||
if (t.off + min_len > t.buf.len) t.flush(min_len);
|
||||
if (t.off + min_len > main.config.blocksize) t.flush(min_len);
|
||||
|
||||
t.itemref = (@as(u64, t.block_num) << 24) | t.off;
|
||||
t.cborIndef(.map);
|
||||
|
|
|
|||
15
src/main.zig
15
src/main.zig
|
|
@ -70,6 +70,9 @@ pub const config = struct {
|
|||
pub var exclude_kernfs: bool = false;
|
||||
pub var exclude_patterns: std.ArrayList([:0]const u8) = std.ArrayList([:0]const u8).init(allocator);
|
||||
pub var threads: usize = 1;
|
||||
pub var compression: enum { none, zlib, zstd, lz4 } = .none;
|
||||
pub var complevel: u8 = 5;
|
||||
pub var blocksize: usize = 64*1024;
|
||||
|
||||
pub var update_delay: u64 = 100*std.time.ns_per_ms;
|
||||
pub var scan_ui: ?enum { none, line, full } = null;
|
||||
|
|
@ -502,7 +505,17 @@ pub fn main() void {
|
|||
else if (opt.is("-f")) import_file = allocator.dupeZ(u8, args.arg()) catch unreachable
|
||||
else if (opt.is("--ignore-config")) {}
|
||||
else if (opt.is("--quit-after-scan")) quit_after_scan = true // undocumented feature to help with benchmarking scan/import
|
||||
else if (argConfig(&args, opt)) {}
|
||||
else if (opt.is("--binfmt")) { // Experimental, for benchmarking
|
||||
const a = args.arg();
|
||||
config.compression = switch (a[0]) {
|
||||
'z' => .zlib,
|
||||
's','S' => .zstd,
|
||||
'l' => .lz4,
|
||||
else => .none,
|
||||
};
|
||||
config.complevel = (a[1] - '0') + (if (a[0] == 'S') @as(u8, 10) else 0);
|
||||
config.blocksize = @as(usize, 8*1024) << @intCast(a[2] - '0'); // 0 = 8k, 1 16k, 2 32k, 3 64k, 4 128k, 5 256k, 6 512k
|
||||
} else if (argConfig(&args, opt)) {}
|
||||
else ui.die("Unrecognized option '{s}'.\n", .{opt.val});
|
||||
}
|
||||
}
|
||||
|
|
|
|||
Loading…
Reference in a new issue