From 29f763dde1d61e88234c6a2dcca3115dbbab444e Mon Sep 17 00:00:00 2001
From: Karchnu
Date: Sat, 5 Dec 2020 16:48:55 +0100
Subject: [PATCH] Better display.

---
 src/test-tokenizer.zig | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/src/test-tokenizer.zig b/src/test-tokenizer.zig
index 0540eaa..4d3cf6b 100644
--- a/src/test-tokenizer.zig
+++ b/src/test-tokenizer.zig
@@ -19,15 +19,15 @@ fn nextArg(args: [][]const u8, idx: *usize) ?[]const u8 {
 
 // Get all tokens from the input.
-fn getAllTokens(allocator: *mem.Allocator, source: []const u8) !std.ArrayList(lexer.Token.Id) {
+fn getAllTokens(allocator: *mem.Allocator, source: []const u8) !std.ArrayList(lexer.Token) {
     // Getting the tokenizer, initialized with the source code we want to check.
     var tokenizer = lexer.Tokenizer.init(source);
-    var list = std.ArrayList(lexer.Token.Id).init(allocator);
+    var list = std.ArrayList(lexer.Token).init(allocator);
 
     while(true) {
         const token = tokenizer.next();
-        try list.append(token.id);
+        try list.append(token);
         if(token.id == .Eof) break;
     }
 
     return list;
 }
@@ -76,8 +76,8 @@ pub fn lexer_analyze() !void {
     // Get the file size and allocate memory.
     const tokens = try getAllTokens(allocator, content);
 
-    for(tokens.items) |tokenid| {
-        print("token: {}\n", .{@tagName(tokenid)});
+    for(tokens.items) |token| {
+        print("{s:20} => {}\n", .{@tagName(token.id), buffer[token.loc.start..token.loc.end]});
     }
 }
 
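
Note on the new display logic: the patch widens getAllTokens to return whole lexer.Token values rather than bare Token.Id tags, so the caller can slice the original source with each token's loc.start/loc.end and print the lexeme next to its tag name. Below is a minimal, self-contained sketch of that loop, assuming a recent Zig toolchain; the TokenId/Loc/Token types are stand-ins inferred from the fields the diff touches (the real definitions live in the project's lexer module), and `source` plays the role the patch gives to `buffer`.

const std = @import("std");

// Stand-ins for the project's lexer types (assumed shapes, inferred
// from the fields the diff uses: token.id, token.loc.start, token.loc.end).
const TokenId = enum { Keyword, Identifier, Eof };
const Loc = struct { start: usize, end: usize };
const Token = struct { id: TokenId, loc: Loc };

pub fn main() void {
    const source = "fn main";

    // Hand-built tokens standing in for Tokenizer.next() output.
    const tokens = [_]Token{
        .{ .id = .Keyword, .loc = .{ .start = 0, .end = 2 } },
        .{ .id = .Identifier, .loc = .{ .start = 3, .end = 7 } },
        .{ .id = .Eof, .loc = .{ .start = 7, .end = 7 } },
    };

    // Same display idea as the patched loop: pad the tag name to a
    // fixed width, then print the slice of the source the token covers.
    for (tokens) |token| {
        std.debug.print("{s:<20} => {s}\n", .{
            @tagName(token.id),
            source[token.loc.start..token.loc.end],
        });
    }
}

This prints one aligned "TagName => lexeme" line per token, which is the "better display" the commit message refers to.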