diff --git a/src/test-tokenizer.zig b/src/test-tokenizer.zig
index 0540eaa..4d3cf6b 100644
--- a/src/test-tokenizer.zig
+++ b/src/test-tokenizer.zig
@@ -19,15 +19,15 @@ fn nextArg(args: [][]const u8, idx: *usize) ?[]const u8 {
 
 // Get all tokens from the input.
-fn getAllTokens(allocator: *mem.Allocator, source: []const u8) !std.ArrayList(lexer.Token.Id) {
+fn getAllTokens(allocator: *mem.Allocator, source: []const u8) !std.ArrayList(lexer.Token) {
     // Getting the tokenizer, initialized with the source code we want to check.
     var tokenizer = lexer.Tokenizer.init(source);
 
-    var list = std.ArrayList(lexer.Token.Id).init(allocator);
+    var list = std.ArrayList(lexer.Token).init(allocator);
 
     while(true) {
         const token = tokenizer.next();
-        try list.append(token.id);
+        try list.append(token);
        if(token.id == .Eof) break;
     }
 
 
@@ -76,8 +76,8 @@ pub fn lexer_analyze() !void {
     // Get the file size and allocate memory.
 
     const tokens = try getAllTokens(allocator, content);
 
-    for(tokens.items) |tokenid| {
-        print("token: {}\n", .{@tagName(tokenid)});
+    for(tokens.items) |token| {
+        print("{s:20} => {s}\n", .{@tagName(token.id), content[token.loc.start..token.loc.end]});
     }
 }
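Note: with full Token values (rather than bare Token.Id tags) stored in the list, a caller can recover each token's source text from the same buffer it passed in. Below is a minimal sketch of the resulting calling pattern, assuming only the lexer API visible in the patch (Tokenizer.init, next, token.id, token.loc.start/end); the sample input string and the deinit call are illustrative, not part of the patch.

const std = @import("std");
const lexer = @import("lexer.zig");

fn demo(allocator: *std.mem.Allocator) !void {
    const content = "const x = 1;"; // hypothetical input, for illustration only

    var tokens = try getAllTokens(allocator, content);
    // The caller owns the returned list and should free it when done.
    defer tokens.deinit();

    for(tokens.items) |token| {
        // Each token carries its id plus the [start, end) range it covers
        // in the source, so the text can be sliced back out of `content`.
        std.debug.print("{s:20} => {s}\n", .{
            @tagName(token.id),
            content[token.loc.start..token.loc.end],
        });
    }
}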