2020-12-10 04:47:38 +01:00
|
|
|
// SPDX-License-Identifier: MIT
|
|
|
|
// Copyright (c) 2015-2020 Zig Contributors
|
|
|
|
// This file is part of [zig](https://ziglang.org/), which is MIT licensed.
|
|
|
|
// The MIT license requires this copyright notice to be included in all copies
|
|
|
|
// and substantial portions of the software.
|
|
|
|
const std = @import("std");
|
|
|
|
const assert = std.debug.assert;
|
|
|
|
const Allocator = std.mem.Allocator;
|
|
|
|
const ast = @import("ast.zig");
|
|
|
|
const Node = ast.Node;
|
|
|
|
const Tree = ast.Tree;
|
|
|
|
const AstError = ast.Error;
|
|
|
|
const TokenIndex = ast.TokenIndex;
|
|
|
|
const NodeIndex = ast.NodeIndex;
|
|
|
|
|
|
|
|
const lexer = @import("tokenizer.zig");
|
|
|
|
const Token = lexer.Token;
|
|
|
|
const Tokenizer = lexer.Tokenizer;
|
|
|
|
|
|
|
|
pub const Error = error{ParseError} || Allocator.Error;
|
|
|
|
|
|
|
|
/// Result should be freed with tree.deinit() when there are
/// no more references to any of the tokens or nodes.
pub fn parse(gpa: *Allocator, source: []const u8) Allocator.Error!*Tree {
    // Token storage, filled by the tokenizer pass below.
    var ids = std.ArrayList(Token.Id).init(gpa);
    defer ids.deinit();
    var locs = std.ArrayList(Token.Loc).init(gpa);
    defer locs.deinit();

    // Pre-allocate: roughly one token per 8 bytes of source.
    const estimated_token_count = source.len / 8;
    try ids.ensureCapacity(estimated_token_count);
    try locs.ensureCapacity(estimated_token_count);

    // Run the lexer over the whole source, keeping every token.
    // The .Eof marker is appended as well before the loop stops.
    var tokenizer = Tokenizer.init(source);
    while (true) {
        const tok = tokenizer.next();
        try ids.append(tok.id);
        try locs.append(tok.loc);
        if (tok.id == .Eof) break;
    }

    // Parser state: token slices point into the lists above, the arena
    // owns the nodes, errors are collected in an unmanaged list.
    var parser: Parser = .{
        .source = source,
        .arena = std.heap.ArenaAllocator.init(gpa),
        .gpa = gpa,
        .token_ids = ids.items,
        .token_locs = locs.items,
        .errors = .{},
        .tok_i = 0,
        .indent = 0, // Indentation for debug output.
    };
    defer parser.errors.deinit(gpa);
    errdefer parser.arena.deinit();

    // Leading line comments carry no syntax; step over them.
    while (ids.items[parser.tok_i] == .LineComment) parser.tok_i += 1;

    // Perform parsing, called once.
    const root_node = try parser.parseRoot();

    // Hand everything over to the tree: the token lists and the error
    // list become owned slices, and the arena state moves into the tree
    // (so errdefer above must not run past this point — it doesn't,
    // since create() is the last fallible call).
    const tree = try parser.arena.allocator.create(Tree);
    tree.* = .{
        .gpa = gpa,
        .source = source,
        .token_ids = ids.toOwnedSlice(),
        .token_locs = locs.toOwnedSlice(),
        .errors = parser.errors.toOwnedSlice(gpa),
        .root_node = root_node,
        .arena = parser.arena.state,
    };
    return tree;
}
|
|
|
|
|
2020-12-16 23:13:36 +01:00
|
|
|
/// Result of parsing an `attribute: value` pair. Both sides are stored
/// as token indices into the parsed token stream, not as copied text;
/// use Parser.giveTokenContent to recover the source slices.
const Assignment = struct {
    /// Token index of the attribute (left-hand side) identifier.
    id_attribute: TokenIndex,
    /// Token index of the value (right-hand side) literal.
    id_value: TokenIndex,
};
|
|
|
|
|
2020-12-10 04:47:38 +01:00
|
|
|
/// Represents in-progress parsing, will be converted to an ast.Tree after completion.
const Parser = struct {
    arena: std.heap.ArenaAllocator, // Owns the AST nodes; its state is moved into the Tree.
    gpa: *Allocator, // General purpose allocator for transient lists and the error list.
    source: []const u8, // Full source text; token locations index into it.
    token_ids: []const Token.Id, // IDs of all tokens, parallel to `token_locs`.
    token_locs: []const Token.Loc, // Start/end offsets of all tokens, parallel to `token_ids`.
    tok_i: TokenIndex, // Index of the current token being analyzed.
    errors: std.ArrayListUnmanaged(AstError), // Parse errors collected so far.
    indent: u16, // Indentation for debug.
|
2020-12-10 04:47:38 +01:00
|
|
|
|
|
|
|
/// Root <- skip ContainerMembers eof
/// Parses every top-level declaration, then wraps them in a Root node
/// allocated from the parser's arena.
fn parseRoot(p: *Parser) Allocator.Error!*Node.Root {
    const members = try parseContainerMembers(p, true);
    defer p.gpa.free(members);

    // parseContainerMembers skips everything it cannot understand, so
    // the only token left here is the EOF marker. eatToken returns the
    // token index or null when the current token has a different id;
    // .Eof is guaranteed by the tokenizing loop in parse().
    const eof_token = p.eatToken(.Eof).?;

    // Declaration count as an ast.NodeIndex (usize) for Root.create.
    const members_len = @intCast(NodeIndex, members.len);
    const node = try Node.Root.create(&p.arena.allocator, members_len, eof_token);

    // Copy the collected declarations into the node's own storage.
    std.mem.copy(*Node, node.decls(), members);

    return node;
}
|
|
|
|
|
|
|
|
/// ContainerMembers
/// <- TestDecl ContainerMembers
/// / TopLevelComptime ContainerMembers
/// / KEYWORD_pub? TopLevelDecl ContainerMembers
/// / ContainerField COMMA ContainerMembers
/// / ContainerField
/// /
// Grammar handled at this level:
//   Start => Requires Thing => End
//   Requires => require StringLiteral Requires | nil
//   Thing => Definition Thing | ClassHdr Thing | nil
//   Definition => define Identifier ClassHdrSimple
//   ClassHdr => ClassHdrFull LBrace ClassCon RBrace
//            |  ClassHdrSimple LBrace ClassCon RBrace
//   ClassHdrSimple => Identifier   (class without identifier, for a
//                                   definition or when unused later)
//   ClassHdrFull => Identifier LParen Identifier RParen
//   ClassCon => ClassHdr | statement ClassCon | nil
//   statement => Keyword_property Identifier Identifier Colon value
//   value => StringLiteral | Keyword_null | IntegerLiteral | FloatLiteral
//
// parseContainerMembers: actual parsing code starts here. Dispatches on
// the current token until EOF; each branch puts its token back so the
// dedicated parse function sees the whole construct.
// Caller owns the returned slice (currently always empty: node
// construction is still TODO).
fn parseContainerMembers(p: *Parser, top_level: bool) ![]*Node {
    std.debug.print("parseContainerMembers: is top? {}\n", .{top_level});

    // list: all nodes collected at this level.
    var list = std.ArrayList(*Node).init(p.gpa);
    defer list.deinit();

    while (true) {
        const token = p.nextToken();
        switch (p.token_ids[token]) {
            .Keyword_require => {
                p.putBackToken(token);
                // TODO: read file required and parse its content.
                p.parseRequire();
            },

            .Keyword_define => {
                p.putBackToken(token);
                // TODO: definitions.
                p.parseDefine() catch |err| switch (err) {
                    // Propagate memory errors; recover from parse errors.
                    error.OutOfMemory => return error.OutOfMemory,
                    error.ParseError => continue,
                };
            },

            // Identifier => on top level this means a class.
            .Identifier => {
                p.putBackToken(token);
                p.parseClass() catch |err| switch (err) {
                    // Propagate memory errors; recover from parse errors.
                    error.OutOfMemory => return error.OutOfMemory,
                    error.ParseError => {
                        p.say("we catched a ParseError on token: {}\n"
                            , .{p.giveTokenContent(p.tok_i)});
                        continue;
                    },
                };
            },

            .Eof => {
                p.putBackToken(token);
                break;
            },

            // Unknown token: report it and keep scanning.
            else => {
                std.debug.print("token: {}\n", .{p.token_ids[token]});
                continue;
            },
        }
    }

    return list.toOwnedSlice();
}
|
|
|
|
|
2020-12-17 03:00:56 +01:00
|
|
|
// Debug print helper: emits one tab per current indentation level,
// then forwards the format string and arguments to std.debug.print.
fn say(p: *Parser, comptime fmt: []const u8, args: anytype) void {
    var remaining = p.indent;
    while (remaining > 0) : (remaining -= 1) {
        std.debug.print("\t", .{});
    }
    std.debug.print(fmt, args);
}
|
|
|
|
|
2020-12-16 06:49:05 +01:00
|
|
|
// TODO: require "file"
// file should be read, parsed and a loop detection should take place.
//
// Consumes `require "file"`; currently only logs the file name.
fn parseRequire(p: *Parser) void {
    // The keyword itself carries no information — discard it.
    // NOTE(review): eatToken returns null on malformed input and no
    // error is recorded here, so a bad `require` is silently skipped —
    // confirm this is the intended recovery behavior.
    _ = p.eatToken(.Keyword_require);
    const file_to_read = p.eatToken(.StringLiteral);
    std.debug.print("TODO: file required: {}\n", .{file_to_read});
}
|
2020-12-16 06:49:05 +01:00
|
|
|
|
2020-12-16 18:38:11 +01:00
|
|
|
// TODO: class definition (inheritance).
// Definition => define Identifier ClassHdrSimple
//
// Parses `define NewName ParentName { ... }`.
fn parseDefine(p: *Parser) !void {
    _ = p.eatToken(.Keyword_define);
    const new_class_name = p.eatToken(.Identifier);
    const parent_class_name = p.eatToken(.Identifier);
    std.debug.print("TODO: class inheritance: {} < {}\n",
        .{new_class_name, parent_class_name});
    // parseClass starts by consuming the class-name Identifier itself
    // (and returns immediately when it finds none). Since we already
    // consumed the parent name above to log it, put it back — otherwise
    // parseClass bails out and the `{ ... }` body is never parsed.
    if (parent_class_name) |tok| p.putBackToken(tok);
    try p.parseClass();
    // TODO: get the old class definition,
    // create a new definition,
    // then add old and new properties and children to it.
}
|
2020-12-16 06:49:05 +01:00
|
|
|
|
2020-12-16 18:38:11 +01:00
|
|
|
// TODO: class definition (inheritance).
// ClassHdr => ClassHdrFull LBrace ClassCon RBrace
//          |  ClassHdrSimple LBrace ClassCon RBrace
//
// Parses a class header (simple or full), then its brace-delimited
// content: nested classes, attribute re-assignments, and properties.
// Returns without consuming anything further when no class-name
// Identifier is present.
fn parseClass(p: *Parser) !void {
    // Track nesting depth for the indented debug output.
    p.indent += 1;
    defer { p.indent -= 1; }

    const class_name = p.eatToken(.Identifier);
    if (class_name == null) {
        return;
    }

    // Either simple or full header; the parenthesized id is optional.
    const identifier: ?[]const u8 = try p.parseFullClassHeader();

    p.say("TODO: read class: {}", .{p.giveTokenContent(class_name.?)});
    if (identifier) |id| {
        std.debug.print(", id: {}\n", .{id});
    } else {
        std.debug.print("\n", .{});
    }

    // Class body starts here.
    _ = try p.expectToken(.LBrace);

    while (true) {
        const token = p.nextToken();
        switch (p.token_ids[token]) {
            .Identifier => {
                // Look one token ahead to tell a nested class
                // (`Name {` or `Name (id) {`) from an assignment
                // (`name: value`).
                const following = p.nextToken();
                switch (p.token_ids[following]) {
                    .LBrace => {
                        p.putBackToken(following);
                        p.putBackToken(token);
                        p.say("reading a new class\n", .{});
                        // WARNING: RECURSION: this may cause errors.
                        // Recover from nested parse errors, but let
                        // memory errors bubble up (previously the whole
                        // error union was silently discarded).
                        p.parseClass() catch |err| switch (err) {
                            error.OutOfMemory => return error.OutOfMemory,
                            error.ParseError => {},
                        };
                        continue;
                    },

                    .Colon => {
                        p.putBackToken(following);
                        p.putBackToken(token);
                        const assignment = try p.parseAssignment();
                        p.say("redefining an attribute {} => {}\n"
                            , .{ p.giveTokenContent(assignment.id_attribute)
                            , p.giveTokenContent(assignment.id_value)});
                    },

                    else => {
                        // Wasn't expected: couldn't understand what was
                        // in this class. Back out and let the RBrace
                        // check below report the error.
                        p.putBackToken(following);
                        p.putBackToken(token);
                        break;
                    },
                }
            },

            .Keyword_property => {
                p.say("Reading a property\n", .{});
                p.putBackToken(token);
                try p.parseProperty();
            },

            .LBrace => {
                p.say("Reading a LBrace\n", .{});
            },

            .RBrace => {
                p.say("Reading a RBrace\n", .{});
                // Put the brace back: the expectToken after the loop is
                // the single place that consumes the class's closing
                // RBrace. (Consuming it here made that expectToken
                // demand a second brace and fail on well-formed input.)
                p.putBackToken(token);
                break;
            },

            else => {
                p.putBackToken(token);
                p.say("reading {} in a class, backing up\n"
                    , .{p.giveTokenContent(token)});
                break;
            },
        }
    }

    // Class definition or instance ends with a RBrace.
    _ = try p.expectToken(.RBrace);
}
|
|
|
|
|
2020-12-17 03:00:56 +01:00
|
|
|
// statement => Keyword_property Identifier Identifier Colon value
// value => StringLiteral | Keyword_null | IntegerLiteral | FloatLiteral
//
// Parses `property ClassName attribute_name: value` and (for now) only
// logs it. Returns error.ParseError when the value token is not one of
// the literal kinds above or `null`.
fn parseProperty(p: *Parser) !void {
    // Keyword and colon are pure syntax — discard their indices.
    _ = try p.expectToken(.Keyword_property);
    const class_name = try p.expectToken(.Identifier);
    const attribute_name = try p.expectToken(.Identifier);
    _ = try p.expectToken(.Colon);
    const id_value = p.nextToken();
    switch (p.token_ids[id_value]) {
        .Keyword_null,
        .StringLiteral,
        .IntegerLiteral,
        .FloatLiteral => {
            p.say("property: {} {} = {}\n"
                , .{p.giveTokenContent(class_name), p.giveTokenContent(attribute_name), p.giveTokenContent(id_value)});
            return;
            // return Assignment{.id_attribute = ia, .id_value = id_value};
        },
        else => {
            // NOTE(review): the bad value token stays consumed here
            // (no putBackToken) — confirm callers expect that.
            return error.ParseError;
        },
    }
}
|
|
|
|
|
|
|
|
|
|
|
|
// statement => Keyword_property Identifier Identifier Colon value
// value => StringLiteral | Keyword_null | IntegerLiteral | FloatLiteral
//
// Parses `attribute: value` and returns the two token indices.
// error.ParseError when the value is not a literal/null, or when no
// attribute identifier preceded the colon.
fn parseAssignment(p: *Parser) !Assignment {
    const id_attribute = p.eatToken(.Identifier);
    _ = try p.expectToken(.Colon);
    const id_value = p.nextToken();

    switch (p.token_ids[id_value]) {
        .Keyword_null, .StringLiteral, .IntegerLiteral, .FloatLiteral => {
            if (id_attribute) |attr| {
                return Assignment{ .id_attribute = attr, .id_value = id_value };
            }
        },
        else => return error.ParseError,
    }
    // Valid value but the attribute identifier was missing.
    return error.ParseError;
}
|
2020-12-16 06:49:05 +01:00
|
|
|
|
2020-12-16 18:38:11 +01:00
|
|
|
// ClassHdrFull => Identifier LParen Identifier RParen (after the class
// name has already been consumed by the caller).
//
// Returns the identifier found between parentheses, or null when there
// are no parentheses at all (simple header). The returned slice points
// into p.source — nothing is allocated.
fn parseFullClassHeader(p: *Parser) !?[]const u8 {
    if (p.eatToken(.LParen) == null)
        return null;

    // Once we know this is a full header, an identifier then a right
    // parenthesis are expected, and should trigger a parsing error if not there.
    const identifier = try p.expectToken(.Identifier);
    _ = try p.expectToken(.RParen);

    return p.giveTokenContent(identifier);
}
|
|
|
|
|
2020-12-16 18:38:11 +01:00
|
|
|
// fn parseStatement(p: *Parser) Error!?*Node {
|
|
|
|
// }
|
2020-12-16 06:49:05 +01:00
|
|
|
|
|
|
|
|
2020-12-10 04:47:38 +01:00
|
|
|
// Consumes and returns the current token when its id matches `id`;
// otherwise leaves the position unchanged and returns null.
fn eatToken(p: *Parser, id: Token.Id) ?TokenIndex {
    if (p.token_ids[p.tok_i] != id) return null;
    return p.nextToken();
}
|
|
|
|
|
|
|
|
// expectToken: returns the token index, or error.ParseError after the
// mismatch has been recorded in p.errors by expectTokenRecoverable.
fn expectToken(p: *Parser, id: Token.Id) Error!TokenIndex {
    const maybe_token = try p.expectTokenRecoverable(id);
    return maybe_token orelse error.ParseError;
}
|
|
|
|
|
|
|
|
// Returns the token index when the current token matches `id`.
// On mismatch: appends an ExpectedToken error to p.errors, restores
// the position (so the caller can recover), and returns null.
fn expectTokenRecoverable(p: *Parser, id: Token.Id) !?TokenIndex {
    const token = p.nextToken();
    if (p.token_ids[token] == id) return token;

    try p.errors.append(p.gpa, .{
        .ExpectedToken = .{ .token = token, .expected_id = id },
    });
    // go back so that we can recover properly
    p.putBackToken(token);
    return null;
}
|
|
|
|
|
|
|
|
// Returns the index of the current token and advances p.tok_i past it,
// then additionally past any line comments that follow, so the parser
// never lands on a .LineComment. (The token list always ends in .Eof,
// which bounds the skip loop.)
fn nextToken(p: *Parser) TokenIndex {
    const current = p.tok_i;
    p.tok_i += 1;
    assert(p.token_ids[current] != .LineComment);
    if (p.tok_i >= p.token_ids.len) return current;

    while (p.token_ids[p.tok_i] == .LineComment) {
        p.tok_i += 1;
    }
    return current;
}
|
|
|
|
|
|
|
|
// Steps back one token, skipping over line comments (mirroring how
// nextToken skips them going forward).
// Example: a function searching for a declaration reads "my-variable";
// it puts the token back and returns, so the caller's parsing starts
// right at the declaration.
// Asserts that the token reached is exactly the one being put back.
fn putBackToken(p: *Parser, putting_back: TokenIndex) void {
    var i = p.tok_i;
    while (i > 0) {
        i -= 1;
        p.tok_i = i;
        if (p.token_ids[i] == .LineComment) continue;
        assert(putting_back == i);
        return;
    }
}
|
|
|
|
|
|
|
|
/// TODO Delete this function. I don't like the inversion of control.
// NOTE(review): `NodeParseFn` is not declared anywhere in this file —
// possibly meant to be `ParseFn(?*Node)`. Nothing here calls expectNode,
// which is presumably why this hasn't surfaced as a compile error yet
// (Zig analyzes declarations lazily) — confirm and fix or delete.
fn expectNode(
    p: *Parser,
    parseFn: NodeParseFn,
    /// if parsing fails
    err: AstError,
) Error!*Node {
    // Delegates to the recoverable variant; converts its null into a
    // hard error.ParseError for callers that cannot recover.
    return (try p.expectNodeRecoverable(parseFn, err)) orelse return error.ParseError;
}
|
|
|
|
|
|
|
|
/// TODO Delete this function. I don't like the inversion of control.
// NOTE(review): `NodeParseFn` is not declared anywhere in this file —
// see the same note on expectNode above; confirm and fix or delete.
fn expectNodeRecoverable(
    p: *Parser,
    parseFn: NodeParseFn,
    /// if parsing fails
    err: AstError,
) !?*Node {
    // When parseFn produces no node, record the supplied error and
    // return null so the caller can attempt recovery.
    return (try parseFn(p)) orelse {
        try p.errors.append(p.gpa, err);
        return null;
    };
}
|
2020-12-16 18:38:11 +01:00
|
|
|
|
|
|
|
// WARNING: VALID token identifier expected.
// Returns the slice of source text covered by the given token
// (no allocation — a view into p.source).
fn giveTokenContent(p: *Parser, id: TokenIndex) []const u8 {
    const location = p.token_locs[id];
    return p.source[location.start..location.end];
}
|
2020-12-10 04:47:38 +01:00
|
|
|
};
|
|
|
|
|
|
|
|
// Type constructor: the function type of a parse routine producing T —
// takes the parser and returns T or a parse/allocation error.
fn ParseFn(comptime T: type) type {
    return fn (p: *Parser) Error!T;
}
|
|
|
|
|
|
|
|
// Reference the external test file so `zig test` on this file also
// runs the parser tests.
test "std.zig.parser" {
    _ = @import("parser_test.zig");
}
|