Use 2-space continuation indent consistently in specs

Max Brunsfeld 2015-07-27 18:18:58 -07:00
parent 31b2db12d2
commit 766e3bab2c
22 changed files with 862 additions and 759 deletions


@@ -42,6 +42,5 @@ SpaceInEmptyParentheses: false
SpacesInCStyleCastParentheses: false
SpaceAfterControlStatementKeyword: true
SpaceBeforeAssignmentOperators: true
-ContinuationIndentWidth: 4
+ContinuationIndentWidth: 2
...
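Note: ContinuationIndentWidth is the clang-format option that controls how far wrapped continuation lines are indented relative to the line they continue. An illustrative before/after of its effect, using hypothetical code rather than anything in this diff:

// ContinuationIndentWidth: 4 (old setting)
auto result = build_parse_table(
    parse_grammar,
    lex_grammar);

// ContinuationIndentWidth: 2 (new setting)
auto result = build_parse_table(
  parse_grammar,
  lex_grammar);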


@@ -12,31 +12,31 @@ START_TEST
describe("build_parse_table", []() {
SyntaxGrammar parse_grammar{{
{ "rule0", choice({ i_sym(1), i_sym(2) }) },
{ "rule1", i_token(0) },
{ "rule2", i_token(1) },
}, {}, { Symbol(2, SymbolOptionToken) }, {}};
LexicalGrammar lex_grammar{{
{ "token0", pattern("[a-c]") },
{ "token1", pattern("[b-d]") },
}, {}, {}};
it("first looks for the start rule and its item set closure", [&]() {
auto result = build_parse_table(parse_grammar, lex_grammar);
AssertThat(result.first.states[0].actions, Equals(map<Symbol, vector<ParseAction>>({
// start item
{ Symbol(0), {ParseAction::Shift(1, { 0 })} },
// expanded from the item set closure of the start item
{ Symbol(1), {ParseAction::Shift(2, { 0 })} },
{ Symbol(2), {ParseAction::Shift(2, { 0 })} },
{ Symbol(0, SymbolOptionToken), {ParseAction::Shift(3, { 0 })} },
{ Symbol(1, SymbolOptionToken), {ParseAction::Shift(4, { 0 })} },
// for the ubiquitous_token 'token2'
{ Symbol(2, SymbolOptionToken), {ParseAction::ShiftExtra()} },
})));
});
@@ -49,10 +49,10 @@ describe("build_parse_table", []() {
auto result = build_parse_table(parse_grammar, lex_grammar);
AssertThat(result.first.states[1].actions, Equals(map<Symbol, vector<ParseAction>>({
{ END_OF_INPUT(), {ParseAction::Accept()} },
// for the ubiquitous_token 'token2'
{ Symbol(2, SymbolOptionToken), {ParseAction::ShiftExtra()} },
})));
});
@@ -60,10 +60,10 @@ describe("build_parse_table", []() {
auto result = build_parse_table(parse_grammar, lex_grammar);
AssertThat(result.first.states[2].actions, Equals(map<Symbol, vector<ParseAction>>({
{ END_OF_INPUT(), {ParseAction::Reduce(Symbol(0), 1, 0, AssociativityLeft, 0)} },
// for the ubiquitous_token 'token2'
{ Symbol(2, SymbolOptionToken), {ParseAction::ShiftExtra()} },
})));
});
});


@@ -16,59 +16,59 @@ describe("first_symbols", []() {
auto rule = seq({ i_token(0), i_token(1) });
AssertThat(first_symbols(rule, null_grammar), Equals(set<Symbol>({
Symbol(0, SymbolOptionToken),
})));
});
it("includes first_symbols(B) when A can be blank", [&]() {
auto rule = seq({
choice({
i_token(0),
blank() }),
i_token(1) });
AssertThat(first_symbols(rule, null_grammar), Equals(set<Symbol>({
Symbol(0, SymbolOptionToken),
Symbol(1, SymbolOptionToken)
})));
});
it("includes first_symbols(A's right hand side) when A is a non-terminal", [&]() {
auto rule = choice({
seq({
i_token(0),
i_token(1) }),
i_sym(0) });
SyntaxGrammar grammar{{
{ "rule0", seq({
i_token(2),
i_token(3),
i_token(4) }) }
}, {}, {}, {}};
AssertThat(first_symbols(rule, grammar), Equals(set<Symbol>({
Symbol(0),
Symbol(0, SymbolOptionToken),
Symbol(2, SymbolOptionToken),
})));
});
it("includes first_symbols(B) when A is a non-terminal and its expansion can be blank", [&]() {
auto rule = seq({
i_sym(0),
i_token(1) });
SyntaxGrammar grammar{{
{ "rule0", choice({
i_token(0),
blank() }) }
}, {}, {}, {}};
AssertThat(first_symbols(rule, grammar), Equals(set<Symbol>({
Symbol(0),
Symbol(0, SymbolOptionToken),
Symbol(1, SymbolOptionToken),
})));
});
});
@@ -76,17 +76,17 @@ describe("first_symbols", []() {
describe("when there are left-recursive rules", [&]() {
it("terminates", [&]() {
SyntaxGrammar grammar{{
{ "rule0", choice({
seq({ i_sym(0), i_token(10) }),
i_token(11),
}) },
}, {}, {}, {}};
auto rule = i_sym(0);
AssertThat(first_symbols(rule, grammar), Equals(set<Symbol>({
Symbol(0),
Symbol(11, SymbolOptionToken)
})));
});
});
@@ -95,7 +95,7 @@ describe("first_symbols", []() {
auto rule = make_shared<Metadata>(i_token(3), map<rules::MetadataKey, int>());
AssertThat(first_symbols(rule, null_grammar), Equals(set<Symbol>({
Symbol(3, SymbolOptionToken),
})));
});
});


@@ -15,8 +15,8 @@ describe("get_metadata", []() {
describe("when given a metadata rule", [&]() {
before_each([&]() {
rule = make_shared<Metadata>(sym("x"), map<MetadataKey, int>({
{ key1, 1 },
{ key2, 2 },
}));
});
@@ -32,7 +32,7 @@ describe("get_metadata", []() {
describe("when the rule contains another metadata rule", [&]() {
it("also gets metadata from the inner metadata rule", [&]() {
rule = make_shared<Metadata>(make_shared<Metadata>(sym("x"), map<MetadataKey, int>({
{ key1, 1 }
})), map<MetadataKey, int>());
AssertThat(get_metadata(rule, key1), Equals(1));


@@ -10,30 +10,30 @@ START_TEST
describe("item_set_closure", []() {
SyntaxGrammar grammar{{
{ "E", seq({
i_sym(1),
i_token(11) }) },
{ "T", seq({
i_token(12),
i_token(13) }) },
}, {}, {}, {}};
it("adds items at the beginnings of referenced rules", [&]() {
ParseItemSet item_set = item_set_closure(
ParseItem(Symbol(0), grammar.rule(Symbol(0)), {}),
set<Symbol>({ Symbol(10, SymbolOptionToken) }),
grammar
);
AssertThat(item_set, Equals(ParseItemSet({
{
ParseItem(Symbol(1), grammar.rule(Symbol(1)), {}),
set<Symbol>({ Symbol(11, SymbolOptionToken) }),
},
{
ParseItem(Symbol(0), grammar.rule(Symbol(0)), {}),
set<Symbol>({ Symbol(10, SymbolOptionToken) }),
},
})));
});
});


@@ -12,29 +12,29 @@ describe("char_transitions(LexItemSet)", []() {
describe("when two items in the set have transitions on the same character", [&]() {
it("merges the transitions by computing the union of the two item sets", [&]() {
LexItemSet set1({
LexItem(Symbol(1), CharacterSet().include('a', 'f').copy()),
LexItem(Symbol(2), CharacterSet().include('e', 'x').copy())
});
AssertThat(char_transitions(set1), Equals(map<CharacterSet, LexItemSet>({
{
CharacterSet().include('a', 'd'),
LexItemSet({
LexItem(Symbol(1), blank()),
})
},
{
CharacterSet().include('e', 'f'),
LexItemSet({
LexItem(Symbol(1), blank()),
LexItem(Symbol(2), blank()),
})
},
{
CharacterSet().include('g', 'x'),
LexItemSet({
LexItem(Symbol(2), blank()),
})
},
})));
});
@@ -49,26 +49,26 @@ describe("sym_transitions(ParseItemSet, SyntaxGrammar)", [&]() {
it("computes the closure of the new item sets", [&]() {
ParseItemSet set1({
{
ParseItem(Symbol(0), seq({ i_token(22), i_sym(1) }), { Symbol(101) }),
set<Symbol>({ Symbol(23, SymbolOptionToken) })
},
});
AssertThat(sym_transitions(set1, grammar), Equals(map<Symbol, ParseItemSet>({
{
Symbol(22, SymbolOptionToken),
ParseItemSet({
{
ParseItem(Symbol(0), i_sym(1), { Symbol(101), Symbol(22) }),
set<Symbol>({ Symbol(23, SymbolOptionToken) }),
},
{
ParseItem(Symbol(1), i_token(21), {}),
set<Symbol>({ Symbol(23, SymbolOptionToken) })
},
})
},
})));
});
});


@@ -11,8 +11,8 @@ START_TEST
describe("LexConflictManager", []() {
LexicalGrammar lexical_grammar{{
{ "other_token", pattern("[a-b]") },
{ "lookahead_token", pattern("[c-d]") },
}, {}, {}};
LexConflictManager conflict_manager(lexical_grammar);


@@ -11,7 +11,7 @@ describe("LexItem", []() {
describe("determining if an item is the start of a token", [&]() {
Symbol sym(1);
rule_ptr token_start = make_shared<Metadata>(str("a"), map<MetadataKey, int>({
{ START_TOKEN, 1 }
}));
it("returns true for rules designated as token starts", [&]() {
@@ -21,7 +21,7 @@ describe("LexItem", []() {
it("returns false for rules not designated as token starts", [&]() {
AssertThat(LexItem(sym, make_shared<Metadata>(str("a"), map<MetadataKey, int>({
{ START_TOKEN, 0 }
}))).is_token_start(), IsFalse());
AssertThat(LexItem(sym, str("a")).is_token_start(), IsFalse());
});


@@ -18,20 +18,20 @@ describe("merge_char_transitions", []() {
describe("when none of the transitions intersect", [&]() {
it("returns the union of the two sets of transitions", [&]() {
int_map map({
{ CharacterSet().include('a').include('c'), 1 },
{ CharacterSet().include('x').include('y'), 2 },
{ CharacterSet().include('1').include('9'), 4 },
});
do_merge(&map, { CharacterSet().include(' '), 8 });
do_merge(&map, { CharacterSet().include('\t'), 16 });
AssertThat(map, Equals(int_map({
{ CharacterSet().include('a').include('c'), 1 },
{ CharacterSet().include('x').include('y'), 2 },
{ CharacterSet().include('1').include('9'), 4 },
{ CharacterSet().include(' '), 8 },
{ CharacterSet().include('\t'), 16 },
})));
});
});
@@ -39,33 +39,33 @@ describe("merge_char_transitions", []() {
describe("when transitions intersect", [&]() {
it("merges the intersecting transitions using the provided function", [&]() {
int_map map({
{ CharacterSet().include('a', 'f').include('A', 'F'), 1 },
{ CharacterSet().include('0', '9'), 2 },
});
do_merge(&map, { CharacterSet().include('c'), 4 });
do_merge(&map, { CharacterSet().include('3'), 8 });
AssertThat(map, Equals(int_map({
{
CharacterSet()
.include('a', 'b')
.include('d', 'f')
.include('A', 'F'),
1
},
{
CharacterSet().include('c'),
5
},
{
CharacterSet().include('0', '2').include('4', '9'),
2
},
{
CharacterSet().include('3'),
10
},
})));
});
});
@@ -73,15 +73,15 @@ describe("merge_char_transitions", []() {
describe("when two of the right transitions intersect the same left transition", [&]() {
it("splits the left-hand transition correctly", [&]() {
int_map map1({
{ CharacterSet().include('a').include('c'), 1 },
});
do_merge(&map1, { CharacterSet().include('a'), 2 });
do_merge(&map1, { CharacterSet().include('c'), 4 });
AssertThat(map1, Equals(int_map({
{ CharacterSet().include('a'), 3 },
{ CharacterSet().include('c'), 5 },
})));
});
});


@@ -58,11 +58,11 @@ describe("rule_can_be_blank", [&]() {
describe("checking recursively (by expanding non-terminals)", [&]() {
SyntaxGrammar grammar{{
{ "A", choice({
seq({ i_sym(0), i_token(11) }),
blank() }) },
{ "B", choice({
seq({ i_sym(1), i_token(12) }),
i_token(13) }) },
}, {}, {}, {}};
it("terminates for left-recursive rules that can be blank", [&]() {


@@ -11,160 +11,158 @@ START_TEST
describe("sym_transitions", []() {
it("handles symbols", [&]() {
AssertThat(
sym_transitions(i_sym(1)),
Equals(rule_map<Symbol>({
{ Symbol(1), blank() }
})));
});
it("handles choices", [&]() {
AssertThat(
sym_transitions(choice({ i_sym(1), i_sym(2) })),
Equals(rule_map<Symbol>({
{ Symbol(1), blank() },
{ Symbol(2), blank() }
})));
});
it("handles sequences", [&]() {
AssertThat(
sym_transitions(seq({ i_sym(1), i_sym(2) })),
Equals(rule_map<Symbol>({
{ Symbol(1), i_sym(2) }
})));
});
it("handles long sequences", [&]() {
AssertThat(
sym_transitions(seq({
i_sym(1),
i_sym(2),
i_sym(3),
i_sym(4)
})),
Equals(rule_map<Symbol>({
{ Symbol(1), seq({ i_sym(2), i_sym(3), i_sym(4) }) }
})));
});
it("handles sequences whose left sides can be blank", [&]() {
AssertThat(
-sym_transitions(seq({
-choice({
-i_sym(1),
-blank(),
-}),
-seq({
-i_sym(1),
-i_sym(2)
-})
-})), Equals(rule_map<Symbol>({
-{ Symbol(1), choice({ seq({ i_sym(1), i_sym(2) }), i_sym(2), }) }
-})));
+sym_transitions(seq({
+choice({
+i_sym(1),
+blank() }),
+seq({
+i_sym(1),
+i_sym(2) })
+})), Equals(rule_map<Symbol>({
+{ Symbol(1), choice({ seq({ i_sym(1), i_sym(2) }), i_sym(2), }) }
+})));
});
it("handles choices with common starting symbols", [&]() {
AssertThat(
sym_transitions(
choice({
seq({ i_sym(1), i_sym(2) }),
seq({ i_sym(1), i_sym(3) }) })),
Equals(rule_map<Symbol>({
{ Symbol(1), choice({ i_sym(2), i_sym(3) }) }
})));
});
it("preserves metadata", [&]() {
map<MetadataKey, int> metadata_value({
{ PRECEDENCE, 5 }
});
rule_ptr rule = make_shared<Metadata>(seq({ i_sym(1), i_sym(2) }), metadata_value);
AssertThat(
sym_transitions(rule),
Equals(rule_map<Symbol>({
{ Symbol(1), make_shared<Metadata>(i_sym(2), metadata_value)},
})));
});
});
describe("char_transitions", []() {
it("handles characters", [&]() {
AssertThat(
char_transitions(character({ '1' })),
Equals(rule_map<CharacterSet>({
{ CharacterSet().include('1'), blank() }
})));
});
it("handles choices between overlapping character sets", [&]() {
AssertThat(
char_transitions(choice({
seq({
character({ 'a', 'b', 'c', 'd' }),
sym("x") }),
seq({
character({ 'c', 'd', 'e', 'f' }),
sym("y") }) })),
Equals(rule_map<CharacterSet>({
{ CharacterSet().include('a', 'b'), sym("x") },
{ CharacterSet().include('c', 'd'), choice({ sym("x"), sym("y") }) },
{ CharacterSet().include('e', 'f'), sym("y") },
})));
});
it("handles choices between whitelisted and blacklisted character sets", [&]() {
AssertThat(
char_transitions(seq({
choice({
character({ '/' }, false),
seq({
character({ '\\' }),
character({ '/' }) }) }),
character({ '/' }) })),
Equals(rule_map<CharacterSet>({
{ CharacterSet()
.include_all()
.exclude('/')
.exclude('\\'),
character({ '/' }) },
{ CharacterSet()
.include('\\'),
seq({
choice({
blank(),
character({ '/' }) }),
character({ '/' }) }) },
})));
});
it("handles choices between a subset and a superset of characters", [&]() {
AssertThat(
char_transitions(choice({
seq({
character({ 'b', 'c', 'd' }),
sym("x") }),
seq({
character({ 'a', 'b', 'c', 'd', 'e', 'f' }),
sym("y") }) })),
Equals(rule_map<CharacterSet>({
{ CharacterSet().include('b', 'd'), choice({ sym("x"), sym("y") }) },
{ CharacterSet().include('a').include('e', 'f'), sym("y") },
})));
AssertThat(
char_transitions(choice({
seq({
character({ 'a', 'b', 'c', 'd', 'e', 'f' }),
sym("x") }),
seq({
character({ 'b', 'c', 'd' }),
sym("y") }) })),
Equals(rule_map<CharacterSet>({
{ CharacterSet().include('b', 'd'), choice({ sym("x"), sym("y") }) },
{ CharacterSet().include('a').include('e', 'f'), sym("x") },
})));
});
it("handles blanks", [&]() {
@@ -173,23 +171,24 @@ describe("char_transitions", []() {
it("handles repeats", [&]() {
rule_ptr rule = repeat(seq({ character({ 'a' }), character({ 'b' }) }));
AssertThat(
-char_transitions(rule),
-Equals(rule_map<CharacterSet>({
-{
-CharacterSet().include('a'),
-seq({
-character({ 'b' }),
-rule,
-})
-}})));
+char_transitions(rule),
+Equals(rule_map<CharacterSet>({
+{
+CharacterSet().include('a'),
+seq({
+character({ 'b' }),
+rule })
+}})));
rule = repeat(character({ 'a' }));
AssertThat(
char_transitions(rule),
Equals(rule_map<CharacterSet>({
{ CharacterSet().include('a'), rule }
})));
});
});


@@ -13,7 +13,7 @@ namespace snowhouse {
template<typename ActualType>
bool operator()(const ActualType& actual) const {
return *expected == *actual;
}
ExpectedType expected;
@@ -22,9 +22,9 @@ namespace snowhouse {
template<typename ExpectedType>
struct Stringizer<EqualsPointerConstraint<ExpectedType>> {
static string ToString(const EqualsPointerConstraint<ExpectedType>& constraint) {
ostringstream builder;
builder << "pointer to " << snowhouse::Stringize(constraint.expected);
return builder.str();
}
};


@@ -17,35 +17,35 @@ describe("expand_repeats", []() {
auto match = expand_repeats(grammar);
AssertThat(match.rules, Equals(rule_list({
{ "rule0", choice({ i_aux_sym(0), blank() }) },
})));
AssertThat(match.aux_rules, Equals(rule_list({
{ "rule0_repeat0", seq({
i_token(0),
choice({ i_aux_sym(0), blank() }) }) },
})));
});
it("replaces repeats inside of sequences", [&]() {
SyntaxGrammar grammar{{
{ "rule0", seq({
i_token(10),
repeat(i_token(11)) }) },
}, {}, {}, {}};
auto match = expand_repeats(grammar);
AssertThat(match.rules, Equals(rule_list({
{ "rule0", seq({
i_token(10),
choice({ i_aux_sym(0), blank() }) }) },
})));
AssertThat(match.aux_rules, Equals(rule_list({
{ "rule0_repeat0", seq({
i_token(11),
choice({ i_aux_sym(0), blank() }) }) },
})));
});
@@ -57,62 +57,62 @@ describe("expand_repeats", []() {
auto match = expand_repeats(grammar);
AssertThat(match.rules, Equals(rule_list({
{ "rule0", choice({ i_token(10), i_aux_sym(0), blank() }) },
})));
AssertThat(match.aux_rules, Equals(rule_list({
{ "rule0_repeat0", seq({
i_token(11),
choice({ i_aux_sym(0), blank() }) }) },
})));
});
it("does not create redundant auxiliary rules", [&]() {
SyntaxGrammar grammar{{
{ "rule0", choice({
seq({ i_token(1), repeat(i_token(4)) }),
seq({ i_token(2), repeat(i_token(4)) }) }) },
{ "rule1", seq({ i_token(3), repeat(i_token(4)) }) },
}, {}, {}, {}};
auto match = expand_repeats(grammar);
AssertThat(match.rules, Equals(rule_list({
{ "rule0", choice({
seq({ i_token(1), choice({ i_aux_sym(0), blank() }) }),
seq({ i_token(2), choice({ i_aux_sym(0), blank() }) }) }) },
{ "rule1", seq({ i_token(3), choice({ i_aux_sym(0), blank() }) }) },
})));
AssertThat(match.aux_rules, Equals(rule_list({
{ "rule0_repeat0", seq({
i_token(4),
choice({ i_aux_sym(0), blank() }) }) },
})));
});
it("can replace multiple repeats in the same rule", [&]() {
SyntaxGrammar grammar{{
{ "rule0", seq({
repeat(i_token(10)),
repeat(i_token(11)) }) },
}, {}, {}, {}};
auto match = expand_repeats(grammar);
AssertThat(match.rules, Equals(rule_list({
{ "rule0", seq({
choice({ i_aux_sym(0), blank() }),
choice({ i_aux_sym(1), blank() }) }) },
})));
AssertThat(match.aux_rules, Equals(rule_list({
{ "rule0_repeat0", seq({
i_token(10),
choice({ i_aux_sym(0), blank() }) }) },
{ "rule0_repeat1", seq({
i_token(11),
choice({ i_aux_sym(1), blank() }) }) },
})));
});
@@ -125,17 +125,17 @@ describe("expand_repeats", []() {
auto match = expand_repeats(grammar);
AssertThat(match.rules, Equals(rule_list({
{ "rule0", choice({ i_aux_sym(0), blank() }) },
{ "rule1", choice({ i_aux_sym(1), blank() }) },
})));
AssertThat(match.aux_rules, Equals(rule_list({
{ "rule0_repeat0", seq({
i_token(10),
choice({ i_aux_sym(0), blank() }) }) },
{ "rule1_repeat0", seq({
i_token(11),
choice({ i_aux_sym(1), blank() }) }) },
})));
});
});


@@ -12,20 +12,20 @@ describe("expand_tokens", []() {
describe("string rules", [&]() {
it("replaces strings with sequences of character sets", [&]() {
LexicalGrammar grammar{{
{ "rule_A", seq({
i_sym(10),
str("xyz"),
i_sym(11) }) },
}, {}, {}};
auto result = expand_tokens(grammar);
AssertThat(result.second, Equals((const GrammarError *)nullptr));
AssertThat(result.first.rules, Equals(rule_list({
{ "rule_A", seq({
i_sym(10),
token(prec(1, seq({ character({ 'x' }), character({ 'y' }), character({ 'z' }) }))),
i_sym(11) }) },
})));
});
@@ -38,10 +38,10 @@ describe("expand_tokens", []() {
auto result = expand_tokens(grammar);
AssertThat(result.first.rules, Equals(rule_list({
{ "rule_A", token(prec(1, seq({
character({ 945 }),
character({ ' ' }),
character({ 946 }) }))) }
})));
});
});
@@ -50,19 +50,19 @@ describe("expand_tokens", []() {
it("replaces regexps with the equivalent rule tree", [&]() {
LexicalGrammar grammar{{
{ "rule_A", seq({
i_sym(10),
pattern("x*"),
i_sym(11) }) },
}, {}, {}};
auto result = expand_tokens(grammar);
AssertThat(result.second, Equals((const GrammarError *)nullptr));
AssertThat(result.first.rules, Equals(rule_list({
{ "rule_A", seq({
i_sym(10),
repeat(character({ 'x' })),
i_sym(11) }) },
})));
});
@@ -75,16 +75,16 @@ describe("expand_tokens", []() {
auto result = expand_tokens(grammar);
AssertThat(result.first.rules, Equals(rule_list({
{ "rule_A", repeat(character({ 945, 946, 947, 948 }, false)) }
})));
});
it("returns an error when the grammar contains an invalid regex", [&]() {
LexicalGrammar grammar{{
{ "rule_A", seq({
pattern("("),
str("xyz"),
pattern("[") }) },
}, {}, {}};
auto result = expand_tokens(grammar);


@@ -12,66 +12,63 @@ using prepare_grammar::extract_tokens;
using prepare_grammar::InternedGrammar;
describe("extract_tokens", []() {
-const set<rules::rule_ptr> no_ubiquitous_tokens;
-const set<set<rules::Symbol>> no_expected_conflicts;
it("moves string rules into the lexical grammar", [&]() {
auto result = extract_tokens(InternedGrammar{{
{ "rule_A", seq({ str("ab"), i_sym(0) }) }
-}, no_ubiquitous_tokens, no_expected_conflicts});
+}, {}, {}});
AssertThat(get<0>(result).rules, Equals(rule_list({
{ "rule_A", seq({ i_aux_token(0), i_sym(0) }) }
})));
AssertThat(get<0>(result).aux_rules, IsEmpty())
AssertThat(get<1>(result).rules, IsEmpty())
AssertThat(get<1>(result).aux_rules, Equals(rule_list({
{ "'ab'", str("ab") },
})));
});
it("moves pattern rules into the lexical grammar", [&]() {
auto result = extract_tokens(InternedGrammar{{
{ "rule_A", seq({ pattern("a+"), i_sym(0) }) }
-}, no_ubiquitous_tokens, no_expected_conflicts});
+}, {}, {}});
AssertThat(get<0>(result).rules, Equals(rule_list({
{ "rule_A", seq({ i_aux_token(0), i_sym(0) }) }
})));
AssertThat(get<0>(result).aux_rules, IsEmpty())
AssertThat(get<1>(result).rules, IsEmpty())
AssertThat(get<1>(result).aux_rules, Equals(rule_list({
{ "/a+/", pattern("a+") },
})));
});
it("moves other rules marked as tokens into the lexical grammar", [&]() {
auto result = extract_tokens(InternedGrammar{{
{ "rule_A", seq({
token(seq({ pattern("."), choice({ str("a"), str("b") }) })),
i_sym(0) }) }
-}, no_ubiquitous_tokens, no_expected_conflicts});
+}, {}, {}});
AssertThat(get<0>(result).rules, Equals(rule_list({
{ "rule_A", seq({ i_aux_token(0), i_sym(0) }) }
})));
AssertThat(get<0>(result).aux_rules, IsEmpty())
AssertThat(get<1>(result).rules, IsEmpty())
AssertThat(get<1>(result).aux_rules, Equals(rule_list({
{ "(seq /./ (choice 'a' 'b'))", token(seq({ pattern("."), choice({ str("a"), str("b") }) })) },
})));
});
it("does not move blank rules", [&]() {
auto result = extract_tokens(InternedGrammar{{
{ "rule_A", choice({ i_sym(0), blank() }) },
-}, no_ubiquitous_tokens, no_expected_conflicts});
+}, {}, {}});
AssertThat(get<0>(result).rules, Equals(rule_list({
{ "rule_A", choice({ i_sym(0), blank() }) },
})));
AssertThat(get<0>(result).aux_rules, IsEmpty())
@@ -81,8 +78,8 @@ describe("extract_tokens", []() {
it("does not create duplicate tokens in the lexical grammar", [&]() {
auto result = extract_tokens(InternedGrammar{{
{ "rule_A", seq({ str("ab"), i_sym(0), str("ab") }) },
-}, no_ubiquitous_tokens, no_expected_conflicts});
+}, {}, {}});
AssertThat(get<0>(result).rules, Equals(rule_list({
{ "rule_A", seq({ i_aux_token(0), i_sym(0), i_aux_token(0) }) }
@@ -116,38 +113,38 @@ describe("extract_tokens", []() {
describe("when an entire rule can be extracted", [&]() {
it("moves the rule into the lexical grammar when possible and updates referencing symbols", [&]() {
auto result = extract_tokens(InternedGrammar{{
{ "rule_A", i_sym(1) },
{ "rule_B", pattern("a|b") },
{ "rule_C", token(seq({ str("a"), str("b") })) },
-}, no_ubiquitous_tokens, no_expected_conflicts});
+}, {}, {}});
AssertThat(get<0>(result).rules, Equals(rule_list({
{ "rule_A", i_token(0) }
})));
AssertThat(get<0>(result).aux_rules, IsEmpty());
AssertThat(get<1>(result).rules, Equals(rule_list({
{ "rule_B", pattern("a|b") },
{ "rule_C", token(seq({ str("a"), str("b") })) },
})));
AssertThat(get<1>(result).aux_rules, IsEmpty());
});
it("updates symbols whose indices need to change due to deleted rules", [&]() {
auto result = extract_tokens(InternedGrammar{{
{ "rule_A", str("ab") },
{ "rule_B", i_sym(0) },
{ "rule_C", i_sym(1) },
-}, no_ubiquitous_tokens, no_expected_conflicts});
+}, {}, {}});
AssertThat(get<0>(result).rules, Equals(rule_list({
{ "rule_B", i_token(0) },
{ "rule_C", i_sym(0) },
})));
AssertThat(get<0>(result).aux_rules, IsEmpty());
AssertThat(get<1>(result).rules, Equals(rule_list({
{ "rule_A", str("ab") },
})));
AssertThat(get<1>(result).aux_rules, IsEmpty());
});
@@ -157,17 +154,17 @@ describe("extract_tokens", []() {
describe("ubiquitous tokens that are not symbols", [&]() {
it("adds them to the lexical grammar's separators", [&]() {
auto result = extract_tokens(InternedGrammar{{
{ "rule_A", str("x") },
}, {
pattern("\\s+"),
str("y"),
-}, no_expected_conflicts});
+}, {}});
AssertThat(get<2>(result), Equals<const GrammarError *>(nullptr));
AssertThat(get<1>(result).separators, Equals(rule_vector({
pattern("\\s+"),
str("y"),
})));
AssertThat(get<0>(result).ubiquitous_tokens, IsEmpty());
@@ -177,17 +174,17 @@ describe("extract_tokens", []() {
describe("ubiquitous tokens that point to moved rules", [&]() {
it("updates them according to the new symbol numbers", [&]() {
auto result = extract_tokens(InternedGrammar{ {
{ "rule_A", seq({ str("w"), i_sym(1) }) },
{ "rule_B", str("x") },
{ "rule_C", str("y") },
}, {
i_sym(2),
-}, no_expected_conflicts});
+}, {}});
AssertThat(get<2>(result), Equals<const GrammarError *>(nullptr));
AssertThat(get<0>(result).ubiquitous_tokens, Equals(set<Symbol>({
{ Symbol(1, SymbolOptionToken) },
})));
AssertThat(get<1>(result).separators, IsEmpty());
@@ -197,9 +194,9 @@ describe("extract_tokens", []() {
describe("ubiquitous tokens that are visible", [&]() {
it("preserves them in the syntactic grammar", [&]() {
auto result = extract_tokens(InternedGrammar{{
{ "rule_A", str("ab") },
{ "rule_B", str("bc") },
-}, { i_sym(1) }, no_expected_conflicts});
+}, { i_sym(1) }, {}});
AssertThat(get<2>(result), Equals<const GrammarError *>(nullptr));
@@ -214,14 +211,14 @@ describe("extract_tokens", []() {
describe("ubiquitous tokens that are used in other grammar rules", [&]() {
it("preserves them in the syntactic grammar", [&]() {
auto result = extract_tokens(InternedGrammar{{
{ "rule_A", seq({ i_sym(1), str("ab") }) },
{ "_rule_B", str("bc") },
-}, { i_sym(1) }, no_expected_conflicts});
+}, { i_sym(1) }, {}});
AssertThat(get<2>(result), Equals<const GrammarError *>(nullptr));
AssertThat(get<0>(result).ubiquitous_tokens, Equals(set<Symbol>({
Symbol(0, SymbolOptionToken),
})));
AssertThat(get<1>(result).separators, IsEmpty());
@@ -231,28 +228,28 @@ describe("extract_tokens", []() {
describe("ubiquitous tokens that are non-token symbols", [&]() {
it("returns an error", [&]() {
auto result = extract_tokens(InternedGrammar{{
{ "rule_A", seq({ str("x"), i_sym(1) }), },
{ "rule_B", seq({ str("y"), str("z") }) },
-}, { i_sym(1) }, no_expected_conflicts});
+}, { i_sym(1) }, {}});
AssertThat(get<2>(result), !Equals<const GrammarError *>(nullptr));
AssertThat(get<2>(result), EqualsPointer(
new GrammarError(GrammarErrorTypeInvalidUbiquitousToken,
"Not a token: rule_B")));
});
});
describe("ubiquitous tokens that are not symbols", [&]() {
it("returns an error", [&]() {
auto result = extract_tokens(InternedGrammar{{
{ "rule_A", str("x") },
{ "rule_B", str("y") },
-}, { choice({ i_sym(1), blank() }) }, no_expected_conflicts});
+}, { choice({ i_sym(1), blank() }) }, {}});
AssertThat(get<2>(result), !Equals<const GrammarError *>(nullptr));
AssertThat(get<2>(result), EqualsPointer(
new GrammarError(GrammarErrorTypeInvalidUbiquitousToken,
"Not a token: (choice (sym 1) (blank))")));
});
});
});


@@ -12,25 +12,25 @@ using prepare_grammar::intern_symbols;
describe("intern_symbols", []() {
it("replaces named symbols with numerically-indexed symbols", [&]() {
Grammar grammar({
{ "x", choice({ sym("y"), sym("z") }) },
{ "y", sym("z") },
{ "z", str("stuff") }
});
auto result = intern_symbols(grammar);
AssertThat(result.second, Equals((GrammarError *)nullptr));
AssertThat(result.first.rules, Equals(rule_list({
{ "x", choice({ i_sym(1), i_sym(2) }) },
{ "y", i_sym(2) },
{ "z", str("stuff") },
})));
});
describe("when there are symbols that reference undefined rules", [&]() {
it("returns an error", []() {
Grammar grammar({
{ "x", sym("y") },
});
auto result = intern_symbols(grammar);
@@ -41,9 +41,9 @@ describe("intern_symbols", []() {
it("translates the grammar's optional 'ubiquitous_tokens' to numerical symbols", [&]() {
auto grammar = Grammar({
{ "x", choice({ sym("y"), sym("z") }) },
{ "y", sym("z") },
{ "z", str("stuff") }
}).ubiquitous_tokens({ sym("z") });
auto result = intern_symbols(grammar);


@@ -14,187 +14,167 @@ describe("parse_regex", []() {
};
vector<ValidInputRow> valid_inputs = {
{
"character sets",
"[aAeE]",
character({ 'a', 'A', 'e', 'E' })
},
{
"'.' characters as wildcards",
".",
character({ '\n' }, false)
},
-{
-"character classes",
-"\\w-\\d-\\s",
-seq({
-character({
-'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm',
-'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z',
-'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M',
-'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z',
-'0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '_',
-}),
-character({ '-' }),
-character({ '0', '1', '2', '3', '4', '5', '6', '7', '8', '9' }),
-character({ '-' }),
-character({ ' ', '\t', '\r', '\n' }) })
-},
+{
+"character classes",
+"\\w-\\d-\\s",
+seq({
+character({
+'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm',
+'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z',
+'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M',
+'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z',
+'0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '_' }),
+character({ '-' }),
+character({ '0', '1', '2', '3', '4', '5', '6', '7', '8', '9' }),
+character({ '-' }),
+character({ ' ', '\t', '\r', '\n' }) })
+},
-{
-"choices",
-"ab|cd|ef",
-choice({
-seq({
-character({ 'a' }),
-character({ 'b' }),
-}),
-seq({
-character({ 'c' }),
-character({ 'd' })
-}),
-seq({
-character({ 'e' }),
-character({ 'f' })
-})
-})
-},
+{
+"choices",
+"ab|cd|ef",
+choice({
+seq({
+character({ 'a' }),
+character({ 'b' }) }),
+seq({
+character({ 'c' }),
+character({ 'd' }) }),
+seq({
+character({ 'e' }),
+character({ 'f' }) }) })
+},
{
"simple sequences",
"abc",
seq({
character({ 'a' }),
character({ 'b' }),
character({ 'c' }) })
},
{
"character ranges",
"[12a-dA-D3]",
character({
'1', '2', '3',
'a', 'b', 'c', 'd',
'A', 'B', 'C', 'D' })
},
{
"negated characters",
"[^a\\d]",
character({ 'a', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9' }, false)
},
{
"backslashes",
"\\\\",
character({ '\\' })
},
-{
-"character groups in sequences",
-"x([^x]|\\\\x)*x",
-seq({
-character({ 'x' }),
-repeat(choice({
-character({ 'x' }, false),
-seq({ character({ '\\' }), character({ 'x' }) })
-})),
-character({ 'x' })
-})
-},
+{
+"character groups in sequences",
+"x([^x]|\\\\x)*x",
+seq({
+character({ 'x' }),
+repeat(choice({
+character({ 'x' }, false),
+seq({ character({ '\\' }), character({ 'x' }) }) })),
+character({ 'x' }) })
+},
-{
-"choices in sequences",
-"(a|b)cd",
-seq({
-choice({
-character({ 'a' }),
-character({ 'b' }),
-}),
-character({ 'c' }),
-character({ 'd' })
-})
-},
+{
+"choices in sequences",
+"(a|b)cd",
+seq({
+choice({
+character({ 'a' }),
+character({ 'b' }) }),
+character({ 'c' }),
+character({ 'd' }) })
+},
-{
-"escaped parentheses",
-"a\\(b",
-seq({
-character({ 'a' }),
-character({ '(' }),
-character({ 'b' })
-})
-},
+{
+"escaped parentheses",
+"a\\(b",
+seq({
+character({ 'a' }),
+character({ '(' }),
+character({ 'b' }) })
+},
-{
-"escaped periods",
-"a\\.",
-seq({
-character({ 'a' }),
-character({ '.' })
-})
-},
+{
+"escaped periods",
+"a\\.",
+seq({
+character({ 'a' }),
+character({ '.' }) })
+},
-{
-"escaped characters",
-"\\t\\n\\r",
-seq({
-character({ '\t' }),
-character({ '\n' }),
-character({ '\r' }),
-})
-},
+{
+"escaped characters",
+"\\t\\n\\r",
+seq({
+character({ '\t' }),
+character({ '\n' }),
+character({ '\r' }) })
+},
-{
-"plus repeats",
-"(ab)+(cd)+",
-seq({
-seq({
-seq({ character({ 'a' }), character({ 'b' }) }),
-repeat(seq({ character({ 'a' }), character({ 'b' }) })),
-}),
-seq({
-seq({ character({ 'c' }), character({ 'd' }) }),
-repeat(seq({ character({ 'c' }), character({ 'd' }) })),
-}),
-})
-},
+{
+"plus repeats",
+"(ab)+(cd)+",
+seq({
+seq({
+seq({ character({ 'a' }), character({ 'b' }) }),
+repeat(seq({ character({ 'a' }), character({ 'b' }) })) }),
+seq({
+seq({ character({ 'c' }), character({ 'd' }) }),
+repeat(seq({ character({ 'c' }), character({ 'd' }) })) }) })
+},
-{
-"asterix repeats",
-"(ab)*(cd)*",
-seq({
-repeat(seq({ character({ 'a' }), character({ 'b' }) })),
-repeat(seq({ character({ 'c' }), character({ 'd' }) })),
-})
-},
+{
+"asterix repeats",
+"(ab)*(cd)*",
+seq({
+repeat(seq({ character({ 'a' }), character({ 'b' }) })),
+repeat(seq({ character({ 'c' }), character({ 'd' }) })) })
+},
-{
-"optional rules",
-"a(bc)?",
-seq({
-character({ 'a' }),
-choice({
-seq({ character({ 'b' }), character({ 'c' }) }),
-blank()
-})
-})
-},
+{
+"optional rules",
+"a(bc)?",
+seq({
+character({ 'a' }),
+choice({
+seq({ character({ 'b' }), character({ 'c' }) }),
+blank() }) })
+},
-{
-"choices containing negated character classes",
-"/([^/]|(\\\\/))*/",
-seq({
-character({ '/' }),
-repeat(choice({
-character({ '/' }, false),
-seq({ character({ '\\' }), character({ '/' }) }),
-})),
-character({ '/' }),
-}),
-}
+{
+"choices containing negated character classes",
+"/([^/]|(\\\\/))*/",
+seq({
+character({ '/' }),
+repeat(choice({
+character({ '/' }, false),
+seq({ character({ '\\' }), character({ '/' }) }) })),
+character({ '/' }), }),
+},
};
struct InvalidInputRow {
@@ -204,36 +184,36 @@ describe("parse_regex", []() {
};
vector<InvalidInputRow> invalid_inputs = {
{
"mismatched open parens",
"(a",
"unmatched open paren",
},
{
"mismatched nested open parens",
"((a) (b)",
"unmatched open paren",
},
{
"mismatched close parens",
"a)",
"unmatched close paren",
},
{
"mismatched nested close parens",
"((a) b))",
"unmatched close paren",
},
{
"mismatched brackets for character classes",
"[a",
"unmatched open square bracket",
},
{
"mismatched brackets for character classes",
"a]",
"unmatched close square bracket",
},
};
for (auto &row : valid_inputs) {


@@ -9,24 +9,24 @@ describe("CharacterSet", []() {
describe("equality", [&]() {
it("returns true for identical character sets", [&]() {
CharacterSet set1 = CharacterSet()
.include('a', 'd')
.include('f', 'm');
CharacterSet set2 = CharacterSet()
.include('a', 'd')
.include('f', 'm');
AssertThat(set1, Equals(set2));
});
it("returns false for character sets that include different ranges", [&]() {
CharacterSet set1 = CharacterSet()
.include('a', 'd')
.include('f', 'm');
CharacterSet set2 = CharacterSet()
.include('a', 'c')
.include('f', 'm');
AssertThat(set1, !Equals(set2));
AssertThat(set2, !Equals(set1));
@@ -34,14 +34,14 @@ describe("CharacterSet", []() {
it("returns false for character sets that exclude different ranges", [&]() {
CharacterSet set1 = CharacterSet()
.include_all()
.exclude('a', 'd')
.exclude('f', 'm');
CharacterSet set2 = CharacterSet()
.include_all()
.exclude('a', 'c')
.exclude('f', 'm');
AssertThat(set1, !Equals(set2));
AssertThat(set2, !Equals(set1));
@@ -59,24 +59,24 @@ describe("CharacterSet", []() {
describe("hashing", [&]() {
it("returns the same number for identical character sets", [&]() {
CharacterSet set1 = CharacterSet()
.include('a', 'd')
.include('f', 'm');
CharacterSet set2 = CharacterSet()
.include('a', 'd')
.include('f', 'm');
AssertThat(set1.hash_code(), Equals(set2.hash_code()));
});
it("returns different numbers for character sets that include different ranges", [&]() {
CharacterSet set1 = CharacterSet()
.include('a', 'd')
.include('f', 'm');
CharacterSet set2 = CharacterSet()
.include('a', 'c')
.include('f', 'm');
AssertThat(set1.hash_code(), !Equals(set2.hash_code()));
AssertThat(set2.hash_code(), !Equals(set1.hash_code()));
@@ -84,14 +84,14 @@ describe("CharacterSet", []() {
it("returns different numbers for character sets that exclude different ranges", [&]() {
CharacterSet set1 = CharacterSet()
.include_all()
.exclude('a', 'd')
.exclude('f', 'm');
CharacterSet set2 = CharacterSet()
.include_all()
.exclude('a', 'c')
.exclude('f', 'm');
AssertThat(set1.hash_code(), !Equals(set2.hash_code()));
AssertThat(set2.hash_code(), !Equals(set1.hash_code()));
@@ -125,31 +125,33 @@ describe("CharacterSet", []() {
it("adds included characters", [&]() {
CharacterSet set1 = CharacterSet().include('a', 'd');
AssertThat(set1, Equals(CharacterSet()
.include('a')
.include('b')
.include('c')
.include('d')));
});
});
describe("when the set has a blacklist of characters", [&]() {
it("removes excluded characters", [&]() {
CharacterSet set1 = CharacterSet()
.include_all()
.exclude('a', 'g')
.include('c', 'e');
AssertThat(set1, Equals(CharacterSet()
.include_all()
.exclude('a')
.exclude('b')
.exclude('f')
.exclude('g')));
});
it("does nothing if the characters are already not excluded", [&]() {
CharacterSet set1 = CharacterSet()
.include_all()
.include('a', 'c');
AssertThat(set1, Equals(CharacterSet().include_all()));
});
});
@@ -159,13 +161,14 @@ describe("CharacterSet", []() {
describe("when the set has a whitelist of characters", [&]() {
it("removes included characters", [&]() {
CharacterSet set1 = CharacterSet()
.include('a', 'g')
.exclude('c', 'e');
AssertThat(set1, Equals(CharacterSet()
.include('a')
.include('b')
.include('f')
.include('g')));
});
it("does nothing if the characters are already not included", [&]() {
@@ -177,14 +180,15 @@ describe("CharacterSet", []() {
describe("when the set has a blacklist of characters", [&]() {
it("removes excluded characters", [&]() {
CharacterSet set1 = CharacterSet()
.include_all()
.exclude('a', 'd');
AssertThat(set1, Equals(CharacterSet()
.include_all()
.exclude('a')
.exclude('b')
.exclude('c')
.exclude('d')));
});
});
});
@@ -198,15 +202,15 @@ describe("CharacterSet", []() {
CharacterSet set1 = CharacterSet().include('a', 'z');
set1.remove_set(CharacterSet().include('d', 's'));
AssertThat(set1, Equals(CharacterSet()
.include('a', 'c')
.include('t', 'z')));
});
it("returns the characters that were removed", [&]() {
CharacterSet set1 = CharacterSet().include('a', 'z');
intersection = set1.remove_set(CharacterSet().include('d', 's'));
AssertThat(intersection, Equals(CharacterSet()
.include('d', 's')));
});
it("returns the empty set when the sets are disjoint", [&]() {
@@ -222,13 +226,13 @@ describe("CharacterSet", []() {
CharacterSet set1 = CharacterSet().include('a', 'f');
intersection = set1.remove_set(CharacterSet()
.include_all()
.exclude('d', 'z'));
AssertThat(set1, Equals(CharacterSet()
.include('d', 'f')));
AssertThat(intersection, Equals(CharacterSet()
.include('a', 'c')));
});
});
});
@@ -237,16 +241,16 @@ describe("CharacterSet", []() {
describe("when the subtractend has whitelisted characters", [&]() {
it("adds the subtractend's inclusions to the receiver's exclusions", [&]() {
CharacterSet set1 = CharacterSet()
.include_all()
.exclude('a', 'f');
intersection = set1.remove_set(CharacterSet()
.include('x', 'z'));
AssertThat(set1, Equals(CharacterSet()
.include_all()
.exclude('a', 'f')
.exclude('x', 'z')));
AssertThat(intersection, Equals(CharacterSet().include('x', 'z')));
});
@@ -255,46 +259,46 @@ describe("CharacterSet", []() {
describe("when the subtractend has blacklisted characters", [&]() {
it("includes only the characters excluded by the subtractend but not by the receiver", [&]() {
CharacterSet set1 = CharacterSet()
.include_all()
.exclude('a', 'm');
set1.remove_set(CharacterSet()
.include_all()
.exclude('d', 'z'));
AssertThat(set1, Equals(CharacterSet()
.include('n', 'z')));
});
it("returns the characters excluded by neither set", [&]() {
CharacterSet set1 = CharacterSet()
.include_all()
.exclude('a', 'm');
intersection = set1.remove_set(CharacterSet()
.include_all()
.exclude('d', 'z'));
AssertThat(intersection, Equals(CharacterSet()
.include_all()
.exclude('a', 'z')));
});
it("works when the sets are disjoint", [&]() {
CharacterSet set1 = CharacterSet()
.include_all()
.exclude('a', 'm');
intersection = set1.remove_set(CharacterSet()
.include_all()
.exclude('d', 'z'));
AssertThat(set1, Equals(CharacterSet()
.include('n', 'z')));
AssertThat(intersection, Equals(CharacterSet()
.include_all()
.exclude('a', 'z')));
});
});
});
@@ -303,28 +307,28 @@ describe("CharacterSet", []() {
describe("::included_ranges", [&]() {
it("consolidates sequences of 3 or more consecutive characters into ranges", [&]() {
CharacterSet set1 = CharacterSet()
.include('a', 'c')
.include('g')
.include('z');
AssertThat(set1.included_ranges(), Equals(vector<CharacterRange>({
CharacterRange('a', 'c'),
CharacterRange('g'),
CharacterRange('z'),
})));
});
it("doesn't consolidate sequences of 2 consecutive characters", [&]() {
CharacterSet set1 = CharacterSet()
.include('a', 'b')
.include('g')
.include('z');
AssertThat(set1.included_ranges(), Equals(vector<CharacterRange>({
CharacterRange('a'),
CharacterRange('b'),
CharacterRange('g'),
CharacterRange('z'),
})));
});
});

spec/fixtures/grammars/cpp.cc (new vendored file, 124 lines)

@@ -0,0 +1,124 @@
#include "tree_sitter/compiler.h"
#include "helpers.h"
namespace tree_sitter_examples {
using tree_sitter::Grammar;
using namespace tree_sitter::rules;
// http://slps.github.io/zoo/cpp/iso-n2723.html
extern const Grammar cpp =
Grammar(
{
{ "expression", repeat(sym("declaration")) },
{ "declaration", sym("function_definition") },
{ "function_definition",
seq({ optional(sym("declaration_specifiers")), sym("type_specifier"),
sym("declarator"), repeat(sym("declaration")),
sym("compound_statement") }) },
{ "declaration_specifiers",
repeat1(
choice({ sym("storage_class_specifier"), sym("type_qualifier") })) },
{ "storage_class_specifier",
choice({ str("typedef"), str("extern"), str("static"), str("auto"),
str("register") }) },
{ "type_specifier",
choice({ sym("struct_specifier"),
seq({ repeat(choice({ str("signed"), str("unsigned"),
str("long"), str("short") })),
sym("identifier") }) }) },
{ "struct_specifier",
seq(
{ str("struct"), optional(sym("identifier")),
seq({ str("{"), repeat(sym("struct_declaration")), str("}") }) }) },
{ "struct_declaration",
seq({ sym("type_specifier"), sym("declarator") }) },
{ "parameter_declaration",
seq({ optional(sym("declaration_specifiers")), sym("type_specifier"),
sym("declarator") }) },
{ "declaration",
seq({ optional(sym("declaration_specifiers")), sym("type_specifier"),
comma_sep1(sym("init_declarator")), str(";") }) },
{ "init_declarator",
choice({ sym("declarator"),
seq({ sym("declarator"), str("="), sym("initializer") }) }) },
{ "initializer",
choice({ sym("expression"), seq({ str("{"), sym("initializer_list"),
optional(str(",")), str("}") }) }) },
{ "initializer_list",
choice({ seq({ optional(sym("designation")), sym("initializer") }),
seq({ sym("initializer_list"), str(","),
optional(sym("designation")), sym("initializer") }) }) },
{ "designation",
seq({ repeat1(choice({ seq({ str("["), sym("expression"), str("]") }),
seq({ str("."), sym("identifier") }) })),
str("=") }) },
{ "declarator", seq({ repeat(sym("star")), sym("direct_declarator") }) },
{ "direct_declarator",
choice(
{ sym("identifier"), seq({ str("("), sym("declarator"), str(")") }),
seq({ sym("direct_declarator"), str("["),
optional(sym("expression")), str("]") }),
seq({ sym("direct_declarator"), str("("),
comma_sep(sym("parameter_declaration")), str(")") }) }) },
{ "type_qualifier",
choice({ str("const"), str("restrict"), str("volatile") }) },
{ "star", str("*") },
{ "compound_statement",
seq({ str("{"),
repeat(choice({ sym("declaration"), sym("statement") })),
str("}") }) },
{ "expression",
choice({ sym("math_expression"), sym("call_expression"), sym("string"),
sym("identifier"), sym("number") }) },
{ "math_expression",
choice(
{ prec(1, seq({ sym("expression"), str("+"), sym("expression") })),
prec(2, seq({ sym("expression"), sym("star"),
sym("expression") })) }) },
{ "call_expression",
prec(3, seq({ sym("expression"), str("("), comma_sep(sym("expression")),
str(")") })) },
{ "statement", choice({ sym("expression_statement") }) },
{ "expression_statement", seq({ sym("expression"), str(";") }) },
{ "string", delimited("\"") },
{ "identifier", pattern("\\a[\\w_]*") },
{ "number", pattern("\\d+(\\.\\d+)?") },
{ "comment", pattern("//[^\n]*") },
})
.ubiquitous_tokens({
sym("comment"), pattern("[ \t\r\n]"),
})
.expected_conflicts({
{ "type_specifier", "expression" },
});
} // namespace tree_sitter_examples
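Note: the fixture above uses a few combinators (optional, repeat1, comma_sep, comma_sep1, delimited) that come from its helpers.h include rather than from tree_sitter/compiler.h. For illustration, here is a plausible sketch of how such helpers could be defined in terms of the core rule constructors; these are assumptions, not the actual helpers.h:

namespace tree_sitter_examples {
using namespace tree_sitter::rules;

// Hypothetical reconstruction: either the rule or nothing.
rule_ptr optional(rule_ptr rule) {
  return choice({ rule, blank() });
}

// Hypothetical reconstruction: one occurrence followed by zero or more.
rule_ptr repeat1(rule_ptr rule) {
  return seq({ rule, repeat(rule) });
}

// Hypothetical reconstruction: rule ("," rule)*
rule_ptr comma_sep1(rule_ptr rule) {
  return seq({ rule, repeat(seq({ str(","), rule })) });
}

// Hypothetical reconstruction: zero or more comma-separated occurrences.
rule_ptr comma_sep(rule_ptr rule) {
  return optional(comma_sep1(rule));
}

// Hypothetical reconstruction of a delimited token, allowing backslash
// escapes, e.g. delimited("\"") for the "string" rule above.
rule_ptr delimited(std::string delimiter) {
  return token(seq({
    str(delimiter),
    repeat(choice({
      pattern("[^" + delimiter + "\\\\]"),
      seq({ str("\\"), pattern(".") }) })),
    str(delimiter) }));
}
}  // namespace tree_sitter_examples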


@@ -27,7 +27,7 @@ describe("Document", [&]() {
ts_document_set_input_string(doc, "{ \"key\": [1, 2] }");
AssertThat(ts_node_string(ts_document_root_node(doc)), Equals(
"(DOCUMENT (object (string) (array (number) (number))))"));
});
});
@@ -58,7 +58,7 @@ describe("Document", [&]() {
ts_document_set_language(doc, ts_language_json());
AssertThat(ts_node_string(ts_document_root_node(doc)), Equals(
"(DOCUMENT (object (string) (array (number) (number))))"));
});
});
});


@@ -14,10 +14,10 @@ describe("Node", []() {
ts_document_set_input_string(document, " [123, false, {\"x\": null}]");
root = ts_document_root_node(document);
AssertThat(ts_node_string(root), Equals(
"(DOCUMENT (array "
"(number) "
"(false) "
"(object (string) (null))))"));
});
after_each([&]() {


@@ -24,22 +24,22 @@ describe("Tree", []() {
before_each([&]() {
tree1 = ts_tree_make_leaf(
cat,
ts_length_make(5, 4),
ts_length_make(2, 1),
false);
tree2 = ts_tree_make_leaf(
cat,
ts_length_make(3, 3),
ts_length_make(1, 1),
false);
parent1 = ts_tree_make_node(
dog,
2,
tree_array({ tree1, tree2, }),
false);
});
after_each([&]() {
@@ -70,9 +70,9 @@ describe("Tree", []() {
describe("make_node(symbol, child_count, children, is_hidden)", [&]() {
it("computes its size based on its child nodes", [&]() {
AssertThat(parent1->size.bytes, Equals<size_t>(
tree1->size.bytes + + tree2->padding.bytes + tree2->size.bytes));
AssertThat(parent1->size.chars, Equals<size_t>(
tree1->size.chars + + tree2->padding.chars + tree2->size.chars));
});
it("computes its padding based on its first child", [&]() {
@@ -103,13 +103,13 @@ describe("Tree", []() {
before_each([&]() {
parent1->options = TSTreeOptionsHidden;
tree3 = ts_tree_make_leaf(
cat,
ts_length_make(8, 6),
ts_length_make(5, 3),
0);
grandparent = ts_tree_make_node(pig, 2, tree_array({
parent1,
tree3,
}), 0);
});
@@ -129,15 +129,15 @@ describe("Tree", []() {
AssertThat(children[1].tree, Equals(tree2));
AssertThat(children[1].offset.bytes, Equals<size_t>(
tree1->size.bytes + tree2->padding.bytes));
AssertThat(children[1].offset.chars, Equals<size_t>(
tree1->size.chars + tree2->padding.chars));
AssertThat(children[2].tree, Equals(tree3));
AssertThat(children[2].offset.bytes, Equals<size_t>(
tree1->size.bytes + tree2->padding.bytes + tree2->size.bytes + tree3->padding.bytes));
AssertThat(children[2].offset.chars, Equals<size_t>(
tree1->size.chars + tree2->padding.chars + tree2->size.chars + tree3->padding.chars));
});
});
@@ -148,8 +148,8 @@ describe("Tree", []() {
ts_tree_set_fragile_left(tree1);
ts_tree_set_extra(tree1);
parent = ts_tree_make_node(pig, 2, tree_array({
tree1,
tree2,
}), 0);
});
@@ -169,8 +169,8 @@ describe("Tree", []() {
ts_tree_set_fragile_right(tree2);
ts_tree_set_extra(tree2);
parent = ts_tree_make_node(pig, 2, tree_array({
tree1,
tree2,
}), 0);
});
@@ -190,8 +190,8 @@ describe("Tree", []() {
ts_tree_set_fragile_right(tree1);
ts_tree_set_fragile_left(tree2);
parent = ts_tree_make_node(pig, 2, tree_array({
tree1,
tree2,
}), 0);
});
@@ -209,23 +209,23 @@ describe("Tree", []() {
describe("equality", [&]() {
it("returns true for identical trees", [&]() {
TSTree *tree1_copy = ts_tree_make_leaf(
cat,
ts_length_make(5, 4),
ts_length_make(2, 1),
0);
AssertThat(ts_tree_eq(tree1, tree1_copy), IsTrue());
TSTree *tree2_copy = ts_tree_make_leaf(
cat,
ts_length_make(3, 3),
ts_length_make(1, 1),
0);
AssertThat(ts_tree_eq(tree2, tree2_copy), IsTrue());
TSTree *parent2 = ts_tree_make_node(dog, 2, tree_array({
tree1_copy, tree2_copy,
}), 0);
AssertThat(ts_tree_eq(parent1, parent2), IsTrue());
@@ -237,10 +237,10 @@ describe("Tree", []() {
it("returns false for trees with different symbols", [&]() {
TSTree *different_tree = ts_tree_make_leaf(
tree1->symbol + 1,
tree1->size,
tree1->padding,
0);
AssertThat(ts_tree_eq(tree1, different_tree), IsFalse());
ts_tree_release(different_tree);
@@ -248,10 +248,10 @@ describe("Tree", []() {
it("returns false for trees with different children", [&]() {
TSTree *different_tree = ts_tree_make_leaf(
tree1->symbol + 1,
tree1->size,
tree1->padding,
0);
TSTree *different_parent = ts_tree_make_node(dog, 2, tree_array({
different_tree, different_tree,
@@ -289,7 +289,7 @@ describe("Tree", []() {
parent1->options = TSTreeOptionsHidden;
char *string1 = ts_tree_string(parent1, names);
AssertThat(string(string1), Equals("(dog (cat) (cat))"));
free(string1);
tree1->options = TSTreeOptionsHidden;