@@ -24,7 +24,6 @@ import {
2424 SimpleHeadedWordUnit ,
2525 SimpleWordUnit ,
2626} from "./ast.ts" ;
27- import { cache } from "./cache.ts" ;
2827import { everyWordUnitInSentence } from "./extract.ts" ;
2928import {
3029 CLAUSE_RULE ,
@@ -59,11 +58,10 @@ import {
5958 UnrecognizedError ,
6059} from "./parser_lib.ts" ;
6160import { describe , Token } from "./token.ts" ;
61+ import { lazy as lazyEval } from "../../misc/misc.ts" ;
6262
// Parser that consumes a (possibly empty) run of whitespace; labeled "spaces" for error messages.
const spaces = match(/\s*/, "spaces");
6464
65- Parser . startCache ( cache ) ;
66-
6765const specificToken = memoize (
6866 < T extends Token [ "type" ] > ( type : T ) : Parser < Token & { type : T } > =>
6967 token . map ( ( token ) =>
@@ -219,7 +217,7 @@ function optionalCombined(
219217 ) ;
220218}
// One or more numeral words (words drawn from `numeralSet`); labeled "numeral" for error messages.
const number = manyAtLeastOnce(wordFrom(numeralSet, "numeral"));
222- const phrase : Parser < Phrase > = lazy ( ( ) =>
220+ const phrase : Parser < Phrase > = lazy ( lazyEval ( ( ) =>
223221 choice < Phrase > (
224222 sequence (
225223 number ,
@@ -273,7 +271,7 @@ const phrase: Parser<Phrase> = lazy(() =>
273271 } ) ) ,
274272 )
275273 . filter ( filter ( PHRASE_RULE ) )
276- ) ;
274+ ) ) ;
277275const nanpa = sequence ( wordUnit ( new Set ( [ "nanpa" ] ) , '"nanpa"' ) , phrase )
278276 . map ( ( [ nanpa , phrase ] ) => ( { nanpa, phrase } ) )
279277 . filter ( filter ( NANPA_RULES ) ) ;
@@ -723,6 +721,4 @@ export const parse = spaces
723721 . filter ( filter ( MULTIPLE_SENTENCES_RULE ) )
724722 . map ( ( sentences ) => ( { type : "sentences" , sentences } ) ) ,
725723 ) )
726- . parser ( ) ;
727-
728- Parser . endCache ( ) ;
724+ . generateParser ( ) ;
0 commit comments