@@ -46,6 +46,39 @@ describe.skip("pruneLinesFromTop", () => {
     const pruned = pruneLinesFromTop(prompt, 5, "gpt-4");
     expect(pruned.split("\n").length).toBeLessThan(prompt.split("\n").length);
   });
+
+  it("should return the original prompt if it's within max tokens", () => {
+    const prompt = "Line 1\nLine 2";
+    const pruned = pruneLinesFromTop(prompt, 10, "gpt-4");
+    expect(pruned).toEqual(prompt);
+  });
+
+  it("should return an empty string if maxTokens is 0", () => {
+    const prompt = "Line 1\nLine 2\nLine 3\nLine 4";
+    const pruned = pruneLinesFromTop(prompt, 0, "gpt-4");
+    expect(pruned).toEqual("");
+  });
+
+  it("should handle an empty prompt string", () => {
+    const prompt = "";
+    const pruned = pruneLinesFromTop(prompt, 5, "gpt-4");
+    expect(pruned).toEqual("");
+  });
+
+  it("should handle a prompt with a single line that exceeds maxTokens", () => {
+    const prompt =
+      "This is a single long line that will exceed the token limit";
+    const pruned = pruneLinesFromTop(prompt, 5, "gpt-4");
+
+    expect(pruned).toEqual("");
+  });
+
+  it("should correctly prune when all lines together exceed maxTokens but individual lines do not", () => {
+    const prompt = "L1\nL2\nL3\nL4";
+
+    const pruned = pruneLinesFromTop(prompt, 5, "gpt-4");
+    expect(pruned).toEqual("L3\nL4");
+  });
 });

 describe.skip("pruneLinesFromBottom", () => {
@@ -54,6 +87,39 @@ describe.skip("pruneLinesFromBottom", () => {
     const pruned = pruneLinesFromBottom(prompt, 5, "gpt-4");
     expect(pruned.split("\n").length).toBeLessThan(prompt.split("\n").length);
   });
+
+  it("should return the original prompt if it's within max tokens", () => {
+    const prompt = "Line 1\nLine 2";
+    const pruned = pruneLinesFromBottom(prompt, 10, "gpt-4");
+    expect(pruned).toEqual(prompt);
+  });
+
+  it("should return an empty string if maxTokens is 0", () => {
+    const prompt = "Line 1\nLine 2\nLine 3\nLine 4";
+    const pruned = pruneLinesFromBottom(prompt, 0, "gpt-4");
+    expect(pruned).toEqual("");
+  });
+
+  it("should handle an empty prompt string", () => {
+    const prompt = "";
+    const pruned = pruneLinesFromBottom(prompt, 5, "gpt-4");
+    expect(pruned).toEqual("");
+  });
+
+  it("should handle a prompt with a single line that exceeds maxTokens", () => {
+    const prompt =
+      "This is a single long line that will exceed the token limit";
+    const pruned = pruneLinesFromBottom(prompt, 5, "gpt-4");
+
+    expect(pruned).toEqual("");
+  });
+
+  it("should correctly prune when all lines together exceed maxTokens but individual lines do not", () => {
+    const prompt = "L1\nL2\nL3\nL4";
+
+    const pruned = pruneLinesFromBottom(prompt, 5, "gpt-4");
+    expect(pruned).toEqual("L1\nL2");
+  });
 });

 describe.skip("pruneRawPromptFromTop", () => {
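
For context, a minimal sketch of the behavior these tests pin down: drop whole lines from one end of the prompt until the remainder fits the token budget, returning an empty string when nothing fits. The `countTokens` helper below is a crude word-count stand-in invented for illustration (the real code presumably uses a model-aware tokenizer), so where exactly the last test case truncates depends on how tokens are actually counted.

// Sketch only; not the implementation under test.
function countTokens(text: string, _modelName: string): number {
  // Rough approximation: one token per whitespace-separated word.
  return text.length === 0 ? 0 : text.trim().split(/\s+/).length;
}

function pruneLinesFromTop(prompt: string, maxTokens: number, modelName: string): string {
  let lines = prompt.split("\n");
  // Discard lines from the top until what remains is within maxTokens.
  while (lines.length > 0 && countTokens(lines.join("\n"), modelName) > maxTokens) {
    lines = lines.slice(1);
  }
  return lines.join("\n");
}

function pruneLinesFromBottom(prompt: string, maxTokens: number, modelName: string): string {
  let lines = prompt.split("\n");
  // Same idea, but discard lines from the end instead.
  while (lines.length > 0 && countTokens(lines.join("\n"), modelName) > maxTokens) {
    lines = lines.slice(0, -1);
  }
  return lines.join("\n");
}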