Merged
Commits
59 commits
0ede09f
add markdown to AI logs
rappleb1 Oct 25, 2025
97abed3
format
rappleb1 Oct 25, 2025
2390427
selectable student query
rappleb1 Oct 25, 2025
419c887
classroom start
rappleb1 Oct 28, 2025
988a96d
commit for later
rappleb1 Oct 28, 2025
af46d81
no idea
rappleb1 Oct 28, 2025
01438f9
Added submission link to program assessment results view. Set maximum…
wfreem2 Oct 28, 2025
b3add79
Merge pull request #604 from umgc/wxfreem/program_assessment_improvement
rappleb1 Oct 30, 2025
d1e69a4
Merge pull request #593 from umgc/ai_logs_markdown
rappleb1 Oct 30, 2025
cd3048a
Assigns game to students, added students view, added score tracker
mlmcdan1 Oct 30, 2025
98fa9ee
Merge remote-tracking branch 'origin/developer' into team_e
rappleb1 Oct 30, 2025
f2d585c
Merge branch 'gamification_llm_fix' of https://github.com/umgc/2025_f…
rappleb1 Oct 30, 2025
ffd1b98
Merge branch 'team_e' into gamification_llm_fix
rappleb1 Oct 30, 2025
08adc77
Added the ability to copy form the chat interface while keeping forma…
Codyjwhite Oct 30, 2025
598f065
database structure, examples
rappleb1 Oct 30, 2025
ef3f91d
Init commit reflections.
sphilip3 Oct 30, 2025
0aa2bc0
Formatting
Codyjwhite Oct 30, 2025
20bd30d
first pass at fixing rubric json response from google classroom
andreash94 Oct 30, 2025
b00358f
fixing empty google_lms_service login function
andreash94 Oct 30, 2025
48b3859
checks if user is teacher and fixes unimplemented getCourses() function
andreash94 Oct 30, 2025
db5fd85
Adds local llm support to gamification and essay assistant / code cle…
ssung13 Oct 30, 2025
4017d34
import quiz function
andreash94 Oct 31, 2025
16d7dec
Merge pull request #608 from umgc/334-Codyjwhite-EssayAssistantFixes
rappleb1 Oct 31, 2025
d9e3fdd
adding create assignment functionality
andreash94 Oct 31, 2025
cdb62de
local llm analytics
rappleb1 Oct 31, 2025
aa77408
Merge pull request #611 from umgc/ssung13_local_llm
rappleb1 Oct 31, 2025
484195a
Connect gamification completions to backend and show status in UI
mlmcdan1 Oct 31, 2025
a3bd088
Merge branch 'team_e' into 332_sphilip5_student_reflection
rappleb1 Oct 31, 2025
bbe0ee0
Merge remote-tracking branch 'origin/team_e' into 332_sphilip5_studen…
rappleb1 Oct 31, 2025
a9e4713
Merge remote-tracking branch 'origin/team_e' into gamification_llm_fix
rappleb1 Oct 31, 2025
f7b9377
warnings and format
rappleb1 Oct 31, 2025
9600cf5
format and analyze
rappleb1 Oct 31, 2025
ff1da91
Merge pull request #607 from umgc/gamification_llm_fix
rappleb1 Oct 31, 2025
3d12a41
Merge remote-tracking branch 'origin/team_e' into 332_sphilip5_studen…
rappleb1 Oct 31, 2025
95d824c
Adding reactive grade display on view submissions detail page
andreash94 Nov 1, 2025
d7d3469
formatting
andreash94 Nov 1, 2025
092c7f3
naming
andreash94 Nov 1, 2025
375c5d4
Merge pull request #627 from umgc/Demo_adjustments_AI_Feedback_andrea…
andreash94 Nov 1, 2025
2c9394d
adds support to edulense assistant / minor fixes
ssung13 Nov 1, 2025
e4ac167
Dart fix
ssung13 Nov 1, 2025
380bc22
disable edulense assistant for students
ssung13 Nov 1, 2025
7a73765
Merge pull request #632 from umgc/ssung13_localLLM_fix
rappleb1 Nov 1, 2025
b94741e
Fixed logging by getting userID from Shared Preferences instead of st…
Codyjwhite Nov 1, 2025
0f76f04
Formatting
Codyjwhite Nov 1, 2025
6127802
Merge pull request #633 from umgc/334-Codyjwhite-EssayAssistantFixes
rappleb1 Nov 1, 2025
469ffdc
reflections db, consistency in database naming
rappleb1 Nov 2, 2025
76a9ab2
Merge remote-tracking branch 'origin/team_e' into 332_sphilip5_studen…
rappleb1 Nov 2, 2025
91c17cc
normalize
rappleb1 Nov 2, 2025
03c192a
better exception handling
rappleb1 Nov 2, 2025
6be8844
remove old
rappleb1 Nov 2, 2025
f1bb68a
semicolon
rappleb1 Nov 3, 2025
863ca90
iep classroom
rappleb1 Nov 3, 2025
7993bb9
Merge branch 'team_e' into iep_classroom
rappleb1 Nov 3, 2025
05d8520
Merge pull request #610 from umgc/google_classroom_fixes_andreash94
rappleb1 Nov 3, 2025
79b9009
Merge remote-tracking branch 'origin/team_e' into iep_classroom
rappleb1 Nov 3, 2025
b746778
format and fix
rappleb1 Nov 3, 2025
c826130
remove unused
rappleb1 Nov 3, 2025
39c6e35
Merge pull request #614 from umgc/332_sphilip5_student_reflection
rappleb1 Nov 4, 2025
c8bca76
Merge pull request #637 from umgc/iep_classroom
rappleb1 Nov 4, 2025
8 changes: 6 additions & 2 deletions LearningLens2025/.gitignore
@@ -49,8 +49,12 @@ terraform/terraformout.txt
# npm

/lambda/code_eval/node_modules
/lambda/gettoken/node_modules
/lambda/gettoken/gettoken.zip
/lambda/ai_log/node_modules
/lambda/game_data/node_modules
/lambda/reflections/node_modules
/lambda/ai_log/ai_log.zip
/lambda/code_eval/code_eval.zip
/lambda/game_data/game_data.zip
/lambda/reflections/reflections.zip
# Any extra javascript files for lambda functions
!lambda/code_eval/*.js
2 changes: 2 additions & 0 deletions LearningLens2025/frontend/.example.env
@@ -13,4 +13,6 @@ deepseek_apiKey=
GOOGLE_CLIENT_ID=
AI_LOGGING_URL=
CODE_EVAL_URL=
GAME_URL=
REFLECTIONS_URL=
LOCAL_MODEL_DOWNLOAD_URL_PATH=
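
The three new keys point the frontend at the game-data and reflections Lambda endpoints and at a CSV catalog of downloadable local models. A minimal sketch of reading them at startup, assuming the app loads its .env with the flutter_dotenv package (the actual loader is not shown in this diff):

import 'package:flutter_dotenv/flutter_dotenv.dart';

Future<void> loadEndpointConfig() async {
  await dotenv.load(fileName: '.env');
  // Empty values simply leave the corresponding feature unconfigured.
  final gameUrl = dotenv.env['GAME_URL'] ?? '';
  final reflectionsUrl = dotenv.env['REFLECTIONS_URL'] ?? '';
  final modelCatalogUrl = dotenv.env['LOCAL_MODEL_DOWNLOAD_URL_PATH'] ?? '';
  print('game: $gameUrl, reflections: $reflectionsUrl, model catalog: $modelCatalogUrl');
}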
@@ -10,6 +10,8 @@ import 'package:learninglens_app/Api/llm/perplexity_api.dart';
import 'package:learninglens_app/Api/lms/factory/lms_factory.dart';
import 'package:learninglens_app/Controller/custom_appbar.dart';
import 'package:learninglens_app/services/local_storage_service.dart';
import 'package:learninglens_app/Api/llm/local_llm_service.dart'; // local llm
import 'package:flutter/foundation.dart';

class TextBasedFunctionCallerView extends StatefulWidget {
const TextBasedFunctionCallerView({super.key});
@@ -30,6 +32,8 @@ class _TextBasedFunctionCallerViewState
LlmType selectedLLM = LlmType.GROK; // defaults to Grok
late LLM llm;

bool _localLlmAvail = !kIsWeb;

@override
void initState() {
super.initState();
@@ -54,6 +58,8 @@ class _TextBasedFunctionCallerViewState
aiModel = PerplexityLLM(LocalStorageService.getPerplexityKey());
} else if (selectedLLM == LlmType.DEEPSEEK) {
aiModel = DeepseekLLM(LocalStorageService.getDeepseekKey());
} else if (selectedLLM == LlmType.LOCAL) {
aiModel = LocalLLMService();
} else {
// default
aiModel = OpenAiLLM(LocalStorageService.getOpenAIKey());
@@ -199,11 +205,18 @@ class _TextBasedFunctionCallerViewState
items: LlmType.values.map((LlmType llm) {
return DropdownMenuItem<LlmType>(
value: llm,
enabled: LocalStorageService.userHasLlmKey(llm),
enabled: (llm == LlmType.LOCAL &&
LocalStorageService.getLocalLLMPath() != "" &&
_localLlmAvail) ||
LocalStorageService.userHasLlmKey(llm),
child: Text(
llm.displayName,
style: TextStyle(
color: LocalStorageService.userHasLlmKey(llm)
color: (llm == LlmType.LOCAL &&
LocalStorageService.getLocalLLMPath() !=
"" &&
_localLlmAvail) ||
LocalStorageService.userHasLlmKey(llm)
? Colors.black87
: Colors.grey,
),
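
The same availability test now appears twice in this dropdown, once for the enabled: argument and once for the label color. A possible follow-up, purely a sketch and not part of this PR, would centralize it in a small helper on the state class; the helper name below is hypothetical:

// Hypothetical helper -- not in this PR. Both the enabled: argument and the
// text-color expression could call it instead of repeating the condition.
bool _isLlmSelectable(LlmType llm) {
  if (llm == LlmType.LOCAL) {
    // The local model is selectable only off-web and once a model path is saved.
    return _localLlmAvail && LocalStorageService.getLocalLLMPath() != "";
  }
  return LocalStorageService.userHasLlmKey(llm);
}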
@@ -3,6 +3,7 @@ import 'package:learninglens_app/Api/experimental/assistant/textbased_function_c
import 'package:learninglens_app/Api/llm/llm_api_modules_base.dart';
import 'package:learninglens_app/Api/llm/prompt_engine.dart';
import 'package:learninglens_app/services/api_service.dart';
import 'package:learninglens_app/Api/llm/local_llm_service.dart';

// Replicate the functionality used in chatgpt_client, but swap over to prompt engineering instead of the function caller.
// This code gears the development for the assistant to be more generic in terms of which llm is used rather than relying on
@@ -61,34 +62,45 @@ class TextBasedLLMClient {
}
}

/// Calls the OpenAI Chat Completion API with the entire conversation so far
/// Calls the OpenAI Chat Completion API with the entire conversation so far, or calls the local LLM.
Future<String?> _callLLM(List<Map<String, String>> conversation) async {
final response = await ApiService().httpPost(
Uri.parse(llm.url),
headers: {
"Authorization": 'Bearer ${llm.apiKey}',
"Content-Type": "application/json",
},
body: jsonEncode({
"model": llm.model,
"messages": conversation.map((m) {
return {"role": m["role"], "content": m["content"]};
}).toList(),
"temperature": 0.7,
"top_p": 0.9,
}),
);

if (response.statusCode != 200) {
return "Error from LLM: ${response.statusCode} => ${response.body}";
}
if (llm.apiKey == "") {
final response = await LocalLLMService().runModel2(conversation.map((m) {
return {"role": m["role"], "content": m["content"]};
}).toList());

final jsonData = jsonDecode(response.body);
if (jsonData["choices"] == null || jsonData["choices"].isEmpty) {
return null;
}
if (response == "") {
return null;
}
return response;
} else {
final response = await ApiService().httpPost(
Uri.parse(llm.url),
headers: {
"Authorization": 'Bearer ${llm.apiKey}',
"Content-Type": "application/json",
},
body: jsonEncode({
"model": llm.model,
"messages": conversation.map((m) {
return {"role": m["role"], "content": m["content"]};
}).toList(),
"temperature": 0.7,
"top_p": 0.9,
}),
);

if (response.statusCode != 200) {
return "Error from LLM: ${response.statusCode} => ${response.body}";
}

return jsonData["choices"][0]["message"]["content"];
final jsonData = jsonDecode(response.body);
if (jsonData["choices"] == null || jsonData["choices"].isEmpty) {
return null;
}

return jsonData["choices"][0]["message"]["content"];
}
}

/// Parses a "CALL functionName(...)" string, calls the local function, returns its result
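
The routing rule introduced above is that an empty llm.apiKey means "use the bundled local model", while any non-empty key sends the conversation to the hosted chat-completion endpoint. A hedged sketch of the message shape both paths consume, calling the local path directly (illustrative values only):

import 'package:learninglens_app/Api/llm/local_llm_service.dart';

Future<void> demoLocalRoute() async {
  // The same role/content maps that _callLLM builds from its conversation history.
  final conversation = <Map<String, String>>[
    {"role": "system", "content": "You are a helpful teaching assistant."},
    {"role": "user", "content": "Summarize the student's last submission."},
  ];
  // With no API key configured, these messages go to the local model.
  final reply = await LocalLLMService().runModel2(
    conversation.map((m) => {"role": m["role"], "content": m["content"]}).toList(),
  );
  print(reply.isEmpty ? 'No response from local model' : reply);
}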
1 change: 0 additions & 1 deletion LearningLens2025/frontend/lib/Api/llm/DeepSeek_api.dart
@@ -352,7 +352,6 @@ $inputText
'max_tokens': maxOutputTokens,
'stream': true, // critical for SSE
};
print("Streaming body: $body");
final client = http.Client();
try {
final req = http.Request('POST', parsedUrl)
156 changes: 127 additions & 29 deletions LearningLens2025/frontend/lib/Api/llm/local_llm_service.dart
@@ -158,7 +158,7 @@ class LocalLLMService implements LLM {

int requestId = await fllamaChat(request, (response, responseJson, done) {
responseBuff.add(response);
//print(response);
print(response);
if (done) {
_runningRequestId = null;
finalResponse = _getFinalResponse(responseBuff);
@@ -197,6 +197,7 @@ class LocalLLMService implements LLM {
return "ERROR: Local LLM failed to produce a response";
}

// Runs the model with the previous conversation context.
Future<String> runModel2(
List<Map<String, dynamic>>? context, {
int? maxTokenSet,
@@ -343,8 +344,8 @@ class LocalLLMService implements LLM {
builder: (context) => AlertDialog(
title: const Text('⚠️Warning:'),
content: Text(
'The currently loaded Local LLM does not consistently generate a valid XML needed for rubric geneartion.\n'
'Proceeding could result in invalid XML output error.\n\n'
'The currently loaded Local LLM does not consistently generate the valid XML or JSON files used by the Learning Management System.\n'
'Proceeding could result in an invalid XML or JSON output error.\n\n'
'The recommended models for this task are 7B or larger reasoning models (e.g. Qwen).\n'
'Do you want to continue anyway?',
),
@@ -409,6 +410,7 @@ class LocalLLMService implements LLM {
return await runModel2(context);
}

// Unused in the local LLM implementation.
@override
Future<String> generate(String prompt) {
// TODO: implement generate
@@ -498,43 +500,139 @@
}

Future<List<String>> fetchModelKeys() async {
final url = Uri.parse(
'https://raw.githubusercontent.com/ssung13/SWEN670F2025/main/models.csv',
);
final downloadUrl = LocalStorageService.getLocalLLMDownloadURLPath();
if (downloadUrl != '') {
final url = Uri.parse(
downloadUrl,
);

final List<String> modelKeys = [];
final List<String> modelKeys = [];

try {
final response = await http.get(url);
if (response.statusCode == 200) {
final csvContent = response.body;

for (var line in LineSplitter.split(csvContent)) {
if (line.trim().isEmpty) continue;
final parts = line.split(',');
if (parts.isNotEmpty) {
modelKeys.add(parts[0].trim());
try {
final response = await http.get(url);
if (response.statusCode == 200) {
final csvContent = response.body;

for (var line in LineSplitter.split(csvContent)) {
if (line.trim().isEmpty) continue;
final parts = line.split(',');
if (parts.isNotEmpty) {
modelKeys.add(parts[0].trim());
}
}
} else {
print('Failed to fetch CSV: ${response.statusCode}');
}
} else {
print('Failed to fetch CSV: ${response.statusCode}');
} catch (e) {
print('Error fetching CSV: $e');
}
} catch (e) {
print('Error fetching CSV: $e');
}

return modelKeys;
return modelKeys;
} else {
return [];
}
}
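
// Illustrative note (not part of this diff): fetchModelKeys now takes the catalog
// URL from LocalStorageService.getLocalLLMDownloadURLPath() and returns an empty
// list when no URL is stored. The first CSV column is treated as the model key,
// e.g. a line such as (example values only):
//   qwen2.5-7b-instruct-q4,https://example.com/models/qwen2.5-7b-q4.gguf
// A minimal call site:
//   final keys = await LocalLLMService().fetchModelKeys();
//   if (keys.isEmpty) {
//     // Either LOCAL_MODEL_DOWNLOAD_URL_PATH is unset or the fetch failed.
//   }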

// Streams the model output incrementally.
@override
Stream<String> chatStream(
{List<Map<String, dynamic>>? context,
String? prompt,
double temperature = 0.7,
double topP = 1.0,
double frequencyPenalty = 0.0,
double presencePenalty = 0.0}) {
// TODO: implement chatStream
throw UnimplementedError();
double? temperature,
double? topP,
double? frequencyPenalty,
double? presencePenalty}) async* {
try {
model = LocalStorageService.getLocalLLMPath();

List<Message>? convertedContext = context?.map((entry) {
final role = Role.values.firstWhere((r) => r.name == entry['role']);
final message = entry['content'] as String;
return Message(role, message);
}).toList();

double temperatureInput = temperature ?? 0.7;

// model id for the web build (DO NOT use WEB for now / WIP).
String mlcModelId = MlcModelId.qwen05b;

// model path for the desktop build.
String mmprojPath = "";

final request = OpenAiRequest(
tools: [
if (_tool != null)
Tool(
name: _tool!.name,
jsonSchema: _tool!.parametersAsString,
),
],
maxTokens: maxOutputTokens.round(),
messages: convertedContext!,
numGpuLayers: 99,
/* this seems to have no adverse effects in environments w/o GPU support, ex. Android and web */
modelPath: kIsWeb ? mlcModelId : model,
mmprojPath: mmprojPath,
frequencyPenalty: 0.0,
// Don't use below 1.1, LLMs without a repeat penalty
// will repeat the same token.
presencePenalty: 1.1,
topP: _topP,
// 22.9s for 249 input tokens with 20K context for SmolLM3.
// 22.9s for 249 input tokens with 4K context for SmolLM3.
contextSize: contextSize,
// Don't use 0.0, some models will repeat
// the same token.
temperature: temperatureInput,
logger: (log) {
if (log.contains('<unused')) {
// 25-03-11: Added because Gemma 3 outputs so many that it
// can break the VS Code log viewer.
return;
}
if (log.contains('ggml_')) {
// 25-03-11: Added because that's the biggest clutter-er left
// when trying to get logs reduced down to compare Gemma 3 working vs.
// not-working cases.
return;
}
// ignore: avoid_print
print('[llama.cpp] $log');
},
);

// This is required, otherwise the fllama will crash if using fllamaChat
await fllamaChatTemplateGet(model);

final controller = StreamController<String>();
String previous = '';
int requestId = await fllamaChat(request, (response, responseJson, done) {
if (!controller.isClosed) {
if (response.isNotEmpty) {
if (response.startsWith(previous)) {
controller.add(response.substring(previous.length));
previous = response;
} else {
// In case it doesn't follow pattern (rare)
controller.add(response);
previous = response;
}
}
if (done) {
controller.close();
_runningRequestId = null;
}
}
});

_runningRequestId = requestId;
yield* controller.stream;

while (_runningRequestId != null) {
await Future.delayed(const Duration(milliseconds: 100));
}
} catch (e) {
print(e);
}
}
}
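
chatStream forwards only the suffix that each fllama callback adds beyond the previously emitted text, so a consumer can append chunks as they arrive. A minimal consumption sketch, assuming a model path has already been saved through LocalStorageService:

import 'package:learninglens_app/Api/llm/local_llm_service.dart';

Future<void> demoChatStream() async {
  final buffer = StringBuffer();
  final stream = LocalLLMService().chatStream(context: [
    {"role": "user", "content": "Explain photosynthesis in two sentences."},
  ]);
  await for (final chunk in stream) {
    buffer.write(chunk); // each event is only the delta since the last callback
  }
  print(buffer.toString());
}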