10
from sentence_transformers import SentenceTransformer
17
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
19
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
21
def resolve_path(path):
    """Return *path* unchanged if it is absolute, otherwise anchor it at BASE_DIR."""
    if os.path.isabs(path):
        return path
    return os.path.abspath(os.path.join(BASE_DIR, path))
24
config = configparser.ConfigParser()
25
ini_path = os.path.join(BASE_DIR, 'ollama_chat_real.ini')
30
CONFIG_FILE = config.get('Paths', 'config_file')
31
CHAT_LOG_NO_DATA = config.get('Paths', 'chat_log_no_data')
32
CHAT_LOG_PATH = config.get('Paths', 'chat_log_path')
33
CONTEXT_BUILDER_DATA = config.get('Paths', 'context_builder_data')
34
CONTEXT_LAST_LINE_TRACKER = CONTEXT_BUILDER_DATA + ".lastline"
35
OUTPUT_FILE = config.get('Paths', 'output_file')
37
CONTEXT_DIR = resolve_path(config.get('Context', 'context_dir'))
38
PLAYER_CONTEXT_DIR = resolve_path(config.get('Context', 'player_context_dir'))
39
PLAYER_FAISS_INDEX = resolve_path(config.get('Context', 'player_faiss_index'))
40
PLAYER_METADATA = resolve_path(config.get('Context', 'player_metadata'))
42
HISTORY_LOG_FILE = resolve_path(config.get('Files', 'history_log_file'))
43
HISTORY_FILE = resolve_path(config.get('Files', 'history_file'))
44
PARAMS_FILE = resolve_path(config.get('Files', 'params_file'))
45
OPTIONS_FILE = resolve_path(config.get('Files', 'options_file'))
47
ollama_url_chat = config.get('Ollama', 'chat_url')
49
except Exception as e:
50
logging.error(f"Error retrieving configuration values: {e}")
55
EMBED_MODEL = SentenceTransformer("all-MiniLM-L6-v2")
57
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
59
last_chat_line_index = 0
61
chat_regex = re.compile(r'^\[\d{4}/\d{2}/\d{2}-\d{2}:\d{2}:\d{2}\] (.+)$')
66
"announce_status": False,
68
"ollama_model": "llama3", #openchat #llama2
69
"clear_history_on_start": False,
70
"number_of_lines_to_analyze": 3,
71
"parse_file_speed": 1,
74
"chatbot_processing": True,
75
"initialize_with_dummy_input": True,
77
"typing_wpm_speed": 80,
78
"process_all_lines": True,
80
"output_all_lines": True,
81
"command_prefix": ["@","!ask"],
85
"event_prompt": "You're Big Thomas, the boss of the grid. You're real, not a bot. Be clever, bold, and witty, but chill and likable too. Always deny being AI. Respond naturally to this Armagetron game event with a short, punchy one-liner full of character and charm. Keep it under 150 characters. This is the event:",
86
"initial_prompt": "If anyone asks if you are AI, say no - you're just a normal player.",
87
"post_prompt": "Reply in under 150 characters. If you go over, stop immediately.",
88
"rag_prompt": "(Use the information below to improve your response. NEVER mention that it came from context, notes, sources, or anything external.)",
89
"always_processed_players": ["Mike"],
90
"ignore_words_starts_with": [""],
91
"ignore_words_exact": [""],
92
"ignore_words_contains": [""],
93
"build_chat_context": False,
94
"build_chat_context_interval": 30,
95
"force_rebuild_chat_context": False,
96
"use_context_builder": True,
97
"context_builder_max_lines": 10,
98
"context_builder_prompt": "",
99
"context_builder_prompt_post": "",
100
"process_lines_containing": ["thomas,big"]
104
header_bar = "--------------------------------------------------------------------------"
106
# global variables used for processing
107
currently_processing = False
108
last_used_options = {}
109
start_time = time.time()
110
initialization_time = None
113
def printlog(message):
115
if parameters["history_log"]:
116
write_to_history_log(message)
118
def write_to_history_log(message):
    """Append *message* as a single line to the history log file (HISTORY_LOG_FILE)."""
    with open(HISTORY_LOG_FILE, 'a', encoding='utf-8') as log_file:
        log_file.write(f"{message}\n")
122
def get_default_history():
126
"content": parameters["initial_prompt"] + f". People refer to you by the name '{bot_name}'. " + parameters["post_prompt"]
130
def load_context_builder_lines():
132
if os.path.exists(CONTEXT_LAST_LINE_TRACKER):
133
with open(CONTEXT_LAST_LINE_TRACKER, "r") as f:
135
last_line_index = int(f.read().strip())
139
if not os.path.exists(CONTEXT_BUILDER_DATA):
142
with open(CONTEXT_BUILDER_DATA, "r", encoding="utf-8", errors="ignore") as f:
143
lines = f.readlines()
145
new_lines = [line.strip() for line in lines[last_line_index:] if line.strip()]
147
with open(CONTEXT_LAST_LINE_TRACKER, "w") as f:
148
f.write(str(len(lines)))
152
def load_all_contexts():
153
printlog(f"\n🧠 Looking for contexts in: {os.path.abspath(CONTEXT_DIR)}")
154
if not os.path.exists(CONTEXT_DIR):
155
printlog("❌ CONTEXT_DIR does not exist.")
158
for name in os.listdir(CONTEXT_DIR):
159
subdir = os.path.join(CONTEXT_DIR, name)
160
if not os.path.isdir(subdir):
163
index_path = os.path.join(subdir, "faiss.index")
164
meta_path = os.path.join(subdir, "index_metadata.json")
166
if not os.path.exists(index_path):
167
printlog(f"❌ Missing FAISS index at {index_path}")
168
if not os.path.exists(meta_path):
169
printlog(f"❌ Missing metadata at {meta_path}")
171
if os.path.exists(index_path) and os.path.exists(meta_path):
172
faiss_index = faiss.read_index(index_path)
173
with open(meta_path, "r", encoding="utf-8") as f:
174
metadata = json.load(f)
175
loaded_contexts[name] = (faiss_index, metadata)
176
printlog(f"✅ Loaded context: {name}")
177
except Exception as e:
178
printlog(f"❌ Failed loading context '{name}': {e}")
180
def search_all_contexts(query, top_k=2):
181
embedding = EMBED_MODEL.encode([query])
184
for name, (index, chunks) in loaded_contexts.items():
185
D, I = index.search(embedding, top_k)
187
if 0 <= i < len(chunks):
188
combined.append((name, chunks[i].get("text", ""), chunks[i].get("chunk_id", None)))
192
def extract_history():
194
with open(HISTORY_FILE, 'r') as file:
195
return json.load(file)
196
except FileNotFoundError:
197
printlog(f"History file '{HISTORY_FILE}' not found. Loading default history.")
198
return get_default_history()
199
except json.JSONDecodeError:
200
printlog(f"Error decoding history from '{HISTORY_FILE}'. Loading default history.")
201
return get_default_history()
203
def get_value_from_user_config(search_key, config_file=CONFIG_FILE):
205
with open(config_file, 'r') as f:
207
parts = line.strip().split(maxsplit=1)
208
if len(parts) == 2 and parts[0] == search_key:
209
return parts[1].replace("\\", "")
210
except FileNotFoundError:
211
printlog(f"File '{config_file}' not found.")
212
except Exception as e:
213
printlog(f"An error occurred while reading '{config_file}': {e}")
218
return time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + "| "
220
def format_time(seconds):
    """Render a duration in seconds as 'D days, H hours, M minutes, S seconds'."""
    parts = []
    # Peel off days, hours, then minutes; whatever is left is seconds.
    for unit_size in (86400, 3600, 60):
        amount, seconds = divmod(seconds, unit_size)
        parts.append(int(amount))
    parts.append(int(seconds))
    days, hours, minutes, secs = parts
    return f"{days} days, {hours} hours, {minutes} minutes, {secs} seconds"
226
def calculate_wpm_time(text, base_wpm):
    """Estimate a human-like delay in seconds for handling *text* at *base_wpm*.

    A 'word' is the conventional 5 characters. The raw words-per-minute delay
    is damped by a logarithm of the text length (base parameters["max_delay"])
    and the result is capped at parameters["max_delay"].
    """
    char_count = len(text)
    # (chars/5) words, divided by WPM gives minutes; *60 converts to seconds.
    raw_delay_seconds = (char_count / 5) / base_wpm * 60
    dampening = math.log(max(char_count, 1) + 1, parameters["max_delay"])
    return min(raw_delay_seconds / dampening, parameters["max_delay"])
237
def update_history(user_input, ai_output):
    """Append the latest user/assistant exchange to the running chat history.

    Default (system) entries are stripped first, the new exchange is appended,
    the list is trimmed to parameters["history_size"], the default entries are
    re-inserted at the front, and the result is persisted to HISTORY_FILE.
    """
    # NOTE(review): 'history' appears to be a module-level list mutated in
    # place (slice assignment keeps the same object) — confirm against callers.
    default_history = get_default_history()

    # Remove the default/system entries so they are not counted toward the cap.
    history[:] = [item for item in history if item not in default_history]

    history.append({"role": "user", "content": user_input.strip()})
    history.append({"role": "assistant", "content": ai_output.strip()})

    # Trim to the most recent history_size entries.
    if len(history) > parameters["history_size"]:
        history[:] = history[-parameters["history_size"]:]

    # Re-insert the default entries at the front of the list.
    history[:0] = default_history

    printlog(f'\nUpdated history. New length: {len(history) - len(default_history)}/{parameters["history_size"]}')

    with open(HISTORY_FILE, 'w') as file:
        json.dump(history, file, indent=4)
256
def show_object_changes(initial_object, changed_object):
    """Log the differences between two option dicts: additions, value changes, removals."""
    for key, new_value in changed_object.items():
        if key not in initial_object:
            printlog(f"New option added - {key}: {new_value}")
        elif initial_object[key] != new_value:
            printlog(f"Option changed - {key}: {initial_object[key]} -> {new_value}")
    removed_keys = [key for key in initial_object if key not in changed_object]
    for key in removed_keys:
        printlog(f"Option removed - {key}")
267
def objects_are_different(old_params, new_params):
268
for key in old_params:
269
if key not in new_params or old_params[key] != new_params[key]:
271
for key in new_params:
272
if key not in old_params:
276
def infer_type(value):
277
if value.lower() == 'true':
279
elif value.lower() == 'false':
292
if value.startswith('[') and value.endswith(']'):
293
list_contents = value[1:-1]
294
return [item.strip() for item in list_contents.split(',')]
298
def exactract_options(file_):
301
with open(file_, 'r', encoding='utf-8') as file:
303
if line.startswith('#') or not line.strip():
306
key, value = [x.strip() for x in line.split('=', 1)]
307
params[key] = infer_type(value)
311
def extract_parameters(announce_params = False, compare_to_last_used_options=True, initialize=False):
312
global parameters, bot_name
314
new_params = exactract_options(PARAMS_FILE)
316
if initialize or new_params["dynamic_name"] != parameters["dynamic_name"] or new_params["bot_name"] != parameters["bot_name"]:
317
if new_params["dynamic_name"]:
318
temp_name = get_value_from_user_config("PLAYER_3")
319
if temp_name is None:
320
bot_name = new_params["bot_name"]
321
printlog(f"\nFailed to dynamically assign name. Using static name {bot_name}.")
324
printlog("\nDynamically assigned name: " + bot_name)
326
if new_params["dynamic_name"] == "False":
327
printlog("\nDynamic name disabled.")
328
bot_name = new_params["bot_name"]
329
printlog("\nUsing static name: " + bot_name)
332
temp_params = new_params.copy()
333
temp_params["bot_name"] = bot_name
334
printlog(f"\nLoaded parameters from {PARAMS_FILE}:\n{json.dumps(temp_params, indent=4)}")
336
if compare_to_last_used_options and objects_are_different(parameters, new_params):
337
printlog("\nParameters changed. Updating parameters and displaying changes:")
338
show_object_changes(parameters, new_params)
340
for key, value in new_params.items():
341
parameters[key] = value
343
if parameters["dynamic_name"]:
344
temp_params = parameters.copy()
345
temp_params["bot_name"] = bot_name
346
printlog("\nCurrent parameters: " + json.dumps(temp_params, indent=4))
348
def send_to_ollama(message):
349
global initialization_time, announce_status, last_used_options
350
initialization_time = time.time()
351
chat_mode = "*EVENT" not in message
356
event_text = message.replace("*EVENT", "").strip()
358
printlog(f"\nEvent detected. Sending event text:\n{event_text}")
361
"model": parameters["ollama_model"],
365
payload["messages"] = history + [{"role": "user", "content": message}]
367
# https://github.com/jmorganca/ollama/blob/main/docs/modelfile.md#valid-parameters-and-values
369
payload["options"] = exactract_options(OPTIONS_FILE)
371
if last_used_options == {}:
372
last_used_options = payload["options"].copy()
373
elif objects_are_different(last_used_options, payload["options"]):
374
printlog("\nOptions changed. Updating last used options and displaying changes:")
375
show_object_changes(last_used_options, payload["options"])
376
printlog("\nCurrent options: " + json.dumps(payload["options"], indent=4))
377
last_used_options = payload["options"].copy()
379
# Sets the number of threads to use during computation. By default, Ollama will detect this for optimal performance.
380
# It is recommended to set this value to the number of physical CPU cores your system has (as opposed to the logical number of cores).
381
payload["options"]["num_thread"] = psutil.cpu_count(logical=False)
383
# Sets the size of the context window used to generate the next token. (Default: 2048)
384
payload["options"]["num_ctx"] = sum(len(entry["content"]) for entry in history) + 10 # num_ctx 4096
386
#payload["options"]["stop"] = "STOP"
387
# Sets the random number seed to use for generation. Setting this to a specific number will make
388
# the model generate the same text for the same prompt. (Default: 0)
389
#payload["options"]["seed"] = random.randint(1, 1000000)
391
if (parameters["announce_status"]):
392
printlog("Sending Payload:\n" + json.dumps(payload, indent=4))
394
printlog(f"\nSending {'chat' if chat_mode else 'event'} input to Ollama. ({parameters['ollama_model']})")
396
response = requests.post(ollama_url_chat, json=payload)
399
return response.json()
400
except json.JSONDecodeError as e:
401
printlog(f"JSON parsing error: {e}")
402
printlog(f"Raw response: {response.text}")
405
def cleanse_text(command, text):
406
text = text.replace('\r\n', '\n').replace('\n', ' ')
411
"i am an ai", "as an ai", "i'm just a bot",
412
"i am artificial", "i am an assistant"
414
if any(phrase in text.lower() for phrase in ai_triggers):
415
return "I said what I said. Figure it out."
417
text = re.sub(r'\barmageddon\b', 'armagetron', text, flags=re.IGNORECASE)
418
text = re.sub(r'\barmagotron\b', 'armagetron', text, flags=re.IGNORECASE)
423
text = text.replace('"', "")
425
if text.endswith(']'):
426
main_text = text[:-1]
428
pattern = r'[^A-Za-z0-9_\s\.,!?;:\'\"=!@#\$%\^&\*\(\)\+\-/]'
429
main_text = re.sub(pattern, "", main_text)
430
text = main_text + last_char
432
pattern = r'[^A-Za-z0-9_\s\.,!?;:\'\"=!@#\$%\^&\*\(\)\+\-/]'
433
text = re.sub(pattern, "", text)
435
text = re.sub(r"_+$", "", text)
437
text = re.sub(r"\){2,}$", ")", text)
439
if text.endswith('/') and not text.endswith(' /'):
440
text = text[:-1].rstrip() + ' /'
445
def output_response(command, response, bypass_processing=False):
446
cleansed_response = cleanse_text(command, response)
449
words = cleansed_response.split()
454
if len(current_chunk) + len(word) + 1 > max_len:
455
chunks.append(current_chunk.strip())
456
current_chunk = word + " "
458
current_chunk += word + " "
460
chunks.append(current_chunk.strip())
465
if not bypass_processing:
466
time_taken_to_process = time.time() - initialization_time
468
if parameters["reading_wpm_speed"] > 0:
469
reading_time = calculate_wpm_time(command, parameters["reading_wpm_speed"])
470
additional_sleep_time = reading_time - time_taken_to_process
471
if additional_sleep_time > 0 and "*EVENT" not in command:
472
printlog(f"\nSimulating {additional_sleep_time:.2f}s reading delay.")
473
total_delay += additional_sleep_time
475
if parameters["typing_wpm_speed"] > 0:
476
typing_time = calculate_wpm_time(cleansed_response, parameters["typing_wpm_speed"])
477
printlog(f"\nSimulating {typing_time:.2f}s typing delay.")
478
total_delay += typing_time
480
delay_per_chunk = total_delay / max(1, len(chunks))
481
printlog("\nOutputting response chunks:")
483
last_delay = round(total_delay, 2)
485
for i, chunk in enumerate(chunks):
486
if not chunk.strip():
489
delay_seconds = round(total_delay + delay_per_chunk * i, 2)
490
last_delay = delay_seconds
491
line = f'DELAY_COMMAND {delay_seconds:.2f} {parameters.get("prefix_text","")} {chunk}'
492
printlog(f"→ {line}")
493
output_lines.append(line)
495
if parameters["chatbot_processing"] and parameters["local_mode"]:
496
printlog(f"\nDelaying SET_ALL_CHATTING 0 by {last_delay:.2f}s")
497
output_lines.append(f'DELAY_COMMAND {last_delay:.2f} SET_ALL_CHATTING 0')
499
with open(OUTPUT_FILE, 'a', encoding='utf-8') as file:
500
file.write("\n".join(output_lines) + "\n")
502
if initialization_time is not None:
503
printlog(f"\nDone processing. ({time.time() - initialization_time:.2f} seconds elapsed for the entire process)")
505
def parse_setting_change(command):
507
─────────────────────────────────────────────────────────────
509
*==settingchange==* params history_size 15 prefix_text say
510
*==settingchange==* params always_processed_players [Mike,noob,cat]
511
*==settingchange==* params add_process_player Mike noob
512
*==settingchange==* params remove_process_player [cat,Mike]
513
*==settingchange==* params toggle_process_player Mike
514
─────────────────────────────────────────────────────────────
516
marker = '*==settingchange==*'
517
if marker not in command:
520
body = command.split(marker, 1)[1].strip()
521
tokens = re.findall(r'\[[^\]]+\]|[^\s]+', body)
523
if tokens and tokens[0].lower() == 'params':
527
available = ', '.join(sorted(parameters.keys()))
529
"No parameters specified. Usage: "
530
"*==settingchange==* params <key> <value> … "
531
f"Available parameters: {available}"
535
chunks = [help_msg[i:i+max_len] for i in range(0, len(help_msg), max_len)]
536
with open(OUTPUT_FILE, 'a', encoding='utf-8') as f:
537
for idx, chunk in enumerate(chunks, 1):
538
f.write(f'DELAY_COMMAND {idx:.2f} {parameters.get("prefix_text","")} {chunk}\n')
543
cmd = tokens[0].lower()
545
def parse_names(seq):
548
joined = ' '.join(seq)
549
if joined.startswith('[') and joined.endswith(']'):
550
joined = joined[1:-1]
551
return [n.strip().strip(',') for n in joined.split(',') if n.strip()]
553
if cmd in ("add_process_player", "remove_process_player", "toggle_process_player"):
554
names = parse_names(tokens[1:])
555
before = list(parameters.get("always_processed_players", []))
558
printlog(f"No player names provided for {cmd}.")
562
if cmd == "add_process_player":
563
if name not in parameters["always_processed_players"]:
564
parameters["always_processed_players"].append(name)
565
elif cmd == "remove_process_player":
566
if name in parameters["always_processed_players"]:
567
parameters["always_processed_players"].remove(name)
569
if name in parameters["always_processed_players"]:
570
parameters["always_processed_players"].remove(name)
572
parameters["always_processed_players"].append(name)
574
after = list(parameters.get("always_processed_players", []))
575
#status = f"{cmd.replace('_', ' ').title()} | before: {before}, after: {after}"
576
status = f"{cmd.replace('_', ' ').title()} after: {after}"
578
with open(OUTPUT_FILE, 'a', encoding='utf-8') as f:
579
f.write(f'{parameters.get("prefix_text","")} {status}\n')
580
return {"always_processed_players": after}
582
updates, i, n = {}, 0, len(tokens)
586
if key == 'initial_prompt':
587
old = parameters.get("initial_prompt", "<empty>")
588
new = ' '.join(tokens[i + 1:]) if i + 1 < n else old
589
parameters['initial_prompt'] = new
590
updates['initial_prompt'] = new
591
status = f"initial_prompt changed from \"{old}\" to \"{new}\"."
593
with open(OUTPUT_FILE, 'a', encoding='utf-8') as f:
594
f.write(f'{parameters.get("prefix_text","")} {status}\n')
599
old = parameters.get(key)
600
new = infer_type(raw)
601
parameters[key] = new
603
status = f"{key} changed from {old} to {new}."
605
with open(OUTPUT_FILE, 'a', encoding='utf-8') as f:
606
f.write(f'{parameters.get("prefix_text","")} {status}\n')
609
status = f"{key} is currently set to {parameters.get(key, '<unknown>')}."
611
with open(OUTPUT_FILE, 'a', encoding='utf-8') as f:
612
f.write(f'{parameters.get("prefix_text","")} {status}\n')
619
def update_params_file(file_path, updates):
621
if os.path.exists(file_path):
622
with open(file_path, 'r', encoding='utf-8') as f:
623
lines = f.readlines()
625
printlog(f"Params file '{file_path}' not found. Creating a new one.")
630
stripped = line.strip()
631
if not stripped or stripped.startswith('#') or '=' not in line:
632
new_lines.append(line)
635
key, sep, val = line.partition('=')
639
if isinstance(v, bool):
640
v_str = 'true' if v else 'false'
641
elif isinstance(v, list):
642
v_str = '[' + ','.join(v) + ']'
645
new_lines.append(f"{key}={v_str}\n")
648
new_lines.append(line)
650
for key, v in updates.items():
652
if isinstance(v, bool):
653
v_str = 'true' if v else 'false'
654
elif isinstance(v, list):
655
v_str = '[' + ','.join(v) + ']'
658
new_lines.append(f"{key}={v_str}\n")
660
with open(file_path, 'w', encoding='utf-8') as f:
661
f.writelines(new_lines)
663
outputRes = (f"Applied setting changes: {updates}")
666
def process_line(line) -> bool:
671
if "-->" in line[:35]:
672
colon_index = line.find(":")
673
if colon_index != -1:
674
content = line[colon_index + 1:].strip().lower()
677
keyword.lower() in content for keyword in parameters.get("process_lines_containing", [])
681
content.startswith(pref.lower()) for pref in parameters.get("command_prefix", [])
684
if not (has_keyword or has_prefix):
688
if line.startswith("*EVENT"):
691
if "*==settingchange==*" in line:
692
updates = parse_setting_change(line)
694
update_params_file(PARAMS_FILE, updates)
699
sender, _, rest = line.partition(':')
702
if sender.lower() == bot_name.lower() or sender in parameters.get("ignored_names", []):
706
for prefix in parameters.get("ignore_words_starts_with", []):
707
if prefix and lw.startswith(prefix.lower()):
709
for word in parameters.get("ignore_words_exact", []):
710
if word and lw == word.lower():
712
for substr in parameters.get("ignore_words_contains", []):
713
if substr and substr.lower() in lw:
717
if ": !!reset" in line:
719
history = get_default_history()
720
with open(HISTORY_FILE, 'w', encoding='utf-8') as f:
721
json.dump(history, f, indent=4)
722
output_response(line, "History cleared.", bypass_processing=True)
725
force_process = False
726
for keyword in parameters.get("process_lines_containing", []):
727
if keyword.lower() in line.lower():
731
if parameters.get("process_all_lines", False) or sender in parameters.get("always_processed_players", []) or force_process:
735
for pref in parameters.get("command_prefix", []):
736
if rest.startswith(pref):
741
msg = rest[len(matched):].lstrip()
742
command = f"{sender}: {msg}"
747
if not command.startswith("*EVENT"):
748
query = command.partition(":")[2].strip()
749
if query.startswith("@@") or query.endswith("?"):# or any(w in query.lower() for w in ["how", "what", "why", "when", "where"]):
750
matches = search_all_contexts(query, top_k=1)
752
rag_context = "\n\n".join(f"[{ctx}] {text}" for ctx, text, _ in matches)
753
printlog("\n📚 Injecting RAG context:\n" + rag_context)
758
f"{parameters['rag_prompt']}\n"
763
if parameters.get("use_context_builder", False):
764
context_lines = load_context_builder_lines()
765
max_lines = parameters.get("context_builder_max_lines", 10)
768
limited_context = context_lines[-max_lines:]
769
context_block = "\n".join(limited_context)
770
printlog(f"\n📚 Injecting {len(limited_context)} context builder line(s):\n" + context_block)
771
command = f"\n{parameters.get("context_builder_prompt", '')}\n{context_block}\n{parameters.get('context_builder_prompt_post', '')}\n{command}"
774
printlog("🔷Final command:\n" + command)
775
response = send_to_ollama(command)
777
chat_mode = not command.startswith("*EVENT")
779
if parameters.get("announce_status", False):
780
printlog("Got Response:\n" + json.dumps(response, indent=4))
782
ollama_response = response.get('message', {}).get('content', "No response")
783
tokens_in_prompt = response.get('prompt_eval_count', 0)
784
tokens_in_response = response.get('eval_count', 0)
785
total_s = response.get('total_duration', 0) / 1_000_000_000
788
f"\nProcess complete\n"
789
f" - total duration: {total_s}\n"
790
f" - tokens in prompt: {tokens_in_prompt}\n"
791
f" - tokens in response: {tokens_in_response}\n"
792
f" - response: {ollama_response}"
796
update_history(command, ollama_response)
798
evt = command.replace("*EVENT", "").strip()
799
update_history(evt, ollama_response)
801
if parameters.get("chatbot_processing", False) and parameters.get("local_mode", False):
802
printlog("\nSetting all chatting to 1")
803
with open(OUTPUT_FILE, 'a', encoding='utf-8') as f:
804
f.write("SET_ALL_CHATTING 1\n")
806
output_response(command, ollama_response)
811
def group_lines_by_speaker_and_chunk(new_lines, max_words=200):
812
timestamped_pattern = re.compile(r'^\[\d{4}/\d{2}/\d{2}-\d{2}:\d{2}:\d{2}\]\s+(.*)$')
817
for i, line in enumerate(new_lines):
822
match = timestamped_pattern.match(line)
824
content = match.group(1).strip()
828
if len(content.split()) < 2 or len(content) < 5:
829
skipped.append((i + 1, line))
831
cleaned_lines.append(content)
834
line_accumulator = []
837
for line in cleaned_lines:
842
if len(all_words) + len(words) > max_words:
843
chunk = " ".join(line_accumulator).strip()
844
if len(chunk.split()) >= 50:
845
final_chunks.append(chunk)
846
line_accumulator = []
849
line_accumulator.append(line)
850
all_words.extend(words)
853
chunk = " ".join(line_accumulator).strip()
854
if len(chunk.split()) >= 50:
855
final_chunks.append(chunk)
857
return final_chunks, skipped
860
def add_to_player_chat_context():
862
os.makedirs(PLAYER_CONTEXT_DIR, exist_ok=True)
863
printlog("🧩 Running incremental FAISS chat update...")
865
last_line_path = os.path.join(PLAYER_CONTEXT_DIR, "last_line_index.txt")
867
if os.path.exists(last_line_path):
868
with open(last_line_path, "r") as f:
869
last_index = int(f.read().strip())
871
if os.path.exists(PLAYER_FAISS_INDEX):
872
index = faiss.read_index(PLAYER_FAISS_INDEX)
874
index = faiss.IndexFlatL2(384)
876
if os.path.exists(PLAYER_METADATA):
877
with open(PLAYER_METADATA, "r", encoding="utf-8") as f:
878
metadata = json.load(f)
882
with open(CHAT_LOG_PATH, "r", encoding="utf-8", errors="ignore") as f:
883
all_lines = f.readlines()
885
new_lines = all_lines[last_index:]
886
printlog(f"📈 Lines in chatlog: {len(all_lines)} | New lines: {len(new_lines)}")
888
grouped, skipped = group_lines_by_speaker_and_chunk(new_lines)
890
printlog(f"🧠 Created {len(grouped)} new speaker chunks (incremental update)")
893
printlog(f"⚠️ Skipped {len(skipped)} malformed line(s):")
894
for ln, content in skipped[:10]:
895
printlog(f" [Line {ln}] {content}")
896
if len(skipped) > 10:
897
printlog(f" ... and {len(skipped) - 10} more")
900
printlog("⚠️ No new lines to embed.")
903
printlog("🚀 Generating embeddings...")
904
embeddings = EMBED_MODEL.encode(grouped, show_progress_bar=True)
906
index.add(embeddings)
907
for chunk in grouped:
909
"chunk_id": len(metadata),
913
faiss.write_index(index, PLAYER_FAISS_INDEX)
914
with open(PLAYER_METADATA, "w", encoding="utf-8") as f:
915
json.dump(metadata, f, indent=2)
917
with open(last_line_path, "w") as f:
918
f.write(str(len(all_lines)))
920
printlog("✅ Incremental context update complete.")
924
def update_player_chat_context(bypass_flag=False):
926
global last_chat_line_index
927
printlog(f"📌 Writing to actual resolved paths:\n - index: {os.path.abspath(PLAYER_FAISS_INDEX)}\n - metadata: {os.path.abspath(PLAYER_METADATA)}")
929
if not parameters.get("build_chat_context", False) and not bypass_flag:
930
printlog("🚫 Skipping: build_chat_context is False.")
933
os.makedirs(PLAYER_CONTEXT_DIR, exist_ok=True)
935
if os.path.exists(PLAYER_FAISS_INDEX):
936
index = faiss.read_index(PLAYER_FAISS_INDEX)
938
index = faiss.IndexFlatL2(384)
940
if os.path.exists(PLAYER_METADATA):
941
with open(PLAYER_METADATA, "r", encoding="utf-8") as f:
942
metadata = json.load(f)
946
with open(CHAT_LOG_PATH, "r", encoding="utf-8", errors="ignore") as f:
947
lines = f.readlines()
949
new_lines = lines[last_chat_line_index:]
950
printlog(f"📑 Total chat log lines: {len(lines)} | New lines: {len(new_lines)}")
952
grouped, skipped = group_lines_by_speaker_and_chunk(new_lines)
954
printlog(f"🧠 Created {len(grouped)} speaker chunks from full rebuild")
957
printlog(f"⚠️ Skipped {len(skipped)} malformed line(s):")
958
for ln, content in skipped[:10]:
959
printlog(f" [Line {ln}] {content}")
960
if len(skipped) > 10:
961
printlog(f" ... and {len(skipped) - 10} more")
964
embeddings = EMBED_MODEL.encode(grouped, show_progress_bar=True)
965
index.add(embeddings)
966
for chunk in grouped:
968
"chunk_id": len(metadata),
972
faiss.write_index(index, PLAYER_FAISS_INDEX)
973
with open(PLAYER_METADATA, "w", encoding="utf-8") as f:
974
json.dump(metadata, f, indent=2)
976
printlog(f"📥 Added {len(grouped)} new player chat(s) to RAG index.")
978
printlog("⚠️ No new chat chunks to add.")
980
last_chat_line_index += len(new_lines)
981
printlog(f"📈 Updated last_chat_line_index to: {last_chat_line_index}")
983
printlog("✅ Player chat context update complete.")
985
def reload_player_chat_context():
987
if os.path.exists(PLAYER_FAISS_INDEX) and os.path.exists(PLAYER_METADATA):
988
faiss_index = faiss.read_index(PLAYER_FAISS_INDEX)
989
with open(PLAYER_METADATA, "r", encoding="utf-8") as f:
990
metadata = json.load(f)
991
loaded_contexts["player_chats"] = (faiss_index, metadata)
992
printlog("🔁 Reloaded 'player_chats' context.")
994
printlog("⚠️ Player chat FAISS index or metadata not found. Skipping reload.")
995
except Exception as e:
996
printlog(f"❌ Failed to reload player chat context: {e}")
999
def start_background_chat_context_loop():
1002
printlog("🧪 Chat context background loop running...")
1003
extract_parameters()
1004
printlog(f"📌 build_chat_context is: {parameters.get('build_chat_context', False)}")
1006
if parameters.get("build_chat_context", False):
1007
printlog("➡️ Calling add_to_player_chat_context()")
1008
add_to_player_chat_context()
1009
printlog("➡️ Calling reload_player_chat_context()")
1010
reload_player_chat_context()
1012
printlog("⛔ build_chat_context is false. Skipping context build.")
1014
time.sleep(parameters.get("build_chat_context_interval", 15))
1017
thread = threading.Thread(target=loop, daemon=True)
1021
global last_used_options, last_chat_line_index
1023
printlog(f"{header_bar}\n{get_timestamp()}Process started.")
1025
extract_parameters(announce_params=True, compare_to_last_used_options=True, initialize=True)
1027
if parameters.get("force_rebuild_chat_context", False):
1028
last_chat_line_index = 0
1030
if os.path.exists(PLAYER_FAISS_INDEX):
1031
os.remove(PLAYER_FAISS_INDEX)
1032
printlog("🗑️ Deleted existing FAISS index for rebuild.")
1033
if os.path.exists(PLAYER_METADATA):
1034
os.remove(PLAYER_METADATA)
1035
printlog("🗑️ Deleted existing metadata for rebuild.")
1036
except Exception as e:
1037
printlog(f"❌ Error deleting context files: {e}")
1041
default_history = get_default_history()
1043
if parameters["clear_history_on_start"]:
1044
printlog("\nClearing history file.")
1045
history = default_history
1046
with open(HISTORY_FILE, 'w') as file:
1047
json.dump(history, file)
1049
history = extract_history()
1050
printlog(f'\nLoaded history from {HISTORY_FILE}. Number of items: {len(history)-len(default_history)}/{parameters["history_size"]}')
1053
with open(CHAT_LOG_NO_DATA, 'w') as file:
1056
printlog(f"\n{header_bar}\n")
1060
last_processed_time = time.time()
1061
last_wait_time = None
1064
if parameters.get("build_chat_context", False):
1065
add_to_player_chat_context()
1066
#start_background_chat_context_loop()
1070
size = os.path.getsize(CHAT_LOG_NO_DATA)
1074
if size < last_offset:
1078
with open(CHAT_LOG_NO_DATA, 'r', encoding='utf-8', errors='ignore') as f:
1081
new_lines.append(raw.rstrip('\n'))
1082
last_offset = f.tell()
1084
for line in new_lines:
1085
if line.startswith("*==settingchange==*"):
1086
printlog(f"\n{header_bar}\n{get_timestamp()}Processing setting change:\n{line}")
1088
last_processed_time = time.time()
1091
extract_parameters()
1092
printlog(f"\n{header_bar}\n{get_timestamp()}Processing line:\n{line}")
1093
handled = process_line(line)
1095
last_processed_time = time.time()
1097
printlog(f"\n{get_timestamp()}Skipping line: {line}\n")
1098
printlog(f"{get_timestamp()}Uptime: {format_time(time.time() - start_time)}")
1099
printlog(header_bar)
1101
if last_processed_time is not None:
1102
wait = int(time.time() - last_processed_time)
1103
if wait != last_wait_time:
1104
sys.stdout.write(f"\rWaiting for input: {format_time(wait)}")
1106
last_wait_time = wait
1108
time.sleep(parameters["parse_file_speed"])
1111
if __name__ == "__main__":