8
by hackermans
|
1
|
import requests
|
2
|
import random
|
3
|
import time
|
4
|
import json
|
5
|
import re
|
6
|
import math
|
7
|
import psutil
|
8
|
import sys
|
9
|
import os
|
10
|
from sentence_transformers import SentenceTransformer
|
11
|
import faiss
|
12
|
import threading
|
13
|
import configparser
|
14
|
import logging
|
15
|
import sys
|
16
|
|
17
|
# Configure root logging once at import time; all module messages share this format.
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')

# Directory containing this script; used to resolve all relative paths below.
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
|
20
|
|
21
|
def resolve_path(path):
    """Return *path* unchanged when absolute, otherwise resolve it relative to BASE_DIR."""
    if os.path.isabs(path):
        return path
    return os.path.abspath(os.path.join(BASE_DIR, path))
|
23
|
|
24
|
# Runtime configuration comes from an INI file that sits next to this script.
config = configparser.ConfigParser()
ini_path = os.path.join(BASE_DIR, 'ollama_chat_real.ini')
|
26
|
|
27
|
try:
    # configparser returns an empty parser when the file is missing; the
    # .get() calls below then raise (NoSectionError), handled by the except.
    config.read(ini_path)

    # [Paths] section — used as given (not run through resolve_path).
    CONFIG_FILE = config.get('Paths', 'config_file')
    CHAT_LOG_NO_DATA = config.get('Paths', 'chat_log_no_data')
    CHAT_LOG_PATH = config.get('Paths', 'chat_log_path')
    CONTEXT_BUILDER_DATA = config.get('Paths', 'context_builder_data')
    # Sidecar file tracking how far into CONTEXT_BUILDER_DATA we've read.
    CONTEXT_LAST_LINE_TRACKER = CONTEXT_BUILDER_DATA + ".lastline"
    OUTPUT_FILE = config.get('Paths', 'output_file')

    # [Context] section — resolved relative to this script's directory.
    CONTEXT_DIR = resolve_path(config.get('Context', 'context_dir'))
    PLAYER_CONTEXT_DIR = resolve_path(config.get('Context', 'player_context_dir'))
    PLAYER_FAISS_INDEX = resolve_path(config.get('Context', 'player_faiss_index'))
    PLAYER_METADATA = resolve_path(config.get('Context', 'player_metadata'))

    # [Files] section — resolved relative to this script's directory.
    HISTORY_LOG_FILE = resolve_path(config.get('Files', 'history_log_file'))
    HISTORY_FILE = resolve_path(config.get('Files', 'history_file'))
    PARAMS_FILE = resolve_path(config.get('Files', 'params_file'))
    OPTIONS_FILE = resolve_path(config.get('Files', 'options_file'))

    # [Ollama] section — chat endpoint URL.
    ollama_url_chat = config.get('Ollama', 'chat_url')

except Exception as e:
    # Any missing section/option is fatal: the rest of the module depends on
    # every one of these names existing.
    logging.error(f"Error retrieving configuration values: {e}")
    sys.exit(1)
|
52
|
|
53
|
|
54
|
# name -> (faiss_index, metadata) for every context loaded from CONTEXT_DIR.
loaded_contexts = {}
# Sentence-embedding model shared by RAG search and the chat-context indexer.
EMBED_MODEL = SentenceTransformer("all-MiniLM-L6-v2")

# NOTE(review): BASE_DIR is already defined above with the same value; this
# reassignment is redundant but harmless.
BASE_DIR = os.path.dirname(os.path.abspath(__file__))

# Index of the last chat-log line processed by the main loop.
last_chat_line_index = 0

# Matches "[YYYY/MM/DD-HH:MM:SS] <message>" lines from the game chat log.
chat_regex = re.compile(r'^\[\d{4}/\d{2}/\d{2}-\d{2}:\d{2}:\d{2}\] (.+)$')

# Current bot display name; assigned by extract_parameters().
bot_name = ""
|
64
|
|
65
|
# Runtime-tunable bot parameters. These are the hard-coded defaults; they are
# overwritten in place by extract_parameters() from PARAMS_FILE at startup and
# whenever the file changes.
parameters = {
    "announce_status": False,            # verbose payload/response logging
    "history_size": 20,                  # max user/assistant messages retained
    "ollama_model": "llama3", #openchat #llama2
    "clear_history_on_start": False,
    "number_of_lines_to_analyze": 3,
    "parse_file_speed": 1,               # chat-log polling cadence
    "local_mode": True,
    "dynamic_name": True,                # pull the bot name from the user config
    "chatbot_processing": True,          # emit SET_ALL_CHATTING toggles around replies
    "initialize_with_dummy_input": True,
    "bot_name": "ollama",                # static fallback when dynamic_name fails
    "typing_wpm_speed": 80,              # simulated typing speed in WPM (0 = off)
    "process_all_lines": True,
    "prefix_text": "say",                # game command prepended to every output line
    "output_all_lines": True,
    "command_prefix": ["@","!ask"],      # chat prefixes that address the bot
    "max_delay": 10,                     # cap (seconds) on any simulated delay
    "ignored_names": [],                 # senders whose lines are never processed
    "history_log": False,                # mirror printlog() output to HISTORY_LOG_FILE
    "event_prompt": "You're Big Thomas, the boss of the grid. You're real, not a bot. Be clever, bold, and witty, but chill and likable too. Always deny being AI. Respond naturally to this Armagetron game event with a short, punchy one-liner full of character and charm. Keep it under 150 characters. This is the event:",
    "initial_prompt": "If anyone asks if you are AI, say no - you're just a normal player.",
    "post_prompt": "Reply in under 150 characters. If you go over, stop immediately.",
    "rag_prompt": "(Use the information below to improve your response. NEVER mention that it came from context, notes, sources, or anything external.)",
    "always_processed_players": ["Mike"],    # lines from these senders always processed
    "ignore_words_starts_with": [""],
    "ignore_words_exact": [""],
    "ignore_words_contains": [""],
    "build_chat_context": False,
    "build_chat_context_interval": 30,       # seconds between chat-context rebuilds
    "force_rebuild_chat_context": False,
    "use_context_builder": True,             # inject recent context-builder lines
    "context_builder_max_lines": 10,
    "context_builder_prompt": "",
    "context_builder_prompt_post": "",
    "process_lines_containing": ["thomas,big"]   # keywords that force processing

}
|
103
|
|
104
|
# Visual separator used in console output.
header_bar = "--------------------------------------------------------------------------"

# global variables used for processing
currently_processing = False   # guards against overlapping request handling
last_used_options = {}         # last options payload sent to Ollama (for change diffing)
start_time = time.time()       # process start, for uptime reporting
initialization_time = None     # set when a request begins; used for timing/delays
history = []                   # rolling conversation history (system seed + exchanges)
|
112
|
|
113
|
def printlog(message):
    """Print *message*; when history logging is enabled, also append it to the log file."""
    print(message)
    if not parameters["history_log"]:
        return
    write_to_history_log(message)
|
117
|
|
118
|
def write_to_history_log(message):
    """Append *message* as one line to HISTORY_LOG_FILE (UTF-8)."""
    with open(HISTORY_LOG_FILE, 'a', encoding='utf-8') as log_file:
        log_file.write(f"{message}\n")
|
121
|
|
122
|
def get_default_history():
    """Build the seed conversation: one system message combining the initial
    prompt, the bot's current name, and the post prompt."""
    system_content = (
        parameters["initial_prompt"]
        + f". People refer to you by the name '{bot_name}'. "
        + parameters["post_prompt"]
    )
    return [{"role": "system", "content": system_content}]
|
129
|
|
130
|
def load_context_builder_lines():
    """Return the non-empty lines appended to CONTEXT_BUILDER_DATA since the
    previous call, then advance the persisted last-line tracker.

    The tracker file stores the count of lines already consumed; a missing or
    corrupt tracker restarts from the beginning of the data file.
    """
    start_index = 0
    if os.path.exists(CONTEXT_LAST_LINE_TRACKER):
        with open(CONTEXT_LAST_LINE_TRACKER, "r") as tracker:
            try:
                start_index = int(tracker.read().strip())
            except ValueError:
                start_index = 0

    if not os.path.exists(CONTEXT_BUILDER_DATA):
        return []

    with open(CONTEXT_BUILDER_DATA, "r", encoding="utf-8", errors="ignore") as data_file:
        all_lines = data_file.readlines()

    fresh = [entry.strip() for entry in all_lines[start_index:] if entry.strip()]

    # Persist the new high-water mark so the next call only sees newer lines.
    with open(CONTEXT_LAST_LINE_TRACKER, "w") as tracker:
        tracker.write(str(len(all_lines)))

    return fresh
|
151
|
|
152
|
def load_all_contexts():
    """Scan CONTEXT_DIR for per-context subdirectories and load each one's
    FAISS index plus chunk metadata into the module-level ``loaded_contexts``
    map (name -> (index, metadata)).

    Each subdirectory must contain ``faiss.index`` and ``index_metadata.json``;
    directories missing either file are reported and skipped. Failures are
    logged per-context and do not abort the scan.
    """
    # NOTE(review): the emoji in the log strings below arrived garbled
    # (mojibake) in this file; preserved as-is to avoid changing runtime output.
    printlog(f"\n๐ง Looking for contexts in: {os.path.abspath(CONTEXT_DIR)}")
    if not os.path.exists(CONTEXT_DIR):
        printlog("โ CONTEXT_DIR does not exist.")
        return

    for name in os.listdir(CONTEXT_DIR):
        subdir = os.path.join(CONTEXT_DIR, name)
        if not os.path.isdir(subdir):
            continue
        try:
            index_path = os.path.join(subdir, "faiss.index")
            meta_path = os.path.join(subdir, "index_metadata.json")

            if not os.path.exists(index_path):
                printlog(f"โ Missing FAISS index at {index_path}")
            if not os.path.exists(meta_path):
                printlog(f"โ Missing metadata at {meta_path}")

            if os.path.exists(index_path) and os.path.exists(meta_path):
                faiss_index = faiss.read_index(index_path)
                with open(meta_path, "r", encoding="utf-8") as f:
                    metadata = json.load(f)
                # Tuple shape (index, chunks) is what search_all_contexts() expects.
                loaded_contexts[name] = (faiss_index, metadata)
                printlog(f"โ Loaded context: {name}")
        except Exception as e:
            printlog(f"โ Failed loading context '{name}': {e}")
|
179
|
|
180
|
def search_all_contexts(query, top_k=2):
    """Embed *query* and return up to *top_k* nearest chunks from every loaded
    context, as (context_name, chunk_text, chunk_id) tuples."""
    query_vector = EMBED_MODEL.encode([query])
    results = []

    for context_name, (index, chunks) in loaded_contexts.items():
        # FAISS returns (distances, ids); only the ids matter here.
        _, neighbor_ids = index.search(query_vector, top_k)
        for neighbor in neighbor_ids[0]:
            # FAISS pads with -1 when fewer than top_k vectors exist; the
            # bounds check filters those out.
            if 0 <= neighbor < len(chunks):
                chunk = chunks[neighbor]
                results.append((context_name, chunk.get("text", ""), chunk.get("chunk_id", None)))

    return results
|
191
|
|
192
|
def extract_history():
    """Load the persisted conversation history from HISTORY_FILE, falling back
    to the default seed history when the file is absent or unreadable."""
    try:
        with open(HISTORY_FILE, 'r') as file:
            return json.load(file)
    except FileNotFoundError:
        printlog(f"History file '{HISTORY_FILE}' not found. Loading default history.")
    except json.JSONDecodeError:
        printlog(f"Error decoding history from '{HISTORY_FILE}'. Loading default history.")
    return get_default_history()
|
202
|
|
203
|
def get_value_from_user_config(search_key, config_file=CONFIG_FILE):
    """Return the value of *search_key* from a whitespace-delimited config file.

    The file is scanned line by line for ``<key> <value>`` pairs; backslashes
    are stripped from the returned value.

    Returns "" (never None) when the key is missing or the file cannot be
    read — callers must test for falsiness rather than ``is None``.
    """
    try:
        with open(config_file, 'r') as f:
            for line in f:
                parts = line.strip().split(maxsplit=1)
                if len(parts) == 2 and parts[0] == search_key:
                    return parts[1].replace("\\", "")
    except FileNotFoundError:
        printlog(f"File '{config_file}' not found.")
    except Exception as e:
        printlog(f"An error occurred while reading '{config_file}': {e}")
    return ""
|
215
|
|
216
|
|
217
|
def get_timestamp():
    """Return the local time formatted as 'YYYY-MM-DD HH:MM:SS| ' for log prefixes."""
    stamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
    return stamp + "| "
|
219
|
|
220
|
def format_time(seconds):
    """Render a duration in seconds as 'D days, H hours, M minutes, S seconds'."""
    # Decompose bottom-up (seconds -> minutes -> hours -> days); equivalent
    # to peeling off days first since divmod quotients are exact.
    minutes, secs = divmod(seconds, 60)
    hours, minutes = divmod(minutes, 60)
    days, hours = divmod(hours, 24)
    return f"{int(days)} days, {int(hours)} hours, {int(minutes)} minutes, {int(secs)} seconds"
|
225
|
|
226
|
def calculate_wpm_time(text, base_wpm):
    """Estimate a human-like delay in seconds for reading/typing *text* at
    *base_wpm* words per minute, compressed logarithmically for long texts and
    capped at parameters["max_delay"].
    """
    # Standard WPM convention: one "word" = 5 characters.
    words = len(text) / 5

    base_minutes = words / base_wpm
    base_delay = base_minutes * 60

    # Longer texts are compressed harder; log base is max_delay.
    # NOTE(review): math.log with base <= 1 raises or inverts the scale —
    # assumes max_delay > 1; confirm the params file guarantees that.
    log_scale = math.log(max(len(text), 1) + 1, parameters["max_delay"])
    scaled_delay = base_delay / log_scale

    return min(scaled_delay, parameters["max_delay"])
|
236
|
|
237
|
def update_history(user_input, ai_output):
    """Append one user/assistant exchange to the rolling history, trim it to
    parameters["history_size"], re-prepend the system seed messages, and
    persist the result to HISTORY_FILE."""
    global history
    default_history = get_default_history()

    # Strip the seed system messages so trimming only counts real exchanges.
    history[:] = [item for item in history if item not in default_history]

    history.append({"role": "user", "content": user_input.strip()})
    history.append({"role": "assistant", "content": ai_output.strip()})

    # Keep only the most recent history_size messages.
    if len(history) > parameters["history_size"]:
        history[:] = history[-parameters["history_size"]:]

    # The seed messages always come first.
    history[:0] = default_history

    printlog(f'\nUpdated history. New length: {len(history) - len(default_history)}/{parameters["history_size"]}')

    with open(HISTORY_FILE, 'w') as file:
        json.dump(history, file, indent=4)
|
255
|
|
256
|
def show_object_changes(initial_object, changed_object):
    """Log one line for every key added, modified, or removed between
    *initial_object* and *changed_object*."""
    for key, new_value in changed_object.items():
        if key not in initial_object:
            printlog(f"New option added - {key}: {new_value}")
        elif initial_object[key] != new_value:
            printlog(f"Option changed - {key}: {initial_object[key]} -> {new_value}")

    removed_keys = (key for key in initial_object if key not in changed_object)
    for key in removed_keys:
        printlog(f"Option removed - {key}")
|
266
|
|
267
|
def objects_are_different(old_params, new_params):
    """Return True when the two dicts differ in keys or values.

    The original key-by-key loops re-implemented dict inequality: Python's
    ``!=`` already checks that both mappings have identical key sets and
    equal values, so this is behaviorally identical and clearer.
    """
    return old_params != new_params
|
275
|
|
276
|
def infer_type(value):
    """Parse a raw option string into bool, int, float, list, or str.

    Order matters: booleans first, then int, then float; a '[...]' literal
    becomes a list of stripped strings; anything else stays a string.
    """
    lowered = value.lower()
    if lowered == 'true':
        return True
    if lowered == 'false':
        return False

    for caster in (int, float):
        try:
            return caster(value)
        except ValueError:
            continue

    if value.startswith('[') and value.endswith(']'):
        inner = value[1:-1]
        return [element.strip() for element in inner.split(',')]

    return value
|
297
|
|
298
|
def exactract_options(file_):
    """Parse a ``key = value`` options file into a dict, coercing each value
    with infer_type(). Blank lines and '#' comment lines are ignored.

    (The misspelled name — "exactract" — is kept because callers throughout
    this file use it.)
    """
    params = {}

    with open(file_, 'r', encoding='utf-8') as file:
        for line in file:
            if line.startswith('#') or not line.strip():
                continue

            # BUGFIX: a non-blank line without '=' previously crashed the
            # parse with ValueError on the 2-tuple unpack; skip it instead.
            if '=' not in line:
                continue

            key, value = [x.strip() for x in line.split('=', 1)]
            params[key] = infer_type(value)
    return params
|
309
|
|
310
|
|
311
|
def extract_parameters(announce_params = False, compare_to_last_used_options=True, initialize=False):
    """Reload bot parameters from PARAMS_FILE into the module-level dict.

    Re-resolves the bot name when initializing or when the naming options
    changed, optionally announces the loaded parameters, and — when
    *compare_to_last_used_options* is set — diffs against the current values
    and applies any changes in place.
    """
    global parameters, bot_name

    new_params = exactract_options(PARAMS_FILE)

    # Re-resolve the bot name on initialization or when naming options changed.
    if initialize or new_params["dynamic_name"] != parameters["dynamic_name"] or new_params["bot_name"] != parameters["bot_name"]:
        if new_params["dynamic_name"]:
            temp_name = get_value_from_user_config("PLAYER_3")
            # BUGFIX: get_value_from_user_config returns "" (never None) on
            # failure, so the old `is None` check could not fire and the bot
            # name silently became empty. Test for falsiness instead.
            if not temp_name:
                bot_name = new_params["bot_name"]
                printlog(f"\nFailed to dynamically assign name. Using static name {bot_name}.")
            else:
                bot_name = temp_name
                printlog("\nDynamically assigned name: " + bot_name)
        else:
            # NOTE: after infer_type(), dynamic_name is a bool, never the
            # string "False" — this announce branch therefore never triggers.
            if new_params["dynamic_name"] == "False":
                printlog("\nDynamic name disabled.")
            bot_name = new_params["bot_name"]
            printlog("\nUsing static name: " + bot_name)

    if announce_params:
        temp_params = new_params.copy()
        temp_params["bot_name"] = bot_name
        printlog(f"\nLoaded parameters from {PARAMS_FILE}:\n{json.dumps(temp_params, indent=4)}")

    if compare_to_last_used_options and objects_are_different(parameters, new_params):
        printlog("\nParameters changed. Updating parameters and displaying changes:")
        show_object_changes(parameters, new_params)

        # Update in place so other references to `parameters` see the change.
        for key, value in new_params.items():
            parameters[key] = value

        if parameters["dynamic_name"]:
            temp_params = parameters.copy()
            temp_params["bot_name"] = bot_name
            printlog("\nCurrent parameters: " + json.dumps(temp_params, indent=4))
|
347
|
|
348
|
def send_to_ollama(message):
    """POST *message* (plus the rolling history) to the Ollama chat endpoint
    and return the decoded JSON reply, or None when the reply is not valid JSON.

    Messages containing "*EVENT" are treated as game events rather than chat.
    Side effects: refreshes parameters, resets `initialization_time`, and
    tracks `last_used_options` for change diffing.
    """
    # NOTE(review): `announce_status` is declared global but never assigned or
    # read in this function — harmless, likely a leftover.
    global initialization_time, announce_status, last_used_options
    initialization_time = time.time()
    chat_mode = "*EVENT" not in message

    extract_parameters()

    if not chat_mode:
        # Strip the event marker; the remaining text is sent as the prompt.
        event_text = message.replace("*EVENT", "").strip()
        message = event_text
        printlog(f"\nEvent detected. Sending event text:\n{event_text}")

    payload = {
        "model": parameters["ollama_model"],
        "stream": False
    }

    payload["messages"] = history + [{"role": "user", "content": message}]

    # https://github.com/jmorganca/ollama/blob/main/docs/modelfile.md#valid-parameters-and-values

    payload["options"] = exactract_options(OPTIONS_FILE)

    # Diff the model options against the previous request and log any changes.
    if last_used_options == {}:
        last_used_options = payload["options"].copy()
    elif objects_are_different(last_used_options, payload["options"]):
        printlog("\nOptions changed. Updating last used options and displaying changes:")
        show_object_changes(last_used_options, payload["options"])
        printlog("\nCurrent options: " + json.dumps(payload["options"], indent=4))
        last_used_options = payload["options"].copy()

    # Sets the number of threads to use during computation. By default, Ollama will detect this for optimal performance.
    # It is recommended to set this value to the number of physical CPU cores your system has (as opposed to the logical number of cores).
    payload["options"]["num_thread"] = psutil.cpu_count(logical=False)

    # Sets the size of the context window used to generate the next token. (Default: 2048)
    # NOTE(review): this sums *characters* of history content, not tokens —
    # presumably a deliberate rough sizing; confirm.
    payload["options"]["num_ctx"] = sum(len(entry["content"]) for entry in history) + 10 # num_ctx 4096

    #payload["options"]["stop"] = "STOP"
    # Sets the random number seed to use for generation. Setting this to a specific number will make
    # the model generate the same text for the same prompt. (Default: 0)
    #payload["options"]["seed"] = random.randint(1, 1000000)

    if (parameters["announce_status"]):
        printlog("Sending Payload:\n" + json.dumps(payload, indent=4))

    printlog(f"\nSending {'chat' if chat_mode else 'event'} input to Ollama. ({parameters['ollama_model']})")

    # NOTE(review): no timeout is set — a hung server blocks this thread
    # indefinitely; consider requests.post(..., timeout=...).
    response = requests.post(ollama_url_chat, json=payload)

    try:
        return response.json()
    except json.JSONDecodeError as e:
        printlog(f"JSON parsing error: {e}")
        printlog(f"Raw response: {response.text}")
        return None
|
404
|
|
405
|
def cleanse_text(command, text):
    """Normalize an LLM reply for in-game chat output.

    Collapses newlines, deflects AI self-identification, corrects common
    misspellings of 'armagetron', lowercases (except the literal 'XD'),
    strips double quotes and disallowed characters, and tidies trailing
    underscores, parentheses, and slashes. *command* is accepted for
    interface compatibility but not used.
    """
    flattened = text.replace('\r\n', '\n').replace('\n', ' ').strip()

    ai_triggers = [
        "i am an ai", "as an ai", "i'm just a bot",
        "i am artificial", "i am an assistant"
    ]
    lowered = flattened.lower()
    for phrase in ai_triggers:
        if phrase in lowered:
            # Stay in character: never admit to being a bot.
            return "I said what I said. Figure it out."

    result = re.sub(r'\barmageddon\b', 'armagetron', flattened, flags=re.IGNORECASE)
    result = re.sub(r'\barmagotron\b', 'armagetron', result, flags=re.IGNORECASE)

    # 'XD' is the one emote whose casing must survive.
    result = result if result == "XD" else result.lower()

    result = result.replace('"', "")

    disallowed = r'[^A-Za-z0-9_\s\.,!?;:\'\"=!@#\$%\^&\*\(\)\+\-/]'
    if result.endswith(']'):
        # Preserve a trailing ']' while scrubbing the body.
        result = re.sub(disallowed, "", result[:-1]) + result[-1]
    else:
        result = re.sub(disallowed, "", result)

    # Trailing underscores and runs of close-parens are model artifacts.
    result = re.sub(r"_+$", "", result)
    result = re.sub(r"\){2,}$", ")", result)

    # A bare trailing '/' gets a space so the game doesn't read it as a command.
    if result.endswith('/') and not result.endswith(' /'):
        result = result[:-1].rstrip() + ' /'

    return result
|
443
|
|
444
|
|
445
|
def output_response(command, response, bypass_processing=False):
    """Cleanse *response*, split it into <=150-character chunks, schedule each
    chunk with a simulated reading/typing delay, and append the resulting
    DELAY_COMMAND lines to OUTPUT_FILE.

    When *bypass_processing* is True the delay simulation is skipped (used for
    immediate system messages such as "History cleared.").
    """
    cleansed_response = cleanse_text(command, response)

    # Greedy word-wrap into chunks of at most max_len characters.
    max_len = 150
    words = cleansed_response.split()
    chunks = []
    current_chunk = ""

    for word in words:
        if len(current_chunk) + len(word) + 1 > max_len:
            chunks.append(current_chunk.strip())
            current_chunk = word + " "
        else:
            current_chunk += word + " "
    if current_chunk:
        chunks.append(current_chunk.strip())

    output_lines = []
    total_delay = 0.0

    if not bypass_processing:
        time_taken_to_process = time.time() - initialization_time

        # BUGFIX: "reading_wpm_speed" is not in the hard-coded parameter
        # defaults, so a plain parameters[...] lookup raised KeyError whenever
        # the params file did not define it. Default to 0 (= disabled).
        reading_wpm = parameters.get("reading_wpm_speed", 0)
        if reading_wpm > 0:
            reading_time = calculate_wpm_time(command, reading_wpm)
            # Only sleep for whatever reading time the model didn't already consume.
            additional_sleep_time = reading_time - time_taken_to_process
            if additional_sleep_time > 0 and "*EVENT" not in command:
                printlog(f"\nSimulating {additional_sleep_time:.2f}s reading delay.")
                total_delay += additional_sleep_time

        if parameters["typing_wpm_speed"] > 0:
            typing_time = calculate_wpm_time(cleansed_response, parameters["typing_wpm_speed"])
            printlog(f"\nSimulating {typing_time:.2f}s typing delay.")
            total_delay += typing_time

    delay_per_chunk = total_delay / max(1, len(chunks))
    printlog("\nOutputting response chunks:")

    last_delay = round(total_delay, 2)

    for i, chunk in enumerate(chunks):
        if not chunk.strip():
            continue

        # NOTE(review): chunk i is scheduled at total_delay + i*delay_per_chunk,
        # so later chunks extend past the base delay — presumably intentional
        # pacing between chunks; confirm.
        delay_seconds = round(total_delay + delay_per_chunk * i, 2)
        last_delay = delay_seconds
        line = f'DELAY_COMMAND {delay_seconds:.2f} {parameters.get("prefix_text","")} {chunk}'
        printlog(f"โ {line}")
        output_lines.append(line)

    if parameters["chatbot_processing"] and parameters["local_mode"]:
        # Re-enable normal chat processing after the final chunk is emitted.
        printlog(f"\nDelaying SET_ALL_CHATTING 0 by {last_delay:.2f}s")
        output_lines.append(f'DELAY_COMMAND {last_delay:.2f} SET_ALL_CHATTING 0')

    with open(OUTPUT_FILE, 'a', encoding='utf-8') as file:
        file.write("\n".join(output_lines) + "\n")

    if initialization_time is not None:
        printlog(f"\nDone processing. ({time.time() - initialization_time:.2f} seconds elapsed for the entire process)")
|
504
|
|
505
|
def parse_setting_change(command):
    """
    Parse an in-chat '*==settingchange==*' command and apply it to `parameters`.

    Returns a dict of {key: new_value} updates for the caller to persist
    (via update_params_file), or {} when nothing was changed.

    โโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโ
    EXAMPLES
    *==settingchange==* params history_size 15 prefix_text say
    *==settingchange==* params always_processed_players [Mike,noob,cat]
    *==settingchange==* params add_process_player Mike noob
    *==settingchange==* params remove_process_player [cat,Mike]
    *==settingchange==* params toggle_process_player Mike
    โโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโ
    """
    marker = '*==settingchange==*'
    if marker not in command:
        return {}

    # Tokenize everything after the marker; '[...]' groups stay one token.
    body = command.split(marker, 1)[1].strip()
    tokens = re.findall(r'\[[^\]]+\]|[^\s]+', body)

    # A leading 'params' keyword is optional.
    if tokens and tokens[0].lower() == 'params':
        tokens = tokens[1:]

    # No arguments at all: emit a usage/help message in <=151-char chunks.
    if not tokens:
        available = ', '.join(sorted(parameters.keys()))
        help_msg = (
            "No parameters specified. Usage: "
            "*==settingchange==* params <key> <value> โฆ "
            f"Available parameters: {available}"
        )

        max_len = 151
        chunks = [help_msg[i:i+max_len] for i in range(0, len(help_msg), max_len)]
        with open(OUTPUT_FILE, 'a', encoding='utf-8') as f:
            for idx, chunk in enumerate(chunks, 1):
                # idx doubles as a 1-second-per-chunk stagger.
                f.write(f'DELAY_COMMAND {idx:.2f} {parameters.get("prefix_text","")} {chunk}\n')

        printlog(help_msg)
        return {}

    cmd = tokens[0].lower()

    def parse_names(seq):
        # Accept both 'a b c' and '[a, b, c]' forms; returns cleaned names.
        if not seq:
            return []
        joined = ' '.join(seq)
        if joined.startswith('[') and joined.endswith(']'):
            joined = joined[1:-1]
        return [n.strip().strip(',') for n in joined.split(',') if n.strip()]

    # Player-list mutation subcommands operate on always_processed_players.
    if cmd in ("add_process_player", "remove_process_player", "toggle_process_player"):
        names = parse_names(tokens[1:])
        before = list(parameters.get("always_processed_players", []))

        if not names:
            printlog(f"No player names provided for {cmd}.")
            return {}

        for name in names:
            if cmd == "add_process_player":
                if name not in parameters["always_processed_players"]:
                    parameters["always_processed_players"].append(name)
            elif cmd == "remove_process_player":
                if name in parameters["always_processed_players"]:
                    parameters["always_processed_players"].remove(name)
            else:
                # toggle: remove when present, add when absent.
                if name in parameters["always_processed_players"]:
                    parameters["always_processed_players"].remove(name)
                else:
                    parameters["always_processed_players"].append(name)

        after = list(parameters.get("always_processed_players", []))
        #status = f"{cmd.replace('_', ' ').title()} | before: {before}, after: {after}"
        status = f"{cmd.replace('_', ' ').title()} after: {after}"
        printlog(status)
        with open(OUTPUT_FILE, 'a', encoding='utf-8') as f:
            f.write(f'{parameters.get("prefix_text","")} {status}\n')
        return {"always_processed_players": after}

    # Generic key/value walk: pairs of tokens; a trailing lone key is a query.
    updates, i, n = {}, 0, len(tokens)
    while i < n:
        key = tokens[i]

        # initial_prompt consumes the whole remainder of the line as its value.
        if key == 'initial_prompt':
            old = parameters.get("initial_prompt", "<empty>")
            new = ' '.join(tokens[i + 1:]) if i + 1 < n else old
            parameters['initial_prompt'] = new
            updates['initial_prompt'] = new
            status = f"initial_prompt changed from \"{old}\" to \"{new}\"."
            printlog(status)
            with open(OUTPUT_FILE, 'a', encoding='utf-8') as f:
                f.write(f'{parameters.get("prefix_text","")} {status}\n')
            break

        if i + 1 < n:
            raw = tokens[i + 1]
            old = parameters.get(key)
            new = infer_type(raw)
            parameters[key] = new
            updates[key] = new
            status = f"{key} changed from {old} to {new}."
            printlog(status)
            with open(OUTPUT_FILE, 'a', encoding='utf-8') as f:
                f.write(f'{parameters.get("prefix_text","")} {status}\n')
            i += 2
        else:
            # Lone key at the end: report the current value instead of setting.
            status = f"{key} is currently set to {parameters.get(key, '<unknown>')}."
            printlog(status)
            with open(OUTPUT_FILE, 'a', encoding='utf-8') as f:
                f.write(f'{parameters.get("prefix_text","")} {status}\n')
            i += 1

    return updates
|
616
|
|
617
|
|
618
|
|
619
|
def _format_param_value(value):
    """Serialize one parameter value back into params-file syntax
    (bool -> 'true'/'false', list -> '[a,b]', anything else -> str)."""
    if isinstance(value, bool):
        return 'true' if value else 'false'
    if isinstance(value, list):
        return '[' + ','.join(value) + ']'
    return str(value)


def update_params_file(file_path, updates):
    """Rewrite *file_path* so every key in *updates* carries its new value,
    appending keys that were not already present. Comments, blank lines, and
    untouched keys are preserved verbatim.

    Improvement: the bool/list/str serialization that was duplicated in two
    branches now lives in the _format_param_value helper.
    """
    lines = []
    if os.path.exists(file_path):
        with open(file_path, 'r', encoding='utf-8') as f:
            lines = f.readlines()
    else:
        printlog(f"Params file '{file_path}' not found. Creating a new one.")
    new_lines = []
    seen = set()

    for line in lines:
        stripped = line.strip()
        # Pass comments, blanks, and non key=value lines through unchanged.
        if not stripped or stripped.startswith('#') or '=' not in line:
            new_lines.append(line)
            continue

        key, sep, val = line.partition('=')
        key = key.strip()
        if key in updates:
            new_lines.append(f"{key}={_format_param_value(updates[key])}\n")
            seen.add(key)
        else:
            new_lines.append(line)

    # Append any brand-new keys at the end of the file.
    for key, v in updates.items():
        if key not in seen:
            new_lines.append(f"{key}={_format_param_value(v)}\n")

    with open(file_path, 'w', encoding='utf-8') as f:
        f.writelines(new_lines)

    outputRes = (f"Applied setting changes: {updates}")
    printlog(outputRes)
|
665
|
|
666
|
def process_line(line) -> bool:
    """Decide whether one chat-log line should be answered and, if so, build
    the prompt (with optional RAG and context-builder injection), send it to
    Ollama, update the history, and emit the response.

    Returns True when the line was handled (a reply or setting change was
    produced), False when it was filtered out or no usable response arrived.
    """
    global history

    line = line.lstrip()

    # Server-relay lines ("-->") are only processed when they contain a
    # trigger keyword or an addressed command prefix.
    if "-->" in line[:35]:
        colon_index = line.find(":")
        if colon_index != -1:
            content = line[colon_index + 1:].strip().lower()

            has_keyword = any(
                keyword.lower() in content for keyword in parameters.get("process_lines_containing", [])
            )

            has_prefix = any(
                content.startswith(pref.lower()) for pref in parameters.get("command_prefix", [])
            )

            if not (has_keyword or has_prefix):
                return False

    if line.startswith("*EVENT"):
        command = line
    else:
        # In-chat settings commands are handled entirely here.
        if "*==settingchange==*" in line:
            updates = parse_setting_change(line)
            if updates:
                update_params_file(PARAMS_FILE, updates)
                extract_parameters()
            return True
        if ':' not in line:
            return False
        sender, _, rest = line.partition(':')
        rest = rest.lstrip()

        # Never answer ourselves or explicitly ignored senders.
        if sender.lower() == bot_name.lower() or sender in parameters.get("ignored_names", []):
            return False

        # Word-level ignore filters (prefix / exact / substring).
        lw = rest.lower()
        for prefix in parameters.get("ignore_words_starts_with", []):
            if prefix and lw.startswith(prefix.lower()):
                return False
        for word in parameters.get("ignore_words_exact", []):
            if word and lw == word.lower():
                return False
        for substr in parameters.get("ignore_words_contains", []):
            if substr and substr.lower() in lw:
                return False

        # "!!reset" wipes the rolling history back to the seed messages.
        if ": !!reset" in line:
            extract_parameters()
            history = get_default_history()
            with open(HISTORY_FILE, 'w', encoding='utf-8') as f:
                json.dump(history, f, indent=4)
            output_response(line, "History cleared.", bypass_processing=True)
            return True

        # Trigger keywords force processing regardless of sender/prefix.
        force_process = False
        for keyword in parameters.get("process_lines_containing", []):
            if keyword.lower() in line.lower():
                force_process = True
                break

        if parameters.get("process_all_lines", False) or sender in parameters.get("always_processed_players", []) or force_process:
            command = line
        else:
            # Otherwise the message must start with a known command prefix.
            matched = None
            for pref in parameters.get("command_prefix", []):
                if rest.startswith(pref):
                    matched = pref
                    break
            if not matched:
                return False
            msg = rest[len(matched):].lstrip()
            command = f"{sender}: {msg}"

    # RAG: questions (or '@@'-prefixed queries) pull matching context chunks.
    rag_context = ""
    if not command.startswith("*EVENT"):
        query = command.partition(":")[2].strip()
        if query.startswith("@@") or query.endswith("?"):# or any(w in query.lower() for w in ["how", "what", "why", "when", "where"]):
            matches = search_all_contexts(query, top_k=1)
            if matches:
                rag_context = "\n\n".join(f"[{ctx}] {text}" for ctx, text, _ in matches)
                printlog("\n๐ Injecting RAG context:\n" + rag_context)

    if rag_context:
        command = (
            f"{command}\n\n"
            f"{parameters['rag_prompt']}\n"
            f"{rag_context}"
        )

    # Context builder: prepend the most recent game-state lines.
    if parameters.get("use_context_builder", False):
        context_lines = load_context_builder_lines()
        max_lines = parameters.get("context_builder_max_lines", 10)

        if context_lines:
            limited_context = context_lines[-max_lines:]
            context_block = "\n".join(limited_context)
            printlog(f"\n๐ Injecting {len(limited_context)} context builder line(s):\n" + context_block)
            # BUGFIX(compat): the original nested double quotes inside a
            # double-quoted f-string, which is a SyntaxError before Python
            # 3.12 (PEP 701); single quotes keep it valid everywhere.
            command = f"\n{parameters.get('context_builder_prompt', '')}\n{context_block}\n{parameters.get('context_builder_prompt_post', '')}\n{command}"

    printlog("๐ทFinal command:\n" + command)
    response = send_to_ollama(command)

    # BUGFIX: send_to_ollama() returns None when the server reply is not
    # valid JSON; the .get() chain below then raised AttributeError.
    if response is None:
        printlog("\nNo usable response from Ollama; skipping output.")
        return False

    chat_mode = not command.startswith("*EVENT")

    if parameters.get("announce_status", False):
        printlog("Got Response:\n" + json.dumps(response, indent=4))

    ollama_response = response.get('message', {}).get('content', "No response")
    tokens_in_prompt = response.get('prompt_eval_count', 0)
    tokens_in_response = response.get('eval_count', 0)
    total_s = response.get('total_duration', 0) / 1_000_000_000

    printlog(
        f"\nProcess complete\n"
        f" - total duration: {total_s}\n"
        f" - tokens in prompt: {tokens_in_prompt}\n"
        f" - tokens in response: {tokens_in_response}\n"
        f" - response: {ollama_response}"
    )

    if chat_mode:
        update_history(command, ollama_response)
    else:
        evt = command.replace("*EVENT", "").strip()
        update_history(evt, ollama_response)

    if parameters.get("chatbot_processing", False) and parameters.get("local_mode", False):
        # Mark the bot as "chatting" while the delayed reply is pending.
        printlog("\nSetting all chatting to 1")
        with open(OUTPUT_FILE, 'a', encoding='utf-8') as f:
            f.write("SET_ALL_CHATTING 1\n")

    output_response(command, ollama_response)
    return True
|
808
|
|
809
|
# Upper bound on words per embedding chunk (mirrors the default of
# group_lines_by_speaker_and_chunk's max_words parameter).
# NOTE(review): nothing visible in this file reads this constant directly.
MAX_WORDS = 200
|
810
|
|
811
|
def group_lines_by_speaker_and_chunk(new_lines, max_words=200):
    """Clean chat-log lines and pack them into word-bounded chunks.

    Strips the "[YYYY/MM/DD-HH:MM:SS]" prefix when present, drops lines with
    fewer than 2 words or 5 characters (reported via *skipped*), then packs
    the survivors into chunks of at most *max_words* words; chunks shorter
    than 50 words are discarded.

    Returns (final_chunks, skipped) where skipped holds
    (1-based line number, stripped line) pairs.
    """
    timestamped_pattern = re.compile(r'^\[\d{4}/\d{2}/\d{2}-\d{2}:\d{2}:\d{2}\]\s+(.*)$')

    cleaned_lines = []
    skipped = []

    for line_no, raw in enumerate(new_lines, start=1):
        stripped = raw.strip()
        if not stripped:
            continue

        match = timestamped_pattern.match(stripped)
        content = match.group(1).strip() if match else stripped

        # Too short to carry meaning — record for diagnostics instead.
        if len(content.split()) < 2 or len(content) < 5:
            skipped.append((line_no, stripped))
        else:
            cleaned_lines.append(content)

    final_chunks = []
    pending = []
    word_count = 0

    for content in cleaned_lines:
        words = content.split()
        if not words:
            continue

        # Flush the accumulator when the next line would overflow the budget;
        # undersized chunks (< 50 words) are dropped, not emitted.
        if word_count + len(words) > max_words:
            candidate = " ".join(pending).strip()
            if len(candidate.split()) >= 50:
                final_chunks.append(candidate)
            pending = []
            word_count = 0

        pending.append(content)
        word_count += len(words)

    if pending:
        candidate = " ".join(pending).strip()
        if len(candidate.split()) >= 50:
            final_chunks.append(candidate)

    return final_chunks, skipped
|
858
|
|
859
|
|
860
|
def add_to_player_chat_context():
    """Incrementally embed new chat-log lines into the player FAISS index.

    Reads CHAT_LOG_PATH, skips lines already processed (tracked in
    ``last_line_index.txt`` inside PLAYER_CONTEXT_DIR), chunks the remainder
    with group_lines_by_speaker_and_chunk(), embeds the chunks with
    EMBED_MODEL, appends them to the on-disk FAISS index and JSON metadata,
    then advances the line tracker.

    Fix: an empty/corrupt tracker file or corrupt metadata JSON previously
    raised and killed the caller; both now degrade to a safe default.
    """
    os.makedirs(PLAYER_CONTEXT_DIR, exist_ok=True)
    printlog("๐งฉ Running incremental FAISS chat update...")

    # How far into the chat log the previous run got (0 on first run).
    last_line_path = os.path.join(PLAYER_CONTEXT_DIR, "last_line_index.txt")
    last_index = 0
    if os.path.exists(last_line_path):
        with open(last_line_path, "r") as f:
            try:
                last_index = int(f.read().strip())
            except ValueError:
                # Empty/corrupt tracker: re-scan from line 0 instead of crashing.
                last_index = 0

    # 384 = embedding dimension; presumably matches EMBED_MODEL's output
    # (MiniLM-style sentence-transformer) — TODO confirm.
    if os.path.exists(PLAYER_FAISS_INDEX):
        index = faiss.read_index(PLAYER_FAISS_INDEX)
    else:
        index = faiss.IndexFlatL2(384)

    # Parallel metadata list: metadata[i] describes index vector i.
    if os.path.exists(PLAYER_METADATA):
        try:
            with open(PLAYER_METADATA, "r", encoding="utf-8") as f:
                metadata = json.load(f)
        except (ValueError, OSError):
            # Corrupt metadata would desync chunk_ids; start metadata fresh.
            metadata = []
    else:
        metadata = []

    with open(CHAT_LOG_PATH, "r", encoding="utf-8", errors="ignore") as f:
        all_lines = f.readlines()

    # Only the lines added since the last successful run.
    new_lines = all_lines[last_index:]
    printlog(f"๐ Lines in chatlog: {len(all_lines)} | New lines: {len(new_lines)}")

    grouped, skipped = group_lines_by_speaker_and_chunk(new_lines)

    printlog(f"๐ง Created {len(grouped)} new speaker chunks (incremental update)")

    if skipped:
        printlog(f"โ ๏ธ Skipped {len(skipped)} malformed line(s):")
        for ln, content in skipped[:10]:
            printlog(f" [Line {ln}] {content}")
        if len(skipped) > 10:
            printlog(f" ... and {len(skipped) - 10} more")

    if not grouped:
        printlog("โ ๏ธ No new lines to embed.")
        return

    printlog("๐ Generating embeddings...")
    embeddings = EMBED_MODEL.encode(grouped, show_progress_bar=True)

    index.add(embeddings)
    for chunk in grouped:
        metadata.append({
            "chunk_id": len(metadata),
            "text": chunk
        })

    faiss.write_index(index, PLAYER_FAISS_INDEX)
    with open(PLAYER_METADATA, "w", encoding="utf-8") as f:
        json.dump(metadata, f, indent=2)

    # Persist progress only after index + metadata were written successfully,
    # so a crash above means lines are re-processed rather than lost.
    with open(last_line_path, "w") as f:
        f.write(str(len(all_lines)))

    printlog("โ Incremental context update complete.")
|
921
|
|
922
|
|
923
|
|
924
|
def update_player_chat_context(bypass_flag=False):
    """Append new chat-log chunks to the player FAISS index.

    Near-duplicate of add_to_player_chat_context(), but progress is tracked
    in the module global ``last_chat_line_index`` (not a file), and the run
    is gated by the ``build_chat_context`` parameter unless ``bypass_flag``
    forces it.

    Args:
        bypass_flag: when True, run even if parameters["build_chat_context"]
            is falsy.

    Side effects: reads CHAT_LOG_PATH, rewrites PLAYER_FAISS_INDEX and
    PLAYER_METADATA, advances ``last_chat_line_index``.
    """
    global last_chat_line_index
    printlog(f"๐ Writing to actual resolved paths:\n - index: {os.path.abspath(PLAYER_FAISS_INDEX)}\n - metadata: {os.path.abspath(PLAYER_METADATA)}")

    # Gate: skip entirely unless enabled in params or explicitly bypassed.
    if not parameters.get("build_chat_context", False) and not bypass_flag:
        printlog("๐ซ Skipping: build_chat_context is False.")
        return

    os.makedirs(PLAYER_CONTEXT_DIR, exist_ok=True)

    # Load (or lazily create) the on-disk index; 384 is the embedding
    # dimension — presumably matches EMBED_MODEL's output. TODO confirm.
    if os.path.exists(PLAYER_FAISS_INDEX):
        index = faiss.read_index(PLAYER_FAISS_INDEX)
    else:
        index = faiss.IndexFlatL2(384)

    # Parallel metadata list: metadata[i] describes index vector i.
    if os.path.exists(PLAYER_METADATA):
        with open(PLAYER_METADATA, "r", encoding="utf-8") as f:
            metadata = json.load(f)
    else:
        metadata = []

    with open(CHAT_LOG_PATH, "r", encoding="utf-8", errors="ignore") as f:
        lines = f.readlines()

    # Only lines added since the last run (per the in-memory tracker).
    new_lines = lines[last_chat_line_index:]
    printlog(f"๐ Total chat log lines: {len(lines)} | New lines: {len(new_lines)}")

    grouped, skipped = group_lines_by_speaker_and_chunk(new_lines)

    printlog(f"๐ง Created {len(grouped)} speaker chunks from full rebuild")

    if skipped:
        printlog(f"โ ๏ธ Skipped {len(skipped)} malformed line(s):")
        for ln, content in skipped[:10]:
            printlog(f" [Line {ln}] {content}")
        if len(skipped) > 10:
            printlog(f" ... and {len(skipped) - 10} more")

    if grouped:
        embeddings = EMBED_MODEL.encode(grouped, show_progress_bar=True)
        index.add(embeddings)
        for chunk in grouped:
            metadata.append({
                "chunk_id": len(metadata),
                "text": chunk
            })

        faiss.write_index(index, PLAYER_FAISS_INDEX)
        with open(PLAYER_METADATA, "w", encoding="utf-8") as f:
            json.dump(metadata, f, indent=2)

        printlog(f"๐ฅ Added {len(grouped)} new player chat(s) to RAG index.")
    else:
        printlog("โ ๏ธ No new chat chunks to add.")

    # Advance the tracker even when nothing was embedded, so skipped/short
    # lines are not re-processed on the next call.
    last_chat_line_index += len(new_lines)
    printlog(f"๐ Updated last_chat_line_index to: {last_chat_line_index}")

    printlog("โ Player chat context update complete.")
|
984
|
|
985
|
def reload_player_chat_context():
    """Refresh the in-memory 'player_chats' context from its on-disk files.

    Best-effort: missing files or load errors are logged, never raised.
    """
    try:
        have_both = os.path.exists(PLAYER_FAISS_INDEX) and os.path.exists(PLAYER_METADATA)
        if not have_both:
            printlog("โ ๏ธ Player chat FAISS index or metadata not found. Skipping reload.")
            return

        rebuilt_index = faiss.read_index(PLAYER_FAISS_INDEX)
        with open(PLAYER_METADATA, "r", encoding="utf-8") as meta_file:
            meta = json.load(meta_file)

        loaded_contexts["player_chats"] = (rebuilt_index, meta)
        printlog("๐ Reloaded 'player_chats' context.")
    except Exception as e:
        printlog(f"โ Failed to reload player chat context: {e}")
|
997
|
|
998
|
|
999
|
def start_background_chat_context_loop():
    """Spawn a daemon thread that periodically rebuilds the chat RAG context."""

    def _worker():
        # Runs forever; the daemon flag lets the process exit without a join.
        while True:
            printlog("๐งช Chat context background loop running...")
            extract_parameters()
            printlog(f"๐ build_chat_context is: {parameters.get('build_chat_context', False)}")

            if parameters.get("build_chat_context", False):
                printlog("โก๏ธ Calling add_to_player_chat_context()")
                add_to_player_chat_context()
                printlog("โก๏ธ Calling reload_player_chat_context()")
                reload_player_chat_context()
            else:
                printlog("โ build_chat_context is false. Skipping context build.")

            time.sleep(parameters.get("build_chat_context_interval", 15))

    threading.Thread(target=_worker, daemon=True).start()
|
1019
|
|
1020
|
def initialize():
    """One-time startup: load parameters/contexts and reset state files.

    Mutates the module globals ``last_used_options`` and
    ``last_chat_line_index``; truncates CHAT_LOG_NO_DATA so main() starts
    tailing from an empty file.
    """
    global last_used_options, last_chat_line_index

    printlog(f"{header_bar}\n{get_timestamp()}Process started.")

    # Load parameters up front; announce and diff against last used options.
    extract_parameters(announce_params=True, compare_to_last_used_options=True, initialize=True)

    # Optional full rebuild: drop the on-disk RAG artifacts and reset the
    # chat-line tracker so the whole log gets re-embedded.
    if parameters.get("force_rebuild_chat_context", False):
        last_chat_line_index = 0
        try:
            if os.path.exists(PLAYER_FAISS_INDEX):
                os.remove(PLAYER_FAISS_INDEX)
                printlog("๐๏ธ Deleted existing FAISS index for rebuild.")
            if os.path.exists(PLAYER_METADATA):
                os.remove(PLAYER_METADATA)
                printlog("๐๏ธ Deleted existing metadata for rebuild.")
        except Exception as e:
            printlog(f"โ Error deleting context files: {e}")

    load_all_contexts()

    default_history = get_default_history()

    # Either start from the default history or resume the saved one.
    if parameters["clear_history_on_start"]:
        printlog("\nClearing history file.")
        history = default_history
        with open(HISTORY_FILE, 'w') as file:
            json.dump(history, file)
    else:
        history = extract_history()
        printlog(f'\nLoaded history from {HISTORY_FILE}. Number of items: {len(history)-len(default_history)}/{parameters["history_size"]}')

    # Truncate the no-data chat log so the tail loop starts fresh.
    with open(CHAT_LOG_NO_DATA, 'w') as file:
        pass

    printlog(f"\n{header_bar}\n")
|
1057
|
|
1058
|
def main():
    """Tail CHAT_LOG_NO_DATA forever and dispatch each new line.

    Polls the file by size/byte-offset (handles truncation by resetting the
    offset to 0), routes setting-change lines and regular lines to
    process_line(), and renders a single-line "waiting" ticker between inputs.
    """
    last_offset = 0  # byte offset already consumed from the log
    last_processed_time = time.time()
    last_wait_time = None  # last rendered wait value, to avoid redraw spam

    initialize()
    if parameters.get("build_chat_context", False):
        add_to_player_chat_context()
        #start_background_chat_context_loop()

    while True:
        try:
            size = os.path.getsize(CHAT_LOG_NO_DATA)
        except OSError:
            size = 0

        # File shrank (rotated/truncated): start reading from the top again.
        if size < last_offset:
            last_offset = 0

        new_lines = []
        with open(CHAT_LOG_NO_DATA, 'r', encoding='utf-8', errors='ignore') as f:
            f.seek(last_offset)
            for raw in f:
                new_lines.append(raw.rstrip('\n'))
            last_offset = f.tell()

        for line in new_lines:
            # Setting-change lines skip parameter re-extraction and the
            # uptime footer below.
            if line.startswith("*==settingchange==*"):
                printlog(f"\n{header_bar}\n{get_timestamp()}Processing setting change:\n{line}")
                process_line(line)
                last_processed_time = time.time()
                continue

            extract_parameters()  # pick up live parameter edits per line
            printlog(f"\n{header_bar}\n{get_timestamp()}Processing line:\n{line}")
            handled = process_line(line)
            if handled:
                last_processed_time = time.time()
            else:
                printlog(f"\n{get_timestamp()}Skipping line: {line}\n")
            printlog(f"{get_timestamp()}Uptime: {format_time(time.time() - start_time)}")
            printlog(header_bar)

        # In-place "waiting" ticker; only rewrite when the second changes.
        if last_processed_time is not None:
            wait = int(time.time() - last_processed_time)
            if wait != last_wait_time:
                sys.stdout.write(f"\rWaiting for input: {format_time(wait)}")
                sys.stdout.flush()
                last_wait_time = wait

        time.sleep(parameters["parse_file_speed"])
|
1109
|
|
1110
|
|
1111
|
# Script entry point: only run the tail loop when executed directly.
if __name__ == "__main__":
    main()
|
1113
|
|