~nickwinston123/armagetronad/arma_chatbot_config

« back to all changes in this revision

Viewing changes to ollama_chat/ollama_chat.py

  • Committer: hackermans
  • Date: 2025-05-29 20:16:47 UTC
  • Revision ID: nickwinston123@gmail.com-20250529201647-ybinllbs3ntf7xld
- arma_terminal
 - added clear_key setting that will clear the log buffer
 - added ability to click line and copy to clipboard or press f2 to copy entire buffer
- ollama_chat
 - smart_processor param to enable AI if there are smart_processor_active_players active players or less
 - Refactor/fix

Show diffs side-by-side

added added

removed removed

Lines of Context:
97
97
    "context_builder_max_lines": 10,
98
98
    "context_builder_prompt": "",
99
99
    "context_builder_prompt_post": "",
100
 
    "process_lines_containing": ["thomas,big"]
 
100
    "process_lines_containing": ["thomas,big"],
 
101
    "smart_processor": True,
 
102
    "smart_processor_active_players": 1,
 
103
    "spam_maxlen": 150,
101
104
 
102
105
}
103
106
 
127
130
        }
128
131
    ]
129
132
 
130
 
def load_context_builder_lines():
 
133
def load_context_builder_lines(update_tracker=True):
131
134
    last_line_index = 0
132
135
    if os.path.exists(CONTEXT_LAST_LINE_TRACKER):
133
136
        with open(CONTEXT_LAST_LINE_TRACKER, "r") as f:
142
145
    with open(CONTEXT_BUILDER_DATA, "r", encoding="utf-8", errors="ignore") as f:
143
146
        lines = f.readlines()
144
147
 
145
 
    new_lines = [line.strip() for line in lines[last_line_index:] if line.strip()]
 
148
    new_lines = [
 
149
        line.strip().replace("epixxware.com", "The CLASSIC Submarine")
 
150
        for line in lines[last_line_index:]
 
151
        if line.strip()
 
152
    ]
146
153
 
147
 
    with open(CONTEXT_LAST_LINE_TRACKER, "w") as f:
148
 
        f.write(str(len(lines)))
 
154
    if update_tracker:
 
155
        with open(CONTEXT_LAST_LINE_TRACKER, "w") as f:
 
156
            f.write(str(len(lines)))
149
157
 
150
158
    return new_lines
151
159
 
 
160
 
152
161
def load_all_contexts():
153
162
    printlog(f"\n🧠 Looking for contexts in: {os.path.abspath(CONTEXT_DIR)}")
154
163
    if not os.path.exists(CONTEXT_DIR):
445
454
def output_response(command, response, bypass_processing=False):
446
455
    cleansed_response = cleanse_text(command, response)
447
456
 
448
 
    max_len = 150
 
457
    max_len = parameters.get("spam_maxlen", 150)
449
458
    words = cleansed_response.split()
450
459
    chunks = []
451
460
    current_chunk = ""
462
471
    output_lines = []
463
472
    total_delay = 0.0
464
473
 
 
474
    printlog("📤 Outputting response chunks (cleaned):")
 
475
    for i, chunk in enumerate(chunks):
 
476
        if not chunk.strip():
 
477
            continue
 
478
        printlog(chunk)
 
479
 
465
480
    if not bypass_processing:
466
481
        time_taken_to_process = time.time() - initialization_time
 
482
        simualated_items = ""
467
483
 
468
 
        if parameters["reading_wpm_speed"] > 0:
469
 
            reading_time = calculate_wpm_time(command, parameters["reading_wpm_speed"])
 
484
        if parameters.get("reading_wpm_speed", 0) > 0 or parameters.get("typing_wpm_speed", 0) > 0:
 
485
            simualated_items = "Simulated: "
 
486
            
 
487
        if parameters.get("reading_wpm_speed", 0) > 0:
 
488
            reading_time = calculate_wpm_time(command, parameters.get("reading_wpm_speed", 0))
470
489
            additional_sleep_time = reading_time - time_taken_to_process
471
490
            if additional_sleep_time > 0 and "*EVENT" not in command:
472
 
                printlog(f"\nSimulating {additional_sleep_time:.2f}s reading delay.")
 
491
                simualated_items += (f"{additional_sleep_time:.2f}s reading delay. ")
473
492
                total_delay += additional_sleep_time
474
493
 
475
 
        if parameters["typing_wpm_speed"] > 0:
476
 
            typing_time = calculate_wpm_time(cleansed_response, parameters["typing_wpm_speed"])
477
 
            printlog(f"\nSimulating {typing_time:.2f}s typing delay.")
 
494
        if parameters.get("typing_wpm_speed", 0) > 0:
 
495
            typing_time = calculate_wpm_time(cleansed_response, parameters.get("typing_wpm_speed", 0))
 
496
            simualated_items += (f"{additional_sleep_time:.2f}s typing delay. ")
478
497
            total_delay += typing_time
 
498
        
 
499
        if simualated_items != "Simulated: ":
 
500
            printlog(f"{simualated_items}\n")
479
501
 
480
502
    delay_per_chunk = total_delay / max(1, len(chunks))
481
 
    printlog("\nOutputting response chunks:")
 
503
 
 
504
    if parameters.get("chatbot_processing", False) and parameters.get("local_mode", False):
 
505
        printlog("\nSetting all chatting to 1")
 
506
        with open(OUTPUT_FILE, 'a', encoding='utf-8') as f:
 
507
            f.write("SET_ALL_CHATTING 1\n")
482
508
 
483
509
    last_delay = round(total_delay, 2) 
484
510
 
489
515
        delay_seconds = round(total_delay + delay_per_chunk * i, 2)
490
516
        last_delay = delay_seconds  
491
517
        line = f'DELAY_COMMAND {delay_seconds:.2f} {parameters.get("prefix_text","")} {chunk}'
492
 
        printlog(f"→ {line}")
493
518
        output_lines.append(line)
494
519
 
495
 
    if parameters["chatbot_processing"] and parameters["local_mode"]:
496
 
        printlog(f"\nDelaying SET_ALL_CHATTING 0 by {last_delay:.2f}s")
 
520
    if parameters.get("chatbot_processing", False) and parameters.get("local_mode", False):
497
521
        output_lines.append(f'DELAY_COMMAND {last_delay:.2f} SET_ALL_CHATTING 0')
498
522
 
 
523
    printlog("Sending commands to OUTPUT_FILE: " + "\n".join(output_lines) )
499
524
    with open(OUTPUT_FILE, 'a', encoding='utf-8') as file:
500
525
        file.write("\n".join(output_lines) + "\n")
501
526
 
530
555
            "*==settingchange==* params <key> <value> … "
531
556
            f"Available parameters: {available}"
532
557
        )
533
 
 
534
 
        max_len = 151
535
 
        chunks  = [help_msg[i:i+max_len] for i in range(0, len(help_msg), max_len)]
536
 
        with open(OUTPUT_FILE, 'a', encoding='utf-8') as f:
537
 
            for idx, chunk in enumerate(chunks, 1):               
538
 
                f.write(f'DELAY_COMMAND {idx:.2f} {parameters.get("prefix_text","")} {chunk}\n')
539
 
 
 
558
        output_response(command, help_msg, bypass_processing=True)
540
559
        printlog(help_msg)
541
560
        return {}
542
561
 
547
566
            return []
548
567
        joined = ' '.join(seq)
549
568
        if joined.startswith('[') and joined.endswith(']'):
550
 
            joined = joined[1:-1]       
 
569
            joined = joined[1:-1]
551
570
        return [n.strip().strip(',') for n in joined.split(',') if n.strip()]
552
571
 
553
572
    if cmd in ("add_process_player", "remove_process_player", "toggle_process_player"):
555
574
        before = list(parameters.get("always_processed_players", []))
556
575
 
557
576
        if not names:
558
 
            printlog(f"No player names provided for {cmd}.")
 
577
            msg = f"No player names provided for {cmd}."
 
578
            printlog(msg)
 
579
            output_response(command, msg, bypass_processing=True)
559
580
            return {}
560
581
 
561
582
        for name in names:
565
586
            elif cmd == "remove_process_player":
566
587
                if name in parameters["always_processed_players"]:
567
588
                    parameters["always_processed_players"].remove(name)
568
 
            else:  
 
589
            else: 
569
590
                if name in parameters["always_processed_players"]:
570
591
                    parameters["always_processed_players"].remove(name)
571
592
                else:
572
593
                    parameters["always_processed_players"].append(name)
573
594
 
574
595
        after  = list(parameters.get("always_processed_players", []))
575
 
        #status = f"{cmd.replace('_', ' ').title()} | before: {before}, after: {after}"
576
596
        status = f"{cmd.replace('_', ' ').title()} after: {after}"
577
597
        printlog(status)
578
 
        with open(OUTPUT_FILE, 'a', encoding='utf-8') as f:
579
 
            f.write(f'{parameters.get("prefix_text","")} {status}\n')
 
598
        output_response(command, status, bypass_processing=True)
580
599
        return {"always_processed_players": after}
581
600
 
582
601
    updates, i, n = {}, 0, len(tokens)
590
609
            updates['initial_prompt']    = new
591
610
            status = f"initial_prompt changed from \"{old}\" to \"{new}\"."
592
611
            printlog(status)
593
 
            with open(OUTPUT_FILE, 'a', encoding='utf-8') as f:
594
 
                f.write(f'{parameters.get("prefix_text","")} {status}\n')
 
612
            output_response(command, status, bypass_processing=True)
595
613
            break
596
614
 
597
615
        if i + 1 < n:
602
620
            updates[key]    = new
603
621
            status = f"{key} changed from {old} to {new}."
604
622
            printlog(status)
605
 
            with open(OUTPUT_FILE, 'a', encoding='utf-8') as f:
606
 
                f.write(f'{parameters.get("prefix_text","")} {status}\n')
 
623
            output_response(command, status, bypass_processing=True)
607
624
            i += 2
608
625
        else:
609
626
            status = f"{key} is currently set to {parameters.get(key, '<unknown>')}."
610
627
            printlog(status)
611
 
            with open(OUTPUT_FILE, 'a', encoding='utf-8') as f:
612
 
                f.write(f'{parameters.get("prefix_text","")} {status}\n')
 
628
            output_response(command, status, bypass_processing=True)
613
629
            i += 1
614
630
 
615
631
    return updates
616
632
 
617
 
 
618
 
 
619
633
def update_params_file(file_path, updates):
620
634
    lines = []
621
635
    if os.path.exists(file_path):
663
677
    outputRes = (f"Applied setting changes: {updates}")
664
678
    printlog(outputRes)
665
679
 
 
680
 
 
681
def apply_context_builder(input_text: str) -> str:
 
682
    if not parameters.get("use_context_builder", False):
 
683
        return input_text
 
684
 
 
685
    context_lines = load_context_builder_lines()
 
686
    if not context_lines:
 
687
        return input_text
 
688
 
 
689
    max_lines = parameters.get("context_builder_max_lines", 10)
 
690
    limited_context = context_lines[-max_lines:]
 
691
    context_block = "\n".join(limited_context)
 
692
 
 
693
    printlog(f"\n📚 Injecting {len(limited_context)} context builder line(s):\n{context_block}")
 
694
 
 
695
    return (
 
696
        f"{parameters.get('context_builder_prompt', '')}\n"
 
697
        f"{context_block}\n"
 
698
        f"{parameters.get('context_builder_prompt_post', '')}\n"
 
699
        f"{input_text}"
 
700
    )
 
701
 
 
702
def apply_rag(input_text: str) -> str:
 
703
    if input_text.startswith("*EVENT"):
 
704
        return input_text
 
705
 
 
706
    query = input_text.partition(":")[2].strip()
 
707
    if not (query.startswith("@@") or query.endswith("?")):
 
708
        return input_text
 
709
 
 
710
    matches = search_all_contexts(query, top_k=1)
 
711
    if not matches:
 
712
        return input_text
 
713
 
 
714
    rag_context = "\n\n".join(f"[{ctx}] {text}" for ctx, text, _ in matches)
 
715
    printlog("\n📚 Injecting RAG context:\n" + rag_context)
 
716
 
 
717
    return (
 
718
        f"{input_text}\n\n"
 
719
        f"{parameters['rag_prompt']}\n"
 
720
        f"{rag_context}"
 
721
    )
 
722
 
666
723
def process_line(line) -> bool:
667
724
    global history
 
725
    processing_reason = None
 
726
    smart_override = False
 
727
 
 
728
    if parameters.get("smart_processor", False):
 
729
        try:
 
730
            context_lines = load_context_builder_lines(update_tracker=False)
 
731
            for cline in reversed(context_lines):
 
732
                if "Round ended." in cline or "Round started." in cline:
 
733
                    match_total = re.search(r'Player Count: (\d+)', cline)
 
734
                    match_specs = re.search(r'Spectator Count: (\d+)', cline)
 
735
                    if match_total and match_specs:
 
736
                        total = int(match_total.group(1))
 
737
                        specs = int(match_specs.group(1))
 
738
                        active = total - specs
 
739
                        threshold = parameters.get("smart_processor_active_players", 1)
 
740
                        if active <= threshold:
 
741
                            smart_override = True
 
742
                            processing_reason = f"Smart override (active={active} <= threshold={threshold})"
 
743
                            printlog(f"🔓 SMART OVERRIDE ACTIVE — {processing_reason}")
 
744
                        else:
 
745
                            printlog(f"🚫 SMART OVERRIDE SKIPPED (active={active}, threshold={threshold})")
 
746
                    break
 
747
        except Exception as e:
 
748
            printlog(f"⚠️ Smart processor failed to parse context lines: {e}")
668
749
 
669
750
    line = line.lstrip()
670
 
    
 
751
 
671
752
    if "-->" in line[:35]:
672
753
        colon_index = line.find(":")
673
754
        if colon_index != -1:
674
755
            content = line[colon_index + 1:].strip().lower()
675
 
 
676
 
            has_keyword = any(
677
 
                keyword.lower() in content for keyword in parameters.get("process_lines_containing", [])
678
 
            )
679
 
 
680
 
            has_prefix = any(
681
 
                content.startswith(pref.lower()) for pref in parameters.get("command_prefix", [])
682
 
            )
683
 
 
 
756
            has_keyword = any(keyword.lower() in content for keyword in parameters.get("process_lines_containing", []))
 
757
            has_prefix = any(content.startswith(pref.lower()) for pref in parameters.get("command_prefix", []))
684
758
            if not (has_keyword or has_prefix):
685
759
                return False
686
760
 
687
 
 
688
761
    if line.startswith("*EVENT"):
689
 
        command = line
 
762
        processing_reason = "Event line"
 
763
        ollama_input = line
 
764
 
690
765
    else:
691
766
        if "*==settingchange==*" in line:
692
767
            updates = parse_setting_change(line)
693
768
            if updates:
694
769
                update_params_file(PARAMS_FILE, updates)
695
770
                extract_parameters()
 
771
                printlog("🔧 Line processed due to setting change.")
696
772
            return True
 
773
 
697
774
        if ':' not in line:
698
775
            return False
 
776
 
699
777
        sender, _, rest = line.partition(':')
700
778
        rest = rest.lstrip()
701
779
 
704
782
 
705
783
        lw = rest.lower()
706
784
        for prefix in parameters.get("ignore_words_starts_with", []):
707
 
            if prefix and lw.startswith(prefix.lower()):
 
785
            if lw.startswith(prefix.lower()):
708
786
                return False
709
787
        for word in parameters.get("ignore_words_exact", []):
710
 
            if word and lw == word.lower():
 
788
            if lw == word.lower():
711
789
                return False
712
790
        for substr in parameters.get("ignore_words_contains", []):
713
 
            if substr and substr.lower() in lw:
 
791
            if substr.lower() in lw:
714
792
                return False
715
793
 
716
 
 
717
794
        if ": !!reset" in line:
718
795
            extract_parameters()
719
796
            history = get_default_history()
720
797
            with open(HISTORY_FILE, 'w', encoding='utf-8') as f:
721
798
                json.dump(history, f, indent=4)
722
799
            output_response(line, "History cleared.", bypass_processing=True)
 
800
            printlog("🔁 History reset command processed.")
723
801
            return True
724
 
        
725
 
        force_process = False
726
 
        for keyword in parameters.get("process_lines_containing", []):
727
 
            if keyword.lower() in line.lower():
728
 
                force_process = True
729
 
                break
730
802
 
731
 
        if parameters.get("process_all_lines", False) or sender in parameters.get("always_processed_players", []) or force_process:
732
 
            command = line
 
803
        if parameters.get("process_all_lines", False):
 
804
            processing_reason = "process_all_lines is enabled"
 
805
            ollama_input = line
 
806
        elif sender in parameters.get("always_processed_players", []):
 
807
            processing_reason = f"Sender '{sender}' is in always_processed_players"
 
808
            ollama_input = line
 
809
        elif any(keyword.lower() in line.lower() for keyword in parameters.get("process_lines_containing", [])):
 
810
            processing_reason = "Matched keyword in process_lines_containing"
 
811
            ollama_input = line
 
812
        elif smart_override:
 
813
            ollama_input = line 
733
814
        else:
734
815
            matched = None
735
816
            for pref in parameters.get("command_prefix", []):
736
817
                if rest.startswith(pref):
737
818
                    matched = pref
738
819
                    break
739
 
            if not matched:
 
820
            if matched:
 
821
                processing_reason = f"Matched command prefix '{matched}'"
 
822
                msg = rest[len(matched):].lstrip()
 
823
                ollama_input = f"{sender}: {msg}"
 
824
            else:
740
825
                return False
741
 
            msg = rest[len(matched):].lstrip()
742
 
            command = f"{sender}: {msg}"
743
 
 
744
 
 
745
 
 
746
 
    rag_context = ""
747
 
    if not command.startswith("*EVENT"):
748
 
        query = command.partition(":")[2].strip()
749
 
        if query.startswith("@@")  or query.endswith("?"):# or any(w in query.lower() for w in ["how", "what", "why", "when", "where"]):
750
 
            matches = search_all_contexts(query, top_k=1)
751
 
            if matches:
752
 
                rag_context = "\n\n".join(f"[{ctx}] {text}" for ctx, text, _ in matches)
753
 
                printlog("\n📚 Injecting RAG context:\n" + rag_context)
754
 
 
755
 
    if rag_context:
756
 
        command = (
757
 
            f"{command}\n\n"
758
 
            f"{parameters['rag_prompt']}\n"
759
 
            f"{rag_context}"
760
 
        )
761
 
 
762
 
 
763
 
    if parameters.get("use_context_builder", False):
764
 
        context_lines = load_context_builder_lines()
765
 
        max_lines = parameters.get("context_builder_max_lines", 10)
766
 
 
767
 
        if context_lines:
768
 
            limited_context = context_lines[-max_lines:] 
769
 
            context_block = "\n".join(limited_context)
770
 
            printlog(f"\n📚 Injecting {len(limited_context)} context builder line(s):\n" + context_block)
771
 
            command = f"\n{parameters.get("context_builder_prompt", '')}\n{context_block}\n{parameters.get('context_builder_prompt_post', '')}\n{command}"
772
 
 
773
 
 
774
 
    printlog("🔷Final command:\n" + command)
775
 
    response = send_to_ollama(command)
776
 
 
777
 
    chat_mode = not command.startswith("*EVENT")
 
826
 
 
827
    if processing_reason:
 
828
        printlog(f"✅ Line is being processed because: {processing_reason}")
 
829
 
 
830
    ollama_input = apply_context_builder(ollama_input)
 
831
    ollama_input = apply_rag(ollama_input)
 
832
 
 
833
    printlog("🔷Final ollama_input:\n" + ollama_input)
 
834
 
 
835
    response = send_to_ollama(ollama_input)
 
836
    chat_mode = not ollama_input.startswith("*EVENT")
778
837
 
779
838
    if parameters.get("announce_status", False):
780
839
        printlog("Got Response:\n" + json.dumps(response, indent=4))
786
845
 
787
846
    printlog(
788
847
        f"\nProcess complete\n"
789
 
        f" - total duration: {total_s}\n"
790
 
        f" - tokens in prompt: {tokens_in_prompt}\n"
 
848
        f" - total duration:     {total_s}\n"
 
849
        f" - tokens in prompt:   {tokens_in_prompt}\n"
791
850
        f" - tokens in response: {tokens_in_response}\n"
792
 
        f" - response: {ollama_response}"
 
851
        f" - response:           {ollama_response.replace('\r\n', '\n').replace('\n', ' ')}"
793
852
    )
794
853
 
795
854
    if chat_mode:
796
 
        update_history(command, ollama_response)
 
855
        update_history(ollama_input, ollama_response)
797
856
    else:
798
 
        evt = command.replace("*EVENT", "").strip()
 
857
        evt = ollama_input.replace("*EVENT", "").strip()
799
858
        update_history(evt, ollama_response)
800
859
 
801
 
    if parameters.get("chatbot_processing", False) and parameters.get("local_mode", False):
802
 
        printlog("\nSetting all chatting to 1")
803
 
        with open(OUTPUT_FILE, 'a', encoding='utf-8') as f:
804
 
            f.write("SET_ALL_CHATTING 1\n")
805
 
 
806
 
    output_response(command, ollama_response)
 
860
    output_response(ollama_input, ollama_response)
807
861
    return True
808
862
 
809
863
MAX_WORDS = 200
1082
1136
            last_offset = f.tell()
1083
1137
 
1084
1138
        for line in new_lines:
 
1139
 
1085
1140
            if line.startswith("*==settingchange==*"):
1086
1141
                printlog(f"\n{header_bar}\n{get_timestamp()}Processing setting change:\n{line}")
1087
1142
                process_line(line)