Minecraft接入ChatGLM3大语言模型

运行平台:网易我的世界基岩版开发包

由于本地没有性能足够的显卡进行快速推理,于是使用一台远程服务器完成推理,并将结果传回本地。

流程:Mod Python2.7 -> 本地Python3 -> 远程SSH服务器

Python 2 端使用 subprocess 库;Python 3 端使用 paramiko 库。

主要踩坑:IO流,即时输出,多线程

ModAPI

# -*- coding: utf-8 -*-
import os
import subprocess
import threading
from QuModLibs.Server import *
from globals import *
import math
import random


# Code
@Listen('LoadServerAddonScriptsAfter')
def OnServerScriptLoaded(args):
    """After server addon scripts finish loading, cache the lists of
    already-loaded entities and items into the mod's globals."""
    game_comp = compFactory.CreateGame(levelId)
    SetLoadEntityList(game_comp.GetLoadActors())
    item_comp = compFactory.CreateItem(levelId)
    SetLoadItemList(item_comp.GetLoadItems(True))


def extract_between_split(text, start, end):
    """Return the substring of *text* between the first occurrence of
    *start* and the next occurrence of *end*.

    If *start* is absent, return None. If *end* is absent after *start*,
    the remainder of the string is returned (same as the split-based
    original).
    """
    _, found, tail = text.partition(start)
    if not found:
        return None
    return tail.split(end, 1)[0]


class Config:
    """Shared module-level state for the AI chat bridge."""
    # When True, a mob "speaks" via the LLM after a player hits it.
    ai_attack_chat = False
    # When True, the LLM is prompted to emit Minecraft commands.
    ai_cmd = False
    # When True, eaten food is described by the LLM.
    ai_eat = False
    # playerId -> whether that player has enabled AI chat.
    is_chat_ai_map = {}
    # The single shared bridge subprocess (local Python 3 relay); None when stopped.
    global_process = None
    # Set to signal the output-reader thread to stop.
    stop_flag = threading.Event()


def read_output(playerId):
    process = Config.global_process
    if process is not None:
        while not Config.stop_flag.is_set():
            print '持续读取输出'
            output = process.stdout.readline().strip()
            if output:
                if output.startswith('输入问题:'):
                    print output
                    continue
                print output
                set_timer(0, notify_one_msg, playerId, output)
            # if output == '' and process.poll() is not None:
            #     print '主动退出'
            #     break
        print ('输出线程执行完毕。已关闭实时输出。')


@Listen(Events.ServerChatEvent)
def ServerChatEvent(args):
    data = Events.ServerChatEvent(args)
    playerId = data.playerId
    msg = data.message

    if msg == 'ai':
        args['cancel'] = True
        notify_one_msg(playerId, '已开启AI功能')
        Config.is_chat_ai_map[playerId] = True
        Config.stop_flag = threading.Event()
        # 1、将全局的stdin,stdout赋值
        if Config.global_process is None:
            path = 'D:\\NEMO\\Project\\PycharmProjects\\pythonProject\\Test\\Demo02.py'
            process = subprocess.Popen(['python', path], stdin=subprocess.PIPE,
                                       stdout=subprocess.PIPE,
                                       stderr=subprocess.PIPE, env=dict(os.environ, PYTHONIOENCODING='utf-8'),
                                       universal_newlines=True)
            Config.global_process = process
            # 2、开启一个线程来获取输出
            output_thread = threading.Thread(target=read_output, args=(playerId,))
            output_thread.start()
            print '开启输出线程'
    elif msg == 'stop ai':
        args['cancel'] = True
        stop_ai_chat(playerId)
    elif msg == 'stop':
        args['cancel'] = True
        stop_ai_chat(playerId)
    elif msg == 'ai attack chat':
        args['cancel'] = True
        if Config.is_chat_ai_map.get(playerId):
            Config.ai_attack_chat = not Config.ai_attack_chat
            if Config.ai_attack_chat:
                clear_history()
                send_ai_msg(
                    '以下的对话中,我会给你发送一个游戏Minecraft中的某个生物的英文字符串,你需要假设你是该生物,你受到了玩家的攻击,直接输出一段简短的中文表达你的身份以及你的情感。')
                notify_one_msg(playerId, '已开启AI Attack Chat')
            else:
                clear_history()
                notify_one_msg(playerId, '已关闭AI Attack Chat')
        else:
            notify_one_msg(playerId, '未开启ai功能。')
    elif msg == 'ai eat':
        args['cancel'] = True
        if Config.is_chat_ai_map.get(playerId):
            Config.ai_eat = not Config.ai_eat
            if Config.ai_eat:
                clear_history()
                send_ai_msg(
                    '以下的对话中,我会给你发送一个游戏Minecraft中的某个食物的英文字符串,你可以使用中文十分简单地介绍一下这个食物,回复的时候用“你吃下的食物是”来开头。')
                notify_one_msg(playerId, '已开启AI Eat Chat')
            else:
                clear_history()
                notify_one_msg(playerId, '已关闭AI Eat Chat')
        else:
            notify_one_msg(playerId, '未开启ai功能。')
    elif msg == 'ai cmd':
        args['cancel'] = True
        if Config.is_chat_ai_map.get(playerId):
            Config.ai_cmd = not Config.ai_cmd
            if Config.ai_cmd:
                clear_history()
                send_ai_msg(
                    '以下的对话中,我会告诉你我的需求,你根据我的需求输出一段Minecraft的指令。例如我输入:"给予我64颗钻石",你应该输出:"命令:/give @s diamond 64"。')
                notify_one_msg(playerId, '已开启AI CMD')
            else:
                clear_history()
                notify_one_msg(playerId, '已关闭AI CMD')
        else:
            notify_one_msg(playerId, '未开启ai功能。')
    elif Config.is_chat_ai_map.get(playerId, False):
        content = msg
        send_ai_msg(content)


def clear_history():
    """Send the 'clear history' control line to the bridge.

    NOTE(review): the remote ChatGLM3 script shown in this file only
    special-cases 'stop', so 'clear history' may actually reach the model
    as an ordinary question — verify against the deployed remote script.
    """
    send_ai_msg('clear history')


def stop_ai_chat(playerId):
    """Shut the AI bridge down for *playerId*: ask the subprocess to stop,
    drop the shared process handle, notify the player, and signal the
    reader thread to exit via stop_flag.
    """
    if Config.global_process is not None:
        try:
            Config.global_process.stdin.write('stop' + '\n')
            Config.global_process.stdin.flush()
        except (IOError, OSError, ValueError):
            # The bridge may already be dead (broken or closed pipe);
            # best-effort: continue with local cleanup anyway.
            pass
        Config.global_process = None
    notify_one_msg(playerId, '已关闭AI对话')
    Config.is_chat_ai_map[playerId] = False
    # Flag, not force-kill: the reader thread checks this and exits its loop.
    Config.stop_flag.set()


# TODO: multiplayer is not handled -- every player shares the single bridge process.
def send_ai_msg(content):
    """Forward *content* to the bridge subprocess via its stdin.

    The trailing newline terminates the line so the remote input() returns;
    flush pushes it through the pipe immediately.
    """
    if Config.global_process is not None:
        print '用户向stdin写入', content
        Config.global_process.stdin.write(content + '\n')
        Config.global_process.stdin.flush()
    else:
        print '未加载ai模型'


@Listen(Events.ActuallyHurtServerEvent)
def ActuallyHurtServerEvent(args):
    """When AI attack chat is on and a player damaged an entity, send that
    entity's type id (part after the ':') to the LLM so it can role-play."""
    if not Config.ai_attack_chat:
        return
    event = Events.ActuallyHurtServerEvent(args)
    mob_type = get_type_str(event.entityId)
    if event.srcId not in get_player_list():
        return
    send_ai_msg(mob_type.split(":")[1])


@Listen('PlayerIntendLeaveServerEvent')
def PlayerIntendLeaveServerEvent(args):
    # Tear down the player's AI session when they leave the server.
    stop_ai_chat(args['playerId'])


# AI food commentary: describe what the player just ate.
@Listen('PlayerEatFoodServerEvent')
def PlayerEatFoodServerEvent(args):
    """When ai_eat is on, send the eaten item's id suffix (the part after
    'namespace:') to the LLM for a short description."""
    if not Config.ai_eat:
        return
    # The original bound args['playerId'] to an unused local; removed.
    item_dict = args['itemDict']
    food_name = item_dict['newItemName'].split(":")[1]
    send_ai_msg(food_name)


# ServerModConfig

本地 Python3(需要先 pip install paramiko)

import sys
import time

import paramiko

# --- Connection settings (fill in real values before running) ---
hostname = 'xxx'
port = xxx
username = 'xxx'
password = 'xxx'
commands = [
    'xxx/nemo_llm/bin/python xxx/ChatGLM3-6B/ChatGLM3-main/basic_demo/demo02.py'
]

# Create the SSH client and connect. AutoAddPolicy silently accepts
# unknown host keys -- acceptable for a trusted lab box, not for production.
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh.connect(hostname, port, username, password)

# Launch the remote ChatGLM3 demo. get_pty=True gives line-oriented,
# unbuffered-looking output from the remote side.
stdin, stdout, stderr = ssh.exec_command(commands[0], get_pty=True)

print('已建立连接,正在加载模型,请稍等...')

# Becomes True once the remote script prints its READY_TO_CHAT marker.
flag_found = False
​
def send_input(stdin):
    """Read lines from the local console and forward them to the remote script.

    Input is discarded (with a notice) until the model signals readiness via
    flag_found; typing 'stop' after that point ends the loop.
    """
    while True:
        user_input = input()
        if flag_found:
            # The trailing newline is required for the remote input() to return.
            stdin.write(user_input + '\n')
            # Flush so the line is not held in paramiko's channel buffer.
            stdin.flush()

            if user_input.strip() == 'stop':
                break
        else:
            print('模型加载中,请加载完毕后再进行输入。')
​
​
def read_output(stdout):
    """Stream remote stdout until the channel reports an exit status.

    Lines before the READY_TO_CHAT marker (model-loading noise) are
    suppressed; the marker flips the module-level flag_found so
    send_input starts forwarding, and every subsequent line is echoed
    to the local stdout.
    """
    global flag_found
    while not stdout.channel.exit_status_ready():
        # readline() blocks until a full line arrives, so no extra
        # sleep is needed; the original per-line time.sleep(1)
        # throttled output to one line per second.
        line = stdout.readline()
        if "READY_TO_CHAT" in line:
            flag_found = True
            print("模型加载完成,开始接收对话内容。")
            sys.stdout.flush()
        elif flag_found:
            print(line, end='')
            sys.stdout.flush()
​
​
# Thread support for the background reader.
import threading

# Reader thread: relays remote output to the local console in real time.
output_thread = threading.Thread(target=read_output, args=(stdout,))
output_thread.start()

# Main thread: forward local stdin to the remote script until 'stop'.
send_input(stdin)

# Let the reader drain any remaining output before tearing down.
output_thread.join()

# Close the SSH session.
ssh.close()

SSH服务器上的python代码

from peft import AutoPeftModelForCausalLM, PeftModel
from transformers import AutoTokenizer, AutoModel, AutoModelForCausalLM
import os

MODEL_PATH = os.environ.get('MODEL_PATH', 'xxx/LLMs/chatglm3-6b')
TOKENIZER_PATH = os.environ.get("TOKENIZER_PATH", MODEL_PATH)

tokenizer = AutoTokenizer.from_pretrained(TOKENIZER_PATH, trust_remote_code=True)
model = AutoModel.from_pretrained(MODEL_PATH, trust_remote_code=True, device_map="auto").eval()

# Rolling conversation history fed back into model.chat() each turn.
history = []

# Sentinel the local relay scans for. Flush explicitly so it is not stuck
# in a stdio buffer if stdout is a pipe rather than a tty.
print('READY_TO_CHAT', flush=True)
​
# Interactive loop: answer one question per input line until 'stop'.
while True:
    # strip() tolerates a stray '\r' arriving through the pty allocated
    # by exec_command(get_pty=True), which would break the 'stop' check.
    question = input('输入问题:').strip()
    if question == 'stop':
        break
    response, history = model.chat(tokenizer, question, history=history)
    # Flush so each reply reaches the SSH reader immediately.
    print(response, flush=True)

猜你喜欢

转载自blog.csdn.net/qq_42817985/article/details/142738294