Compare commits: 4e959438ff...master

4 Commits:
- b01f5e7a30
- 19117525ed
- 1c6ea8869a
- aadb000710
.gitignore (vendored, 3 changes)
@@ -1,4 +1,7 @@
 # ---> Python
+log_!*.json
+config.json
+
 # Byte-compiled / optimized / DLL files
 __pycache__/
 *.py[cod]
@@ -39,6 +39,10 @@ class MeshtasticController:
             "mute_public_commands": True,
             "print_all_packets": False,
         }
+        self.config_file = self.get_config_file_path()
+        if os.path.exists(self.config_file):
+            self.ollama_url = self.load_config(self.config_file, 'url')
+            self.light_url = self.load_config(self.config_file, 'lightwebhook')
         self.ollama_host = "http://localhost:11434/api/chat"
         self.ollama_url = ''
         self.ollama_model = 'qwen2.5-coder:0.5b'
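The two keys read above imply a config.json shaped roughly as follows. This is a sketch: only the key names 'url' and 'lightwebhook' appear in the diff, and the values here are placeholders.

```python
# Hypothetical config.json contents; the key names come from the load_config
# calls above, the values are placeholders and not part of the commits.
import json

example_config = {
    "url": "http://192.168.1.50:11434/api/chat",   # remote ollama chat endpoint (assumed)
    "lightwebhook": "http://192.168.1.60/toggle",  # webhook used by handle_light (assumed)
}
print(json.dumps(example_config, indent=2))
```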
@@ -75,11 +79,36 @@ class MeshtasticController:
             "!llm": self.handle_llm,
             "hey": self.handle_hey,
             "\N{LIZARD}": self.handle_lizard,
+            "!light": self.handle_light,
         }
         self.private_commands = set(self.commands.keys()) - self.public_commands
-        self.config_file = self.get_config_file_path()
-        if os.path.exists(self.config_file):
-            self.ollama_url = self.load_config(self.config_file, 'url')
+
+
+
+    def handle_light(self, message, from_node, packet):
+        """Sends a request to the configured light webhook."""
+        try:
+            # URL to which the request will be sent
+            url = self.light_url
+
+            # Payload built from the message (currently not sent with the GET below)
+            data = {
+                "key": "value",
+                "message": message
+            }
+            # Sending a GET request to trigger the webhook
+            response = requests.get(url)
+            if response.status_code == 200:
+                #print("GET Request Successful")
+                #print(response.json())
+                self.schedule_command_message("Toggled light", from_node)
+            else:
+                error_message = f"Error: {response.status_code} - {response.text}"
+                self.schedule_command_message(error_message, from_node)
+        except requests.exceptions.RequestException as e:
+            # Handle any errors that occur while calling the webhook
+            error_message = f"Error toggling light: {e}"
+            self.schedule_command_message(error_message, from_node)

     def load_logs(self):
         # Load existing logs from files into memory
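Note that handle_light builds a data payload but then sends a bare GET, so the payload never leaves the process. If the webhook accepted query parameters, a variant like this would actually transmit it; a sketch only, not part of the commits, and the timeout is likewise an addition:

```python
# Sketch: forward the payload as query parameters and bound the request time.
# Assumes the webhook accepts query parameters; the committed code sends a bare GET.
import requests

def toggle_light(url, message):
    data = {"key": "value", "message": message}
    response = requests.get(url, params=data, timeout=10)
    response.raise_for_status()  # treat non-2xx responses as errors
    return response
```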
@@ -193,12 +222,12 @@ class MeshtasticController:
         config_file = "config.json"
         return os.path.join(current_dir, config_file)

-    def load_config(self, file, key):
-        if os.path.exists(file):
-            with open(file, 'r') as self.config_file:
-                config = json.load(self.config_file)
-                value = config[key]
-                if self.config["debug"]: print(value)
+    def load_config(self, file_path, key):
+        if os.path.exists(file_path):
+            with open(file_path, 'r') as config_file:
+                config = json.load(config_file)
+                value = config.get(key)
+                if self.config.get("debug", False): print(value)
         return value

     def handle_lizard(self, message, from_node, packet):
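The switch from config[key] to config.get(key) changes what a missing key does: the caller now gets None instead of a KeyError, and config.get("debug", False) likewise tolerates an absent debug flag. A standalone illustration of the difference, not repo code:

```python
# Standalone illustration of dict indexing vs dict.get on a missing key.
config = {"url": "http://localhost:11434/api/chat"}

print(config.get("lightwebhook"))  # None: the missing key is tolerated
try:
    config["lightwebhook"]  # old style: raises KeyError
except KeyError as err:
    print(f"KeyError: {err}")
```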
@@ -522,7 +551,7 @@ class MeshtasticController:
         """Callback function for handling received packets and processing commands."""
         try:
             if self.config["print_all_packets"]: print(f"\n{packet}")
-            if 'decoded' in packet and packet['decoded']['portnum'] == 'TEXT_MESSAGE_APP':
+            if 'decoded' in packet and packet['decoded']['portnum'] == 'TEXT_MESSAGE_APP' and packet['toId'] != '^all':
                 message_bytes = packet['decoded']['payload']
                 message_string = message_bytes.decode('utf-8')
                 from_node = packet['fromId']  # Get the node ID of the sender
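The added packet['toId'] != '^all' clause restricts command handling to direct messages; broadcasts addressed to '^all' are now skipped. A standalone sketch of the check, where the packet dicts and node ID are hypothetical:

```python
# Sketch of the filter added above: direct text messages pass, broadcasts do not.
def is_direct_text(packet):
    return (
        'decoded' in packet
        and packet['decoded']['portnum'] == 'TEXT_MESSAGE_APP'
        and packet['toId'] != '^all'
    )

broadcast = {'decoded': {'portnum': 'TEXT_MESSAGE_APP'}, 'toId': '^all'}
direct = {'decoded': {'portnum': 'TEXT_MESSAGE_APP'}, 'toId': '!deadbeef'}  # hypothetical node ID
print(is_direct_text(broadcast), is_direct_text(direct))  # False True
```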
prep.sh (1 change)
@@ -1,3 +1,4 @@
 python -m venv venv
 source venv/bin/activate
 pip install meshtastic
+cp config.json.example config.json
@@ -5,8 +5,12 @@ source venv/bin/activate
 pip install meshtastic
 ```

-If using a remote llama connection update config.json to be the remote llama link.
+If using a remote llama connection, rename config.json.example to config.json and update it with the remote llama link.
+Going to the correct address in a browser will return "405 method not allowed".
+```
+cp config.json.example config.json
+```


 Run Python Meshroller
 ```
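The "405 method not allowed" check can also be run from Python rather than a browser; a minimal sketch, assuming the default local ollama endpoint that appears in the controller code (swap in the remote URL from config.json as needed):

```python
# Minimal sketch of the "405" reachability check described above.
# The URL is the local default from the controller; /api/chat only accepts POST,
# so a GET returning 405 means the server is reachable.
import requests

response = requests.get("http://localhost:11434/api/chat", timeout=5)
print(response.status_code)  # 405 expected if ollama is running
```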