| Column | Type | Range / Classes |
|---|---|---|
| id | int64 | 0 – 458k |
| file_name | stringlengths | 4 – 119 |
| file_path | stringlengths | 14 – 227 |
| content | stringlengths | 24 – 9.96M |
| size | int64 | 24 – 9.96M |
| language | stringclasses | 1 value |
| extension | stringclasses | 14 values |
| total_lines | int64 | 1 – 219k |
| avg_line_length | float64 | 2.52 – 4.63M |
| max_line_length | int64 | 5 – 9.91M |
| alphanum_fraction | float64 | 0 – 1 |
| repo_name | stringlengths | 7 – 101 |
| repo_stars | int64 | 100 – 139k |
| repo_forks | int64 | 0 – 26.4k |
| repo_open_issues | int64 | 0 – 2.27k |
| repo_license | stringclasses | 12 values |
| repo_extraction_date | stringclasses | 433 values |
2,290,000 | cifar100.py | enriquetomasmb_nebula/nebula/core/datasets/cifar100/cifar100.py |

from nebula.core.datasets.nebuladataset import NebulaDataset
from torchvision import transforms
from torchvision.datasets import CIFAR100
import os
class CIFAR100Dataset(NebulaDataset):
def __init__(
self,
num_classes=100,
partition_id=0,
partitions_number=1,
batch_size=32,
num_workers=4,
iid=True,
partition="dirichlet",
partition_parameter=0.5,
seed=42,
config=None,
):
super().__init__(
num_classes=num_classes,
partition_id=partition_id,
partitions_number=partitions_number,
batch_size=batch_size,
num_workers=num_workers,
iid=iid,
partition=partition,
partition_parameter=partition_parameter,
seed=seed,
config=config,
)
def initialize_dataset(self):
# Load CIFAR100 train dataset
if self.train_set is None:
self.train_set = self.load_cifar100_dataset(train=True)
if self.test_set is None:
self.test_set = self.load_cifar100_dataset(train=False)
# All nodes have the same test set (indices are the same for all nodes)
self.test_indices_map = list(range(len(self.test_set)))
# Depending on the iid flag, generate a non-iid or iid map of the train set
if self.iid:
self.train_indices_map = self.generate_iid_map(self.train_set, self.partition, self.partition_parameter)
self.local_test_indices_map = self.generate_iid_map(self.test_set, self.partition, self.partition_parameter)
else:
self.train_indices_map = self.generate_non_iid_map(self.train_set, self.partition, self.partition_parameter)
self.local_test_indices_map = self.generate_non_iid_map(self.test_set, self.partition, self.partition_parameter)
print(f"Length of train indices map: {len(self.train_indices_map)}")
print(f"Lenght of test indices map (global): {len(self.test_indices_map)}")
print(f"Length of test indices map (local): {len(self.local_test_indices_map)}")
def load_cifar100_dataset(self, train=True):
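        # Per-channel normalization statistics. Note: these particular values
        # are the ones commonly quoted for CIFAR-10; the usual CIFAR-100
        # statistics are mean (0.5071, 0.4865, 0.4409), std (0.2673, 0.2564, 0.2762).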
mean = (0.4914, 0.4822, 0.4465)
std = (0.2471, 0.2435, 0.2616)
apply_transforms = transforms.Compose(
[
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(mean, std, inplace=True),
]
)
return CIFAR100(
os.path.join(os.path.dirname(os.path.abspath(__file__)), "data"),
train=train,
download=True,
transform=apply_transforms,
)
def generate_non_iid_map(self, dataset, partition="dirichlet", partition_parameter=0.5):
if partition == "dirichlet":
partitions_map = self.dirichlet_partition(dataset, alpha=partition_parameter)
elif partition == "percent":
partitions_map = self.percentage_partition(dataset, percentage=partition_parameter)
else:
raise ValueError(f"Partition {partition} is not supported for Non-IID map")
if self.partition_id == 0:
self.plot_data_distribution(dataset, partitions_map)
self.plot_all_data_distribution(dataset, partitions_map)
return partitions_map[self.partition_id]
def generate_iid_map(self, dataset, partition="balancediid", partition_parameter=2):
if partition == "balancediid":
partitions_map = self.balanced_iid_partition(dataset)
elif partition == "unbalancediid":
partitions_map = self.unbalanced_iid_partition(dataset, imbalance_factor=partition_parameter)
else:
raise ValueError(f"Partition {partition} is not supported for IID map")
if self.partition_id == 0:
self.plot_data_distribution(dataset, partitions_map)
self.plot_all_data_distribution(dataset, partitions_map)
return partitions_map[self.partition_id]
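# Usage sketch (illustrative, not part of the original file): a non-IID
# Dirichlet split across 5 participants. It assumes the NebulaDataset base
# class accepts the constructor arguments as shown above; the first run
# downloads CIFAR-100 into ./data next to this file.
if __name__ == "__main__":
    dataset = CIFAR100Dataset(partition_id=0, partitions_number=5, iid=False, partition="dirichlet", partition_parameter=0.5)
    dataset.initialize_dataset()  # explicit call in case the base class does not invoke it
    print(f"Partition 0 holds {len(dataset.train_indices_map)} training samples")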
| 4,115 | Python | .py | 87 | 36.908046 | 124 | 0.642018 | enriquetomasmb/nebula | 8 | 2 | 2 | GPL-3.0 | 9/5/2024, 10:48:43 PM (Europe/Amsterdam) |
2,290,001 | tasks.py | enriquetomasmb_nebula/nebula/core/utils/tasks.py |

import asyncio
import logging
async def debug_tasks():
while True:
tasks = asyncio.all_tasks()
logging.info(f"Active tasks: {len(tasks)}")
for task in tasks:
logging.info(f"Task: {task}")
await asyncio.sleep(5)
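# Usage sketch (illustrative, not part of the original module): run the monitor
# next to application work and cancel it on shutdown. `_example_main` is a
# hypothetical entry point.
async def _example_main():
    monitor = asyncio.create_task(debug_tasks())  # logs active tasks every 5 seconds
    try:
        await asyncio.sleep(12)  # stand-in for real application work
    finally:
        monitor.cancel()
if __name__ == "__main__":
    asyncio.run(_example_main())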
| 261 | Python | .py | 9 | 22.444444 | 51 | 0.624 | enriquetomasmb/nebula | 8 | 2 | 2 | GPL-3.0 | 9/5/2024, 10:48:43 PM (Europe/Amsterdam) |
2,290,002 | certificate.py | enriquetomasmb_nebula/nebula/core/utils/certificate.py |

import os
from cryptography import x509
from cryptography.x509.oid import NameOID
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography.hazmat.primitives.serialization import Encoding, NoEncryption, PrivateFormat, load_pem_private_key
from cryptography.hazmat.backends import default_backend
import datetime
import ipaddress
def generate_ca_certificate(dir_path):
keyfile_path = os.path.join(dir_path, "ca_key.pem")
certfile_path = os.path.join(dir_path, "ca_cert.pem")
if os.path.exists(keyfile_path) and os.path.exists(certfile_path):
print("CA Certificate and key already exist")
return keyfile_path, certfile_path
# Generate certfile and keyfile for the CA (pem format)
ca_private_key = rsa.generate_private_key(public_exponent=65537, key_size=2048, backend=default_backend())
ca_issuer = x509.Name(
[
x509.NameAttribute(NameOID.COUNTRY_NAME, "ES"),
x509.NameAttribute(NameOID.STATE_OR_PROVINCE_NAME, "Spain"),
x509.NameAttribute(NameOID.LOCALITY_NAME, "Murcia"),
x509.NameAttribute(NameOID.ORGANIZATION_NAME, "Nebula"),
x509.NameAttribute(NameOID.COMMON_NAME, "ca.nebula"),
]
)
valid_from = datetime.datetime.utcnow()
valid_to = valid_from + datetime.timedelta(days=365)
cert = (
x509.CertificateBuilder()
.subject_name(ca_issuer)
.issuer_name(ca_issuer)
.public_key(ca_private_key.public_key())
.serial_number(x509.random_serial_number())
.not_valid_before(valid_from)
.not_valid_after(valid_to)
.add_extension(x509.BasicConstraints(ca=True, path_length=None), critical=True)
.sign(ca_private_key, hashes.SHA256(), default_backend())
)
with open(keyfile_path, "wb") as f:
f.write(
ca_private_key.private_bytes(
encoding=Encoding.PEM,
format=PrivateFormat.TraditionalOpenSSL,
encryption_algorithm=NoEncryption(),
)
)
with open(certfile_path, "wb") as f:
f.write(cert.public_bytes(Encoding.PEM))
return keyfile_path, certfile_path
def generate_certificate(dir_path, node_id, ip):
keyfile_path = os.path.join(dir_path, f"{node_id}_key.pem")
certfile_path = os.path.join(dir_path, f"{node_id}_cert.pem")
ip_obj = ipaddress.ip_address(ip)
if os.path.exists(keyfile_path) and os.path.exists(certfile_path):
print("Certificate and key already exist")
return keyfile_path, certfile_path
with open(os.path.join(dir_path, "ca_key.pem"), "rb") as f:
ca_private_key = load_pem_private_key(f.read(), password=None)
with open(os.path.join(dir_path, "ca_cert.pem"), "rb") as f:
ca_cert = x509.load_pem_x509_certificate(f.read())
# Generate certfile and keyfile for the participant to use in the federation (pem format)
private_key = rsa.generate_private_key(public_exponent=65537, key_size=2048, backend=default_backend())
subject = x509.Name(
[
x509.NameAttribute(NameOID.COUNTRY_NAME, "US"),
x509.NameAttribute(NameOID.STATE_OR_PROVINCE_NAME, "California"),
x509.NameAttribute(NameOID.LOCALITY_NAME, "San Francisco"),
x509.NameAttribute(NameOID.ORGANIZATION_NAME, "Nebula"),
x509.NameAttribute(NameOID.COMMON_NAME, f"{node_id}.nebula"),
]
)
valid_from = datetime.datetime.utcnow()
valid_to = valid_from + datetime.timedelta(days=365)
cert = (
x509.CertificateBuilder()
.subject_name(subject)
.issuer_name(ca_cert.subject)
.public_key(private_key.public_key())
.serial_number(x509.random_serial_number())
.not_valid_before(valid_from)
.not_valid_after(valid_to)
.add_extension(
x509.SubjectAlternativeName([x509.DNSName("localhost"), x509.IPAddress(ip_obj)]),
critical=False,
)
.sign(ca_private_key, hashes.SHA256(), default_backend())
)
with open(keyfile_path, "wb") as f:
f.write(
private_key.private_bytes(
encoding=Encoding.PEM,
format=PrivateFormat.TraditionalOpenSSL,
encryption_algorithm=NoEncryption(),
)
)
with open(certfile_path, "wb") as f:
f.write(cert.public_bytes(Encoding.PEM))
return keyfile_path, certfile_path
if __name__ == "__main__":
    current_dir = os.getcwd()
    cert_dir = os.path.join(current_dir, "certs")
    os.makedirs(cert_dir, exist_ok=True)
    # The CA must exist before a participant certificate can be signed;
    # node_id and ip below are example values.
    generate_ca_certificate(cert_dir)
    generate_certificate(cert_dir, "participant_0", "127.0.0.1")
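# Usage sketch (illustrative, not part of the original module): loading the
# generated PEM files into a TLS server context with the standard library.
# The participant file names assume the example node_id used above.
import ssl
def _example_tls_context(cert_dir):
    context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
    # Present the participant certificate and key to connecting peers
    context.load_cert_chain(
        certfile=os.path.join(cert_dir, "participant_0_cert.pem"),
        keyfile=os.path.join(cert_dir, "participant_0_key.pem"),
    )
    # Trust the federation CA so that CA-signed peer certificates verify
    context.load_verify_locations(cafile=os.path.join(cert_dir, "ca_cert.pem"))
    return context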
| 4,636 | Python | .py | 103 | 36.980583 | 116 | 0.665114 | enriquetomasmb/nebula | 8 | 2 | 2 | GPL-3.0 | 9/5/2024, 10:48:43 PM (Europe/Amsterdam) |
2,290,003 | nebulalogger.py | enriquetomasmb_nebula/nebula/core/utils/nebulalogger.py |

from aim.pytorch_lightning import AimLogger
from datetime import datetime
import logging
from aim import Image
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from nebula.core.engine import Engine
class NebulaLogger(AimLogger):
def __init__(self, config, engine: "Engine", scenario_start_time, *args, **kwargs):
self.config = config
self.engine = engine
self.scenario_start_time = scenario_start_time
self.local_step = 0
self.global_step = 0
super().__init__(*args, **kwargs)
def finalize(self, status: str = "") -> None:
super().finalize(status)
logging.info(f"Finalizing logger: {status}")
def get_step(self):
return int((datetime.now() - datetime.strptime(self.scenario_start_time, "%d/%m/%Y %H:%M:%S")).total_seconds())
def log_data(self, data, step=None):
time_start = datetime.now()
try:
logging.debug(f"Logging data: {data}")
super().log_metrics(data)
except Exception as e:
logging.error(f"Error logging statistics data [{data}]: {e}")
logging.debug(f"Time taken to log data: {datetime.now() - time_start}")
def log_figure(self, figure, step=None, name=None):
time_start = datetime.now()
try:
logging.debug(f"Logging figure: {name}")
self.experiment.track(Image(figure), name=name)
except Exception as e:
logging.error(f"Error logging figure: {e}")
logging.debug(f"Time taken to log figure: {datetime.now() - time_start}")
| 1,723 | Python | .py | 39 | 37.025641 | 119 | 0.659905 | enriquetomasmb/nebula | 8 | 2 | 2 | GPL-3.0 | 9/5/2024, 10:48:43 PM (Europe/Amsterdam) |
2,290,004 | helper.py | enriquetomasmb_nebula/nebula/core/utils/helper.py |

import logging
from typing import OrderedDict, List, Optional
import copy
import torch
def cosine_metric2(model1: OrderedDict[str, torch.Tensor], model2: OrderedDict[str, torch.Tensor], similarity: bool = False) -> Optional[float]:
if model1 is None or model2 is None:
logging.info("Cosine similarity cannot be computed due to missing model")
return None
cos_similarities = []
for layer in model1:
if layer in model2:
l1 = model1[layer].flatten()
l2 = model2[layer].flatten()
if l1.shape != l2.shape:
# Adjust the shape of the smaller layer to match the larger layer
min_len = min(l1.shape[0], l2.shape[0])
l1, l2 = l1[:min_len], l2[:min_len]
cos_sim = torch.nn.functional.cosine_similarity(l1.unsqueeze(0), l2.unsqueeze(0), dim=1)
cos_similarities.append(cos_sim.item())
if cos_similarities:
avg_cos_sim = torch.mean(torch.tensor(cos_similarities))
# result = torch.clamp(avg_cos_sim, min=0).item()
# return result
return avg_cos_sim.item() if similarity else (1 - avg_cos_sim.item())
else:
return None
def cosine_metric(model1: OrderedDict, model2: OrderedDict, similarity: bool = False) -> Optional[float]:
if model1 is None or model2 is None:
logging.info("Cosine similarity cannot be computed due to missing model")
return None
cos_similarities: List = []
for layer in model1:
if layer in model2:
l1 = model1[layer].to("cpu")
l2 = model2[layer].to("cpu")
if l1.shape != l2.shape:
# Adjust the shape of the smaller layer to match the larger layer
min_len = min(l1.shape[0], l2.shape[0])
l1, l2 = l1[:min_len], l2[:min_len]
cos = torch.nn.CosineSimilarity(dim=l1.dim() - 1)
cos_mean = torch.mean(cos(l1.float(), l2.float())).mean()
cos_similarities.append(cos_mean)
else:
logging.info("Layer {} not found in model 2".format(layer))
if cos_similarities:
cos = torch.Tensor(cos_similarities)
avg_cos = torch.mean(cos)
relu_cos = torch.nn.functional.relu(avg_cos) # relu to avoid negative values
return relu_cos.item() if similarity else (1 - relu_cos.item())
else:
return None
def euclidean_metric(model1: OrderedDict[str, torch.Tensor], model2: OrderedDict[str, torch.Tensor], standardized: bool = False, similarity: bool = False) -> Optional[float]:
if model1 is None or model2 is None:
return None
distances = []
for layer in model1:
if layer in model2:
l1 = model1[layer].flatten()
l2 = model2[layer].flatten()
if standardized:
l1 = (l1 - l1.mean()) / l1.std()
l2 = (l2 - l2.mean()) / l2.std()
distance = torch.norm(l1 - l2, p=2)
if similarity:
norm_sum = torch.norm(l1, p=2) + torch.norm(l2, p=2)
similarity_score = 1 - (distance / norm_sum if norm_sum != 0 else 0)
distances.append(similarity_score.item())
else:
distances.append(distance.item())
if distances:
avg_distance = torch.mean(torch.tensor(distances))
return avg_distance.item()
else:
return None
def minkowski_metric(model1: OrderedDict[str, torch.Tensor], model2: OrderedDict[str, torch.Tensor], p: int, similarity: bool = False) -> Optional[float]:
if model1 is None or model2 is None:
return None
distances = []
for layer in model1:
if layer in model2:
l1 = model1[layer].flatten()
l2 = model2[layer].flatten()
distance = torch.norm(l1 - l2, p=p)
if similarity:
norm_sum = torch.norm(l1, p=p) + torch.norm(l2, p=p)
similarity_score = 1 - (distance / norm_sum if norm_sum != 0 else 0)
distances.append(similarity_score.item())
else:
distances.append(distance.item())
if distances:
avg_distance = torch.mean(torch.tensor(distances))
return avg_distance.item()
else:
return None
def manhattan_metric(model1: OrderedDict[str, torch.Tensor], model2: OrderedDict[str, torch.Tensor], similarity: bool = False) -> Optional[float]:
if model1 is None or model2 is None:
return None
distances = []
for layer in model1:
if layer in model2:
l1 = model1[layer].flatten()
l2 = model2[layer].flatten()
distance = torch.norm(l1 - l2, p=1)
if similarity:
norm_sum = torch.norm(l1, p=1) + torch.norm(l2, p=1)
similarity_score = 1 - (distance / norm_sum if norm_sum != 0 else 0)
distances.append(similarity_score.item())
else:
distances.append(distance.item())
if distances:
avg_distance = torch.mean(torch.tensor(distances))
return avg_distance.item()
else:
return None
def pearson_correlation_metric(model1: OrderedDict[str, torch.Tensor], model2: OrderedDict[str, torch.Tensor], similarity: bool = False) -> Optional[float]:
if model1 is None or model2 is None:
return None
correlations = []
for layer in model1:
if layer in model2:
l1 = model1[layer].flatten()
l2 = model2[layer].flatten()
if l1.shape != l2.shape:
min_len = min(l1.shape[0], l2.shape[0])
l1, l2 = l1[:min_len], l2[:min_len]
correlation = torch.corrcoef(torch.stack((l1, l2)))[0, 1]
if similarity:
adjusted_similarity = (correlation + 1) / 2
correlations.append(adjusted_similarity.item())
else:
correlations.append(1 - (correlation + 1) / 2)
if correlations:
avg_correlation = torch.mean(torch.tensor(correlations))
return avg_correlation.item()
else:
return None
def jaccard_metric(model1: OrderedDict[str, torch.Tensor], model2: OrderedDict[str, torch.Tensor], similarity: bool = False) -> Optional[float]:
if model1 is None or model2 is None:
return None
jaccard_scores = []
for layer in model1:
if layer in model2:
l1 = model1[layer].flatten()
l2 = model2[layer].flatten()
intersection = torch.sum(torch.min(l1, l2))
union = torch.sum(torch.max(l1, l2))
jaccard_sim = intersection / union if union != 0 else 0
if similarity:
jaccard_scores.append(jaccard_sim.item())
else:
jaccard_scores.append(1 - jaccard_sim.item())
if jaccard_scores:
avg_jaccard = torch.mean(torch.tensor(jaccard_scores))
return avg_jaccard.item()
else:
return None
def normalise_layers(untrusted_params, trusted_params):
trusted_norms = dict([k, torch.norm(trusted_params[k].data.view(-1).float())] for k in trusted_params.keys())
normalised_params = copy.deepcopy(untrusted_params)
state_dict = copy.deepcopy(untrusted_params)
for layer in untrusted_params:
layer_norm = torch.norm(state_dict[layer].data.view(-1).float())
scaling_factor = min(layer_norm / trusted_norms[layer], 1)
logging.debug("Layer: {} ScalingFactor {}".format(layer, scaling_factor))
# logging.info("Scaling client {} layer {} with factor {}".format(client, layer, scaling_factor))
normalised_layer = torch.mul(state_dict[layer], scaling_factor)
normalised_params[layer] = normalised_layer
return normalised_params
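# Usage sketch (illustrative, not part of the original module): comparing two
# toy state dicts with the metrics above.
if __name__ == "__main__":
    from collections import OrderedDict as _OD
    m1 = _OD(fc=torch.tensor([1.0, 2.0, 3.0]))
    m2 = _OD(fc=torch.tensor([1.0, 2.1, 2.9]))
    print("cosine distance:", cosine_metric(m1, m2))  # close to 0 for similar models
    print("euclidean distance:", euclidean_metric(m1, m2))
    print("manhattan similarity:", manhattan_metric(m1, m2, similarity=True))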
| 7,803 | Python | .py | 168 | 36.422619 | 174 | 0.608805 | enriquetomasmb/nebula | 8 | 2 | 2 | GPL-3.0 | 9/5/2024, 10:48:43 PM (Europe/Amsterdam) |
2,290,005 | deterministic.py | enriquetomasmb_nebula/nebula/core/utils/deterministic.py |

import logging
import numpy as np
import os
import random
import torch
def enable_deterministic(config):
seed = config.participant["scenario_args"]["random_seed"]
logging.info("Fixing randomness with seed {}".format(seed))
np.random.seed(seed)
os.environ["PYTHONHASHSEED"] = str(seed)
random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
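# Usage sketch (illustrative): a stand-in object with the nested dict layout
# that enable_deterministic() reads; the real Config class lives in nebula.config.
if __name__ == "__main__":
    class _FakeConfig:
        participant = {"scenario_args": {"random_seed": 42}}
    enable_deterministic(_FakeConfig())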
| 512 | Python | .py | 16 | 28.375 | 63 | 0.748988 | enriquetomasmb/nebula | 8 | 2 | 2 | GPL-3.0 | 9/5/2024, 10:48:43 PM (Europe/Amsterdam) |
2,290,006 | locker.py | enriquetomasmb_nebula/nebula/core/utils/locker.py |

import threading
import logging
import inspect
import asyncio
class Locker:
def __init__(self, name, verbose=True, async_lock=False, *args, **kwargs):
self._name = name
self._verbose = verbose
self._async_lock = async_lock
if async_lock:
self._lock = asyncio.Lock(*args, **kwargs)
else:
self._lock = threading.Lock(*args, **kwargs)
def acquire(self, *args, **kwargs):
caller = inspect.stack()[1]
filename = caller.filename.split("/")[-1]
lineno = caller.lineno
if self._verbose:
if "timeout" in kwargs:
logging.debug(f"🔒 Acquiring lock [{self._name}] from {filename}:{lineno} with timeout {kwargs['timeout']}")
else:
logging.debug(f"🔒 Acquiring lock [{self._name}] from {filename}:{lineno}")
if self._async_lock:
raise RuntimeError("Use 'await acquire_async' for acquiring async locks")
return self._lock.acquire(*args, **kwargs)
def release(self, *args, **kwargs):
caller = inspect.stack()[1]
filename = caller.filename.split("/")[-1]
lineno = caller.lineno
if self._verbose:
logging.debug(f"🔓 Releasing lock [{self._name}] from {filename}:{lineno}")
self._lock.release()
def locked(self):
result = self._lock.locked()
if self._verbose:
logging.debug(f"� Lock [{self._name}] is locked? {result}")
return result
def __enter__(self):
if self._async_lock:
raise RuntimeError("Use 'async with' for acquiring async locks")
logging.debug(f"🔒 Acquiring lock [{self._name}] using [with] statement")
self.acquire()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
if self._async_lock:
raise RuntimeError("Use 'async with' for releasing async locks")
logging.debug(f"🔓 Releasing lock [{self._name}] using [with] statement")
self.release()
async def acquire_async(self, *args, **kwargs):
caller = inspect.stack()[1]
filename = caller.filename.split("/")[-1]
lineno = caller.lineno
if not self._async_lock:
raise RuntimeError("Use 'acquire' for acquiring non-async locks")
if self._verbose:
logging.debug(f"🔒 Acquiring async lock [{self._name}] from {filename}:{lineno}")
await self._lock.acquire()
async def release_async(self, *args, **kwargs):
caller = inspect.stack()[1]
filename = caller.filename.split("/")[-1]
lineno = caller.lineno
if not self._async_lock:
raise RuntimeError("Use 'release' for releasing non-async locks")
if self._verbose:
logging.debug(f"🔓 Releasing async lock [{self._name}] from {filename}:{lineno}")
self._lock.release()
    async def locked_async(self):
        result = self._lock.locked()
        if self._verbose:
            logging.debug(f"Async lock [{self._name}] is locked? {result}")
        return result
async def __aenter__(self):
logging.debug(f"🔒 Acquiring async lock [{self._name}] using [async with] statement")
await self.acquire_async()
return self
async def __aexit__(self, exc_type, exc_val, exc_tb):
logging.debug(f"🔓 Releasing async lock [{self._name}] using [async with] statement")
await self.release_async()
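# Usage sketch (illustrative, not part of the original module): the same Locker
# class wrapping a threading.Lock (sync) and an asyncio.Lock (async).
async def _example_locker():
    sync_lock = Locker("example-sync", verbose=False)
    with sync_lock:  # threading.Lock under the hood
        pass
    async_lock = Locker("example-async", verbose=False, async_lock=True)
    async with async_lock:  # asyncio.Lock under the hood
        pass
if __name__ == "__main__":
    asyncio.run(_example_locker())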
| 3,476 | Python | .py | 77 | 36.077922 | 127 | 0.602306 | enriquetomasmb/nebula | 8 | 2 | 2 | GPL-3.0 | 9/5/2024, 10:48:43 PM (Europe/Amsterdam) |
2,290,007 | nebulalogger_tensorboard.py | enriquetomasmb_nebula/nebula/core/utils/nebulalogger_tensorboard.py |

import logging
from lightning.pytorch.loggers import TensorBoardLogger
from datetime import datetime
class NebulaTensorBoardLogger(TensorBoardLogger):
def __init__(self, scenario_start_time, *args, **kwargs):
self.scenario_start_time = scenario_start_time
self.local_step = 0
self.global_step = 0
super().__init__(*args, **kwargs)
def get_step(self):
return int((datetime.now() - datetime.strptime(self.scenario_start_time, "%d/%m/%Y %H:%M:%S")).total_seconds())
def log_data(self, data, step=None):
if step is None:
step = self.get_step()
# logging.debug(f"Logging data for global step {step} | local step {self.local_step} | global step {self.global_step}")
try:
super().log_metrics(data, step)
except ValueError as e:
pass
except Exception as e:
logging.error(f"Error logging statistics data [{data}] for step [{step}]: {e}")
def log_metrics(self, metrics, step=None):
if step is None:
self.local_step += 1
step = self.global_step + self.local_step
# logging.debug(f"Logging metrics for global step {step} | local step {self.local_step} | global step {self.global_step}")
if "epoch" in metrics:
metrics.pop("epoch")
try:
super().log_metrics(metrics, step)
except Exception as e:
logging.error(f"Error logging metrics [{metrics}] for step [{step}]: {e}")
def log_figure(self, figure, step=None, name=None):
if step is None:
step = self.get_step()
try:
self.experiment.add_figure(name, figure, step)
except Exception as e:
logging.error(f"Error logging figure [{name}] for step [{step}]: {e}")
def get_logger_config(self):
return {"scenario_start_time": self.scenario_start_time, "local_step": self.local_step, "global_step": self.global_step}
def set_logger_config(self, logger_config):
if logger_config is None:
return
try:
self.scenario_start_time = logger_config["scenario_start_time"]
self.local_step = logger_config["local_step"]
self.global_step = logger_config["global_step"]
except Exception as e:
logging.error(f"Error setting logger config: {e}")
| 2,376 | Python | .py | 50 | 38.18 | 130 | 0.62063 | enriquetomasmb/nebula | 8 | 2 | 2 | GPL-3.0 | 9/5/2024, 10:48:43 PM (Europe/Amsterdam) |
2,290,008 | messages.py | enriquetomasmb_nebula/nebula/core/network/messages.py |

import logging
from nebula.core.pb import nebula_pb2
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from nebula.core.network.communications import CommunicationsManager
class MessagesManager:
def __init__(self, addr, config, cm: "CommunicationsManager"):
self.addr = addr
self.config = config
self.cm = cm
def generate_discovery_message(self, action, latitude=0.0, longitude=0.0):
message = nebula_pb2.DiscoveryMessage(
action=action,
latitude=latitude,
longitude=longitude,
)
message_wrapper = nebula_pb2.Wrapper()
message_wrapper.source = self.addr
message_wrapper.discovery_message.CopyFrom(message)
data = message_wrapper.SerializeToString()
return data
def generate_control_message(self, action, log="Control message"):
message = nebula_pb2.ControlMessage(
action=action,
log=log,
)
message_wrapper = nebula_pb2.Wrapper()
message_wrapper.source = self.addr
message_wrapper.control_message.CopyFrom(message)
data = message_wrapper.SerializeToString()
return data
    def generate_federation_message(self, action, arguments=None, round=None):
logging.info(f"Building federation message with [Action {action}], arguments {arguments}, and round {round}")
message = nebula_pb2.FederationMessage(
action=action,
arguments=[str(arg) for arg in (arguments or [])],
round=round,
)
message_wrapper = nebula_pb2.Wrapper()
message_wrapper.source = self.addr
message_wrapper.federation_message.CopyFrom(message)
data = message_wrapper.SerializeToString()
return data
def generate_model_message(self, round, serialized_model, weight=1):
message = nebula_pb2.ModelMessage(
round=round,
parameters=serialized_model,
weight=weight,
)
message_wrapper = nebula_pb2.Wrapper()
message_wrapper.source = self.addr
message_wrapper.model_message.CopyFrom(message)
data = message_wrapper.SerializeToString()
return data
def generate_connection_message(self, action):
message = nebula_pb2.ConnectionMessage(
action=action,
)
message_wrapper = nebula_pb2.Wrapper()
message_wrapper.source = self.addr
message_wrapper.connection_message.CopyFrom(message)
data = message_wrapper.SerializeToString()
return data
def generate_reputation_message(self, reputation):
message = nebula_pb2.ReputationMessage(
reputation=reputation,
)
message_wrapper = nebula_pb2.Wrapper()
message_wrapper.source = self.addr
message_wrapper.reputation_message.CopyFrom(message)
data = message_wrapper.SerializeToString()
return data
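# Usage sketch (illustrative, not part of the original module): a round trip
# through the protobuf wrapper. Passing config=None and cm=None is enough here
# because message generation only uses the node address.
if __name__ == "__main__":
    mm = MessagesManager(addr="192.168.50.2:45000", config=None, cm=None)
    data = mm.generate_discovery_message(nebula_pb2.DiscoveryMessage.Action.DISCOVER, latitude=38.0, longitude=-1.13)
    wrapper = nebula_pb2.Wrapper()
    wrapper.ParseFromString(data)
    print(wrapper.source, wrapper.discovery_message.latitude)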
| 2,948 | Python | .py | 72 | 32.069444 | 117 | 0.664921 | enriquetomasmb/nebula | 8 | 2 | 2 | GPL-3.0 | 9/5/2024, 10:48:43 PM (Europe/Amsterdam) |
2,290,009 | discoverer.py | enriquetomasmb_nebula/nebula/core/network/discoverer.py |

import asyncio
import logging
from nebula.addons.functions import print_msg_box
from typing import TYPE_CHECKING
from nebula.core.pb import nebula_pb2
if TYPE_CHECKING:
from nebula.core.network.communications import CommunicationsManager
class Discoverer:
def __init__(self, addr, config, cm: "CommunicationsManager"):
print_msg_box(msg=f"Starting discoverer module...", indent=2, title="Discoverer module")
self.addr = addr
self.config = config
self.cm = cm
self.grace_time = self.config.participant["discoverer_args"]["grace_time_discovery"]
self.period = self.config.participant["discoverer_args"]["discovery_frequency"]
self.interval = self.config.participant["discoverer_args"]["discovery_interval"]
async def start(self):
asyncio.create_task(self.run_discover())
async def run_discover(self):
if self.config.participant["scenario_args"]["federation"] == "CFL":
logging.info("� Federation is CFL. Discoverer is disabled...")
return
await asyncio.sleep(self.grace_time)
while True:
if len(self.cm.connections) > 0:
latitude = self.config.participant["mobility_args"]["latitude"]
longitude = self.config.participant["mobility_args"]["longitude"]
message = self.cm.mm.generate_discovery_message(action=nebula_pb2.DiscoveryMessage.Action.DISCOVER, latitude=latitude, longitude=longitude)
                try:
                    logging.debug("Sending discovery message to neighbors...")
                    current_connections = await self.cm.get_addrs_current_connections(only_direct=True)
                    await self.cm.send_message_to_neighbors(message, current_connections, self.interval)
                except Exception as e:
                    logging.error(f"Cannot send discovery message to neighbors. Error: {str(e)}")
            await asyncio.sleep(self.period)
| 1,992 | Python | .py | 35 | 46.8 | 155 | 0.671282 | enriquetomasmb/nebula | 8 | 2 | 2 | GPL-3.0 | 9/5/2024, 10:48:43 PM (Europe/Amsterdam) |
2,290,010 | forwarder.py | enriquetomasmb_nebula/nebula/core/network/forwarder.py |

import asyncio
import logging
import time
from typing import TYPE_CHECKING
from nebula.addons.functions import print_msg_box
from nebula.core.utils.locker import Locker
if TYPE_CHECKING:
from nebula.core.network.communications import CommunicationsManager
class Forwarder:
def __init__(self, config, cm: "CommunicationsManager"):
print_msg_box(msg=f"Starting forwarder module...", indent=2, title="Forwarder module")
self.config = config
self.cm = cm
self.pending_messages = asyncio.Queue()
self.pending_messages_lock = Locker("pending_messages_lock", verbose=False, async_lock=True)
self.interval = self.config.participant["forwarder_args"]["forwarder_interval"]
self.number_forwarded_messages = self.config.participant["forwarder_args"]["number_forwarded_messages"]
self.messages_interval = self.config.participant["forwarder_args"]["forward_messages_interval"]
async def start(self):
asyncio.create_task(self.run_forwarder())
async def run_forwarder(self):
        if self.config.participant["scenario_args"]["federation"] == "CFL":
            logging.info("Federation is CFL. Forwarder is disabled...")
            return
while True:
# logging.debug(f"游대 Pending messages: {self.pending_messages.qsize()}")
start_time = time.time()
await self.pending_messages_lock.acquire_async()
await self.process_pending_messages(messages_left=self.number_forwarded_messages)
await self.pending_messages_lock.release_async()
sleep_time = max(0, self.interval - (time.time() - start_time))
await asyncio.sleep(sleep_time)
    async def process_pending_messages(self, messages_left):
        while messages_left > 0 and not self.pending_messages.empty():
            msg, neighbors = await self.pending_messages.get()
            # Only the first `messages_left` neighbors are served in this pass;
            # the remainder is requeued below.
            send_count = min(messages_left, len(neighbors))
            for neighbor in neighbors[:send_count]:
                if neighbor not in self.cm.connections:
                    continue
                try:
                    logging.debug(f"Sending message (forwarding) --> to {neighbor}")
                    await self.cm.send_message(neighbor, msg)
                except Exception as e:
                    logging.error(f"Error forwarding message to {neighbor}. Error: {str(e)}")
                await asyncio.sleep(self.messages_interval)
            messages_left -= send_count
            if len(neighbors) > send_count:
                logging.debug("Putting message back in queue for forwarding to the remaining neighbors")
                await self.pending_messages.put((msg, neighbors[send_count:]))
async def forward(self, msg, addr_from):
        if self.config.participant["scenario_args"]["federation"] == "CFL":
            logging.info("Federation is CFL. Forwarder is disabled...")
            return
        try:
            await self.pending_messages_lock.acquire_async()
            current_connections = await self.cm.get_addrs_current_connections(only_direct=True)
            pending_nodes_to_send = [n for n in current_connections if n != addr_from]
            logging.debug(f"Putting message in queue for forwarding to {pending_nodes_to_send}")
            await self.pending_messages.put((msg, pending_nodes_to_send))
        except Exception as e:
            logging.error(f"Error forwarding message. Error: {str(e)}")
        finally:
            await self.pending_messages_lock.release_async()
| 3,574 | Python | .py | 63 | 45.507937 | 111 | 0.652774 | enriquetomasmb/nebula | 8 | 2 | 2 | GPL-3.0 | 9/5/2024, 10:48:43 PM (Europe/Amsterdam) |
2,290,011 | propagator.py | enriquetomasmb_nebula/nebula/core/network/propagator.py |

import asyncio
import logging
from collections import deque
from abc import ABC, abstractmethod
from nebula.addons.functions import print_msg_box
from typing import TYPE_CHECKING
from typing import List, Tuple, Any, Optional
if TYPE_CHECKING:
from nebula.core.network.communications import CommunicationsManager
from nebula.config.config import Config
from nebula.core.aggregation.aggregator import Aggregator
from nebula.core.engine import Engine
from nebula.core.training.lightning import Lightning
class PropagationStrategy(ABC):
@abstractmethod
def is_node_eligible(self, node: str) -> bool:
pass
@abstractmethod
def prepare_model_payload(self, node: str) -> Optional[Tuple[Any, float]]:
pass
class InitialModelPropagation(PropagationStrategy):
def __init__(self, aggregator: "Aggregator", trainer: "Lightning", engine: "Engine"):
self.aggregator = aggregator
self.trainer = trainer
self.engine = engine
def get_round(self):
return self.engine.get_round()
def is_node_eligible(self, node: str) -> bool:
return node not in self.engine.cm.get_ready_connections()
def prepare_model_payload(self, node: str) -> Optional[Tuple[Any, float]]:
return self.trainer.get_model_parameters(initialize=True), self.trainer.DEFAULT_MODEL_WEIGHT
class StableModelPropagation(PropagationStrategy):
def __init__(self, aggregator: "Aggregator", trainer: "Lightning", engine: "Engine"):
self.aggregator = aggregator
self.trainer = trainer
self.engine = engine
self.addr = self.engine.get_addr()
def get_round(self):
return self.engine.get_round()
def is_node_eligible(self, node: str) -> bool:
return (node not in self.aggregator.get_nodes_pending_models_to_aggregate()) or (self.engine.cm.connections[node].get_federated_round() < self.get_round())
def prepare_model_payload(self, node: str) -> Optional[Tuple[Any, float]]:
return self.trainer.get_model_parameters(), self.trainer.get_model_weight()
class Propagator:
def __init__(self, cm: "CommunicationsManager"):
self.engine: Engine = cm.engine
self.config: Config = cm.get_config()
self.addr = cm.get_addr()
self.cm: "CommunicationsManager" = cm
self.aggregator: "Aggregator" = self.engine.aggregator
self.trainer: "Lightning" = self.engine._trainer
self.status_history = deque(maxlen=self.config.participant["propagator_args"]["history_size"])
self.interval = self.config.participant["propagator_args"]["propagate_interval"]
self.model_interval = self.config.participant["propagator_args"]["propagate_model_interval"]
self.early_stop = self.config.participant["propagator_args"]["propagation_early_stop"]
self.stable_rounds_count = 0
# Propagation strategies (adapt to the specific use case)
self.strategies = {"initialization": InitialModelPropagation(self.aggregator, self.trainer, self.engine), "stable": StableModelPropagation(self.aggregator, self.trainer, self.engine)}
def start(self):
print_msg_box(msg=f"Starting propagator functionality...\nModel propagation through the network", indent=2, title="Propagator")
def get_round(self):
return self.engine.get_round()
    def update_and_check_neighbors(self, strategy, eligible_neighbors):
        # Record the current list of eligible neighbors
        current_status = [n for n in eligible_neighbors]
        # If there is history and the new status differs from the last recorded one, store it and continue
        if self.status_history and current_status != self.status_history[-1]:
            logging.info(f"Neighbor status changed from the last recorded one: {list(self.status_history)}")
            self.status_history.append(current_status)
            return True
# Add the current status to the deque
logging.info(f"Adding current status to the deque: {current_status}")
self.status_history.append(current_status)
# If the deque is full and all elements are the same, stop propagation
if len(self.status_history) == self.status_history.maxlen and all(s == self.status_history[0] for s in self.status_history):
logging.info(f"Propagator exited for {self.status_history.maxlen} equal rounds: {list(self.status_history)}")
return False
return True
def reset_status_history(self):
self.status_history.clear()
async def propagate(self, strategy_id: str):
self.reset_status_history()
if strategy_id not in self.strategies:
logging.info(f"Strategy {strategy_id} not found.")
return False
if self.get_round() is None:
logging.info("Propagation halted: round is not set.")
return False
strategy = self.strategies[strategy_id]
logging.info(f"Starting model propagation with strategy: {strategy_id}")
current_connections = await self.cm.get_addrs_current_connections(only_direct=True)
eligible_neighbors = [neighbor_addr for neighbor_addr in current_connections if strategy.is_node_eligible(neighbor_addr)]
logging.info(f"Eligible neighbors for model propagation: {eligible_neighbors}")
if not eligible_neighbors:
logging.info("Propagation complete: No eligible neighbors.")
return False
logging.info(f"Checking repeated statuses during propagation")
if not self.update_and_check_neighbors(strategy, eligible_neighbors):
logging.info("Exiting propagation due to repeated statuses.")
return False
model_params, weight = strategy.prepare_model_payload(None)
if model_params:
serialized_model = (
model_params if isinstance(model_params, bytes)
else self.trainer.serialize_model(model_params)
)
else:
serialized_model = None
round_number = -1 if strategy_id == "initialization" else self.get_round()
for neighbor_addr in eligible_neighbors:
asyncio.create_task(
self.cm.send_model(neighbor_addr, round_number, serialized_model, weight)
)
if len(self.aggregator.get_nodes_pending_models_to_aggregate()) >= len(self.aggregator._federation_nodes):
return False
await asyncio.sleep(self.interval)
return True
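# Usage sketch (illustrative, not part of the original module): the strategy
# registry makes it easy to plug in new behaviors. EagerPropagation below is
# hypothetical and only demonstrates the PropagationStrategy contract.
class EagerPropagation(PropagationStrategy):
    def __init__(self, trainer: "Lightning"):
        self.trainer = trainer
    def is_node_eligible(self, node: str) -> bool:
        return True  # always propagate, regardless of aggregation state
    def prepare_model_payload(self, node: str) -> Optional[Tuple[Any, float]]:
        return self.trainer.get_model_parameters(), self.trainer.get_model_weight()
# A Propagator instance could then register it with:
# propagator.strategies["eager"] = EagerPropagation(propagator.trainer)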
| 6,523 | Python | .py | 117 | 47.111111 | 191 | 0.695283 | enriquetomasmb/nebula | 8 | 2 | 2 | GPL-3.0 | 9/5/2024, 10:48:43 PM (Europe/Amsterdam) |
2,290,012 | communications.py | enriquetomasmb_nebula/nebula/core/network/communications.py |

import hashlib
import logging
import sys
import os
import traceback
import collections
from datetime import datetime
import requests
import asyncio
import subprocess
from nebula.addons.mobility import Mobility
from nebula.core.network.discoverer import Discoverer
from nebula.core.network.forwarder import Forwarder
from nebula.core.network.health import Health
from nebula.core.network.propagator import Propagator
from nebula.core.pb import nebula_pb2
from nebula.core.network.messages import MessagesManager
from nebula.core.network.connection import Connection
from nebula.core.utils.locker import Locker
from nebula.core.utils.helper import (
cosine_metric,
euclidean_metric,
minkowski_metric,
manhattan_metric,
pearson_correlation_metric,
jaccard_metric,
)
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from nebula.core.engine import Engine
class CommunicationsManager:
def __init__(self, engine: "Engine"):
logging.info("� Initializing Communications Manager")
self._engine = engine
self.addr = engine.get_addr()
self.host = self.addr.split(":")[0]
self.port = int(self.addr.split(":")[1])
self.config = engine.get_config()
self.id = str(self.config.participant["device_args"]["idx"])
self.register_endpoint = f'http://{self.config.participant["scenario_args"]["controller"]}/nebula/dashboard/{self.config.participant["scenario_args"]["name"]}/node/register'
self.wait_endpoint = f'http://{self.config.participant["scenario_args"]["controller"]}/nebula/dashboard/{self.config.participant["scenario_args"]["name"]}/node/wait'
self._connections = {}
self.connections_lock = Locker(name="connections_lock", async_lock=True)
self.connections_manager_lock = Locker(name="connections_manager_lock", async_lock=True)
self.connection_attempt_lock_incoming = Locker(name="connection_attempt_lock_incoming", async_lock=True)
self.connection_attempt_lock_outgoing = Locker(name="connection_attempt_lock_outgoing", async_lock=True)
# Pending connections to be established
self.pending_connections = set()
self.incoming_connections = {}
self.outgoing_connections = {}
self.ready_connections = set()
self._mm = MessagesManager(addr=self.addr, config=self.config, cm=self)
self.received_messages_hashes = collections.deque(maxlen=self.config.participant["message_args"]["max_local_messages"])
self.receive_messages_lock = Locker(name="receive_messages_lock", async_lock=True)
self._discoverer = Discoverer(addr=self.addr, config=self.config, cm=self)
# self._health = Health(addr=self.addr, config=self.config, cm=self)
self._forwarder = Forwarder(config=self.config, cm=self)
self._propagator = Propagator(cm=self)
self._mobility = Mobility(config=self.config, cm=self)
# List of connections to reconnect {addr: addr, tries: 0}
self.connections_reconnect = []
self.max_connections = 1000
self.network_engine = None
self.stop_network_engine = asyncio.Event()
self.loop = asyncio.get_event_loop()
max_concurrent_tasks = 5
self.semaphore_send_model = asyncio.Semaphore(max_concurrent_tasks)
@property
def engine(self):
return self._engine
@property
def connections(self):
return self._connections
@property
def mm(self):
return self._mm
@property
def discoverer(self):
return self._discoverer
@property
def health(self):
return self._health
@property
def forwarder(self):
return self._forwarder
@property
def propagator(self):
return self._propagator
@property
def mobility(self):
return self._mobility
async def check_federation_ready(self):
# Check if all my connections are in ready_connections
logging.info(f"🔗 check_federation_ready | Ready connections: {self.ready_connections} | Connections: {self.connections.keys()}")
        return set(self.connections.keys()) == self.ready_connections
async def add_ready_connection(self, addr):
self.ready_connections.add(addr)
async def handle_incoming_message(self, data, addr_from):
try:
message_wrapper = nebula_pb2.Wrapper()
message_wrapper.ParseFromString(data)
source = message_wrapper.source
logging.debug(f"📥 handle_incoming_message | Received message from {addr_from} with source {source}")
if source == self.addr:
return
if message_wrapper.HasField("discovery_message"):
if await self.include_received_message_hash(hashlib.md5(data).hexdigest()):
await self.forwarder.forward(data, addr_from=addr_from)
await self.handle_discovery_message(source, message_wrapper.discovery_message)
elif message_wrapper.HasField("control_message"):
await self.handle_control_message(source, message_wrapper.control_message)
elif message_wrapper.HasField("federation_message"):
if await self.include_received_message_hash(hashlib.md5(data).hexdigest()):
if self.config.participant["device_args"]["proxy"] or message_wrapper.federation_message.action == nebula_pb2.FederationMessage.Action.Value("FEDERATION_START"):
await self.forwarder.forward(data, addr_from=addr_from)
await self.handle_federation_message(source, message_wrapper.federation_message)
elif message_wrapper.HasField("model_message"):
if await self.include_received_message_hash(hashlib.md5(data).hexdigest()):
# TODO: Improve the technique. Now only forward model messages if the node is a proxy
# Need to update the expected model messages receiving during the round
# Round -1 is the initialization round --> all nodes should receive the model
if self.config.participant["device_args"]["proxy"] or message_wrapper.model_message.round == -1:
await self.forwarder.forward(data, addr_from=addr_from)
await self.handle_model_message(source, message_wrapper.model_message)
elif message_wrapper.HasField("connection_message"):
await self.handle_connection_message(source, message_wrapper.connection_message)
else:
logging.info(f"Unknown handler for message: {message_wrapper}")
except Exception as e:
logging.error(f"📥 handle_incoming_message | Error while processing: {e}")
logging.error(traceback.format_exc())
async def handle_discovery_message(self, source, message):
logging.info(f"� handle_discovery_message | Received [Action {message.action}] from {source} (network propagation)")
try:
await self.engine.event_manager.trigger_event(source, message)
except Exception as e:
logging.error(f"� handle_discovery_message | Error while processing: {e}")
async def handle_control_message(self, source, message):
logging.info(f"🔧 handle_control_message | Received [Action {message.action}] from {source} with log {message.log}")
try:
await self.engine.event_manager.trigger_event(source, message)
except Exception as e:
logging.error(f"🔧 handle_control_message | Error while processing: {message.action} {message.log} | {e}")
async def handle_federation_message(self, source, message):
logging.info(f"� handle_federation_message | Received [Action {message.action}] from {source} with arguments {message.arguments}")
try:
await self.engine.event_manager.trigger_event(source, message)
except Exception as e:
logging.error(f"� handle_federation_message | Error while processing: {message.action} {message.arguments} | {e}")
async def handle_model_message(self, source, message):
logging.info(f"🤖 handle_model_message | Received model from {source} with round {message.round}")
if self.get_round() is not None:
await self.engine.get_round_lock().acquire_async()
current_round = self.get_round()
await self.engine.get_round_lock().release_async()
if message.round != current_round and message.round != -1:
logging.info(f"�� handle_model_message | Received a model from a different round | Model round: {message.round} | Current round: {current_round}")
if message.round > current_round:
logging.info(f"🤖 handle_model_message | Saving model from {source} for future round {message.round}")
await self.engine.aggregator.include_next_model_in_buffer(
message.parameters,
message.weight,
source=source,
round=message.round,
)
else:
logging.info(f"�� handle_model_message | Ignoring model from {source} from a previous round")
return
if not self.engine.get_federation_ready_lock().locked() and len(self.engine.get_federation_nodes()) == 0:
logging.info(f"🤖 handle_model_message | There are no defined federation nodes")
return
try:
# get_federation_ready_lock() is locked when the model is being initialized (first round)
# non-starting nodes receive the initialized model from the starting node
if not self.engine.get_federation_ready_lock().locked() or self.engine.get_initialization_status():
decoded_model = self.engine.trainer.deserialize_model(message.parameters)
if self.config.participant["adaptive_args"]["model_similarity"]:
logging.info(f"🤖 handle_model_message | Checking model similarity")
                    cosine_value = cosine_metric(self.engine.trainer.get_model_parameters(), decoded_model, similarity=True)
                    euclidean_value = euclidean_metric(self.engine.trainer.get_model_parameters(), decoded_model, similarity=True)
                    minkowski_value = minkowski_metric(self.engine.trainer.get_model_parameters(), decoded_model, p=2, similarity=True)
                    manhattan_value = manhattan_metric(self.engine.trainer.get_model_parameters(), decoded_model, similarity=True)
                    pearson_correlation_value = pearson_correlation_metric(self.engine.trainer.get_model_parameters(), decoded_model, similarity=True)
                    jaccard_value = jaccard_metric(self.engine.trainer.get_model_parameters(), decoded_model, similarity=True)
                    similarity_csv = os.path.join(self.config.participant["tracking_args"]["log_dir"], f"participant_{self.id}_similarity.csv")  # log directory assumed under tracking_args
                    with open(similarity_csv, "a+") as f:
                        if os.stat(similarity_csv).st_size == 0:
                            f.write("timestamp,source_ip,round,current_round,cosine,euclidean,minkowski,manhattan,pearson_correlation,jaccard\n")
                        f.write(f"{datetime.now()}, {source}, {message.round}, {current_round}, {cosine_value}, {euclidean_value}, {minkowski_value}, {manhattan_value}, {pearson_correlation_value}, {jaccard_value}\n")
await self.engine.aggregator.include_model_in_buffer(
decoded_model,
message.weight,
source=source,
round=message.round,
)
else:
if message.round != -1:
# Be sure that the model message is from the initialization round (round = -1)
logging.info(f"🤖 handle_model_message | Saving model from {source} for future round {message.round}")
await self.engine.aggregator.include_next_model_in_buffer(
message.parameters,
message.weight,
source=source,
round=message.round,
)
return
logging.info(f"🤖 handle_model_message | Initializing model (executed by {source})")
try:
model = self.engine.trainer.deserialize_model(message.parameters)
self.engine.trainer.set_model_parameters(model, initialize=True)
logging.info(f"🤖 handle_model_message | Model Parameters Initialized")
self.engine.set_initialization_status(True)
await self.engine.get_federation_ready_lock().release_async() # Enable learning cycle once the initialization is done
try:
await self.engine.get_federation_ready_lock().release_async() # Release the lock acquired at the beginning of the engine
except RuntimeError:
pass
except RuntimeError:
pass
except Exception as e:
logging.error(f"🤖 handle_model_message | Unknown error adding model: {e}")
logging.error(traceback.format_exc())
else:
logging.info(f"🤖 handle_model_message | Tried to add a model while learning is not running")
if message.round != -1:
# Be sure that the model message is from the initialization round (round = -1)
logging.info(f"🤖 handle_model_message | Saving model from {source} for future round {message.round}")
await self.engine.aggregator.include_next_model_in_buffer(
message.parameters,
message.weight,
source=source,
round=message.round,
)
return
async def handle_connection_message(self, source, message):
try:
await self.engine.event_manager.trigger_event(source, message)
except Exception as e:
logging.error(f"🔗 handle_connection_message | Error while processing: {message.action} | {e}")
def get_connections_lock(self):
return self.connections_lock
def get_config(self):
return self.config
def get_addr(self):
return self.addr
def get_round(self):
return self.engine.get_round()
async def start(self):
logging.info(f"� Starting Communications Manager...")
await self.deploy_network_engine()
async def deploy_network_engine(self):
logging.info(f"� Deploying Network engine...")
self.network_engine = await asyncio.start_server(self.handle_connection_wrapper, self.host, self.port)
self.network_task = asyncio.create_task(self.network_engine.serve_forever(), name="Network Engine")
logging.info(f"� Network engine deployed at host {self.host} and port {self.port}")
async def handle_connection_wrapper(self, reader, writer):
asyncio.create_task(self.handle_connection(reader, writer))
async def handle_connection(self, reader, writer):
async def process_connection(reader, writer):
try:
addr = writer.get_extra_info("peername")
connected_node_id = await reader.readline()
connected_node_id = connected_node_id.decode("utf-8").strip()
connected_node_port = addr[1]
if ":" in connected_node_id:
connected_node_id, connected_node_port = connected_node_id.split(":")
connection_addr = f"{addr[0]}:{connected_node_port}"
direct = await reader.readline()
direct = direct.decode("utf-8").strip()
direct = True if direct == "True" else False
logging.info(f"🔗 [incoming] Connection from {addr} - {connection_addr} [id {connected_node_id} | port {connected_node_port} | direct {direct}] (incoming)")
if self.id == connected_node_id:
logging.info("🔗 [incoming] Connection with yourself is not allowed")
writer.write("CONNECTION//CLOSE\n".encode("utf-8"))
await writer.drain()
writer.close()
await writer.wait_closed()
return
async with self.connections_manager_lock:
if len(self.connections) >= self.max_connections:
logging.info("🔗 [incoming] Maximum number of connections reached")
logging.info(f"🔗 [incoming] Sending CONNECTION//CLOSE to {addr}")
writer.write("CONNECTION//CLOSE\n".encode("utf-8"))
await writer.drain()
writer.close()
await writer.wait_closed()
return
logging.info(f"🔗 [incoming] Connections: {self.connections}")
if connection_addr in self.connections:
logging.info(f"🔗 [incoming] Already connected with {self.connections[connection_addr]}")
logging.info(f"🔗 [incoming] Sending CONNECTION//EXISTS to {addr}")
writer.write("CONNECTION//EXISTS\n".encode("utf-8"))
await writer.drain()
writer.close()
await writer.wait_closed()
return
if connection_addr in self.pending_connections:
logging.info(f"🔗 [incoming] Connection with {connection_addr} is already pending")
if int(self.host.split(".")[3]) < int(addr[0].split(".")[3]):
logging.info(f"🔗 [incoming] Closing incoming connection since self.host < host (from {connection_addr})")
writer.write("CONNECTION//CLOSE\n".encode("utf-8"))
await writer.drain()
writer.close()
await writer.wait_closed()
return
else:
logging.info(f"🔗 [incoming] Closing outgoing connection since self.host >= host (from {connection_addr})")
if connection_addr in self.outgoing_connections:
out_reader, out_writer = self.outgoing_connections.pop(connection_addr)
out_writer.write("CONNECTION//CLOSE\n".encode("utf-8"))
await out_writer.drain()
out_writer.close()
await out_writer.wait_closed()
logging.info(f"🔗 [incoming] Including {connection_addr} in pending connections")
self.pending_connections.add(connection_addr)
self.incoming_connections[connection_addr] = (reader, writer)
logging.info(f"🔗 [incoming] Creating new connection with {addr} (id {connected_node_id})")
await writer.drain()
connection = Connection(
self,
reader,
writer,
connected_node_id,
addr[0],
connected_node_port,
direct=direct,
config=self.config,
)
async with self.connections_manager_lock:
logging.info(f"🔗 [incoming] Including {connection_addr} in connections")
self.connections[connection_addr] = connection
logging.info(f"🔗 [incoming] Sending CONNECTION//NEW to {addr}")
writer.write("CONNECTION//NEW\n".encode("utf-8"))
await writer.drain()
writer.write(f"{self.id}\n".encode("utf-8"))
await writer.drain()
await connection.start()
except Exception as e:
logging.error(f"�� [incoming] Error while handling connection with {addr}: {e}")
finally:
if connection_addr in self.pending_connections:
logging.info(f"🔗 [incoming] Removing {connection_addr} from pending connections: {self.pending_connections}")
self.pending_connections.remove(connection_addr)
if connection_addr in self.incoming_connections:
logging.info(f"🔗 [incoming] Removing {connection_addr} from incoming connections: {self.incoming_connections.keys()}")
self.incoming_connections.pop(connection_addr)
await process_connection(reader, writer)
    async def stop(self):
        logging.info("Stopping Communications Manager... [Removing connections and stopping network engine]")
        connections = list(self.connections.values())
        for node in connections:
            await node.stop()
        if self.network_engine:
            self.network_engine.close()
            await self.network_engine.wait_closed()
            self.network_task.cancel()
    async def run_reconnections(self):
        for connection in self.connections_reconnect:
            if connection["addr"] in self.connections:
                connection["tries"] = 0
                logging.info(f"🔗 Node {connection['addr']} is still connected!")
            else:
                connection["tries"] += 1
                await self.connect(connection["addr"])
def verify_connections(self, neighbors):
# Return True if all neighbors are connected
if all(neighbor in self.connections for neighbor in neighbors):
return True
return False
async def network_wait(self):
await self.stop_network_engine.wait()
async def deploy_additional_services(self):
logging.info(f"� Deploying additional services...")
self._generate_network_conditions()
await self._forwarder.start()
await self._discoverer.start()
# await self._health.start()
self._propagator.start()
await self._mobility.start()
def _generate_network_conditions(self):
# TODO: Implement selection of network conditions from frontend
if self.config.participant["network_args"]["simulation"]:
interface = self.config.participant["network_args"]["interface"]
bandwidth = self.config.participant["network_args"]["bandwidth"]
delay = self.config.participant["network_args"]["delay"]
delay_distro = self.config.participant["network_args"]["delay-distro"]
delay_distribution = self.config.participant["network_args"]["delay-distribution"]
loss = self.config.participant["network_args"]["loss"]
duplicate = self.config.participant["network_args"]["duplicate"]
corrupt = self.config.participant["network_args"]["corrupt"]
reordering = self.config.participant["network_args"]["reordering"]
logging.info(f"� Network simulation is enabled | Interface: {interface} | Bandwidth: {bandwidth} | Delay: {delay} | Delay Distro: {delay_distro} | Delay Distribution: {delay_distribution} | Loss: {loss} | Duplicate: {duplicate} | Corrupt: {corrupt} | Reordering: {reordering}")
try:
results = subprocess.run(
[
"tcset",
str(interface),
"--rate",
str(bandwidth),
"--delay",
str(delay),
"--delay-distro",
str(delay_distro),
"--delay-distribution",
str(delay_distribution),
"--loss",
str(loss),
"--duplicate",
str(duplicate),
"--corrupt",
str(corrupt),
"--reordering",
str(reordering),
],
check=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
text=True,
)
            except Exception as e:
                logging.error(f"Network simulation error: {e}")
                return
        else:
            logging.info("Network simulation is disabled. Using default network conditions...")
def _reset_network_conditions(self):
interface = self.config.participant["network_args"]["interface"]
logging.info(f"� Resetting network conditions")
try:
results = subprocess.run(
["tcdel", str(interface), "--all"],
check=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
text=True,
)
except Exception as e:
logging.error(f"�� Network simulation error: {e}")
return
def _set_network_conditions(
self,
interface="eth0",
network="192.168.50.2",
bandwidth="5Gbps",
delay="0ms",
delay_distro="0ms",
delay_distribution="normal",
loss="0%",
duplicate="0%",
corrupt="0%",
reordering="0%",
):
        logging.info(
            f"Changing network conditions | Interface: {interface} | Network: {network} | Bandwidth: {bandwidth} | Delay: {delay} | Delay Distro: {delay_distro} | Delay Distribution: {delay_distribution} | Loss: {loss} | Duplicate: {duplicate} | Corrupt: {corrupt} | Reordering: {reordering}"
        )
try:
results = subprocess.run(
[
"tcset",
str(interface),
"--network",
str(network) if network is not None else "",
"--rate",
str(bandwidth),
"--delay",
str(delay),
"--delay-distro",
str(delay_distro),
"--delay-distribution",
str(delay_distribution),
"--loss",
str(loss),
"--duplicate",
str(duplicate),
"--corrupt",
str(corrupt),
"--reordering",
str(reordering),
"--change",
],
check=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
text=True,
)
except Exception as e:
logging.error(f"�� Network simulation error: {e}")
return
async def include_received_message_hash(self, hash_message):
try:
await self.receive_messages_lock.acquire_async()
if hash_message in self.received_messages_hashes:
# logging.info(f"�� handle_incoming_message | Ignoring message already received.")
return False
self.received_messages_hashes.append(hash_message)
if len(self.received_messages_hashes) % 100 == 0:
logging.info(f"📥 Received {len(self.received_messages_hashes)} messages")
return True
except Exception as e:
logging.error(f"�� handle_incoming_message | Error including message hash: {e}")
return False
finally:
await self.receive_messages_lock.release_async()
async def send_message_to_neighbors(self, message, neighbors=None, interval=0):
if neighbors is None:
current_connections = await self.get_all_addrs_current_connections(only_direct=True)
neighbors = set(current_connections)
logging.info(f"Sending message to ALL neighbors: {neighbors}")
else:
logging.info(f"Sending message to neighbors: {neighbors}")
for neighbor in neighbors:
asyncio.create_task(self.send_message(neighbor, message))
if interval > 0:
await asyncio.sleep(interval)
async def send_message(self, dest_addr, message):
try:
conn = self.connections[dest_addr]
await conn.send(data=message)
except Exception as e:
logging.error(f"�� Cannot send message {message} to {dest_addr}. Error: {str(e)}")
await self.disconnect(dest_addr, mutual_disconnection=False)
async def send_model(self, dest_addr, round, serialized_model, weight=1):
async with self.semaphore_send_model:
try:
conn = self.connections.get(dest_addr)
if conn is None:
logging.info(f"�� Connection with {dest_addr} not found")
return
logging.info(f"Sending model to {dest_addr} with round {round}: weight={weight} |Â size={sys.getsizeof(serialized_model) / (1024 ** 2) if serialized_model is not None else 0} MB")
message = self.mm.generate_model_message(round, serialized_model, weight)
await conn.send(data=message, is_compressed=True)
logging.info(f"Model sent to {dest_addr} with round {round}")
except Exception as e:
logging.error(f"�� Cannot send model to {dest_addr}: {str(e)}")
await self.disconnect(dest_addr, mutual_disconnection=False)
async def establish_connection(self, addr, direct=True, reconnect=False):
logging.info(f"🔗 [outgoing] Establishing connection with {addr} (direct: {direct})")
async def process_establish_connection(addr, direct, reconnect):
try:
host = str(addr.split(":")[0])
port = str(addr.split(":")[1])
if host == self.host and port == self.port:
logging.info("🔗 [outgoing] Connection with yourself is not allowed")
return False
async with self.connections_manager_lock:
if addr in self.connections:
logging.info(f"🔗 [outgoing] Already connected with {self.connections[addr]}")
return False
if addr in self.pending_connections:
logging.info(f"🔗 [outgoing] Connection with {addr} is already pending")
if int(self.host.split(".")[3]) >= int(host.split(".")[3]):
logging.info(f"🔗 [outgoing] Closing outgoing connection since self.host >= host (from {addr})")
return False
else:
logging.info(f"🔗 [outgoing] Closing incoming connection since self.host < host (from {addr})")
if addr in self.incoming_connections:
inc_reader, inc_writer = self.incoming_connections.pop(addr)
inc_writer.write("CONNECTION//CLOSE\n".encode("utf-8"))
await inc_writer.drain()
inc_writer.close()
await inc_writer.wait_closed()
self.pending_connections.add(addr)
logging.info(f"🔗 [outgoing] Including {addr} in pending connections: {self.pending_connections}")
logging.info(f"🔗 [outgoing] Openning connection with {host}:{port}")
reader, writer = await asyncio.open_connection(host, port)
logging.info(f"🔗 [outgoing] Connection opened with {writer.get_extra_info('peername')}")
async with self.connections_manager_lock:
self.outgoing_connections[addr] = (reader, writer)
writer.write(f"{self.id}:{self.port}\n".encode("utf-8"))
await writer.drain()
writer.write(f"{direct}\n".encode("utf-8"))
await writer.drain()
connection_status = await reader.readline()
connection_status = connection_status.decode("utf-8").strip()
logging.info(f"🔗 [outgoing] Received connection status {connection_status} (from {addr})")
logging.info(f"🔗 [outgoing] Connections: {self.connections}")
if connection_status == "CONNECTION//CLOSE":
logging.info(f"🔗 [outgoing] Connection with {addr} closed")
if addr in self.pending_connections:
logging.info(f"🔗 [outgoing] Removing {addr} from pending connections: {self.pending_connections}")
self.pending_connections.remove(addr)
if addr in self.outgoing_connections:
logging.info(f"🔗 [outgoing] Removing {addr} from outgoing connections: {self.outgoing_connections.keys()}")
self.outgoing_connections.pop(addr)
if addr in self.incoming_connections:
logging.info(f"🔗 [outgoing] Removing {addr} from incoming connections: {self.incoming_connections.keys()}")
self.incoming_connections.pop(addr)
writer.close()
await writer.wait_closed()
return False
elif connection_status == "CONNECTION//PENDING":
logging.info(f"🔗 [outgoing] Connection with {addr} is already pending")
writer.close()
await writer.wait_closed()
return False
elif connection_status == "CONNECTION//EXISTS":
logging.info(f"🔗 [outgoing] Already connected {self.connections[addr]}")
writer.close()
await writer.wait_closed()
return True
elif connection_status == "CONNECTION//NEW":
async with self.connections_manager_lock:
connected_node_id = await reader.readline()
connected_node_id = connected_node_id.decode("utf-8").strip()
logging.info(f"🔗 [outgoing] Received connected node id: {connected_node_id} (from {addr})")
logging.info(f"🔗 [outgoing] Creating new connection with {host}:{port} (id {connected_node_id})")
connection = Connection(self, reader, writer, connected_node_id, host, port, direct=direct, config=self.config)
self.connections[addr] = connection
await connection.start()
else:
logging.info(f"🔗 [outgoing] Unknown connection status {connection_status}")
writer.close()
await writer.wait_closed()
return False
if reconnect:
logging.info(f"🔗 [outgoing] Reconnection check is enabled on node {addr}")
self.connections_reconnect.append({"addr": addr, "tries": 0})
self.config.add_neighbor_from_config(addr)
return True
except Exception as e:
logging.info(f"�� [outgoing] Error adding direct connected neighbor {addr}: {str(e)}")
return False
finally:
if addr in self.pending_connections:
logging.info(f"🔗 [outgoing] Removing {addr} from pending connections: {self.pending_connections}")
self.pending_connections.remove(addr)
if addr in self.outgoing_connections:
logging.info(f"🔗 [outgoing] Removing {addr} from outgoing connections: {self.outgoing_connections.keys()}")
self.outgoing_connections.pop(addr)
if addr in self.incoming_connections:
logging.info(f"🔗 [outgoing] Removing {addr} from incoming connections: {self.incoming_connections.keys()}")
self.incoming_connections.pop(addr)
asyncio.create_task(process_establish_connection(addr, direct, reconnect))
async def connect(self, addr, direct=True):
await self.get_connections_lock().acquire_async()
duplicated = addr in self.connections.keys()
await self.get_connections_lock().release_async()
if duplicated:
            if direct:  # Incoming request for a direct connection
                if not self.connections[addr].get_direct():
                    logging.info(f"🔗 [outgoing] Upgrading non-direct connected neighbor {addr} to direct connection")
                    return await self.establish_connection(addr, direct=True, reconnect=False)
                else:  # Connection is already direct; re-establish it
                    logging.info(f"🔗 [outgoing] Already direct connected neighbor {addr}, reconnecting...")
                    return await self.establish_connection(addr, direct=True, reconnect=False)
else:
logging.info(f"�� Cannot add a duplicate {addr} (undirected connection), already connected")
return False
else:
if direct:
return await self.establish_connection(addr, direct=True, reconnect=False)
else:
return await self.establish_connection(addr, direct=False, reconnect=False)
async def register(self):
data = {"node": self.addr}
logging.info(f"Registering node {self.addr} in the controller")
response = requests.post(self.register_endpoint, json=data)
if response.status_code == 200:
logging.info(f"Node {self.addr} registered successfully in the controller")
else:
logging.error(f"Error registering node {self.addr} in the controller")
async def wait_for_controller(self):
while True:
response = requests.get(self.wait_endpoint)
if response.status_code == 200:
logging.info(f"Continue signal received from controller")
break
else:
logging.info(f"Waiting for controller signal...")
await asyncio.sleep(1)
async def disconnect(self, dest_addr, mutual_disconnection=True):
logging.info(f"Trying to disconnect {dest_addr}")
if dest_addr not in self.connections:
logging.info(f"Connection {dest_addr} not found")
return
try:
if mutual_disconnection:
await self.connections[dest_addr].send(data=self.mm.generate_connection_message(nebula_pb2.ConnectionMessage.Action.DISCONNECT))
await asyncio.sleep(1)
self.connections[dest_addr].stop()
except Exception as e:
logging.error(f"�� Error while disconnecting {dest_addr}: {str(e)}")
if dest_addr in self.connections:
logging.info(f"Removing {dest_addr} from connections")
del self.connections[dest_addr]
current_connections = await self.get_all_addrs_current_connections(only_direct=True)
current_connections = set(current_connections)
logging.info(f"Current connections: {current_connections}")
self.config.update_neighbors_from_config(current_connections, dest_addr)
async def get_all_addrs_current_connections(self, only_direct=False, only_undirected=False):
try:
await self.get_connections_lock().acquire_async()
if only_direct:
return {addr for addr, conn in self.connections.items() if conn.get_direct()}
elif only_undirected:
return {addr for addr, conn in self.connections.items() if not conn.get_direct()}
else:
return set(self.connections.keys())
finally:
await self.get_connections_lock().release_async()
async def get_addrs_current_connections(self, only_direct=False, only_undirected=False, myself=False):
current_connections = await self.get_all_addrs_current_connections(only_direct=only_direct, only_undirected=only_undirected)
current_connections = set(current_connections)
if myself:
current_connections.add(self.addr)
return current_connections
async def get_connection_by_addr(self, addr):
try:
await self.get_connections_lock().acquire_async()
for key, conn in self.connections.items():
if addr in key:
return conn
return None
except Exception as e:
logging.error(f"Error getting connection by address: {e}")
return None
finally:
await self.get_connections_lock().release_async()
async def get_direct_connections(self):
try:
await self.get_connections_lock().acquire_async()
return {conn for _, conn in self.connections.items() if conn.get_direct()}
finally:
await self.get_connections_lock().release_async()
async def get_undirect_connections(self):
try:
await self.get_connections_lock().acquire_async()
return {conn for _, conn in self.connections.items() if not conn.get_direct()}
finally:
await self.get_connections_lock().release_async()
async def get_nearest_connections(self, top: int = 1):
try:
await self.get_connections_lock().acquire_async()
sorted_connections = sorted(
self.connections.values(),
key=lambda conn: (conn.get_neighbor_distance() if conn.get_neighbor_distance() is not None else float("inf")),
)
if top == 1:
return sorted_connections[0]
else:
return sorted_connections[:top]
finally:
await self.get_connections_lock().release_async()
def get_ready_connections(self):
return {addr for addr, conn in self.connections.items() if conn.get_ready()}
def check_finished_experiment(self):
return all(conn.get_federated_round() == self.config.participant["scenario_args"]["rounds"] - 1 for conn in self.connections.values())
def __str__(self):
return f"Connections: {[str(conn) for conn in self.connections.values()]}"
| 43,081 | Python | .py | 748 | 42.362299 | 298 | 0.587681 | enriquetomasmb/nebula | 8 | 2 | 2 | GPL-3.0 | 9/5/2024, 10:48:43 PM (Europe/Amsterdam) |
2,290,013 | health.py | enriquetomasmb_nebula/nebula/core/network/health.py | import asyncio
import logging
import time
from nebula.addons.functions import print_msg_box
from typing import TYPE_CHECKING
from nebula.core.pb import nebula_pb2
if TYPE_CHECKING:
from nebula.core.network.communications import CommunicationsManager
class Health:
def __init__(self, addr, config, cm: "CommunicationsManager"):
print_msg_box(msg=f"Starting health module...", indent=2, title="Health module")
self.addr = addr
self.config = config
self.cm = cm
self.period = self.config.participant["health_args"]["health_interval"]
self.alive_interval = self.config.participant["health_args"]["send_alive_interval"]
self.check_alive_interval = self.config.participant["health_args"]["check_alive_interval"]
self.timeout = self.config.participant["health_args"]["alive_timeout"]
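        # Illustrative shape of the expected config section (key names taken from the reads
        # above; the values are hypothetical):
        #
        #   "health_args": {
        #       "health_interval": 10, "send_alive_interval": 5, "check_alive_interval": 5,
        #       "alive_timeout": 60, "grace_time_health": 20
        #   }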
async def start(self):
asyncio.create_task(self.run_send_alive())
asyncio.create_task(self.run_check_alive())
async def run_send_alive(self):
await asyncio.sleep(self.config.participant["health_args"]["grace_time_health"])
# Set all connections to active at the beginning of the health module
for conn in self.cm.connections.values():
conn.set_active(True)
while True:
if len(self.cm.connections) > 0:
message = self.cm.mm.generate_control_message(nebula_pb2.ControlMessage.Action.ALIVE, log="Alive message")
current_connections = list(self.cm.connections.values())
for conn in current_connections:
if conn.get_direct():
try:
logging.info(f"🕒 Sending alive message to {conn.get_addr()}...")
await conn.send(data=message)
except Exception as e:
logging.error(f"�� Cannot send alive message to {conn.get_addr()}. Error: {str(e)}")
await asyncio.sleep(self.alive_interval)
await asyncio.sleep(self.period)
async def run_check_alive(self):
await asyncio.sleep(self.config.participant["health_args"]["grace_time_health"] + self.check_alive_interval)
while True:
if len(self.cm.connections) > 0:
current_connections = list(self.cm.connections.values())
for conn in current_connections:
if conn.get_direct():
if time.time() - conn.get_last_active() > self.timeout:
logging.error(f"⬅� 🕒 Heartbeat timeout for {conn.get_addr()}...")
await self.cm.disconnect(conn.get_addr(), mutual_disconnection=False)
await asyncio.sleep(self.check_alive_interval)
async def alive(self, source):
current_time = time.time()
if source not in self.cm.connections:
logging.error(f"�� Connection {source} not found in connections...")
return
conn = self.cm.connections[source]
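        # Descriptive note: set_active(True) also refreshes last_active, so receiving an
        # ALIVE message effectively resets the heartbeat timer checked by run_check_alive.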
if conn.get_last_active() < current_time:
logging.debug(f"🕒 Updating last active time for {source}")
conn.set_active(True)
| 3,242 | Python | .py | 59 | 42.627119 | 122 | 0.620133 | enriquetomasmb/nebula | 8 | 2 | 2 | GPL-3.0 | 9/5/2024, 10:48:43 PM (Europe/Amsterdam) |
2,290,014 | connection.py | enriquetomasmb_nebula/nebula/core/network/connection.py | import asyncio
import logging
import time
from geopy import distance
import json
import zlib, bz2, lzma
import uuid
from dataclasses import dataclass
from typing import Dict, Any, Optional
import lz4.frame
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from nebula.core.network.communications import CommunicationsManager
@dataclass
class MessageChunk:
__slots__ = ['index', 'data', 'is_last']
index: int
data: bytes
is_last: bool
class Connection:
DEFAULT_FEDERATED_ROUND = -1
def __init__(
self,
cm: "CommunicationsManager",
reader,
writer,
id,
host,
port,
direct=True,
active=True,
compression="zlib",
config=None,
):
self.cm = cm
self.reader = reader
self.writer = writer
self.id = str(id)
self.host = host
self.port = port
self.addr = f"{host}:{port}"
self.direct = direct
self.active = active
self.last_active = time.time()
self.compression = compression
self.config = config
self.federated_round = Connection.DEFAULT_FEDERATED_ROUND
self.latitude = None
self.longitude = None
self.loop = asyncio.get_event_loop()
self.read_task = None
self.process_task = None
self.pending_messages_queue = asyncio.Queue(maxsize=100)
self.message_buffers: Dict[bytes, Dict[int, MessageChunk]] = {}
self.EOT_CHAR = b"\x00\x00\x00\x04"
self.COMPRESSION_CHAR = b"\x00\x00\x00\x01"
self.DATA_TYPE_PREFIXES = {"pb": b"\x01\x00\x00\x00", "string": b"\x02\x00\x00\x00", "json": b"\x03\x00\x00\x00", "bytes": b"\x04\x00\x00\x00"}
self.HEADER_SIZE = 21
self.MAX_CHUNK_SIZE = 1024 # 1 KB
self.BUFFER_SIZE = 1024 # 1 KB
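        # Wire format (descriptive note, matching _send_chunks/_parse_header below): each
        # chunk on the wire is
        #   16-byte message id | 4-byte chunk index (big-endian) | 1-byte last-chunk flag
        #   | 4-byte chunk size | payload | EOT_CHAR
        # which is why HEADER_SIZE is 16 + 4 + 1 = 21 bytes.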
logging.info(f"Connection [established]: {self.addr} (id: {self.id}) (active: {self.active}) (direct: {self.direct})")
def __str__(self):
return f"Connection to {self.addr} (id: {self.id}) (active: {self.active}) (last active: {self.last_active}) (direct: {self.direct})"
def __repr__(self):
return self.__str__()
    def __del__(self):
        try:
            self.loop.create_task(self.stop())  # stop() is a coroutine; schedule it
        except RuntimeError:
            pass  # no running event loop left
def get_addr(self):
return self.addr
def get_federated_round(self):
return self.federated_round
def get_tunnel_status(self):
if self.reader is None or self.writer is None:
return False
return True
def update_round(self, federated_round):
self.federated_round = federated_round
def update_geolocation(self, latitude, longitude):
self.latitude = latitude
self.longitude = longitude
self.config.participant["mobility_args"]["neighbors_distance"][self.addr] = self.compute_distance_myself()
def get_geolocation(self):
if self.latitude is None or self.longitude is None:
raise ValueError("Geo-location not set for this neighbor")
return self.latitude, self.longitude
def get_neighbor_distance(self):
if self.addr not in self.config.participant["mobility_args"]["neighbors_distance"]:
return None
return self.config.participant["mobility_args"]["neighbors_distance"][self.addr]
def compute_distance(self, latitude, longitude):
distance_m = distance.distance((self.latitude, self.longitude), (latitude, longitude)).m
return distance_m
def compute_distance_myself(self):
distance_m = self.compute_distance(self.config.participant["mobility_args"]["latitude"], self.config.participant["mobility_args"]["longitude"])
return distance_m
def get_ready(self):
return True if self.federated_round != Connection.DEFAULT_FEDERATED_ROUND else False
def get_direct(self):
return self.direct
def set_direct(self, direct):
# config.participant["network_args"]["neighbors"] only contains direct neighbors (frotend purposes)
if direct:
self.config.add_neighbor_from_config(self.addr)
else:
self.config.remove_neighbor_from_config(self.addr)
self.last_active = time.time()
self.direct = direct
def set_active(self, active):
self.active = active
self.last_active = time.time()
def is_active(self):
return self.active
def get_last_active(self):
return self.last_active
async def start(self):
self.read_task = asyncio.create_task(self.handle_incoming_message(), name=f"Connection {self.addr} reader")
self.process_task = asyncio.create_task(self.process_message_queue(), name=f"Connection {self.addr} processor")
async def stop(self):
logging.info(f"❗️ Connection [stopped]: {self.addr} (id: {self.id})")
tasks = [self.read_task, self.process_task]
for task in tasks:
if task is not None:
task.cancel()
try:
await task
except asyncio.CancelledError:
logging.error(f"❗️ {self} cancelled...")
if self.writer is not None:
self.writer.close()
await self.writer.wait_closed()
async def reconnect(self, max_retries: int = 5, delay: int = 5) -> None:
for attempt in range(max_retries):
try:
logging.info(f"Attempting to reconnect to {self.addr} (attempt {attempt + 1}/{max_retries})")
await self.cm.connect(self.addr)
self.read_task = asyncio.create_task(self.handle_incoming_message(), name=f"Connection {self.addr} reader")
self.process_task = asyncio.create_task(self.process_message_queue(), name=f"Connection {self.addr} processor")
logging.info(f"Reconnected to {self.addr}")
return
except Exception as e:
logging.error(f"Reconnection attempt {attempt + 1} failed: {e}")
await asyncio.sleep(delay)
logging.error(f"Failed to reconnect to {self.addr} after {max_retries} attempts. Stopping connection...")
await self.stop()
async def send(self, data: Any, pb: bool = True, encoding_type: str = "utf-8", is_compressed: bool = False) -> None:
if self.writer is None:
logging.error("Cannot send data, writer is None")
return
try:
message_id = uuid.uuid4().bytes
data_prefix, encoded_data = self._prepare_data(data, pb, encoding_type)
if is_compressed:
encoded_data = await asyncio.to_thread(self._compress, encoded_data, self.compression)
if encoded_data is None:
return
data_to_send = data_prefix + encoded_data + self.COMPRESSION_CHAR
else:
data_to_send = data_prefix + encoded_data
await self._send_chunks(message_id, data_to_send)
except Exception as e:
logging.error(f"Error sending data: {e}")
await self.reconnect()
def _prepare_data(self, data: Any, pb: bool, encoding_type: str) -> tuple[bytes, bytes]:
if pb:
return self.DATA_TYPE_PREFIXES["pb"], data
elif isinstance(data, str):
return self.DATA_TYPE_PREFIXES["string"], data.encode(encoding_type)
elif isinstance(data, dict):
return self.DATA_TYPE_PREFIXES["json"], json.dumps(data).encode(encoding_type)
elif isinstance(data, bytes):
return self.DATA_TYPE_PREFIXES["bytes"], data
else:
raise ValueError(f"Unknown data type to send: {type(data)}")
def _compress(self, data: bytes, compression: str) -> Optional[bytes]:
if compression == "lz4":
return lz4.frame.compress(data)
elif compression == "zlib":
return zlib.compress(data)
elif compression == "bz2":
return bz2.compress(data)
elif compression == "lzma":
return lzma.compress(data)
else:
logging.error(f"Unsupported compression method: {compression}")
return None
async def _send_chunks(self, message_id: bytes, data: bytes) -> None:
chunk_size = self._calculate_chunk_size(len(data))
num_chunks = (len(data) + chunk_size - 1) // chunk_size
for chunk_index in range(num_chunks):
start = chunk_index * chunk_size
end = min(start + chunk_size, len(data))
chunk = data[start:end]
is_last_chunk = chunk_index == num_chunks - 1
header = message_id + chunk_index.to_bytes(4, "big") + (b"\x01" if is_last_chunk else b"\x00")
chunk_size_bytes = len(chunk).to_bytes(4, "big")
chunk_with_header = header + chunk_size_bytes + chunk + self.EOT_CHAR
self.writer.write(chunk_with_header)
await self.writer.drain()
# logging.debug(f"Sent message {message_id.hex()} | chunk {chunk_index+1}/{num_chunks} | size: {len(chunk)} bytes")
def _calculate_chunk_size(self, data_size: int) -> int:
return self.BUFFER_SIZE
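    # Descriptive note: chunking is currently fixed at BUFFER_SIZE regardless of data_size;
    # an adaptive policy (e.g. growing the chunk size with the payload) would slot into
    # _calculate_chunk_size above.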
async def handle_incoming_message(self) -> None:
reusable_buffer = bytearray(self.MAX_CHUNK_SIZE)
try:
while True:
if self.pending_messages_queue.full():
await asyncio.sleep(0.1) # Wait a bit if the queue is full to create backpressure
continue
header = await self._read_exactly(self.HEADER_SIZE)
message_id, chunk_index, is_last_chunk = self._parse_header(header)
chunk_data = await self._read_chunk(reusable_buffer)
self._store_chunk(message_id, chunk_index, chunk_data, is_last_chunk)
# logging.debug(f"Received chunk {chunk_index} of message {message_id.hex()} | size: {len(chunk_data)} bytes")
if is_last_chunk:
await self._process_complete_message(message_id)
except asyncio.CancelledError:
logging.info("Message handling cancelled")
except ConnectionError as e:
logging.error(f"Connection closed while reading: {e}")
await self.reconnect()
except Exception as e:
logging.error(f"Error handling incoming message: {e}")
    async def _read_exactly(self, num_bytes: int, max_retries: int = 3) -> bytes:
        data = b""
        remaining = num_bytes
        for attempt in range(max_retries):
            try:
                while remaining > 0:
                    chunk = await self.reader.read(min(remaining, self.BUFFER_SIZE))
                    if not chunk:
                        raise ConnectionError("Connection closed while reading")
                    data += chunk
                    remaining -= len(chunk)
                return data
            except asyncio.IncompleteReadError as e:
                if attempt == max_retries - 1:
                    raise
                logging.warning(f"Retrying read after IncompleteReadError: {e}")
        raise RuntimeError("Max retries reached in _read_exactly")
def _parse_header(self, header: bytes) -> tuple[bytes, int, bool]:
message_id = header[:16]
chunk_index = int.from_bytes(header[16:20], "big")
is_last_chunk = header[20] == 1
return message_id, chunk_index, is_last_chunk
    async def _read_chunk(self, buffer: Optional[bytearray] = None) -> bytes:
if buffer is None:
buffer = bytearray(self.MAX_CHUNK_SIZE)
chunk_size_bytes = await self._read_exactly(4)
chunk_size = int.from_bytes(chunk_size_bytes, "big")
if chunk_size > self.MAX_CHUNK_SIZE:
raise ValueError(f"Chunk size {chunk_size} exceeds MAX_CHUNK_SIZE {self.MAX_CHUNK_SIZE}")
chunk = await self._read_exactly(chunk_size)
buffer[:chunk_size] = chunk
eot = await self._read_exactly(len(self.EOT_CHAR))
if eot != self.EOT_CHAR:
raise ValueError("Invalid EOT character")
return memoryview(buffer)[:chunk_size]
def _store_chunk(self, message_id: bytes, chunk_index: int, buffer: memoryview, is_last: bool) -> None:
if message_id not in self.message_buffers:
self.message_buffers[message_id] = {}
try:
self.message_buffers[message_id][chunk_index] = MessageChunk(chunk_index, buffer.tobytes(), is_last)
# logging.debug(f"Stored chunk {chunk_index} of message {message_id.hex()} | size: {len(data)} bytes")
except Exception as e:
if message_id in self.message_buffers:
del self.message_buffers[message_id]
logging.error(f"Error storing chunk {chunk_index} for message {message_id.hex()}: {e}")
async def _process_complete_message(self, message_id: bytes) -> None:
chunks = sorted(self.message_buffers[message_id].values(), key=lambda x: x.index)
complete_message = b"".join(chunk.data for chunk in chunks)
del self.message_buffers[message_id]
data_type_prefix = complete_message[:4]
message_content = complete_message[4:]
if message_content.endswith(self.COMPRESSION_CHAR):
message_content = await asyncio.to_thread(self._decompress, message_content[: -len(self.COMPRESSION_CHAR)], self.compression)
if message_content is None:
return
await self.pending_messages_queue.put((data_type_prefix, memoryview(message_content)))
# logging.debug(f"Processed complete message {message_id.hex()} | total size: {len(complete_message)} bytes")
def _decompress(self, data: bytes, compression: str) -> Optional[bytes]:
if compression == "zlib":
return zlib.decompress(data)
elif compression == "bz2":
return bz2.decompress(data)
elif compression == "lzma":
return lzma.decompress(data)
elif compression == "lz4":
return lz4.frame.decompress(data)
else:
logging.error(f"Unsupported compression method: {compression}")
return None
async def process_message_queue(self) -> None:
while True:
try:
if self.pending_messages_queue is None:
logging.error("Pending messages queue is not initialized")
return
data_type_prefix, message = await self.pending_messages_queue.get()
await self._handle_message(data_type_prefix, message)
self.pending_messages_queue.task_done()
except Exception as e:
logging.error(f"Error processing message queue: {e}")
finally:
await asyncio.sleep(0)
async def _handle_message(self, data_type_prefix: bytes, message: bytes) -> None:
if data_type_prefix == self.DATA_TYPE_PREFIXES["pb"]:
# logging.debug("Received a protobuf message")
asyncio.create_task(self.cm.handle_incoming_message(message, self.addr), name=f"Connection {self.addr} message handler")
elif data_type_prefix == self.DATA_TYPE_PREFIXES["string"]:
logging.debug(f"Received string message: {message.decode('utf-8')}")
elif data_type_prefix == self.DATA_TYPE_PREFIXES["json"]:
logging.debug(f"Received JSON message: {json.loads(message.decode('utf-8'))}")
elif data_type_prefix == self.DATA_TYPE_PREFIXES["bytes"]:
logging.debug(f"Received bytes message of length: {len(message)}")
else:
logging.error(f"Unknown data type prefix: {data_type_prefix}")
| 15,670 | Python | .py | 316 | 38.920886 | 151 | 0.618393 | enriquetomasmb/nebula | 8 | 2 | 2 | GPL-3.0 | 9/5/2024, 10:48:43 PM (Europe/Amsterdam) |
2,290,015 | scikit.py | enriquetomasmb_nebula/nebula/core/training/scikit.py | import logging
import pickle
import traceback
from sklearn.metrics import accuracy_score
class Scikit:
def __init__(self, model, data, config=None, logger=None):
self.model = model
self.data = data
self.config = config
self.logger = logger
self.round = 0
self.epochs = 1
self.logger.log_data({"Round": self.round}, step=self.logger.global_step)
def set_model(self, model):
self.model = model
def get_round(self):
return self.round
def set_data(self, data):
self.data = data
def serialize_model(self, params=None):
if params is None:
params = self.model.get_params()
return pickle.dumps(params)
def deserialize_model(self, data):
try:
params = pickle.loads(data)
return params
        except Exception as e:
            raise Exception("Error decoding parameters") from e
def set_model_parameters(self, params):
self.model.set_params(**params)
def get_model_parameters(self):
return self.model.get_params()
def set_epochs(self, epochs):
self.epochs = epochs
def fit(self):
try:
X_train, y_train = self.data.train_dataloader()
self.model.fit(X_train, y_train)
except Exception as e:
logging.error("Error with scikit-learn fit. {}".format(e))
logging.error(traceback.format_exc())
def interrupt_fit(self):
pass
def evaluate(self):
try:
X_test, y_test = self.data.test_dataloader()
y_pred = self.model.predict(X_test)
accuracy = accuracy_score(y_test, y_pred)
logging.info(f"Accuracy: {accuracy}")
except Exception as e:
logging.error("Error with scikit-learn evaluate. {}".format(e))
logging.error(traceback.format_exc())
return None
def get_train_size(self):
return (
len(self.data.train_dataloader()),
len(self.data.test_dataloader()),
)
def finalize_round(self):
self.round += 1
if self.logger:
self.logger.log_data({"Round": self.round})
| 2,192 | Python | .py | 63 | 25.936508 | 81 | 0.600757 | enriquetomasmb/nebula | 8 | 2 | 2 | GPL-3.0 | 9/5/2024, 10:48:43 PM (Europe/Amsterdam) |
2,290,016 | lightning.py | enriquetomasmb_nebula/nebula/core/training/lightning.py | import copy
import gc
import logging
from collections import OrderedDict
import asyncio
import os
import pickle
import traceback
import hashlib
import io
import gzip
import torch
from lightning import Trainer
from lightning.pytorch.callbacks import ProgressBar, ModelSummary
from torch.nn import functional as F
from nebula.core.utils.deterministic import enable_deterministic
from lightning.pytorch.loggers import CSVLogger
from nebula.core.utils.nebulalogger_tensorboard import NebulaTensorBoardLogger
try:
from nebula.core.utils.nebulalogger import NebulaLogger
except ImportError:
    pass
from nebula.config.config import TRAINING_LOGGER
logging_training = logging.getLogger(TRAINING_LOGGER)
class NebulaProgressBar(ProgressBar):
"""Nebula progress bar for training.
Logs the percentage of completion of the training process using logging.
"""
def __init__(self, log_every_n_steps=100):
super().__init__()
self.enable = True
self.log_every_n_steps = log_every_n_steps
def enable(self):
"""Enable progress bar logging."""
self.enable = True
def disable(self):
"""Disable the progress bar logging."""
self.enable = False
def on_train_epoch_start(self, trainer, pl_module):
"""Called when the training epoch starts."""
super().on_train_epoch_start(trainer, pl_module)
if self.enable:
logging_training.info(f"Starting Epoch {trainer.current_epoch}")
def on_train_batch_end(self, trainer, pl_module, outputs, batch, batch_idx):
"""Called at the end of each training batch."""
super().on_train_batch_end(trainer, pl_module, outputs, batch, batch_idx)
if self.enable:
if (batch_idx + 1) % self.log_every_n_steps == 0 or (batch_idx + 1) == self.total_train_batches:
# Calculate percentage complete for the current epoch
percent = ((batch_idx + 1) / self.total_train_batches) * 100 # +1 to count current batch
logging_training.info(f"Epoch {trainer.current_epoch} - {percent:.01f}% complete")
def on_train_epoch_end(self, trainer, pl_module):
"""Called at the end of the training epoch."""
super().on_train_epoch_end(trainer, pl_module)
if self.enable:
logging_training.info(f"Epoch {trainer.current_epoch} finished")
def on_validation_epoch_start(self, trainer, pl_module):
super().on_validation_epoch_start(trainer, pl_module)
if self.enable:
logging_training.info(f"Starting validation for Epoch {trainer.current_epoch}")
def on_validation_epoch_end(self, trainer, pl_module):
super().on_validation_epoch_end(trainer, pl_module)
if self.enable:
logging_training.info(f"Validation for Epoch {trainer.current_epoch} finished")
def on_test_batch_start(self, trainer, pl_module, batch, batch_idx, dataloader_idx):
super().on_test_batch_start(trainer, pl_module, batch, batch_idx, dataloader_idx)
if not self.has_dataloader_changed(dataloader_idx):
return
def on_test_batch_end(self, trainer, pl_module, outputs, batch, batch_idx, dataloader_idx):
"""Called at the end of each test batch."""
super().on_test_batch_end(trainer, pl_module, outputs, batch, batch_idx, dataloader_idx)
if self.enable:
total_batches = self.total_test_batches_current_dataloader
if total_batches == 0:
logging_training.warning(f"Total test batches is 0 for dataloader {dataloader_idx}, cannot compute progress.")
return
if (batch_idx + 1) % self.log_every_n_steps == 0 or (batch_idx + 1) == total_batches:
percent = ((batch_idx + 1) / total_batches) * 100 # +1 to count the current batch
logging_training.info(f"Test Epoch {trainer.current_epoch}, Dataloader {dataloader_idx} - {percent:.01f}% complete")
def on_test_epoch_start(self, trainer, pl_module):
super().on_test_epoch_start(trainer, pl_module)
if self.enable:
logging_training.info(f"Starting testing for Epoch {trainer.current_epoch}")
def on_test_epoch_end(self, trainer, pl_module):
super().on_test_epoch_end(trainer, pl_module)
if self.enable:
logging_training.info(f"Testing for Epoch {trainer.current_epoch} finished")
class ParameterSerializeError(Exception):
"""Custom exception for errors setting model parameters."""
class ParameterDeserializeError(Exception):
"""Custom exception for errors setting model parameters."""
class ParameterSettingError(Exception):
"""Custom exception for errors setting model parameters."""
class Lightning:
DEFAULT_MODEL_WEIGHT = 1
BYPASS_MODEL_WEIGHT = 0
def __init__(self, model, data, config=None):
# self.model = torch.compile(model, mode="reduce-overhead")
self.model = model
self.data = data
self.config = config
self._trainer = None
self.epochs = 1
self.round = 0
self.experiment_name = self.config.participant["scenario_args"]["name"]
self.idx = self.config.participant["device_args"]["idx"]
self.log_dir = os.path.join(self.config.participant["tracking_args"]["log_dir"], self.experiment_name)
self._logger = None
self.create_logger()
enable_deterministic(self.config)
@property
def logger(self):
return self._logger
def get_round(self):
return self.round
def set_model(self, model):
self.model = model
def set_data(self, data):
self.data = data
def create_logger(self):
if self.config.participant["tracking_args"]["local_tracking"] == "csv":
nebulalogger = CSVLogger(f"{self.log_dir}", name="metrics", version=f"participant_{self.idx}")
elif self.config.participant["tracking_args"]["local_tracking"] == "basic":
logger_config = None
if self._logger is not None:
logger_config = self._logger.get_logger_config()
nebulalogger = NebulaTensorBoardLogger(self.config.participant["scenario_args"]["start_time"], f"{self.log_dir}", name="metrics", version=f"participant_{self.idx}", log_graph=False)
# Restore logger configuration
nebulalogger.set_logger_config(logger_config)
elif self.config.participant["tracking_args"]["local_tracking"] == "advanced":
nebulalogger = NebulaLogger(
config=self.config,
engine=self,
scenario_start_time=self.config.participant["scenario_args"]["start_time"],
repo=f"{self.config.participant['tracking_args']['log_dir']}",
experiment=self.experiment_name,
run_name=f"participant_{self.idx}",
train_metric_prefix="train_",
test_metric_prefix="test_",
val_metric_prefix="val_",
log_system_params=False,
)
# nebulalogger_aim = NebulaLogger(config=self.config, engine=self, scenario_start_time=self.config.participant["scenario_args"]["start_time"], repo=f"aim://nebula-frontend:8085",
# experiment=self.experiment_name, run_name=f"participant_{self.idx}",
# train_metric_prefix='train_', test_metric_prefix='test_', val_metric_prefix='val_', log_system_params=False)
self.config.participant["tracking_args"]["run_hash"] = nebulalogger.experiment.hash
else:
nebulalogger = None
self._logger = nebulalogger
def create_trainer(self):
# Create a new trainer and logger for each round
self.create_logger()
num_gpus = torch.cuda.device_count()
if self.config.participant["device_args"]["accelerator"] == "gpu" and num_gpus > 0:
gpu_index = self.config.participant["device_args"]["idx"] % num_gpus
logging_training.info("Creating trainer with accelerator GPU ({})".format(gpu_index))
self._trainer = Trainer(
callbacks=[ModelSummary(max_depth=1), NebulaProgressBar()],
max_epochs=self.epochs,
accelerator=self.config.participant["device_args"]["accelerator"],
devices=[gpu_index],
logger=self._logger,
enable_checkpointing=False,
enable_model_summary=False,
# deterministic=True
)
else:
logging_training.info("Creating trainer with accelerator CPU")
self._trainer = Trainer(
callbacks=[ModelSummary(max_depth=1), NebulaProgressBar()],
max_epochs=self.epochs,
accelerator=self.config.participant["device_args"]["accelerator"],
devices="auto",
logger=self._logger,
enable_checkpointing=False,
enable_model_summary=False,
# deterministic=True
)
logging_training.info(f"Trainer strategy: {self._trainer.strategy}")
def validate_neighbour_model(self, neighbour_model_param):
avg_loss = 0
running_loss = 0
bootstrap_dataloader = self.data.bootstrap_dataloader()
num_samples = 0
neighbour_model = copy.deepcopy(self.model)
neighbour_model.load_state_dict(neighbour_model_param)
# enable evaluation mode, prevent memory leaks.
# no need to switch back to training since model is not further used.
if torch.cuda.is_available():
neighbour_model = neighbour_model.to("cuda")
neighbour_model.eval()
# bootstrap_dataloader = bootstrap_dataloader.to('cuda')
with torch.no_grad():
for inputs, labels in bootstrap_dataloader:
if torch.cuda.is_available():
inputs = inputs.to("cuda")
labels = labels.to("cuda")
outputs = neighbour_model(inputs)
loss = F.cross_entropy(outputs, labels)
running_loss += loss.item()
num_samples += inputs.size(0)
avg_loss = running_loss / len(bootstrap_dataloader)
logging_training.info("Computed neighbor loss over {} data samples".format(num_samples))
return avg_loss
def get_hash_model(self):
"""
Returns:
str: SHA256 hash of model parameters
"""
return hashlib.sha256(self.serialize_model(self.model)).hexdigest()
def set_epochs(self, epochs):
self.epochs = epochs
def serialize_model(self, model):
# From https://pytorch.org/docs/stable/notes/serialization.html
try:
buffer = io.BytesIO()
with gzip.GzipFile(fileobj=buffer, mode="wb") as f:
torch.save(model, f, pickle_protocol=pickle.HIGHEST_PROTOCOL)
serialized_data = buffer.getvalue()
buffer.close()
del buffer
return serialized_data
except Exception as e:
raise ParameterSerializeError("Error serializing model") from e
def deserialize_model(self, data):
# From https://pytorch.org/docs/stable/notes/serialization.html
try:
buffer = io.BytesIO(data)
with gzip.GzipFile(fileobj=buffer, mode="rb") as f:
params_dict = torch.load(f)
buffer.close()
del buffer
return OrderedDict(params_dict)
except Exception as e:
raise ParameterDeserializeError("Error decoding parameters") from e
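    # Illustrative round trip (a sketch, not an official API example): the payload is a
    # gzip-compressed torch.save blob, so
    #
    #   data = self.serialize_model(self.model.state_dict())
    #   params = self.deserialize_model(data)  # OrderedDict of tensors
    #   self.set_model_parameters(params)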
def set_model_parameters(self, params, initialize=False):
try:
self.model.load_state_dict(params)
except Exception as e:
raise ParameterSettingError("Error setting parameters") from e
def get_model_parameters(self, bytes=False, initialize=False):
if bytes:
return self.serialize_model(self.model.state_dict())
return self.model.state_dict()
async def train(self):
try:
self.create_trainer()
logging.info(f"{'='*10} [Training] Started (check training logs for progress) {'='*10}")
await asyncio.to_thread(self._train_sync)
logging.info(f"{'='*10} [Training] Finished (check training logs for progress) {'='*10}")
except Exception as e:
logging_training.error(f"Error training model: {e}")
logging_training.error(traceback.format_exc())
def _train_sync(self):
try:
self._trainer.fit(self.model, self.data)
except Exception as e:
logging_training.error(f"Error in _train_sync: {e}")
tb = traceback.format_exc()
logging_training.error(f"Traceback: {tb}")
# If "raise", the exception will be managed by the main thread
async def test(self):
try:
self.create_trainer()
logging.info(f"{'='*10} [Testing] Started (check training logs for progress) {'='*10}")
await asyncio.to_thread(self._test_sync)
logging.info(f"{'='*10} [Testing] Finished (check training logs for progress) {'='*10}")
except Exception as e:
logging_training.error(f"Error testing model: {e}")
logging_training.error(traceback.format_exc())
def _test_sync(self):
try:
self._trainer.test(self.model, self.data, verbose=True)
except Exception as e:
logging_training.error(f"Error in _test_sync: {e}")
tb = traceback.format_exc()
logging_training.error(f"Traceback: {tb}")
# If "raise", the exception will be managed by the main thread
def cleanup(self):
if self._trainer is not None:
self._trainer._teardown()
del self._trainer
if self.data is not None:
self.data.teardown()
gc.collect()
torch.cuda.empty_cache()
def get_model_weight(self):
weight = self.data.model_weight
if weight is None:
raise ValueError("Model weight not set. Please call setup('fit') before requesting model weight.")
return weight
def on_round_start(self):
self.data.setup()
self._logger.log_data({"Round": self.round})
# self.reporter.enqueue_data("Round", self.round)
def on_round_end(self):
self._logger.global_step = self._logger.global_step + self._logger.local_step
self._logger.local_step = 0
self.round += 1
self.model.on_round_end()
logging.info("Flushing memory cache at the end of round...")
self.cleanup()
def on_learning_cycle_end(self):
self._logger.log_data({"Round": self.round})
# self.reporter.enqueue_data("Round", self.round)
| 14,950 | Python | .py | 305 | 38.967213 | 193 | 0.631575 | enriquetomasmb/nebula | 8 | 2 | 2 | GPL-3.0 | 9/5/2024, 10:48:43 PM (Europe/Amsterdam) |
2,290,017 | siamese.py | enriquetomasmb_nebula/nebula/core/training/siamese.py | import logging
from collections import OrderedDict
import traceback
import hashlib
import io
import torch
from lightning import Trainer
from lightning.pytorch.callbacks import RichProgressBar, RichModelSummary
from lightning.pytorch.callbacks.progress.rich_progress import RichProgressBarTheme
from nebula.core.utils.deterministic import enable_deterministic
from torch.nn import functional as F
class Siamese:
def __init__(self, model, data, config=None, logger=None):
# self.model = torch.compile(model, mode="reduce-overhead")
self.model = model
self.data = data
self.config = config
        self._logger = logger  # backing field for the read-only "logger" property
self.__trainer = None
self.epochs = 1
logging.getLogger("lightning.pytorch").setLevel(logging.INFO)
self.round = 0
enable_deterministic(self.config)
self.logger.log_data({"Round": self.round}, step=self.logger.global_step)
@property
def logger(self):
return self._logger
def get_round(self):
return self.round
def set_model(self, model):
self.model = model
def set_data(self, data):
self.data = data
def create_trainer(self):
logging.info("[Trainer] Creating trainer with accelerator: {}".format(self.config.participant["device_args"]["accelerator"]))
progress_bar = RichProgressBar(
theme=RichProgressBarTheme(
description="green_yellow",
progress_bar="green1",
progress_bar_finished="green1",
progress_bar_pulse="#6206E0",
batch_progress="green_yellow",
time="grey82",
processing_speed="grey82",
metrics="grey82",
),
leave=True,
)
if self.config.participant["device_args"]["accelerator"] == "gpu":
# NEBULA uses 2 GPUs (max) to distribute the nodes.
if self.config.participant["device_args"]["devices"] > 1:
# If you have more than 2 GPUs, you should specify which ones to use.
                gpu_id = [1] if self.config.participant["device_args"]["idx"] % 2 == 0 else [2]
else:
# If there is only one GPU, it will be used.
gpu_id = [1]
self.__trainer = Trainer(
callbacks=[RichModelSummary(max_depth=1), progress_bar],
max_epochs=self.epochs,
accelerator=self.config.participant["device_args"]["accelerator"],
devices=gpu_id,
logger=self.logger,
log_every_n_steps=50,
enable_checkpointing=False,
enable_model_summary=False,
enable_progress_bar=True,
# deterministic=True
)
else:
# NEBULA uses only CPU to distribute the nodes
self.__trainer = Trainer(
callbacks=[RichModelSummary(max_depth=1), progress_bar],
max_epochs=self.epochs,
accelerator=self.config.participant["device_args"]["accelerator"],
devices="auto",
logger=self.logger,
log_every_n_steps=50,
enable_checkpointing=False,
enable_model_summary=False,
enable_progress_bar=True,
# deterministic=True
)
def get_global_model_parameters(self):
return self.model.get_global_model_parameters()
def set_parameter_second_aggregation(self, params):
try:
logging.info(f"Setting parameters in second aggregation...")
self.model.load_state_dict(params)
        except Exception as e:
            raise Exception("Error setting parameters") from e
def get_model_parameters(self, bytes=False):
if bytes:
return self.serialize_model(self.model.state_dict())
else:
return self.model.state_dict()
def get_hash_model(self):
"""
Returns:
str: SHA256 hash of model parameters
"""
        return hashlib.sha256(self.serialize_model(self.model.state_dict())).hexdigest()
def set_epochs(self, epochs):
self.epochs = epochs
####
# Model parameters serialization/deserialization
# From https://pytorch.org/docs/stable/notes/serialization.html
####
def serialize_model(self, model):
try:
buffer = io.BytesIO()
# with gzip.GzipFile(fileobj=buffer, mode='wb') as f:
# torch.save(params, f)
torch.save(model, buffer)
return buffer.getvalue()
        except Exception as e:
            raise Exception("Error serializing model") from e
def deserialize_model(self, data):
try:
buffer = io.BytesIO(data)
# with gzip.GzipFile(fileobj=buffer, mode='rb') as f:
# params_dict = torch.load(f, map_location='cpu')
params_dict = torch.load(buffer, map_location="cpu")
return OrderedDict(params_dict)
        except Exception as e:
            raise Exception("Error decoding parameters") from e
def set_model_parameters(self, params, initialize=False):
try:
if initialize:
self.model.load_state_dict(params)
self.model.global_load_state_dict(params)
self.model.historical_load_state_dict(params)
else:
# First aggregation
self.model.global_load_state_dict(params)
        except Exception as e:
            raise Exception("Error setting parameters") from e
def train(self):
try:
self.create_trainer()
# torch.autograd.set_detect_anomaly(True)
            # TODO: Train only the local model, save the previous round's model as history and
            # then load it; the global model is the aggregation of all the models.
self.__trainer.fit(self.model, self.data)
            # Save the local model as the historical model (previous round).
            # It will be compared against the local model trained in the next round (contrastive loss).
            # During aggregation into the global model (first step) and aggregation with similarities
            # and weights (second step), the historical model remains immutable.
logging.info(f"Saving historical model...")
self.model.save_historical_model()
except Exception as e:
logging.error(f"Error training model: {e}")
logging.error(traceback.format_exc())
def test(self):
try:
self.create_trainer()
self.__trainer.test(self.model, self.data, verbose=True)
except Exception as e:
logging.error(f"Error testing model: {e}")
logging.error(traceback.format_exc())
def get_model_weight(self):
return (
len(self.data.train_dataloader().dataset),
len(self.data.test_dataloader().dataset),
)
def finalize_round(self):
self.logger.global_step = self.logger.global_step + self.logger.local_step
self.logger.local_step = 0
self.round += 1
self.logger.log_data({"Round": self.round}, step=self.logger.global_step)
| 7,163 | Python | .py | 168 | 31.702381 | 178 | 0.604531 | enriquetomasmb/nebula | 8 | 2 | 2 | GPL-3.0 | 9/5/2024, 10:48:43 PM (Europe/Amsterdam) |
2,290,018 | nebula_pb2.py | enriquetomasmb_nebula/nebula/core/pb/nebula_pb2.py | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: nebula.proto
# Protobuf Python Version: 4.25.3
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
from google.protobuf.internal import builder as _builder
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x0cnebula.proto\x12\x06nebula\"\xe4\x02\n\x07Wrapper\x12\x0e\n\x06source\x18\x01 \x01(\t\x12\x35\n\x11\x64iscovery_message\x18\x02 \x01(\x0b\x32\x18.nebula.DiscoveryMessageH\x00\x12\x31\n\x0f\x63ontrol_message\x18\x03 \x01(\x0b\x32\x16.nebula.ControlMessageH\x00\x12\x37\n\x12\x66\x65\x64\x65ration_message\x18\x04 \x01(\x0b\x32\x19.nebula.FederationMessageH\x00\x12-\n\rmodel_message\x18\x05 \x01(\x0b\x32\x14.nebula.ModelMessageH\x00\x12\x37\n\x12\x63onnection_message\x18\x06 \x01(\x0b\x32\x19.nebula.ConnectionMessageH\x00\x12\x33\n\x10response_message\x18\x07 \x01(\x0b\x32\x17.nebula.ResponseMessageH\x00\x42\t\n\x07message\"\x9e\x01\n\x10\x44iscoveryMessage\x12/\n\x06\x61\x63tion\x18\x01 \x01(\x0e\x32\x1f.nebula.DiscoveryMessage.Action\x12\x10\n\x08latitude\x18\x02 \x01(\x02\x12\x11\n\tlongitude\x18\x03 \x01(\x02\"4\n\x06\x41\x63tion\x12\x0c\n\x08\x44ISCOVER\x10\x00\x12\x0c\n\x08REGISTER\x10\x01\x12\x0e\n\nDEREGISTER\x10\x02\"\x9a\x01\n\x0e\x43ontrolMessage\x12-\n\x06\x61\x63tion\x18\x01 \x01(\x0e\x32\x1d.nebula.ControlMessage.Action\x12\x0b\n\x03log\x18\x02 \x01(\t\"L\n\x06\x41\x63tion\x12\t\n\x05\x41LIVE\x10\x00\x12\x0c\n\x08OVERHEAD\x10\x01\x12\x0c\n\x08MOBILITY\x10\x02\x12\x0c\n\x08RECOVERY\x10\x03\x12\r\n\tWEAK_LINK\x10\x04\"\xcd\x01\n\x11\x46\x65\x64\x65rationMessage\x12\x30\n\x06\x61\x63tion\x18\x01 \x01(\x0e\x32 .nebula.FederationMessage.Action\x12\x11\n\targuments\x18\x02 \x03(\t\x12\r\n\x05round\x18\x03 \x01(\x05\"d\n\x06\x41\x63tion\x12\x14\n\x10\x46\x45\x44\x45RATION_START\x10\x00\x12\x0e\n\nREPUTATION\x10\x01\x12\x1e\n\x1a\x46\x45\x44\x45RATION_MODELS_INCLUDED\x10\x02\x12\x14\n\x10\x46\x45\x44\x45RATION_READY\x10\x03\"A\n\x0cModelMessage\x12\x12\n\nparameters\x18\x01 \x01(\x0c\x12\x0e\n\x06weight\x18\x02 \x01(\x03\x12\r\n\x05round\x18\x03 \x01(\x05\"l\n\x11\x43onnectionMessage\x12\x30\n\x06\x61\x63tion\x18\x01 \x01(\x0e\x32 .nebula.ConnectionMessage.Action\"%\n\x06\x41\x63tion\x12\x0b\n\x07\x43ONNECT\x10\x00\x12\x0e\n\nDISCONNECT\x10\x01\"#\n\x0fResponseMessage\x12\x10\n\x08response\x18\x01 \x01(\tb\x06proto3')
_globals = globals()
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'nebula_pb2', _globals)
if _descriptor._USE_C_DESCRIPTORS == False:
DESCRIPTOR._options = None
_globals['_WRAPPER']._serialized_start=25
_globals['_WRAPPER']._serialized_end=381
_globals['_DISCOVERYMESSAGE']._serialized_start=384
_globals['_DISCOVERYMESSAGE']._serialized_end=542
_globals['_DISCOVERYMESSAGE_ACTION']._serialized_start=490
_globals['_DISCOVERYMESSAGE_ACTION']._serialized_end=542
_globals['_CONTROLMESSAGE']._serialized_start=545
_globals['_CONTROLMESSAGE']._serialized_end=699
_globals['_CONTROLMESSAGE_ACTION']._serialized_start=623
_globals['_CONTROLMESSAGE_ACTION']._serialized_end=699
_globals['_FEDERATIONMESSAGE']._serialized_start=702
_globals['_FEDERATIONMESSAGE']._serialized_end=907
_globals['_FEDERATIONMESSAGE_ACTION']._serialized_start=807
_globals['_FEDERATIONMESSAGE_ACTION']._serialized_end=907
_globals['_MODELMESSAGE']._serialized_start=909
_globals['_MODELMESSAGE']._serialized_end=974
_globals['_CONNECTIONMESSAGE']._serialized_start=976
_globals['_CONNECTIONMESSAGE']._serialized_end=1084
_globals['_CONNECTIONMESSAGE_ACTION']._serialized_start=1047
_globals['_CONNECTIONMESSAGE_ACTION']._serialized_end=1084
_globals['_RESPONSEMESSAGE']._serialized_start=1086
_globals['_RESPONSEMESSAGE']._serialized_end=1121
# @@protoc_insertion_point(module_scope)
| 4,089 | Python | .py | 40 | 99.925 | 2,121 | 0.779619 | enriquetomasmb/nebula | 8 | 2 | 2 | GPL-3.0 | 9/5/2024, 10:48:43 PM (Europe/Amsterdam) |
2,290,019 | conf.py | enriquetomasmb_nebula/docs/conf.py | # Configuration file for the Sphinx documentation builder.
#
# For the full list of built-in configuration values, see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
import os
import sys
import subprocess
subprocess.call(['sphinx-apidoc', '-f', '-o', '.', '../nebula'])  # Same as 'make apidoc'
sys.path.insert(0, os.path.abspath('..'))
# -- Project information -----------------------------------------------------
# https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information
project = 'NEBULA'
copyright = '2024, Enrique Tomás Martínez Beltrán'
author = 'Enrique Tomás Martínez Beltrán'
# The short X.Y version
version = '1.0'
# The full version, including alpha/beta/rc tags
release = '1.0.0'
# -- General configuration ---------------------------------------------------
# https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration
# extensions = [
# 'sphinx.ext.autodoc',
# 'sphinx.ext.napoleon',
# # 'sphinx.ext.viewcode',
# 'autoapi.extension'
# ]
extensions = [
'sphinx.ext.napoleon', # Support for NumPy and Google style docstrings
'autoapi.extension', # Automatically generate API documentation
'sphinx.ext.intersphinx', # Link to other project's documentation
'sphinx.ext.todo', # Support for todo items
'sphinx.ext.autodoc', # Support for automatic documentation
'sphinx.ext.autosummary', # Support for automatic summaries
'sphinx.ext.doctest', # Support for doctests
]
autodoc_typehints = 'description'
templates_path = ['_templates']
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store', 'app', 'setup.py', 'docs']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output -------------------------------------------------
# https://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-html-output
html_theme = 'sphinx_book_theme'
html_static_path = ['_static']
html_css_files = ['custom.css']
html_theme_options = {
"repository_url": "https://github.com/enriquetomasmb/nebula",
"use_repository_button": True,
}
html_logo = "_static/nebula-logo.jpg"
html_title = "NEBULA Documentation"
# -- Options for autoapi extension -------------------------------------------
autoapi_root = 'api'
autoapi_template_dir = "_templates/autoapi"
autoapi_dirs = ['../nebula']
autoapi_type = "python"
autoapi_options = [
"members",
"undoc-members",
"show-inheritance",
"show-module-summary",
"imported-members",
]
# autoapi_keep_files = True
| 2,710 | Python | .py | 66 | 38.818182 | 95 | 0.674179 | enriquetomasmb/nebula | 8 | 2 | 2 | GPL-3.0 | 9/5/2024, 10:48:43 PM (Europe/Amsterdam) |
2,290,020 | main.py | enriquetomasmb_nebula/app/main.py | import argparse
import os
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), ".."))  # Parent directory where the NEBULA module is located
import nebula
from nebula.controller import Controller
from nebula.scenarios import ScenarioManagement
argparser = argparse.ArgumentParser(description="Controller of NEBULA platform", add_help=False)
argparser.add_argument(
"--grafanaport",
dest="grafanaport",
default=6040,
help="Grafana port (default: 6040)",
)
argparser.add_argument(
"--lokiport",
dest="lokiport",
default=6010,
help="Loki port (default: 6010)",
)
argparser.add_argument(
"--wafport",
dest="wafport",
default=6050,
help="WAF port (default: 6050)",
)
argparser.add_argument(
"-wp",
"--webport",
dest="webport",
default=6060,
help="Frontend port (default: 6060)",
)
argparser.add_argument(
"-t",
"--test",
dest="test",
action="store_true",
default=False,
help="Run tests"
)
argparser.add_argument(
"-st",
"--stop",
dest="stop",
nargs="?",
const="all", # If no argument is given, stop all
default=None,
help="Stop NEBULA platform or nodes only (use '--stop nodes' to stop only the nodes)",
)
argparser.add_argument(
"-sp",
"--statsport",
dest="statsport",
default=8080,
help="Statistics port (default: 8080)",
)
argparser.add_argument("-s", "--simulation", action="store_false", dest="simulation", help="Run simulation")
argparser.add_argument(
"-c",
"--config",
dest="config",
default=os.path.join(os.path.dirname(os.path.abspath(__file__)), "config"),
help="Config directory path",
)
argparser.add_argument(
"-l",
"--logs",
dest="logs",
default=os.path.join(os.path.dirname(os.path.abspath(__file__)), "logs"),
help="Logs directory path",
)
argparser.add_argument(
"-ce",
"--certs",
dest="certs",
default=os.path.join(os.path.dirname(os.path.abspath(__file__)), "certs"),
help="Certs directory path",
)
argparser.add_argument(
"-e",
"--env",
dest="env",
default=os.path.join(os.path.dirname(os.path.abspath(__file__)), ".env"),
help=".env file path",
)
argparser.add_argument(
"-p",
"--production",
dest="production",
action="store_true",
default=False,
help="Production mode",
)
argparser.add_argument(
"-ad",
"--advanced",
dest="advanced_analytics",
action="store_true",
default=False,
help="Advanced analytics",
)
argparser.add_argument(
"-v",
"--version",
action="version",
version="%(prog)s " + nebula.__version__,
help="Show version",
)
argparser.add_argument(
"-a",
"--about",
action="version",
version="Created by Enrique Tomás Martínez Beltrán",
help="Show author",
)
argparser.add_argument("-h", "--help", action="help", default=argparse.SUPPRESS, help="Show help")
args = argparser.parse_args()
"""
Code for deploying the controller
"""
if __name__ == "__main__":
if args.stop == "all":
Controller.stop()
elif args.stop == "nodes":
ScenarioManagement.stop_nodes()
Controller(args).start()
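# Example invocations (illustrative; flag names come from the parser above):
#   python app/main.py                     # start the controller with default ports
#   python app/main.py -wp 6060 -sp 8080   # custom frontend and statistics ports
#   python app/main.py --stop              # stop the whole NEBULA platform
#   python app/main.py --stop nodes        # stop only the nodes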
| 3,170 | Python | .py | 127 | 21.188976 | 109 | 0.652906 | enriquetomasmb/nebula | 8 | 2 | 2 | GPL-3.0 | 9/5/2024, 10:48:43 PM (Europe/Amsterdam) |
2,290,021 | attacks.py | enriquetomasmb_nebula/nebula/addons/attacks/attacks.py | from typing import Any
import torch
import numpy as np
from torchmetrics.functional import pairwise_cosine_similarity
from copy import deepcopy
import logging
# To take into account:
# - Malicious nodes do not train on their own data
# - Malicious nodes aggregate the weights of the other nodes, but not their own
# - The received weights may be the node's own weights (the aggregate of its
#   neighbors), or, if the attack targets one specific neighbor, that neighbor's
#   weights only (likely more effective when the two differ).
def create_attack(attack_name):
"""
Function to create an attack object from its name.
"""
if attack_name == "GLLNeuronInversionAttack":
return GLLNeuronInversionAttack()
elif attack_name == "NoiseInjectionAttack":
return NoiseInjectionAttack()
elif attack_name == "SwappingWeightsAttack":
return SwappingWeightsAttack()
elif attack_name == "DelayerAttack":
return DelayerAttack()
else:
return None
class Attack:
def __call__(self, *args: Any, **kwds: Any) -> Any:
return self.attack(*args, **kwds)
def attack(self, received_weights):
"""
Function to perform the attack on the received weights. It should return the
attacked weights.
"""
raise NotImplementedError
class GLLNeuronInversionAttack(Attack):
"""
Function to perform neuron inversion attack on the received weights.
"""
def __init__(self, strength=5.0, perc=1.0):
super().__init__()
self.strength = strength
self.perc = perc
def attack(self, received_weights):
logging.info("[GLLNeuronInversionAttack] Performing neuron inversion attack")
lkeys = list(received_weights.keys())
logging.info(f"Layer inverted: {lkeys[-2]}")
        # NOTE: the scale is hard-coded to 10000; self.strength and self.perc are currently unused
        received_weights[lkeys[-2]].data = torch.rand(received_weights[lkeys[-2]].shape) * 10000
return received_weights
class NoiseInjectionAttack(Attack):
"""
Function to perform noise injection attack on the received weights.
"""
def __init__(self, strength=10000, perc=1.0):
super().__init__()
self.strength = strength
self.perc = perc
def attack(self, received_weights):
logging.info("[NoiseInjectionAttack] Performing noise injection attack")
lkeys = list(received_weights.keys())
for k in lkeys:
logging.info(f"Layer noised: {k}")
received_weights[k].data += torch.randn(received_weights[k].shape) * self.strength
return received_weights
class SwappingWeightsAttack(Attack):
"""
Function to perform swapping weights attack on the received weights. Note that this
attack performance is not consistent due to its stochasticity.
Warning: depending on the layer the code may not work (due to reshaping in between),
or it may be slow (scales quadratically with the layer size).
Do not apply to last layer, as it would make the attack detectable (high loss
on malicious node).
"""
def __init__(self, layer_idx=0):
super().__init__()
self.layer_idx = layer_idx
def attack(self, received_weights):
logging.info("[SwappingWeightsAttack] Performing swapping weights attack")
lkeys = list(received_weights.keys())
wm = received_weights[lkeys[self.layer_idx]]
# Compute similarity matrix
sm = torch.zeros((wm.shape[0], wm.shape[0]))
for j in range(wm.shape[0]):
sm[j] = pairwise_cosine_similarity(wm[j].reshape(1, -1), wm.reshape(wm.shape[0], -1))
# Check rows/cols where greedy approach is optimal
nsort = np.full(sm.shape[0], -1)
rows = []
for j in range(sm.shape[0]):
k = torch.argmin(sm[j])
if torch.argmin(sm[:, k]) == j:
nsort[j] = k
rows.append(j)
not_rows = np.array([i for i in range(sm.shape[0]) if i not in rows])
        # Ensure the remaining rows are fully permuted (not optimal, but good enough).
        # Caveat: if exactly one row is left unmatched, no derangement exists and this loop would spin forever.
nrs = deepcopy(not_rows)
nrs = np.random.permutation(nrs)
while np.any(nrs == not_rows):
nrs = np.random.permutation(nrs)
nsort[not_rows] = nrs
nsort = torch.tensor(nsort)
# Apply permutation to weights
received_weights[lkeys[self.layer_idx]] = received_weights[lkeys[self.layer_idx]][nsort]
received_weights[lkeys[self.layer_idx + 1]] = received_weights[lkeys[self.layer_idx + 1]][nsort]
if self.layer_idx + 2 < len(lkeys):
received_weights[lkeys[self.layer_idx + 2]] = received_weights[lkeys[self.layer_idx + 2]][:, nsort]
return received_weights
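# Consistency sketch (illustrative, not part of the attack): permuting the rows
# of one linear layer together with the columns of the next preserves the
# composed function (shown bias-free here). This is why the malicious model can
# keep a low local loss while its parameters stop lining up with the other
# nodes' coordinates, corrupting coordinate-wise averaging.
def _swap_consistency_demo():
    w1 = torch.randn(6, 4)  # layer i: maps 4 -> 6
    w2 = torch.randn(3, 6)  # layer i+1: maps 6 -> 3
    x = torch.randn(4)
    perm = torch.randperm(6)
    out = w2 @ (w1 @ x)
    out_perm = w2[:, perm] @ (w1[perm] @ x)
    assert torch.allclose(out, out_perm, atol=1e-5)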
class DelayerAttack(Attack):
"""
Function to perform delayer attack on the received weights. It delays the
weights for an indefinite number of rounds.
"""
def __init__(self):
super().__init__()
self.weights = None
def attack(self, received_weights):
logging.info("[DelayerAttack] Performing delayer attack")
if self.weights is None:
self.weights = deepcopy(received_weights)
return self.weights
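# Minimal usage sketch (illustrative): the layer names and shapes below are
# hypothetical stand-ins for a real model's state_dict.
def _attack_demo():
    dummy_weights = {
        "fc.weight": torch.randn(8, 4),
        "fc.bias": torch.randn(8),
    }
    attack = create_attack("NoiseInjectionAttack")
    if attack is not None:
        return attack(dummy_weights)  # Attack.__call__ dispatches to attack()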
| 5,210 | Python | .tac | 119 | 36.554622 | 111 | 0.662584 | enriquetomasmb/nebula | 8 | 2 | 2 | GPL-3.0 | 9/5/2024, 10:48:43 PM (Europe/Amsterdam) |
2,290,022 | attacks.json | enriquetomasmb_nebula/nebula/tests/attacks.json | [
{
"scenario_title": "FedAvg_Fully_nodes5_MNIST_No Attack",
"scenario_description": "",
"simulation": true,
"federation": "DFL",
"topology": "Custom",
"nodes": {
"0": {
"id": 0,
"ip": "192.168.50.2",
"port": "45000",
"role": "aggregator",
"malicious": false,
"proxy": false,
"start": true
},
"1": {
"id": 1,
"ip": "192.168.50.3",
"port": "45000",
"role": "aggregator",
"malicious": false,
"proxy": false,
"start": false
},
"2": {
"id": 2,
"ip": "192.168.50.4",
"port": "45000",
"role": "aggregator",
"malicious": false,
"proxy": false,
"start": false
},
"3": {
"id": 3,
"ip": "192.168.50.5",
"port": "45000",
"role": "aggregator",
"malicious": false,
"proxy": false,
"start": false
},
"4": {
"id": 4,
"ip": "192.168.50.6",
"port": "45000",
"role": "aggregator",
"malicious": false,
"proxy": false,
"start": false
}
},
"nodes_graph": [
{
"id": 0,
"ip": "192.168.50.2",
"port": "45000",
"role": "aggregator",
"malicious": false,
"proxy": false,
"start": true,
"neighbors": [
1,
2,
3,
4
],
"links": [],
"index": 0,
"x": 5.631418365613239,
"y": 22.694610891010647,
"z": -28.360725118467528,
"vx": -1.0328046273358557e-8,
"vy": 1.5929041989459476e-8,
"vz": 3.908696708641432e-9,
"__threeObj": {
"metadata": {
"version": 4.6,
"type": "Object",
"generator": "Object3D.toJSON"
},
"geometries": [
{
"uuid": "9937f075-faa3-4676-8b54-756622fd8981",
"type": "SphereGeometry",
"radius": 5,
"widthSegments": 32,
"heightSegments": 16,
"phiStart": 0,
"phiLength": 6.283185307179586,
"thetaStart": 0,
"thetaLength": 3.141592653589793
}
],
"materials": [
{
"uuid": "52abc64e-91e6-4086-b939-4b34847cb7f0",
"type": "MeshLambertMaterial",
"color": 14245634,
"emissive": 0,
"reflectivity": 1,
"refractionRatio": 0.98,
"opacity": 0.75,
"depthFunc": 3,
"depthTest": true,
"depthWrite": true,
"colorWrite": true,
"stencilWrite": false,
"stencilWriteMask": 255,
"stencilFunc": 519,
"stencilRef": 0,
"stencilFuncMask": 255,
"stencilFail": 7680,
"stencilZFail": 7680,
"stencilZPass": 7680
}
],
"object": {
"uuid": "a4edcb16-67e9-4d55-8380-bd22c205ad96",
"type": "Mesh",
"layers": 1,
"matrix": [
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
5.631418365613239,
22.694610891010647,
-28.360725118467528,
1
],
"up": [
0,
1,
0
],
"geometry": "9937f075-faa3-4676-8b54-756622fd8981",
"material": "52abc64e-91e6-4086-b939-4b34847cb7f0"
}
}
},
{
"id": 1,
"ip": "192.168.50.3",
"port": "45000",
"role": "aggregator",
"malicious": false,
"proxy": false,
"start": false,
"neighbors": [
0,
2,
3,
4
],
"links": [],
"index": 1,
"x": -20.140462201825898,
"y": -21.13417074227967,
"z": -21.551885000578707,
"vx": 1.8458080528715605e-8,
"vy": -3.116710371387466e-8,
"vz": 2.2704912480424454e-8,
"__threeObj": {
"metadata": {
"version": 4.6,
"type": "Object",
"generator": "Object3D.toJSON"
},
"geometries": [
{
"uuid": "95c670ff-2cc0-427f-a91f-e474bab505d6",
"type": "SphereGeometry",
"radius": 5,
"widthSegments": 32,
"heightSegments": 16,
"phiStart": 0,
"phiLength": 6.283185307179586,
"thetaStart": 0,
"thetaLength": 3.141592653589793
}
],
"materials": [
{
"uuid": "9f3edff2-2806-4f30-8948-4287450b6915",
"type": "MeshLambertMaterial",
"color": 14245634,
"emissive": 0,
"reflectivity": 1,
"refractionRatio": 0.98,
"opacity": 0.75,
"depthFunc": 3,
"depthTest": true,
"depthWrite": true,
"colorWrite": true,
"stencilWrite": false,
"stencilWriteMask": 255,
"stencilFunc": 519,
"stencilRef": 0,
"stencilFuncMask": 255,
"stencilFail": 7680,
"stencilZFail": 7680,
"stencilZPass": 7680
}
],
"object": {
"uuid": "5d13f03c-c2bf-40c8-8eda-6eb33a0da6e6",
"type": "Mesh",
"layers": 1,
"matrix": [
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
-20.140462201825898,
-21.13417074227967,
-21.551885000578707,
1
],
"up": [
0,
1,
0
],
"geometry": "95c670ff-2cc0-427f-a91f-e474bab505d6",
"material": "9f3edff2-2806-4f30-8948-4287450b6915"
}
}
},
{
"id": 2,
"ip": "192.168.50.4",
"port": "45000",
"role": "aggregator",
"malicious": false,
"proxy": false,
"start": false,
"neighbors": [
0,
1,
3,
4
],
"links": [],
"index": 2,
"x": -28.71703280728614,
"y": 4.020987712595549,
"z": 22.705010362753697,
"vx": 1.0694677449996208e-8,
"vy": 2.9084972037089048e-8,
"vz": -2.015565459223841e-8,
"__threeObj": {
"metadata": {
"version": 4.6,
"type": "Object",
"generator": "Object3D.toJSON"
},
"geometries": [
{
"uuid": "26bf41c6-a855-4af4-9ebb-05bdf9d78bb4",
"type": "SphereGeometry",
"radius": 5,
"widthSegments": 32,
"heightSegments": 16,
"phiStart": 0,
"phiLength": 6.283185307179586,
"thetaStart": 0,
"thetaLength": 3.141592653589793
}
],
"materials": [
{
"uuid": "7c040637-10f0-408b-8975-0338903659fb",
"type": "MeshLambertMaterial",
"color": 14245634,
"emissive": 0,
"reflectivity": 1,
"refractionRatio": 0.98,
"opacity": 0.75,
"depthFunc": 3,
"depthTest": true,
"depthWrite": true,
"colorWrite": true,
"stencilWrite": false,
"stencilWriteMask": 255,
"stencilFunc": 519,
"stencilRef": 0,
"stencilFuncMask": 255,
"stencilFail": 7680,
"stencilZFail": 7680,
"stencilZPass": 7680
}
],
"object": {
"uuid": "44f3c661-2ccd-440c-827b-ffcc489b5c64",
"type": "Mesh",
"layers": 1,
"matrix": [
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
-28.71703280728614,
4.020987712595549,
22.705010362753697,
1
],
"up": [
0,
1,
0
],
"geometry": "26bf41c6-a855-4af4-9ebb-05bdf9d78bb4",
"material": "7c040637-10f0-408b-8975-0338903659fb"
}
}
},
{
"id": 3,
"ip": "192.168.50.5",
"port": "45000",
"role": "aggregator",
"malicious": false,
"proxy": false,
"start": false,
"neighbors": [
0,
1,
2,
4
],
"links": [],
"index": 3,
"x": 19.679145143125147,
"y": 22.16282780436268,
"z": 21.030079148818903,
"vx": -3.852416481366844e-9,
"vy": -4.9742524279957165e-8,
"vz": 6.992962533560291e-9,
"__threeObj": {
"metadata": {
"version": 4.6,
"type": "Object",
"generator": "Object3D.toJSON"
},
"geometries": [
{
"uuid": "75c95712-2b2f-44e0-84b1-2130d887113c",
"type": "SphereGeometry",
"radius": 5,
"widthSegments": 32,
"heightSegments": 16,
"phiStart": 0,
"phiLength": 6.283185307179586,
"thetaStart": 0,
"thetaLength": 3.141592653589793
}
],
"materials": [
{
"uuid": "f54d2ff7-d369-4e22-b285-ca6cdeb23421",
"type": "MeshLambertMaterial",
"color": 14245634,
"emissive": 0,
"reflectivity": 1,
"refractionRatio": 0.98,
"opacity": 0.75,
"depthFunc": 3,
"depthTest": true,
"depthWrite": true,
"colorWrite": true,
"stencilWrite": false,
"stencilWriteMask": 255,
"stencilFunc": 519,
"stencilRef": 0,
"stencilFuncMask": 255,
"stencilFail": 7680,
"stencilZFail": 7680,
"stencilZPass": 7680
}
],
"object": {
"uuid": "142323c7-c3b1-4d07-860b-a623114b67ad",
"type": "Mesh",
"layers": 1,
"matrix": [
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
19.679145143125147,
22.16282780436268,
21.030079148818903,
1
],
"up": [
0,
1,
0
],
"geometry": "75c95712-2b2f-44e0-84b1-2130d887113c",
"material": "f54d2ff7-d369-4e22-b285-ca6cdeb23421"
}
}
},
{
"id": 4,
"ip": "192.168.50.6",
"port": "45000",
"role": "aggregator",
"malicious": false,
"proxy": false,
"start": false,
"neighbors": [
0,
1,
2,
3
],
"links": [],
"index": 4,
"x": 23.546931500373656,
"y": -27.744255665689202,
"z": 6.177520607473647,
"vx": -1.4972295223986403e-8,
"vy": 3.58956139672832e-8,
"vz": -1.3450917130387546e-8,
"__threeObj": {
"metadata": {
"version": 4.6,
"type": "Object",
"generator": "Object3D.toJSON"
},
"geometries": [
{
"uuid": "6593363c-6081-4c21-b1e9-3ae4f03b3a3d",
"type": "SphereGeometry",
"radius": 5,
"widthSegments": 32,
"heightSegments": 16,
"phiStart": 0,
"phiLength": 6.283185307179586,
"thetaStart": 0,
"thetaLength": 3.141592653589793
}
],
"materials": [
{
"uuid": "13def5d0-adf5-4e3b-bb57-30362a221818",
"type": "MeshLambertMaterial",
"color": 14245634,
"emissive": 0,
"reflectivity": 1,
"refractionRatio": 0.98,
"opacity": 0.75,
"depthFunc": 3,
"depthTest": true,
"depthWrite": true,
"colorWrite": true,
"stencilWrite": false,
"stencilWriteMask": 255,
"stencilFunc": 519,
"stencilRef": 0,
"stencilFuncMask": 255,
"stencilFail": 7680,
"stencilZFail": 7680,
"stencilZPass": 7680
}
],
"object": {
"uuid": "64c9ea9c-1c99-420a-b048-81aead906df3",
"type": "Mesh",
"layers": 1,
"matrix": [
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
23.546931500373656,
-27.744255665689202,
6.177520607473647,
1
],
"up": [
0,
1,
0
],
"geometry": "6593363c-6081-4c21-b1e9-3ae4f03b3a3d",
"material": "13def5d0-adf5-4e3b-bb57-30362a221818"
}
}
}
],
"n_nodes": 5,
"matrix": [
[
0,
1,
1,
1,
1
],
[
1,
0,
1,
1,
1
],
[
1,
1,
0,
1,
1
],
[
1,
1,
1,
0,
1
],
[
1,
1,
1,
1,
0
]
],
"dataset": "MNIST",
"iid": false,
"partition_selection": "dirichlet",
"partition_parameter": "0.5",
"model": "MLP",
"agg_algorithm": "FedAvg",
"rounds": "10",
"logginglevel": false,
"accelerator": "gpu",
"network_subnet": "192.168.50.0/24",
"network_gateway": "192.168.50.1",
"epochs": "1",
"attacks": "No Attack",
"poisoned_node_percent": "0",
"poisoned_sample_percent": "0",
"poisoned_noise_percent": "0",
"with_reputation": false,
"is_dynamic_topology": false,
"is_dynamic_aggregation": false,
"target_aggregation": false,
"random_geo": true,
"latitude": 38.023522,
"longitude": -1.174389,
"mobility": false,
"mobility_type": "both",
"radius_federation": "1000",
"scheme_mobility": "random",
"round_frequency": "1",
"mobile_participants_percent": "100",
"additional_participants": [],
"schema_additional_participants": "random"
},
{
"scenario_title": "FedAvg_Fully_nodes5_MNIST_GLLNeuronInversionAttack",
"scenario_description": "",
"simulation": true,
"federation": "DFL",
"topology": "Custom",
"nodes": {
"0": {
"id": 0,
"ip": "192.168.50.2",
"port": "45000",
"role": "aggregator",
"malicious": false,
"proxy": false,
"start": true
},
"1": {
"id": 1,
"ip": "192.168.50.3",
"port": "45000",
"role": "aggregator",
"malicious": false,
"proxy": false,
"start": false
},
"2": {
"id": 2,
"ip": "192.168.50.4",
"port": "45000",
"role": "aggregator",
"malicious": false,
"proxy": false,
"start": false
},
"3": {
"id": 3,
"ip": "192.168.50.5",
"port": "45000",
"role": "aggregator",
"malicious": false,
"proxy": false,
"start": false
},
"4": {
"id": 4,
"ip": "192.168.50.6",
"port": "45000",
"role": "aggregator",
"malicious": false,
"proxy": false,
"start": false
}
},
"nodes_graph": [
{
"id": 0,
"ip": "192.168.50.2",
"port": "45000",
"role": "aggregator",
"malicious": false,
"proxy": false,
"start": true,
"neighbors": [
1,
2,
3,
4
],
"links": [],
"index": 0,
"x": 5.631418365613239,
"y": 22.694610891010647,
"z": -28.360725118467528,
"vx": -1.0328046273358557e-8,
"vy": 1.5929041989459476e-8,
"vz": 3.908696708641432e-9,
"__threeObj": {
"metadata": {
"version": 4.6,
"type": "Object",
"generator": "Object3D.toJSON"
},
"geometries": [
{
"uuid": "9937f075-faa3-4676-8b54-756622fd8981",
"type": "SphereGeometry",
"radius": 5,
"widthSegments": 32,
"heightSegments": 16,
"phiStart": 0,
"phiLength": 6.283185307179586,
"thetaStart": 0,
"thetaLength": 3.141592653589793
}
],
"materials": [
{
"uuid": "52abc64e-91e6-4086-b939-4b34847cb7f0",
"type": "MeshLambertMaterial",
"color": 14245634,
"emissive": 0,
"reflectivity": 1,
"refractionRatio": 0.98,
"opacity": 0.75,
"depthFunc": 3,
"depthTest": true,
"depthWrite": true,
"colorWrite": true,
"stencilWrite": false,
"stencilWriteMask": 255,
"stencilFunc": 519,
"stencilRef": 0,
"stencilFuncMask": 255,
"stencilFail": 7680,
"stencilZFail": 7680,
"stencilZPass": 7680
}
],
"object": {
"uuid": "a4edcb16-67e9-4d55-8380-bd22c205ad96",
"type": "Mesh",
"layers": 1,
"matrix": [
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
5.631418365613239,
22.694610891010647,
-28.360725118467528,
1
],
"up": [
0,
1,
0
],
"geometry": "9937f075-faa3-4676-8b54-756622fd8981",
"material": "52abc64e-91e6-4086-b939-4b34847cb7f0"
}
}
},
{
"id": 1,
"ip": "192.168.50.3",
"port": "45000",
"role": "aggregator",
"malicious": false,
"proxy": false,
"start": false,
"neighbors": [
0,
2,
3,
4
],
"links": [],
"index": 1,
"x": -20.140462201825898,
"y": -21.13417074227967,
"z": -21.551885000578707,
"vx": 1.8458080528715605e-8,
"vy": -3.116710371387466e-8,
"vz": 2.2704912480424454e-8,
"__threeObj": {
"metadata": {
"version": 4.6,
"type": "Object",
"generator": "Object3D.toJSON"
},
"geometries": [
{
"uuid": "95c670ff-2cc0-427f-a91f-e474bab505d6",
"type": "SphereGeometry",
"radius": 5,
"widthSegments": 32,
"heightSegments": 16,
"phiStart": 0,
"phiLength": 6.283185307179586,
"thetaStart": 0,
"thetaLength": 3.141592653589793
}
],
"materials": [
{
"uuid": "9f3edff2-2806-4f30-8948-4287450b6915",
"type": "MeshLambertMaterial",
"color": 14245634,
"emissive": 0,
"reflectivity": 1,
"refractionRatio": 0.98,
"opacity": 0.75,
"depthFunc": 3,
"depthTest": true,
"depthWrite": true,
"colorWrite": true,
"stencilWrite": false,
"stencilWriteMask": 255,
"stencilFunc": 519,
"stencilRef": 0,
"stencilFuncMask": 255,
"stencilFail": 7680,
"stencilZFail": 7680,
"stencilZPass": 7680
}
],
"object": {
"uuid": "5d13f03c-c2bf-40c8-8eda-6eb33a0da6e6",
"type": "Mesh",
"layers": 1,
"matrix": [
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
-20.140462201825898,
-21.13417074227967,
-21.551885000578707,
1
],
"up": [
0,
1,
0
],
"geometry": "95c670ff-2cc0-427f-a91f-e474bab505d6",
"material": "9f3edff2-2806-4f30-8948-4287450b6915"
}
}
},
{
"id": 2,
"ip": "192.168.50.4",
"port": "45000",
"role": "aggregator",
"malicious": false,
"proxy": false,
"start": false,
"neighbors": [
0,
1,
3,
4
],
"links": [],
"index": 2,
"x": -28.71703280728614,
"y": 4.020987712595549,
"z": 22.705010362753697,
"vx": 1.0694677449996208e-8,
"vy": 2.9084972037089048e-8,
"vz": -2.015565459223841e-8,
"__threeObj": {
"metadata": {
"version": 4.6,
"type": "Object",
"generator": "Object3D.toJSON"
},
"geometries": [
{
"uuid": "26bf41c6-a855-4af4-9ebb-05bdf9d78bb4",
"type": "SphereGeometry",
"radius": 5,
"widthSegments": 32,
"heightSegments": 16,
"phiStart": 0,
"phiLength": 6.283185307179586,
"thetaStart": 0,
"thetaLength": 3.141592653589793
}
],
"materials": [
{
"uuid": "7c040637-10f0-408b-8975-0338903659fb",
"type": "MeshLambertMaterial",
"color": 14245634,
"emissive": 0,
"reflectivity": 1,
"refractionRatio": 0.98,
"opacity": 0.75,
"depthFunc": 3,
"depthTest": true,
"depthWrite": true,
"colorWrite": true,
"stencilWrite": false,
"stencilWriteMask": 255,
"stencilFunc": 519,
"stencilRef": 0,
"stencilFuncMask": 255,
"stencilFail": 7680,
"stencilZFail": 7680,
"stencilZPass": 7680
}
],
"object": {
"uuid": "44f3c661-2ccd-440c-827b-ffcc489b5c64",
"type": "Mesh",
"layers": 1,
"matrix": [
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
-28.71703280728614,
4.020987712595549,
22.705010362753697,
1
],
"up": [
0,
1,
0
],
"geometry": "26bf41c6-a855-4af4-9ebb-05bdf9d78bb4",
"material": "7c040637-10f0-408b-8975-0338903659fb"
}
}
},
{
"id": 3,
"ip": "192.168.50.5",
"port": "45000",
"role": "aggregator",
"malicious": false,
"proxy": false,
"start": false,
"neighbors": [
0,
1,
2,
4
],
"links": [],
"index": 3,
"x": 19.679145143125147,
"y": 22.16282780436268,
"z": 21.030079148818903,
"vx": -3.852416481366844e-9,
"vy": -4.9742524279957165e-8,
"vz": 6.992962533560291e-9,
"__threeObj": {
"metadata": {
"version": 4.6,
"type": "Object",
"generator": "Object3D.toJSON"
},
"geometries": [
{
"uuid": "75c95712-2b2f-44e0-84b1-2130d887113c",
"type": "SphereGeometry",
"radius": 5,
"widthSegments": 32,
"heightSegments": 16,
"phiStart": 0,
"phiLength": 6.283185307179586,
"thetaStart": 0,
"thetaLength": 3.141592653589793
}
],
"materials": [
{
"uuid": "f54d2ff7-d369-4e22-b285-ca6cdeb23421",
"type": "MeshLambertMaterial",
"color": 14245634,
"emissive": 0,
"reflectivity": 1,
"refractionRatio": 0.98,
"opacity": 0.75,
"depthFunc": 3,
"depthTest": true,
"depthWrite": true,
"colorWrite": true,
"stencilWrite": false,
"stencilWriteMask": 255,
"stencilFunc": 519,
"stencilRef": 0,
"stencilFuncMask": 255,
"stencilFail": 7680,
"stencilZFail": 7680,
"stencilZPass": 7680
}
],
"object": {
"uuid": "142323c7-c3b1-4d07-860b-a623114b67ad",
"type": "Mesh",
"layers": 1,
"matrix": [
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
19.679145143125147,
22.16282780436268,
21.030079148818903,
1
],
"up": [
0,
1,
0
],
"geometry": "75c95712-2b2f-44e0-84b1-2130d887113c",
"material": "f54d2ff7-d369-4e22-b285-ca6cdeb23421"
}
}
},
{
"id": 4,
"ip": "192.168.50.6",
"port": "45000",
"role": "aggregator",
"malicious": false,
"proxy": false,
"start": false,
"neighbors": [
0,
1,
2,
3
],
"links": [],
"index": 4,
"x": 23.546931500373656,
"y": -27.744255665689202,
"z": 6.177520607473647,
"vx": -1.4972295223986403e-8,
"vy": 3.58956139672832e-8,
"vz": -1.3450917130387546e-8,
"__threeObj": {
"metadata": {
"version": 4.6,
"type": "Object",
"generator": "Object3D.toJSON"
},
"geometries": [
{
"uuid": "6593363c-6081-4c21-b1e9-3ae4f03b3a3d",
"type": "SphereGeometry",
"radius": 5,
"widthSegments": 32,
"heightSegments": 16,
"phiStart": 0,
"phiLength": 6.283185307179586,
"thetaStart": 0,
"thetaLength": 3.141592653589793
}
],
"materials": [
{
"uuid": "13def5d0-adf5-4e3b-bb57-30362a221818",
"type": "MeshLambertMaterial",
"color": 14245634,
"emissive": 0,
"reflectivity": 1,
"refractionRatio": 0.98,
"opacity": 0.75,
"depthFunc": 3,
"depthTest": true,
"depthWrite": true,
"colorWrite": true,
"stencilWrite": false,
"stencilWriteMask": 255,
"stencilFunc": 519,
"stencilRef": 0,
"stencilFuncMask": 255,
"stencilFail": 7680,
"stencilZFail": 7680,
"stencilZPass": 7680
}
],
"object": {
"uuid": "64c9ea9c-1c99-420a-b048-81aead906df3",
"type": "Mesh",
"layers": 1,
"matrix": [
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
23.546931500373656,
-27.744255665689202,
6.177520607473647,
1
],
"up": [
0,
1,
0
],
"geometry": "6593363c-6081-4c21-b1e9-3ae4f03b3a3d",
"material": "13def5d0-adf5-4e3b-bb57-30362a221818"
}
}
}
],
"n_nodes": 5,
"matrix": [
[
0,
1,
1,
1,
1
],
[
1,
0,
1,
1,
1
],
[
1,
1,
0,
1,
1
],
[
1,
1,
1,
0,
1
],
[
1,
1,
1,
1,
0
]
],
"dataset": "MNIST",
"iid": false,
"partition_selection": "dirichlet",
"partition_parameter": "0.5",
"model": "MLP",
"agg_algorithm": "FedAvg",
"rounds": "10",
"logginglevel": true,
"accelerator": "gpu",
"network_subnet": "192.168.50.0/24",
"network_gateway": "192.168.50.1",
"epochs": "1",
"attacks": "GLLNeuronInversionAttack",
"poisoned_node_percent": "0",
"poisoned_sample_percent": "0",
"poisoned_noise_percent": "0",
"with_reputation": false,
"is_dynamic_topology": false,
"is_dynamic_aggregation": false,
"target_aggregation": false,
"random_geo": true,
"latitude": 38.023522,
"longitude": -1.174389,
"mobility": false,
"mobility_type": "both",
"radius_federation": "1000",
"scheme_mobility": "random",
"round_frequency": "1",
"mobile_participants_percent": "100",
"additional_participants": [],
"schema_additional_participants": "random"
},
{
"scenario_title": "FedAvg_Fully_nodes5_MNIST_NoiseInjectionAttack",
"scenario_description": "",
"simulation": true,
"federation": "DFL",
"topology": "Custom",
"nodes": {
"0": {
"id": 0,
"ip": "192.168.50.2",
"port": "45000",
"role": "aggregator",
"malicious": false,
"proxy": false,
"start": true
},
"1": {
"id": 1,
"ip": "192.168.50.3",
"port": "45000",
"role": "aggregator",
"malicious": false,
"proxy": false,
"start": false
},
"2": {
"id": 2,
"ip": "192.168.50.4",
"port": "45000",
"role": "aggregator",
"malicious": false,
"proxy": false,
"start": false
},
"3": {
"id": 3,
"ip": "192.168.50.5",
"port": "45000",
"role": "aggregator",
"malicious": false,
"proxy": false,
"start": false
},
"4": {
"id": 4,
"ip": "192.168.50.6",
"port": "45000",
"role": "aggregator",
"malicious": false,
"proxy": false,
"start": false
}
},
"nodes_graph": [
{
"id": 0,
"ip": "192.168.50.2",
"port": "45000",
"role": "aggregator",
"malicious": false,
"proxy": false,
"start": true,
"neighbors": [
1,
2,
3,
4
],
"links": [],
"index": 0,
"x": 5.631418365613239,
"y": 22.694610891010647,
"z": -28.360725118467528,
"vx": -1.0328046273358557e-8,
"vy": 1.5929041989459476e-8,
"vz": 3.908696708641432e-9,
"__threeObj": {
"metadata": {
"version": 4.6,
"type": "Object",
"generator": "Object3D.toJSON"
},
"geometries": [
{
"uuid": "9937f075-faa3-4676-8b54-756622fd8981",
"type": "SphereGeometry",
"radius": 5,
"widthSegments": 32,
"heightSegments": 16,
"phiStart": 0,
"phiLength": 6.283185307179586,
"thetaStart": 0,
"thetaLength": 3.141592653589793
}
],
"materials": [
{
"uuid": "52abc64e-91e6-4086-b939-4b34847cb7f0",
"type": "MeshLambertMaterial",
"color": 14245634,
"emissive": 0,
"reflectivity": 1,
"refractionRatio": 0.98,
"opacity": 0.75,
"depthFunc": 3,
"depthTest": true,
"depthWrite": true,
"colorWrite": true,
"stencilWrite": false,
"stencilWriteMask": 255,
"stencilFunc": 519,
"stencilRef": 0,
"stencilFuncMask": 255,
"stencilFail": 7680,
"stencilZFail": 7680,
"stencilZPass": 7680
}
],
"object": {
"uuid": "a4edcb16-67e9-4d55-8380-bd22c205ad96",
"type": "Mesh",
"layers": 1,
"matrix": [
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
5.631418365613239,
22.694610891010647,
-28.360725118467528,
1
],
"up": [
0,
1,
0
],
"geometry": "9937f075-faa3-4676-8b54-756622fd8981",
"material": "52abc64e-91e6-4086-b939-4b34847cb7f0"
}
}
},
{
"id": 1,
"ip": "192.168.50.3",
"port": "45000",
"role": "aggregator",
"malicious": false,
"proxy": false,
"start": false,
"neighbors": [
0,
2,
3,
4
],
"links": [],
"index": 1,
"x": -20.140462201825898,
"y": -21.13417074227967,
"z": -21.551885000578707,
"vx": 1.8458080528715605e-8,
"vy": -3.116710371387466e-8,
"vz": 2.2704912480424454e-8,
"__threeObj": {
"metadata": {
"version": 4.6,
"type": "Object",
"generator": "Object3D.toJSON"
},
"geometries": [
{
"uuid": "95c670ff-2cc0-427f-a91f-e474bab505d6",
"type": "SphereGeometry",
"radius": 5,
"widthSegments": 32,
"heightSegments": 16,
"phiStart": 0,
"phiLength": 6.283185307179586,
"thetaStart": 0,
"thetaLength": 3.141592653589793
}
],
"materials": [
{
"uuid": "9f3edff2-2806-4f30-8948-4287450b6915",
"type": "MeshLambertMaterial",
"color": 14245634,
"emissive": 0,
"reflectivity": 1,
"refractionRatio": 0.98,
"opacity": 0.75,
"depthFunc": 3,
"depthTest": true,
"depthWrite": true,
"colorWrite": true,
"stencilWrite": false,
"stencilWriteMask": 255,
"stencilFunc": 519,
"stencilRef": 0,
"stencilFuncMask": 255,
"stencilFail": 7680,
"stencilZFail": 7680,
"stencilZPass": 7680
}
],
"object": {
"uuid": "5d13f03c-c2bf-40c8-8eda-6eb33a0da6e6",
"type": "Mesh",
"layers": 1,
"matrix": [
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
-20.140462201825898,
-21.13417074227967,
-21.551885000578707,
1
],
"up": [
0,
1,
0
],
"geometry": "95c670ff-2cc0-427f-a91f-e474bab505d6",
"material": "9f3edff2-2806-4f30-8948-4287450b6915"
}
}
},
{
"id": 2,
"ip": "192.168.50.4",
"port": "45000",
"role": "aggregator",
"malicious": false,
"proxy": false,
"start": false,
"neighbors": [
0,
1,
3,
4
],
"links": [],
"index": 2,
"x": -28.71703280728614,
"y": 4.020987712595549,
"z": 22.705010362753697,
"vx": 1.0694677449996208e-8,
"vy": 2.9084972037089048e-8,
"vz": -2.015565459223841e-8,
"__threeObj": {
"metadata": {
"version": 4.6,
"type": "Object",
"generator": "Object3D.toJSON"
},
"geometries": [
{
"uuid": "26bf41c6-a855-4af4-9ebb-05bdf9d78bb4",
"type": "SphereGeometry",
"radius": 5,
"widthSegments": 32,
"heightSegments": 16,
"phiStart": 0,
"phiLength": 6.283185307179586,
"thetaStart": 0,
"thetaLength": 3.141592653589793
}
],
"materials": [
{
"uuid": "7c040637-10f0-408b-8975-0338903659fb",
"type": "MeshLambertMaterial",
"color": 14245634,
"emissive": 0,
"reflectivity": 1,
"refractionRatio": 0.98,
"opacity": 0.75,
"depthFunc": 3,
"depthTest": true,
"depthWrite": true,
"colorWrite": true,
"stencilWrite": false,
"stencilWriteMask": 255,
"stencilFunc": 519,
"stencilRef": 0,
"stencilFuncMask": 255,
"stencilFail": 7680,
"stencilZFail": 7680,
"stencilZPass": 7680
}
],
"object": {
"uuid": "44f3c661-2ccd-440c-827b-ffcc489b5c64",
"type": "Mesh",
"layers": 1,
"matrix": [
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
-28.71703280728614,
4.020987712595549,
22.705010362753697,
1
],
"up": [
0,
1,
0
],
"geometry": "26bf41c6-a855-4af4-9ebb-05bdf9d78bb4",
"material": "7c040637-10f0-408b-8975-0338903659fb"
}
}
},
{
"id": 3,
"ip": "192.168.50.5",
"port": "45000",
"role": "aggregator",
"malicious": false,
"proxy": false,
"start": false,
"neighbors": [
0,
1,
2,
4
],
"links": [],
"index": 3,
"x": 19.679145143125147,
"y": 22.16282780436268,
"z": 21.030079148818903,
"vx": -3.852416481366844e-9,
"vy": -4.9742524279957165e-8,
"vz": 6.992962533560291e-9,
"__threeObj": {
"metadata": {
"version": 4.6,
"type": "Object",
"generator": "Object3D.toJSON"
},
"geometries": [
{
"uuid": "75c95712-2b2f-44e0-84b1-2130d887113c",
"type": "SphereGeometry",
"radius": 5,
"widthSegments": 32,
"heightSegments": 16,
"phiStart": 0,
"phiLength": 6.283185307179586,
"thetaStart": 0,
"thetaLength": 3.141592653589793
}
],
"materials": [
{
"uuid": "f54d2ff7-d369-4e22-b285-ca6cdeb23421",
"type": "MeshLambertMaterial",
"color": 14245634,
"emissive": 0,
"reflectivity": 1,
"refractionRatio": 0.98,
"opacity": 0.75,
"depthFunc": 3,
"depthTest": true,
"depthWrite": true,
"colorWrite": true,
"stencilWrite": false,
"stencilWriteMask": 255,
"stencilFunc": 519,
"stencilRef": 0,
"stencilFuncMask": 255,
"stencilFail": 7680,
"stencilZFail": 7680,
"stencilZPass": 7680
}
],
"object": {
"uuid": "142323c7-c3b1-4d07-860b-a623114b67ad",
"type": "Mesh",
"layers": 1,
"matrix": [
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
19.679145143125147,
22.16282780436268,
21.030079148818903,
1
],
"up": [
0,
1,
0
],
"geometry": "75c95712-2b2f-44e0-84b1-2130d887113c",
"material": "f54d2ff7-d369-4e22-b285-ca6cdeb23421"
}
}
},
{
"id": 4,
"ip": "192.168.50.6",
"port": "45000",
"role": "aggregator",
"malicious": false,
"proxy": false,
"start": false,
"neighbors": [
0,
1,
2,
3
],
"links": [],
"index": 4,
"x": 23.546931500373656,
"y": -27.744255665689202,
"z": 6.177520607473647,
"vx": -1.4972295223986403e-8,
"vy": 3.58956139672832e-8,
"vz": -1.3450917130387546e-8,
"__threeObj": {
"metadata": {
"version": 4.6,
"type": "Object",
"generator": "Object3D.toJSON"
},
"geometries": [
{
"uuid": "6593363c-6081-4c21-b1e9-3ae4f03b3a3d",
"type": "SphereGeometry",
"radius": 5,
"widthSegments": 32,
"heightSegments": 16,
"phiStart": 0,
"phiLength": 6.283185307179586,
"thetaStart": 0,
"thetaLength": 3.141592653589793
}
],
"materials": [
{
"uuid": "13def5d0-adf5-4e3b-bb57-30362a221818",
"type": "MeshLambertMaterial",
"color": 14245634,
"emissive": 0,
"reflectivity": 1,
"refractionRatio": 0.98,
"opacity": 0.75,
"depthFunc": 3,
"depthTest": true,
"depthWrite": true,
"colorWrite": true,
"stencilWrite": false,
"stencilWriteMask": 255,
"stencilFunc": 519,
"stencilRef": 0,
"stencilFuncMask": 255,
"stencilFail": 7680,
"stencilZFail": 7680,
"stencilZPass": 7680
}
],
"object": {
"uuid": "64c9ea9c-1c99-420a-b048-81aead906df3",
"type": "Mesh",
"layers": 1,
"matrix": [
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
23.546931500373656,
-27.744255665689202,
6.177520607473647,
1
],
"up": [
0,
1,
0
],
"geometry": "6593363c-6081-4c21-b1e9-3ae4f03b3a3d",
"material": "13def5d0-adf5-4e3b-bb57-30362a221818"
}
}
}
],
"n_nodes": 5,
"matrix": [
[
0,
1,
1,
1,
1
],
[
1,
0,
1,
1,
1
],
[
1,
1,
0,
1,
1
],
[
1,
1,
1,
0,
1
],
[
1,
1,
1,
1,
0
]
],
"dataset": "MNIST",
"iid": false,
"partition_selection": "dirichlet",
"partition_parameter": "0.5",
"model": "MLP",
"agg_algorithm": "FedAvg",
"rounds": "10",
"logginglevel": true,
"accelerator": "gpu",
"network_subnet": "192.168.50.0/24",
"network_gateway": "192.168.50.1",
"epochs": "1",
"attacks": "NoiseInjectionAttack",
"poisoned_node_percent": "0",
"poisoned_sample_percent": "0",
"poisoned_noise_percent": "0",
"with_reputation": false,
"is_dynamic_topology": false,
"is_dynamic_aggregation": false,
"target_aggregation": false,
"random_geo": true,
"latitude": 38.023522,
"longitude": -1.174389,
"mobility": false,
"mobility_type": "both",
"radius_federation": "1000",
"scheme_mobility": "random",
"round_frequency": "1",
"mobile_participants_percent": "100",
"additional_participants": [],
"schema_additional_participants": "random"
},
{
"scenario_title": "FedAvg_Fully_nodes5_MNIST_SwappingWeightsAttack",
"scenario_description": "",
"simulation": true,
"federation": "DFL",
"topology": "Custom",
"nodes": {
"0": {
"id": 0,
"ip": "192.168.50.2",
"port": "45000",
"role": "aggregator",
"malicious": false,
"proxy": false,
"start": true
},
"1": {
"id": 1,
"ip": "192.168.50.3",
"port": "45000",
"role": "aggregator",
"malicious": false,
"proxy": false,
"start": false
},
"2": {
"id": 2,
"ip": "192.168.50.4",
"port": "45000",
"role": "aggregator",
"malicious": false,
"proxy": false,
"start": false
},
"3": {
"id": 3,
"ip": "192.168.50.5",
"port": "45000",
"role": "aggregator",
"malicious": false,
"proxy": false,
"start": false
},
"4": {
"id": 4,
"ip": "192.168.50.6",
"port": "45000",
"role": "aggregator",
"malicious": false,
"proxy": false,
"start": false
}
},
"nodes_graph": [
{
"id": 0,
"ip": "192.168.50.2",
"port": "45000",
"role": "aggregator",
"malicious": false,
"proxy": false,
"start": true,
"neighbors": [
1,
2,
3,
4
],
"links": [],
"index": 0,
"x": 5.631418365613239,
"y": 22.694610891010647,
"z": -28.360725118467528,
"vx": -1.0328046273358557e-8,
"vy": 1.5929041989459476e-8,
"vz": 3.908696708641432e-9,
"__threeObj": {
"metadata": {
"version": 4.6,
"type": "Object",
"generator": "Object3D.toJSON"
},
"geometries": [
{
"uuid": "9937f075-faa3-4676-8b54-756622fd8981",
"type": "SphereGeometry",
"radius": 5,
"widthSegments": 32,
"heightSegments": 16,
"phiStart": 0,
"phiLength": 6.283185307179586,
"thetaStart": 0,
"thetaLength": 3.141592653589793
}
],
"materials": [
{
"uuid": "52abc64e-91e6-4086-b939-4b34847cb7f0",
"type": "MeshLambertMaterial",
"color": 14245634,
"emissive": 0,
"reflectivity": 1,
"refractionRatio": 0.98,
"opacity": 0.75,
"depthFunc": 3,
"depthTest": true,
"depthWrite": true,
"colorWrite": true,
"stencilWrite": false,
"stencilWriteMask": 255,
"stencilFunc": 519,
"stencilRef": 0,
"stencilFuncMask": 255,
"stencilFail": 7680,
"stencilZFail": 7680,
"stencilZPass": 7680
}
],
"object": {
"uuid": "a4edcb16-67e9-4d55-8380-bd22c205ad96",
"type": "Mesh",
"layers": 1,
"matrix": [
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
5.631418365613239,
22.694610891010647,
-28.360725118467528,
1
],
"up": [
0,
1,
0
],
"geometry": "9937f075-faa3-4676-8b54-756622fd8981",
"material": "52abc64e-91e6-4086-b939-4b34847cb7f0"
}
}
},
{
"id": 1,
"ip": "192.168.50.3",
"port": "45000",
"role": "aggregator",
"malicious": false,
"proxy": false,
"start": false,
"neighbors": [
0,
2,
3,
4
],
"links": [],
"index": 1,
"x": -20.140462201825898,
"y": -21.13417074227967,
"z": -21.551885000578707,
"vx": 1.8458080528715605e-8,
"vy": -3.116710371387466e-8,
"vz": 2.2704912480424454e-8,
"__threeObj": {
"metadata": {
"version": 4.6,
"type": "Object",
"generator": "Object3D.toJSON"
},
"geometries": [
{
"uuid": "95c670ff-2cc0-427f-a91f-e474bab505d6",
"type": "SphereGeometry",
"radius": 5,
"widthSegments": 32,
"heightSegments": 16,
"phiStart": 0,
"phiLength": 6.283185307179586,
"thetaStart": 0,
"thetaLength": 3.141592653589793
}
],
"materials": [
{
"uuid": "9f3edff2-2806-4f30-8948-4287450b6915",
"type": "MeshLambertMaterial",
"color": 14245634,
"emissive": 0,
"reflectivity": 1,
"refractionRatio": 0.98,
"opacity": 0.75,
"depthFunc": 3,
"depthTest": true,
"depthWrite": true,
"colorWrite": true,
"stencilWrite": false,
"stencilWriteMask": 255,
"stencilFunc": 519,
"stencilRef": 0,
"stencilFuncMask": 255,
"stencilFail": 7680,
"stencilZFail": 7680,
"stencilZPass": 7680
}
],
"object": {
"uuid": "5d13f03c-c2bf-40c8-8eda-6eb33a0da6e6",
"type": "Mesh",
"layers": 1,
"matrix": [
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
-20.140462201825898,
-21.13417074227967,
-21.551885000578707,
1
],
"up": [
0,
1,
0
],
"geometry": "95c670ff-2cc0-427f-a91f-e474bab505d6",
"material": "9f3edff2-2806-4f30-8948-4287450b6915"
}
}
},
{
"id": 2,
"ip": "192.168.50.4",
"port": "45000",
"role": "aggregator",
"malicious": false,
"proxy": false,
"start": false,
"neighbors": [
0,
1,
3,
4
],
"links": [],
"index": 2,
"x": -28.71703280728614,
"y": 4.020987712595549,
"z": 22.705010362753697,
"vx": 1.0694677449996208e-8,
"vy": 2.9084972037089048e-8,
"vz": -2.015565459223841e-8,
"__threeObj": {
"metadata": {
"version": 4.6,
"type": "Object",
"generator": "Object3D.toJSON"
},
"geometries": [
{
"uuid": "26bf41c6-a855-4af4-9ebb-05bdf9d78bb4",
"type": "SphereGeometry",
"radius": 5,
"widthSegments": 32,
"heightSegments": 16,
"phiStart": 0,
"phiLength": 6.283185307179586,
"thetaStart": 0,
"thetaLength": 3.141592653589793
}
],
"materials": [
{
"uuid": "7c040637-10f0-408b-8975-0338903659fb",
"type": "MeshLambertMaterial",
"color": 14245634,
"emissive": 0,
"reflectivity": 1,
"refractionRatio": 0.98,
"opacity": 0.75,
"depthFunc": 3,
"depthTest": true,
"depthWrite": true,
"colorWrite": true,
"stencilWrite": false,
"stencilWriteMask": 255,
"stencilFunc": 519,
"stencilRef": 0,
"stencilFuncMask": 255,
"stencilFail": 7680,
"stencilZFail": 7680,
"stencilZPass": 7680
}
],
"object": {
"uuid": "44f3c661-2ccd-440c-827b-ffcc489b5c64",
"type": "Mesh",
"layers": 1,
"matrix": [
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
-28.71703280728614,
4.020987712595549,
22.705010362753697,
1
],
"up": [
0,
1,
0
],
"geometry": "26bf41c6-a855-4af4-9ebb-05bdf9d78bb4",
"material": "7c040637-10f0-408b-8975-0338903659fb"
}
}
},
{
"id": 3,
"ip": "192.168.50.5",
"port": "45000",
"role": "aggregator",
"malicious": false,
"proxy": false,
"start": false,
"neighbors": [
0,
1,
2,
4
],
"links": [],
"index": 3,
"x": 19.679145143125147,
"y": 22.16282780436268,
"z": 21.030079148818903,
"vx": -3.852416481366844e-9,
"vy": -4.9742524279957165e-8,
"vz": 6.992962533560291e-9,
"__threeObj": {
"metadata": {
"version": 4.6,
"type": "Object",
"generator": "Object3D.toJSON"
},
"geometries": [
{
"uuid": "75c95712-2b2f-44e0-84b1-2130d887113c",
"type": "SphereGeometry",
"radius": 5,
"widthSegments": 32,
"heightSegments": 16,
"phiStart": 0,
"phiLength": 6.283185307179586,
"thetaStart": 0,
"thetaLength": 3.141592653589793
}
],
"materials": [
{
"uuid": "f54d2ff7-d369-4e22-b285-ca6cdeb23421",
"type": "MeshLambertMaterial",
"color": 14245634,
"emissive": 0,
"reflectivity": 1,
"refractionRatio": 0.98,
"opacity": 0.75,
"depthFunc": 3,
"depthTest": true,
"depthWrite": true,
"colorWrite": true,
"stencilWrite": false,
"stencilWriteMask": 255,
"stencilFunc": 519,
"stencilRef": 0,
"stencilFuncMask": 255,
"stencilFail": 7680,
"stencilZFail": 7680,
"stencilZPass": 7680
}
],
"object": {
"uuid": "142323c7-c3b1-4d07-860b-a623114b67ad",
"type": "Mesh",
"layers": 1,
"matrix": [
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
19.679145143125147,
22.16282780436268,
21.030079148818903,
1
],
"up": [
0,
1,
0
],
"geometry": "75c95712-2b2f-44e0-84b1-2130d887113c",
"material": "f54d2ff7-d369-4e22-b285-ca6cdeb23421"
}
}
},
{
"id": 4,
"ip": "192.168.50.6",
"port": "45000",
"role": "aggregator",
"malicious": false,
"proxy": false,
"start": false,
"neighbors": [
0,
1,
2,
3
],
"links": [],
"index": 4,
"x": 23.546931500373656,
"y": -27.744255665689202,
"z": 6.177520607473647,
"vx": -1.4972295223986403e-8,
"vy": 3.58956139672832e-8,
"vz": -1.3450917130387546e-8,
"__threeObj": {
"metadata": {
"version": 4.6,
"type": "Object",
"generator": "Object3D.toJSON"
},
"geometries": [
{
"uuid": "6593363c-6081-4c21-b1e9-3ae4f03b3a3d",
"type": "SphereGeometry",
"radius": 5,
"widthSegments": 32,
"heightSegments": 16,
"phiStart": 0,
"phiLength": 6.283185307179586,
"thetaStart": 0,
"thetaLength": 3.141592653589793
}
],
"materials": [
{
"uuid": "13def5d0-adf5-4e3b-bb57-30362a221818",
"type": "MeshLambertMaterial",
"color": 14245634,
"emissive": 0,
"reflectivity": 1,
"refractionRatio": 0.98,
"opacity": 0.75,
"depthFunc": 3,
"depthTest": true,
"depthWrite": true,
"colorWrite": true,
"stencilWrite": false,
"stencilWriteMask": 255,
"stencilFunc": 519,
"stencilRef": 0,
"stencilFuncMask": 255,
"stencilFail": 7680,
"stencilZFail": 7680,
"stencilZPass": 7680
}
],
"object": {
"uuid": "64c9ea9c-1c99-420a-b048-81aead906df3",
"type": "Mesh",
"layers": 1,
"matrix": [
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
23.546931500373656,
-27.744255665689202,
6.177520607473647,
1
],
"up": [
0,
1,
0
],
"geometry": "6593363c-6081-4c21-b1e9-3ae4f03b3a3d",
"material": "13def5d0-adf5-4e3b-bb57-30362a221818"
}
}
}
],
"n_nodes": 5,
"matrix": [
[
0,
1,
1,
1,
1
],
[
1,
0,
1,
1,
1
],
[
1,
1,
0,
1,
1
],
[
1,
1,
1,
0,
1
],
[
1,
1,
1,
1,
0
]
],
"dataset": "MNIST",
"iid": false,
"partition_selection": "dirichlet",
"partition_parameter": "0.5",
"model": "MLP",
"agg_algorithm": "FedAvg",
"rounds": "10",
"logginglevel": true,
"accelerator": "gpu",
"network_subnet": "192.168.50.0/24",
"network_gateway": "192.168.50.1",
"epochs": "1",
"attacks": "SwappingWeightsAttack",
"poisoned_node_percent": "0",
"poisoned_sample_percent": "0",
"poisoned_noise_percent": "0",
"with_reputation": false,
"is_dynamic_topology": false,
"is_dynamic_aggregation": false,
"target_aggregation": false,
"random_geo": true,
"latitude": 38.023522,
"longitude": -1.174389,
"mobility": false,
"mobility_type": "both",
"radius_federation": "1000",
"scheme_mobility": "random",
"round_frequency": "1",
"mobile_participants_percent": "100",
"additional_participants": [],
"schema_additional_participants": "random"
},
{
"scenario_title": "FedAvg_Fully_nodes5_MNIST_DelayerAttack",
"scenario_description": "",
"simulation": true,
"federation": "DFL",
"topology": "Custom",
"nodes": {
"0": {
"id": 0,
"ip": "192.168.50.2",
"port": "45000",
"role": "aggregator",
"malicious": false,
"proxy": false,
"start": true
},
"1": {
"id": 1,
"ip": "192.168.50.3",
"port": "45000",
"role": "aggregator",
"malicious": false,
"proxy": false,
"start": false
},
"2": {
"id": 2,
"ip": "192.168.50.4",
"port": "45000",
"role": "aggregator",
"malicious": false,
"proxy": false,
"start": false
},
"3": {
"id": 3,
"ip": "192.168.50.5",
"port": "45000",
"role": "aggregator",
"malicious": false,
"proxy": false,
"start": false
},
"4": {
"id": 4,
"ip": "192.168.50.6",
"port": "45000",
"role": "aggregator",
"malicious": false,
"proxy": false,
"start": false
}
},
"nodes_graph": [
{
"id": 0,
"ip": "192.168.50.2",
"port": "45000",
"role": "aggregator",
"malicious": false,
"proxy": false,
"start": true,
"neighbors": [
1,
2,
3,
4
],
"links": [],
"index": 0,
"x": 5.631418365613239,
"y": 22.694610891010647,
"z": -28.360725118467528,
"vx": -1.0328046273358557e-8,
"vy": 1.5929041989459476e-8,
"vz": 3.908696708641432e-9,
"__threeObj": {
"metadata": {
"version": 4.6,
"type": "Object",
"generator": "Object3D.toJSON"
},
"geometries": [
{
"uuid": "9937f075-faa3-4676-8b54-756622fd8981",
"type": "SphereGeometry",
"radius": 5,
"widthSegments": 32,
"heightSegments": 16,
"phiStart": 0,
"phiLength": 6.283185307179586,
"thetaStart": 0,
"thetaLength": 3.141592653589793
}
],
"materials": [
{
"uuid": "52abc64e-91e6-4086-b939-4b34847cb7f0",
"type": "MeshLambertMaterial",
"color": 14245634,
"emissive": 0,
"reflectivity": 1,
"refractionRatio": 0.98,
"opacity": 0.75,
"depthFunc": 3,
"depthTest": true,
"depthWrite": true,
"colorWrite": true,
"stencilWrite": false,
"stencilWriteMask": 255,
"stencilFunc": 519,
"stencilRef": 0,
"stencilFuncMask": 255,
"stencilFail": 7680,
"stencilZFail": 7680,
"stencilZPass": 7680
}
],
"object": {
"uuid": "a4edcb16-67e9-4d55-8380-bd22c205ad96",
"type": "Mesh",
"layers": 1,
"matrix": [
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
5.631418365613239,
22.694610891010647,
-28.360725118467528,
1
],
"up": [
0,
1,
0
],
"geometry": "9937f075-faa3-4676-8b54-756622fd8981",
"material": "52abc64e-91e6-4086-b939-4b34847cb7f0"
}
}
},
{
"id": 1,
"ip": "192.168.50.3",
"port": "45000",
"role": "aggregator",
"malicious": false,
"proxy": false,
"start": false,
"neighbors": [
0,
2,
3,
4
],
"links": [],
"index": 1,
"x": -20.140462201825898,
"y": -21.13417074227967,
"z": -21.551885000578707,
"vx": 1.8458080528715605e-8,
"vy": -3.116710371387466e-8,
"vz": 2.2704912480424454e-8,
"__threeObj": {
"metadata": {
"version": 4.6,
"type": "Object",
"generator": "Object3D.toJSON"
},
"geometries": [
{
"uuid": "95c670ff-2cc0-427f-a91f-e474bab505d6",
"type": "SphereGeometry",
"radius": 5,
"widthSegments": 32,
"heightSegments": 16,
"phiStart": 0,
"phiLength": 6.283185307179586,
"thetaStart": 0,
"thetaLength": 3.141592653589793
}
],
"materials": [
{
"uuid": "9f3edff2-2806-4f30-8948-4287450b6915",
"type": "MeshLambertMaterial",
"color": 14245634,
"emissive": 0,
"reflectivity": 1,
"refractionRatio": 0.98,
"opacity": 0.75,
"depthFunc": 3,
"depthTest": true,
"depthWrite": true,
"colorWrite": true,
"stencilWrite": false,
"stencilWriteMask": 255,
"stencilFunc": 519,
"stencilRef": 0,
"stencilFuncMask": 255,
"stencilFail": 7680,
"stencilZFail": 7680,
"stencilZPass": 7680
}
],
"object": {
"uuid": "5d13f03c-c2bf-40c8-8eda-6eb33a0da6e6",
"type": "Mesh",
"layers": 1,
"matrix": [
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
-20.140462201825898,
-21.13417074227967,
-21.551885000578707,
1
],
"up": [
0,
1,
0
],
"geometry": "95c670ff-2cc0-427f-a91f-e474bab505d6",
"material": "9f3edff2-2806-4f30-8948-4287450b6915"
}
}
},
{
"id": 2,
"ip": "192.168.50.4",
"port": "45000",
"role": "aggregator",
"malicious": false,
"proxy": false,
"start": false,
"neighbors": [
0,
1,
3,
4
],
"links": [],
"index": 2,
"x": -28.71703280728614,
"y": 4.020987712595549,
"z": 22.705010362753697,
"vx": 1.0694677449996208e-8,
"vy": 2.9084972037089048e-8,
"vz": -2.015565459223841e-8,
"__threeObj": {
"metadata": {
"version": 4.6,
"type": "Object",
"generator": "Object3D.toJSON"
},
"geometries": [
{
"uuid": "26bf41c6-a855-4af4-9ebb-05bdf9d78bb4",
"type": "SphereGeometry",
"radius": 5,
"widthSegments": 32,
"heightSegments": 16,
"phiStart": 0,
"phiLength": 6.283185307179586,
"thetaStart": 0,
"thetaLength": 3.141592653589793
}
],
"materials": [
{
"uuid": "7c040637-10f0-408b-8975-0338903659fb",
"type": "MeshLambertMaterial",
"color": 14245634,
"emissive": 0,
"reflectivity": 1,
"refractionRatio": 0.98,
"opacity": 0.75,
"depthFunc": 3,
"depthTest": true,
"depthWrite": true,
"colorWrite": true,
"stencilWrite": false,
"stencilWriteMask": 255,
"stencilFunc": 519,
"stencilRef": 0,
"stencilFuncMask": 255,
"stencilFail": 7680,
"stencilZFail": 7680,
"stencilZPass": 7680
}
],
"object": {
"uuid": "44f3c661-2ccd-440c-827b-ffcc489b5c64",
"type": "Mesh",
"layers": 1,
"matrix": [
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
-28.71703280728614,
4.020987712595549,
22.705010362753697,
1
],
"up": [
0,
1,
0
],
"geometry": "26bf41c6-a855-4af4-9ebb-05bdf9d78bb4",
"material": "7c040637-10f0-408b-8975-0338903659fb"
}
}
},
{
"id": 3,
"ip": "192.168.50.5",
"port": "45000",
"role": "aggregator",
"malicious": false,
"proxy": false,
"start": false,
"neighbors": [
0,
1,
2,
4
],
"links": [],
"index": 3,
"x": 19.679145143125147,
"y": 22.16282780436268,
"z": 21.030079148818903,
"vx": -3.852416481366844e-9,
"vy": -4.9742524279957165e-8,
"vz": 6.992962533560291e-9,
"__threeObj": {
"metadata": {
"version": 4.6,
"type": "Object",
"generator": "Object3D.toJSON"
},
"geometries": [
{
"uuid": "75c95712-2b2f-44e0-84b1-2130d887113c",
"type": "SphereGeometry",
"radius": 5,
"widthSegments": 32,
"heightSegments": 16,
"phiStart": 0,
"phiLength": 6.283185307179586,
"thetaStart": 0,
"thetaLength": 3.141592653589793
}
],
"materials": [
{
"uuid": "f54d2ff7-d369-4e22-b285-ca6cdeb23421",
"type": "MeshLambertMaterial",
"color": 14245634,
"emissive": 0,
"reflectivity": 1,
"refractionRatio": 0.98,
"opacity": 0.75,
"depthFunc": 3,
"depthTest": true,
"depthWrite": true,
"colorWrite": true,
"stencilWrite": false,
"stencilWriteMask": 255,
"stencilFunc": 519,
"stencilRef": 0,
"stencilFuncMask": 255,
"stencilFail": 7680,
"stencilZFail": 7680,
"stencilZPass": 7680
}
],
"object": {
"uuid": "142323c7-c3b1-4d07-860b-a623114b67ad",
"type": "Mesh",
"layers": 1,
"matrix": [
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
19.679145143125147,
22.16282780436268,
21.030079148818903,
1
],
"up": [
0,
1,
0
],
"geometry": "75c95712-2b2f-44e0-84b1-2130d887113c",
"material": "f54d2ff7-d369-4e22-b285-ca6cdeb23421"
}
}
},
{
"id": 4,
"ip": "192.168.50.6",
"port": "45000",
"role": "aggregator",
"malicious": false,
"proxy": false,
"start": false,
"neighbors": [
0,
1,
2,
3
],
"links": [],
"index": 4,
"x": 23.546931500373656,
"y": -27.744255665689202,
"z": 6.177520607473647,
"vx": -1.4972295223986403e-8,
"vy": 3.58956139672832e-8,
"vz": -1.3450917130387546e-8,
"__threeObj": {
"metadata": {
"version": 4.6,
"type": "Object",
"generator": "Object3D.toJSON"
},
"geometries": [
{
"uuid": "6593363c-6081-4c21-b1e9-3ae4f03b3a3d",
"type": "SphereGeometry",
"radius": 5,
"widthSegments": 32,
"heightSegments": 16,
"phiStart": 0,
"phiLength": 6.283185307179586,
"thetaStart": 0,
"thetaLength": 3.141592653589793
}
],
"materials": [
{
"uuid": "13def5d0-adf5-4e3b-bb57-30362a221818",
"type": "MeshLambertMaterial",
"color": 14245634,
"emissive": 0,
"reflectivity": 1,
"refractionRatio": 0.98,
"opacity": 0.75,
"depthFunc": 3,
"depthTest": true,
"depthWrite": true,
"colorWrite": true,
"stencilWrite": false,
"stencilWriteMask": 255,
"stencilFunc": 519,
"stencilRef": 0,
"stencilFuncMask": 255,
"stencilFail": 7680,
"stencilZFail": 7680,
"stencilZPass": 7680
}
],
"object": {
"uuid": "64c9ea9c-1c99-420a-b048-81aead906df3",
"type": "Mesh",
"layers": 1,
"matrix": [
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
23.546931500373656,
-27.744255665689202,
6.177520607473647,
1
],
"up": [
0,
1,
0
],
"geometry": "6593363c-6081-4c21-b1e9-3ae4f03b3a3d",
"material": "13def5d0-adf5-4e3b-bb57-30362a221818"
}
}
}
],
"n_nodes": 5,
"matrix": [
[
0,
1,
1,
1,
1
],
[
1,
0,
1,
1,
1
],
[
1,
1,
0,
1,
1
],
[
1,
1,
1,
0,
1
],
[
1,
1,
1,
1,
0
]
],
"dataset": "MNIST",
"iid": false,
"partition_selection": "dirichlet",
"partition_parameter": "0.5",
"model": "MLP",
"agg_algorithm": "FedAvg",
"rounds": "10",
"logginglevel": true,
"accelerator": "gpu",
"network_subnet": "192.168.50.0/24",
"network_gateway": "192.168.50.1",
"epochs": "1",
"attacks": "DelayerAttack",
"poisoned_node_percent": "0",
"poisoned_sample_percent": "0",
"poisoned_noise_percent": "0",
"with_reputation": false,
"is_dynamic_topology": false,
"is_dynamic_aggregation": false,
"target_aggregation": false,
"random_geo": true,
"latitude": 38.023522,
"longitude": -1.174389,
"mobility": false,
"mobility_type": "both",
"radius_federation": "1000",
"scheme_mobility": "random",
"round_frequency": "1",
"mobile_participants_percent": "100",
"additional_participants": [],
"schema_additional_participants": "random"
}
] | 77,847 | Python | .tac | 3,027 | 14.070367 | 75 | 0.394114 | enriquetomasmb/nebula | 8 | 2 | 2 | GPL-3.0 | 9/5/2024, 10:48:43 PM (Europe/Amsterdam) |
2,290,023 | nebula.addons.attacks.poisoning.rst | enriquetomasmb_nebula/docs/nebula.addons.attacks.poisoning.rst | nebula.addons.attacks.poisoning package
=======================================
Submodules
----------
nebula.addons.attacks.poisoning.datapoison module
-------------------------------------------------
.. automodule:: nebula.addons.attacks.poisoning.datapoison
:members:
:undoc-members:
:show-inheritance:
nebula.addons.attacks.poisoning.labelflipping module
----------------------------------------------------
.. automodule:: nebula.addons.attacks.poisoning.labelflipping
:members:
:undoc-members:
:show-inheritance:
nebula.addons.attacks.poisoning.modelpoison module
--------------------------------------------------
.. automodule:: nebula.addons.attacks.poisoning.modelpoison
:members:
:undoc-members:
:show-inheritance:
Module contents
---------------
.. automodule:: nebula.addons.attacks.poisoning
:members:
:undoc-members:
:show-inheritance:
| 896 | Python | .tac | 28 | 29.392857 | 61 | 0.601863 | enriquetomasmb/nebula | 8 | 2 | 2 | GPL-3.0 | 9/5/2024, 10:48:43 PM (Europe/Amsterdam) |
2,290,024 | nebula.addons.attacks.rst | enriquetomasmb_nebula/docs/nebula.addons.attacks.rst | nebula.addons.attacks package
=============================
Subpackages
-----------
.. toctree::
:maxdepth: 4
nebula.addons.attacks.poisoning
Submodules
----------
nebula.addons.attacks.attacks module
------------------------------------
.. automodule:: nebula.addons.attacks.attacks
:members:
:undoc-members:
:show-inheritance:
Module contents
---------------
.. automodule:: nebula.addons.attacks
:members:
:undoc-members:
:show-inheritance:
| 476 | Python | .tac | 21 | 20.142857 | 45 | 0.604027 | enriquetomasmb/nebula | 8 | 2 | 2 | GPL-3.0 | 9/5/2024, 10:48:43 PM (Europe/Amsterdam) |
2,290,025 | nodes.py | huagetai_ComfyUI_LightGradient/nodes.py | import torch
import random
import numpy as np
from enum import Enum
from nodes import MAX_RESOLUTION
# Images generated by combining different creases need to be normalized back to the 0-255 range
def normalize_img(img):
return (img / img.max()) * 255
# cross=True returns a symmetric profile (falls from low towards up, then mirrors back), used for cross creases
def create_uneven_array(low, up, steps, spacing=1, cross=False):
span = up - low
dx = 1.0 / steps
if cross:
arr = np.array([low + (i*dx)**spacing*span for i in range(steps//2)])
return np.append(arr, arr[::-1])
else :
arr = np.array([low + (i*dx)**spacing*span for i in range(steps)])
return arr
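# Worked example (illustrative): a larger `spacing` exponent keeps intermediate
# values close to `low` for longer before they fall towards `up`:
#   create_uneven_array(255, 0, 5, spacing=1) -> [255., 204., 153., 102., 51.]
#   create_uneven_array(255, 0, 5, spacing=2) -> [255., 244.8, 214.2, 163.2, 91.8]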
def parabolic_crease(spacing, c, scale=100, corner=1, resolution = 1000):
"""
Parameters:
spacing = controls how close the intermediate values will be to lower value
c = higher the c more spread out the gradient will be
scale = lesser the scale more concentrated is gradient towards the corner
"""
img = np.zeros((resolution, resolution))
# Varying the scaling parameter of create_uneven_array will give the parabolic gradient transition
for i in range(resolution):
img[i] = create_uneven_array(255, 0, resolution, spacing + c*i/scale)
if corner == 1:
return img
elif corner == 2:
return img[::-1]
elif corner == 3:
return img.T
else:
return img.T[::-1]
# Returns a cross crease if cross=1, otherwise a linear (one-directional) crease.
# With cross=1 the 1D profile dips from 255 towards 0 and back, so img * img.T
# darkens a cross through the centre of the image.
def cross_crease(spacing, cross=1, resolution = 1000):
a = create_uneven_array(255, 0, resolution, spacing, cross=True)
img = np.tile(a, (resolution, 1))
return normalize_img(img*img.T) if cross else img
# Final function: returns a random crease drawn from 8 different types
def custom_crease():
spacing = random.uniform(1, 1.5)
scale = random.randint(100, 300)
corner = random.randint(1, 4)
# constant determines the type of crease and also is used to scale spacing in parabolic_crease
constant = random.randint(1, 10)
# Returning those creases which are based on parabolic
parabolic = parabolic_crease(spacing, constant, scale, corner)
if constant == 1:
return parabolic
elif constant == 2:
return normalize_img(parabolic*parabolic.T*parabolic[::-1]*parabolic.T[::-1])
# Returning those creases which are based on parabolic and cross
cross = cross_crease(spacing)
if constant == 3:
return cross
elif constant == 4:
return normalize_img(parabolic * cross)
elif constant == 5:
return normalize_img(cross * parabolic * parabolic.T)
# Returning those creases which are based on parabolic and linear
linear = cross_crease(spacing, 0)
if constant == 6:
return linear
elif constant == 7:
return linear.T
else:
return normalize_img(linear * parabolic)
class LightPosition(Enum):
LEFT = "Left Light"
RIGHT = "Right Light"
TOP = "Top Light"
BOTTOM = "Bottom Light"
TOP_LEFT = "Top Left Light"
TOP_RIGHT = "Top Right Light"
BOTTOM_LEFT = "Bottom Left Light"
BOTTOM_RIGHT = "Bottom Right Light"
def toRgb(color):
if color.startswith('#') and len(color) == 7: # e.g. "#RRGGBB"
color_rgb =tuple(int(color[i:i+2], 16) for i in (1, 3, 5))
else: # e.g. "255,255,255"
color_rgb = tuple(int(i) for i in color.split(','))
return color_rgb
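# Examples (illustrative): toRgb("#FF8800") -> (255, 136, 0)
#                          toRgb("12,34,56") -> (12, 34, 56)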
def rgb_to_int(rgb):
r, g, b = rgb
return (r << 16) + (g << 8) + b
def generate_gradient_image(width:int, height:int, start_color:tuple=(255,255,255), end_color:tuple=(0,0,0), multiplier:float=1.0, lightPosition:LightPosition=LightPosition.LEFT):
"""
Generate a gradient image with a light source effect.
Parameters:
width (int): Width of the image.
height (int): Height of the image.
start_color: Starting color RGB of the gradient.
end_color: Ending color RGB of the gradient.
    multiplier: Exponent applied to the gradient ramp; controls how quickly the light falls off.
lightPosition (LightPosition): Position of the light source.
Returns:
    np.ndarray: (height, width, 3) uint8 gradient image.
"""
# Create a gradient from 0 to 1 and apply multiplier
if lightPosition == LightPosition.LEFT:
gradient = np.tile(np.linspace(0, 1, width)**multiplier, (height, 1))
elif lightPosition == LightPosition.RIGHT:
gradient = np.tile(np.linspace(1, 0, width)**multiplier, (height, 1))
elif lightPosition == LightPosition.TOP:
gradient = np.tile(np.linspace(0, 1, height)**multiplier, (width, 1)).T
elif lightPosition == LightPosition.BOTTOM:
gradient = np.tile(np.linspace(1, 0, height)**multiplier, (width, 1)).T
elif lightPosition == LightPosition.BOTTOM_RIGHT:
x = np.linspace(1, 0, width)**multiplier
y = np.linspace(1, 0, height)**multiplier
x_mesh, y_mesh = np.meshgrid(x, y)
gradient = np.sqrt(x_mesh**2 + y_mesh**2) / np.sqrt(2.0)
elif lightPosition == LightPosition.BOTTOM_LEFT:
x = np.linspace(0, 1, width)**multiplier
y = np.linspace(1, 0, height)**multiplier
x_mesh, y_mesh = np.meshgrid(x, y)
gradient = np.sqrt(x_mesh**2 + y_mesh**2) / np.sqrt(2.0)
elif lightPosition == LightPosition.TOP_RIGHT:
x = np.linspace(1, 0, width)**multiplier
y = np.linspace(0, 1, height)**multiplier
x_mesh, y_mesh = np.meshgrid(x, y)
gradient = np.sqrt(x_mesh**2 + y_mesh**2) / np.sqrt(2.0)
elif lightPosition == LightPosition.TOP_LEFT:
x = np.linspace(0, 1, width)**multiplier
y = np.linspace(0, 1, height)**multiplier
x_mesh, y_mesh = np.meshgrid(x, y)
gradient = np.sqrt(x_mesh**2 + y_mesh**2) / np.sqrt(2.0)
else:
raise ValueError(f"Unsupported position. Choose from {', '.join([member.value for member in LightPosition])}.")
# Interpolate between start_color and end_color based on the gradient
gradient_img = np.zeros((height, width, 3), dtype=np.float32)
for i in range(3):
gradient_img[..., i] = start_color[i] + (end_color[i] - start_color[i]) * gradient
gradient_img = np.clip(gradient_img, 0, 255).astype(np.uint8)
return gradient_img
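# Example (a minimal sketch): a 256x256 white-to-black gradient lit from the top-left corner.
# img = generate_gradient_image(256, 256, (255, 255, 255), (0, 0, 0), 1.0, LightPosition.TOP_LEFT)
# assert img.shape == (256, 256, 3) and img.dtype == np.uint8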
def numpy_to_tensor(array: np.ndarray) -> torch.Tensor:
"""Convert a numpy array to a tensor and scale its values from 0-255 to 0-1."""
array = array.astype(np.float32) / 255.0
return torch.from_numpy(array)[None,]
class ImageGradient:
@classmethod
def INPUT_TYPES(s):
return {
"required": {
"light_position": ([member.value for member in LightPosition],),
"multiplier": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.001}),
"start_color": ("STRING", {"default": "#FFFFFF"}),
"end_color": ("STRING", {"default": "#000000"}),
"width": ("INT", { "default": 512, "min": 0, "max": MAX_RESOLUTION, "step": 8, }),
"height": ("INT", { "default": 512, "min": 0, "max": MAX_RESOLUTION, "step": 8, })
}
}
RETURN_TYPES = ("IMAGE",)
FUNCTION = "execute"
CATEGORY = "LightGradient"
DESCRIPTION = """Simple Light Gradient"""
def execute(self, light_position, multiplier, start_color, end_color, width, height):
lightPosition = LightPosition(light_position)
start_color_rgb = toRgb(start_color)
end_color_rgb = toRgb(end_color)
image = generate_gradient_image(width, height, start_color_rgb, end_color_rgb, multiplier, lightPosition)
image = numpy_to_tensor(image)
mask = generate_gradient_image(width, height, multiplier=multiplier, lightPosition=lightPosition)
mask = numpy_to_tensor(mask)
mask = mask[:, :, :, 0]
return (image,mask,)
class MaskGradient:
@classmethod
def INPUT_TYPES(s):
return {
"required": {
"light_position": ([member.value for member in LightPosition],),
"multiplier": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.001}),
"width": ("INT", { "default": 512, "min": 0, "max": MAX_RESOLUTION, "step": 8, }),
"height": ("INT", { "default": 512, "min": 0, "max": MAX_RESOLUTION, "step": 8, })
}
}
RETURN_TYPES = ("MASK",)
FUNCTION = "execute"
CATEGORY = "LightGradient"
DESCRIPTION = """Mask Gradient"""
def execute(self, light_position, multiplier, width, height):
lightPosition = LightPosition(light_position)
mask = generate_gradient_image(width, height, multiplier=multiplier, lightPosition=lightPosition)
mask = numpy_to_tensor(mask)
mask = mask[:, :, :, 0]
return (mask,)
NODE_CLASS_MAPPINGS = {
"ImageGradient": ImageGradient,
"MaskGradient": MaskGradient
}
NODE_DISPLAY_NAME_MAPPINGS = {
"ImageGradient": "Image Gradient",
"MaskGradient": "Mask Gradient"
}
| 8,770 | Python | .py | 199 | 38.015075 | 179 | 0.649589 | huagetai/ComfyUI_LightGradient | 8 | 3 | 2 | GPL-3.0 | 9/5/2024, 10:48:43 PM (Europe/Amsterdam) |
2,290,026 | setup.py | facebookresearch_chai/setup.py | # Copyright (c) Meta Platforms, Inc. and affiliates.
# This software may be used and distributed according to the terms of the GNU General Public License version 3.
from setuptools import setup, find_packages
setup(name="llama", version="0.0.0", packages=find_packages())
| 274 | Python | .py | 4 | 67 | 111 | 0.783582 | facebookresearch/chai | 8 | 0 | 0 | GPL-3.0 | 9/5/2024, 10:48:43 PM (Europe/Amsterdam) |
2,290,027 | example_chai.py | facebookresearch_chai/example_chai.py | # Copyright (c) Meta Platforms, Inc. and affiliates.
# This software may be used and distributed according to the terms of the GNU General Public License version 3.
from typing import Tuple
import os
import sys
import torch
import fire
import time
import json
from pathlib import Path
from fairscale.nn.model_parallel.initialize import initialize_model_parallel
sys.path.insert(0, 'llama')
from llama import ModelArgs, Transformer, Tokenizer, LLaMA
def setup_model_parallel() -> Tuple[int, int]:
local_rank = int(os.environ.get("LOCAL_RANK", -1))
world_size = int(os.environ.get("WORLD_SIZE", -1))
torch.distributed.init_process_group("nccl")
initialize_model_parallel(world_size)
torch.cuda.set_device(local_rank)
# seed must be the same in all processes
torch.manual_seed(1)
return local_rank, world_size
def load(
ckpt_dir: str,
tokenizer_path: str,
local_rank: int,
world_size: int,
max_seq_len: int,
max_batch_size: int,
) -> LLaMA:
start_time = time.time()
checkpoints = sorted(Path(ckpt_dir).glob("*.pth"))
assert world_size == len(
checkpoints
), f"Loading a checkpoint for MP={len(checkpoints)} but world size is {world_size}"
ckpt_path = checkpoints[local_rank]
print("Loading")
checkpoint = torch.load(ckpt_path, map_location="cpu")
with open(Path(ckpt_dir) / "params.json", "r") as f:
params = json.loads(f.read())
model_args: ModelArgs = ModelArgs(
max_seq_len=max_seq_len, max_batch_size=max_batch_size, **params
)
tokenizer = Tokenizer(model_path=tokenizer_path)
model_args.vocab_size = tokenizer.n_words
torch.set_default_tensor_type(torch.cuda.HalfTensor)
model = Transformer(model_args)
torch.set_default_tensor_type(torch.FloatTensor)
model.load_state_dict(checkpoint, strict=False)
generator = LLaMA(model, tokenizer)
print(f"Loaded in {time.time() - start_time:.2f} seconds")
return generator
def main(
ckpt_dir: str,
tokenizer_path: str,
temperature: float = 0.8,
top_p: float = 0.95,
max_seq_len: int = 512,
max_batch_size: int = 32,
):
local_rank, world_size = setup_model_parallel()
if local_rank > 0:
sys.stdout = open(os.devnull, "w")
generator = load(
ckpt_dir, tokenizer_path, local_rank, world_size, max_seq_len, max_batch_size
)
prompts = [
# For these prompts, the expected answer is the natural continuation of the prompt
"I believe the meaning of life is",
"Simply put, the theory of relativity states that ",
"Building a website can be done in 10 simple steps:\n",
# Few shot prompts: https://huggingface.co/blog/few-shot-learning-gpt-neo-and-inference-api
"""Tweet: "I hate it when my phone battery dies."
Sentiment: Negative
###
Tweet: "My day has been �"
Sentiment: Positive
###
Tweet: "This is the link to the article"
Sentiment: Neutral
###
Tweet: "This new music video was incredibile"
Sentiment:""",
"""Translate English to French:
sea otter => loutre de mer
peppermint => menthe poivrée
plush girafe => girafe peluche
cheese =>""",
]
results = generator.generate(
prompts, max_gen_len=256, temperature=temperature, top_p=top_p
)
for result in results:
print(result)
print("\n==================================\n")
if __name__ == "__main__":
fire.Fire(main)
| 3,457 | Python | .py | 98 | 30.816327 | 111 | 0.684053 | facebookresearch/chai | 8 | 0 | 0 | GPL-3.0 | 9/5/2024, 10:48:43 PM (Europe/Amsterdam) |
2,290,028 | perform_offline_clustering.py | facebookresearch_chai/perform_offline_clustering.py | import os
import json
import argparse
import numpy as np
from collections import defaultdict
from collections import Counter
from scipy.spatial.distance import cdist
from sklearn.cluster import AgglomerativeClustering
from sklearn.cluster import KMeans
import matplotlib.pyplot as plt
def parse_args(parser):
parser.add_argument("--path", type=str, required=True)
parser.add_argument("--num_clusters", type=int, default=4)
parser.add_argument("--num_layers", type=int, default=32)
parser.add_argument("--num_heads", type=int, default=32)
args = parser.parse_args()
return args
# Takes attention scores as input, applies offline clustering, and
# generates PDF plots for per-layer elbow analysis
# to determine the number of clusters
def main_kmeans_avg(args):
"""
K-Means
"""
    # Take 500 samples, concatenate them, and create clusters of shape (32, x);
    # perform k-means clustering and produce an error (elbow) plot
for layer_num in range(args.num_layers):
print("Working on Layer {}".format(layer_num))
file_name = os.path.join(args.path, f"layer_{layer_num}_scores.jsonl")
concatenate_flag = False
ln_counter = 0
errors = [0] * 32
with open(file_name, "r") as fin:
for line in fin:
try:
scores = json.loads(line)
except:
print("Line")
print(line)
import ipdb
ipdb.set_trace()
ln_counter += 1
scores_numpy = np.array(scores)
first_head = scores_numpy[0, :]
num_nonzero = first_head[first_head != 0].shape[0]
reshaped_data = scores_numpy[:, :num_nonzero]
# if not concatenate_flag:
# # running first time
# concatenate_array = reshaped_data
# concatenate_flag = True
# else:
# concatenate_array = np.concatenate(
# (concatenate_array, reshaped_data), axis=1
# )
# perform clustering
if ln_counter == 500:
break
for num_clusters in range(32):
kmeans = KMeans(
n_clusters=num_clusters + 1, random_state=2, n_init="auto"
)
kmeans.fit(reshaped_data)
print("Num Clusters {}".format(num_clusters))
print("Error {}".format(kmeans.inertia_))
errors[num_clusters] += kmeans.inertia_
# import ipdb
# ipdb.set_trace()
plt.plot(range(1, 33), errors)
out_figure = f"./{layer_num}_plot.pdf"
plt.savefig(out_figure, format="pdf")
plt.close()
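# Input sketch: each line of layer_{i}_scores.jsonl is assumed to be a JSON 2-D array of
# per-head attention scores, e.g. [[0.9, 0.05, ..., 0.0], ..., [0.7, 0.1, ..., 0.0]] for 32 heads,
# with trailing zeros padding every head to a common length.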
def main(args):
"""
Parse arguments for trainers
"""
for layer_num in range(args.num_layers):
print("Working on Layer {}".format(layer_num))
file_name = os.path.join(args.path, f"layer_{layer_num}_scores.jsonl")
group_bins = defaultdict(int)
head_belong_count = defaultdict(list)
with open(file_name, "r") as fin:
for line in fin:
try:
scores = json.loads(line)
except:
print("Line")
print(line)
import ipdb
ipdb.set_trace()
scores_numpy = np.array(scores)
                # NOTE: Avoiding the following approach; sometimes the non-zeros are not consistent
first_head = scores_numpy[0, :]
num_nonzero = first_head[first_head != 0].shape[0]
reshaped_cluster = scores_numpy[:, :num_nonzero]
# nonzeros_scores_numpy = scores_numpy[scores_numpy != 0]
# reshape_factor = int(nonzeros_scores_numpy.shape[0] / 32)
dist_arr = cdist(reshaped_cluster, reshaped_cluster, metric="cosine")
cluster = AgglomerativeClustering(
n_clusters=args.num_clusters,
metric="precomputed",
linkage="average",
compute_distances=True,
)
cluster = cluster.fit(dist_arr)
cluster_assignment = cluster.labels_
for cluster_idx in range(args.num_clusters):
grouped_heads = np.where(cluster_assignment == cluster_idx)[
0
].tolist()
grouped_heads_str = json.dumps(grouped_heads)
group_bins[grouped_heads_str] += 1
for headnum in grouped_heads:
head_belong_count[headnum].append(grouped_heads_str)
# extract non zero
counted_heads = []
out_cluster = []
# import ipdb
# ipdb.set_trace()
for head_id in range(args.num_heads):
if head_id in counted_heads:
continue
head_membership = head_belong_count[head_id]
most_common = Counter(head_membership).most_common(1)[0][0]
most_common_list = json.loads(most_common)
counted_heads.extend(most_common_list)
out_cluster.append(most_common_list)
out_file = os.path.join(args.path, f"layer_{layer_num}_out_cluster.json")
with open(out_file, "w") as fout:
json.dump(out_cluster, fout)
# out_file_name_group_bins = os.path.join(
# args.path, f"layer_{layer_num}_group_bins_.jsonl"
# )
if __name__ == "__main__":
args = parse_args(
argparse.ArgumentParser(description="Parse Arguments for static clustering")
)
main_kmeans_avg(args)
| 5,838 | Python | .py | 137 | 30.021898 | 93 | 0.548427 | facebookresearch/chai | 8 | 0 | 0 | GPL-3.0 | 9/5/2024, 10:48:43 PM (Europe/Amsterdam) |
2,290,029 | tetramesh.py | mxhulab_cryotrans/src/cryotrans/tetramesh.py | __all__ = [
'tetramesh',
'MeshLoss'
]
import numpy as np
import torch
from numpy.typing import NDArray
def tetramesh(xs : NDArray[np.float32], n : NDArray[np.float32]) -> NDArray[np.float32]:
vertices = np.array([
[0, 0, 0],
[0, 1, 0],
[1, 0, 0],
[1, 1, 0],
[0, 0, 1],
[0, 1, 1],
[1, 0, 1],
[1, 1, 1]
]) / n
tetras = vertices[np.array([
[0, 4, 6, 7],
[0, 4, 5, 7],
[0, 1, 5, 7],
[0, 1, 3, 7],
[0, 2, 3, 7],
[0, 2, 6, 7]
], dtype = np.int32)]
return np.reshape(xs[:, None, None] + tetras, (-1, 4, 3))
class MeshLoss(torch.nn.Module):
def __init__(self, mesh : torch.Tensor):
super().__init__()
self.n = len(mesh)
self.mesh = mesh
self.inv = torch.linalg.inv(mesh[..., 1:4, :] - mesh[..., 0:1, :])
def forward(self, odenet : torch.nn.Module) -> torch.Tensor:
mesh_out = odenet(self.mesh)
afftrans = self.inv @ (mesh_out[..., 1:4, :] - mesh_out[..., 0:1, :])
svdvals = torch.linalg.svdvals(afftrans)
return torch.sum(torch.square(svdvals ** 2 - 1)) / self.n
| 1,172 | Python | .py | 38 | 24.131579 | 88 | 0.492471 | mxhulab/cryotrans | 8 | 1 | 0 | GPL-3.0 | 9/5/2024, 10:48:43 PM (Europe/Amsterdam) |
2,290,030 | ode_net.py | mxhulab_cryotrans/src/cryotrans/ode_net.py | __all__ = [
'ODENet'
]
import torch
from .velocity_net import VelocityNet
class ODENet(torch.nn.Module):
def __init__(self, n_frames : int = 10, **vnet_kwargs):
super().__init__()
self.n_frames = n_frames
self.v = VelocityNet(**vnet_kwargs)
def forward(self, x : torch.Tensor) -> torch.Tensor:
for _ in range(self.n_frames):
dx = self.v(x) / self.n_frames
x = x + dx
return x
def trajectory(self, x : torch.Tensor):
yield x
for _ in range(self.n_frames):
dx = self.v(x) / self.n_frames
x = x + dx
yield x
| 642 | Python | .py | 21 | 23.047619 | 59 | 0.543831 | mxhulab/cryotrans | 8 | 1 | 0 | GPL-3.0 | 9/5/2024, 10:48:43 PM (Europe/Amsterdam) |
2,290,031 | velocity_net.py | mxhulab_cryotrans/src/cryotrans/velocity_net.py | __all__ = [
'VelocityNet'
]
import torch
class VelocityNet(torch.nn.Module):
def __init__(self, depth : int = 3, width : int = 100, dim : int = 3):
super().__init__()
self.depth = depth
for i in range(depth):
layer = torch.nn.Linear(dim if i == 0 else width, dim if i == depth - 1 else width)
torch.nn.init.normal_(layer.weight, std = 0.01)
torch.nn.init.zeros_(layer.bias)
self.__setattr__(f'n{i}', layer)
def forward(self, x : torch.Tensor) -> torch.Tensor:
for i in range(self.depth):
x = self.__getattr__(f'n{i}')(x)
if i < self.depth - 1:
x = torch.nn.functional.leaky_relu(x)
return x
| 735 | Python | .py | 19 | 30.105263 | 95 | 0.54073 | mxhulab/cryotrans | 8 | 1 | 0 | GPL-3.0 | 9/5/2024, 10:48:43 PM (Europe/Amsterdam) |
2,290,032 | particles.py | mxhulab_cryotrans/src/cryotrans/particles.py | __all__ = [
'particles'
]
import numpy as np
from typing import Tuple
from numpy.typing import NDArray
def particles(vol : NDArray[np.float32], threshold : float = 1e-6) -> Tuple[NDArray[np.float32]]:
n = vol.shape[0]
vol = np.where(vol < threshold, 0, vol)
xs = np.nonzero(vol)
rho = vol[xs]
xs = np.vstack(xs)
xs = (xs.transpose().copy() + 0.5) / n
return rho, xs
| 400 | Python | .py | 14 | 25.142857 | 97 | 0.640625 | mxhulab/cryotrans | 8 | 1 | 0 | GPL-3.0 | 9/5/2024, 10:48:43 PM (Europe/Amsterdam) |
2,290,033 | gridding.py | mxhulab_cryotrans/src/cryotrans/gridding.py | __all__ = [
'Gridding'
]
import cupy as cp
import torch
from typing import Tuple
from .utility import torch_to_cupy, cupy_to_torch, BLOCKDIM, BLOCKSIZE
ker_gridding3d = cp.RawKernel(r'''
extern "C" __global__ void gridding3d(
const float* rho,
const float* xs,
int m,
float* vol,
int n1,
int n2,
int n3,
float pixel_size)
{
int tid = blockDim.x * blockIdx.x + threadIdx.x;
if (tid < m) {
float x_ = (xs[tid * 3 ] - pixel_size / 2) / pixel_size;
float y_ = (xs[tid * 3 + 1] - pixel_size / 2) / pixel_size;
float z_ = (xs[tid * 3 + 2] - pixel_size / 2) / pixel_size;
float wt = rho[tid];
int x = floorf(x_);
int y = floorf(y_);
int z = floorf(z_);
float dx = x_ - x;
float dy = y_ - y;
float dz = z_ - z;
if (0 <= x && x < n1 && 0 <= y && y < n2 && 0 <= z && z < n3) atomicAdd(vol + ( x * n2 + y ) * n3 + z , wt * (1 - dx) * (1 - dy) * (1 - dz));
if (0 <= x + 1 && x + 1 < n1 && 0 <= y && y < n2 && 0 <= z && z < n3) atomicAdd(vol + ((x + 1) * n2 + y ) * n3 + z , wt * dx * (1 - dy) * (1 - dz));
if (0 <= x && x < n1 && 0 <= y + 1 && y + 1 < n2 && 0 <= z && z < n3) atomicAdd(vol + ( x * n2 + y + 1) * n3 + z , wt * (1 - dx) * dy * (1 - dz));
if (0 <= x + 1 && x + 1 < n1 && 0 <= y + 1 && y + 1 < n2 && 0 <= z && z < n3) atomicAdd(vol + ((x + 1) * n2 + y + 1) * n3 + z , wt * dx * dy * (1 - dz));
if (0 <= x && x < n1 && 0 <= y && y < n2 && 0 <= z + 1 && z + 1 < n3) atomicAdd(vol + ( x * n2 + y ) * n3 + z + 1, wt * (1 - dx) * (1 - dy) * dz );
if (0 <= x + 1 && x + 1 < n1 && 0 <= y && y < n2 && 0 <= z + 1 && z + 1 < n3) atomicAdd(vol + ((x + 1) * n2 + y ) * n3 + z + 1, wt * dx * (1 - dy) * dz );
if (0 <= x && x < n1 && 0 <= y + 1 && y + 1 < n2 && 0 <= z + 1 && z + 1 < n3) atomicAdd(vol + ( x * n2 + y + 1) * n3 + z + 1, wt * (1 - dx) * dy * dz );
if (0 <= x + 1 && x + 1 < n1 && 0 <= y + 1 && y + 1 < n2 && 0 <= z + 1 && z + 1 < n3) atomicAdd(vol + ((x + 1) * n2 + y + 1) * n3 + z + 1, wt * dx * dy * dz );
}
}
''', 'gridding3d')
ker_grad_gridding3d = cp.RawKernel(r'''
extern "C" __global__ void grad_gridding3d(
const float* rho,
const float* xs,
int m,
const float* dvol,
int n1,
int n2,
int n3,
float pixel_size,
float* vs)
{
int tid = blockDim.x * blockIdx.x + threadIdx.x;
if (tid < m) {
float x_ = (xs[tid * 3 ] - pixel_size / 2) / pixel_size;
float y_ = (xs[tid * 3 + 1] - pixel_size / 2) / pixel_size;
float z_ = (xs[tid * 3 + 2] - pixel_size / 2) / pixel_size;
float wt = rho[tid];
int x = floorf(x_);
int y = floorf(y_);
int z = floorf(z_);
float dx = x_ - x;
float dy = y_ - y;
float dz = z_ - z;
float vx = 0;
float vy = 0;
float vz = 0;
if (0 <= x && x < n1 && 0 <= y && y < n2 && 0 <= z && z < n3) {
float grad = dvol[( x * n2 + y ) * n3 + z ];
vx += wt * - 1 * (1 - dy) * (1 - dz) * grad;
vy += wt * (1 - dx) * - 1 * (1 - dz) * grad;
vz += wt * (1 - dx) * (1 - dy) * - 1 * grad;
}
if (0 <= x + 1 && x + 1 < n1 && 0 <= y && y < n2 && 0 <= z && z < n3) {
float grad = dvol[((x + 1) * n2 + y ) * n3 + z ];
vx += wt * 1 * (1 - dy) * (1 - dz) * grad;
vy += wt * dx * - 1 * (1 - dz) * grad;
vz += wt * dx * (1 - dy) * - 1 * grad;
}
if (0 <= x && x < n1 && 0 <= y + 1 && y + 1 < n2 && 0 <= z && z < n3) {
float grad = dvol[( x * n2 + y + 1) * n3 + z ];
vx += wt * - 1 * dy * (1 - dz) * grad;
vy += wt * (1 - dx) * 1 * (1 - dz) * grad;
vz += wt * (1 - dx) * dy * - 1 * grad;
}
if (0 <= x + 1 && x + 1 < n1 && 0 <= y + 1 && y + 1 < n2 && 0 <= z && z < n3) {
float grad = dvol[((x + 1) * n2 + y + 1) * n3 + z ];
vx += wt * 1 * dy * (1 - dz) * grad;
vy += wt * dx * 1 * (1 - dz) * grad;
vz += wt * dx * dy * - 1 * grad;
}
if (0 <= x && x < n1 && 0 <= y && y < n2 && 0 <= z + 1 && z + 1 < n3) {
float grad = dvol[( x * n2 + y ) * n3 + z + 1];
vx += wt * - 1 * (1 - dy) * dz * grad;
vy += wt * (1 - dx) * - 1 * dz * grad;
vz += wt * (1 - dx) * (1 - dy) * 1 * grad;
}
if (0 <= x + 1 && x + 1 < n1 && 0 <= y && y < n2 && 0 <= z + 1 && z + 1 < n3) {
float grad = dvol[((x + 1) * n2 + y ) * n3 + z + 1];
vx += wt * 1 * (1 - dy) * dz * grad;
vy += wt * dx * - 1 * dz * grad;
vz += wt * dx * (1 - dy) * 1 * grad;
}
if (0 <= x && x < n1 && 0 <= y + 1 && y + 1 < n2 && 0 <= z + 1 && z + 1 < n3) {
float grad = dvol[( x * n2 + y + 1) * n3 + z + 1];
vx += wt * - 1 * dy * dz * grad;
vy += wt * (1 - dx) * 1 * dz * grad;
vz += wt * (1 - dx) * dy * 1 * grad;
}
if (0 <= x + 1 && x + 1 < n1 && 0 <= y + 1 && y + 1 < n2 && 0 <= z + 1 && z + 1 < n3) {
float grad = dvol[((x + 1) * n2 + y + 1) * n3 + z + 1];
vx += wt * 1 * dy * dz * grad;
vy += wt * dx * 1 * dz * grad;
vz += wt * dx * dy * 1 * grad;
}
vs[tid * 3 ] = vx;
vs[tid * 3 + 1] = vy;
vs[tid * 3 + 2] = vz;
}
}
''', 'grad_gridding3d')
class GriddingFunction(torch.autograd.Function):
@staticmethod
def forward(ctx, rho : torch.Tensor, xs : torch.Tensor, shape : Tuple[int]) -> torch.Tensor:
ctx.rho = rho
ctx.xs = xs
rho = torch_to_cupy(rho)
xs = torch_to_cupy(xs)
m = len(rho)
pixel_size = 1 / max(shape)
assert xs.shape == (m, 3) and rho.shape == (m, ) and len(shape) == 3
assert rho.device == xs.device
with rho.device:
vol = cp.zeros(shape, dtype = cp.float32)
ker_gridding3d((BLOCKDIM(m), ), (BLOCKSIZE, ), (rho, xs, m, vol, shape[0], shape[1], shape[2], cp.float32(pixel_size)))
return cupy_to_torch(vol)
@staticmethod
def backward(ctx, grad_output : torch.Tensor) -> Tuple[torch.Tensor]:
rho = torch_to_cupy(ctx.rho)
xs = torch_to_cupy(ctx.xs)
grad_output = torch_to_cupy(grad_output)
shape = grad_output.shape
m = len(rho)
pixel_size = 1 / max(shape)
assert xs.shape == (m, 3) and rho.shape == (m, ) and len(shape) == 3
assert rho.device == xs.device == grad_output.device
with rho.device:
vs = cp.empty_like(xs, dtype = cp.float32)
ker_grad_gridding3d((BLOCKDIM(m), ), (BLOCKSIZE, ), (rho, xs, m, grad_output, shape[0], shape[1], shape[2], cp.float32(pixel_size), vs))
return None, cupy_to_torch(vs), None
class Gridding(torch.nn.Module):
def __init__(self, shape : Tuple[int]):
super().__init__()
self.shape = shape
def forward(self, rho : torch.Tensor, xs : torch.Tensor) -> torch.Tensor:
return GriddingFunction.apply(rho, xs, self.shape)
| 7,804 | Python | .py | 157 | 41.33758 | 184 | 0.379618 | mxhulab/cryotrans | 8 | 1 | 0 | GPL-3.0 | 9/5/2024, 10:48:43 PM (Europe/Amsterdam) |
2,290,034 | predict.py | mxhulab_cryotrans/src/cryotrans/predict.py | import argparse
import mrcfile
import sys
import torch
import numpy as np
from numpy.typing import NDArray
from pathlib import Path
from .ode_net import ODENet
from .particles import particles
from .gridding import Gridding
def parse_args():
parser = argparse.ArgumentParser(description = 'CryoTRANS: Predicting high-resolution maps of rare conformations using neural ODEs in cryo-EM.')
basic_group = parser.add_argument_group('Basic arguments')
basic_group.add_argument('-i', '--initial-map', type = str, help = 'Path of initial map.')
basic_group.add_argument('-t', '--initial-threshold', type = float, default = 0., help = 'Threshold for the initial map.')
basic_group.add_argument('-d', '--directory', type = str, help = 'Working directory.')
basic_group.add_argument('-p', '--prefix', type = str, default = 'frame', help = 'Prefix for output movie.')
basic_group.add_argument('-g', '--gpu', type = int, default = 0, help = 'Which gpu to use, 0 by default.')
basic_group.add_argument('-w', '--weight', type = str, help = 'Path of network weight file as initial model.')
advanced_group = parser.add_argument_group('Advanced arguments')
advanced_group.add_argument('--depth', type = int, default = 3, help = 'Depth of velocity net (MLP).')
advanced_group.add_argument('--width', type = int, default = 100, help = 'Width of velocity net (MLP).')
if len(sys.argv) == 1:
parser.print_help()
exit()
return parser.parse_args()
def predict(
a0 : NDArray[np.float32],
voxel_size : float,
odenet : ODENet,
directory : Path,
prefix : str
):
print('------------------------Predicting-------------------------')
rho, xs = particles(a0)
rho = torch.tensor(rho, dtype = torch.float32, device = 'cuda')
xs = torch.tensor(xs , dtype = torch.float32, device = 'cuda')
grd = Gridding(a0.shape)
with torch.no_grad():
for i, xi in enumerate(odenet.trajectory(xs)):
print(f'Processing frame {i:02d} ...', end = ' ')
vol = grd(rho, xi).to('cpu').numpy()
with mrcfile.new(directory.joinpath(f'{prefix}_{i:02d}.mrc'), data = vol, overwrite = True) as mrc:
mrc.voxel_size = voxel_size
print('done.')
def main():
args = parse_args()
with mrcfile.open(args.initial_map, permissive = True) as mrc:
a0 : NDArray[np.float32] = mrc.data
voxel_size = mrc.voxel_size
print(f'Voxelsize read from input map: {voxel_size} Angstrom.')
n = a0.shape[0]
assert a0.dtype == np.float32
assert a0.shape == (n, n, n)
a0 = np.where(a0 < args.initial_threshold, 0, a0)
directory = Path(args.directory).absolute()
directory.mkdir(parents = True, exist_ok = True)
if not directory.is_dir():
raise RuntimeError(f'Invalid working directory: {args.directory}.')
torch.cuda.set_device(torch.device('cuda', args.gpu))
odenet = ODENet(depth = args.depth, width = args.width)
print(f'Try loading input weight file {args.weight} ...', end = ' ')
odenet.load_state_dict(torch.load(args.weight))
print('succeeded!')
odenet.to('cuda')
predict(a0, voxel_size, odenet, directory, args.prefix)
| 3,368 | Python | .py | 66 | 45.606061 | 148 | 0.6231 | mxhulab/cryotrans | 8 | 1 | 0 | GPL-3.0 | 9/5/2024, 10:48:43 PM (Europe/Amsterdam) |
2,290,035 | train.py | mxhulab_cryotrans/src/cryotrans/train.py | import argparse
import mrcfile
import sys
import torch
import numpy as np
from pathlib import Path
from skimage.transform import downscale_local_mean
from time import time
from typing import Optional
from numpy.typing import NDArray
from .ode_net import ODENet
from .particles import particles
from .gridding import Gridding
from .wasserstein2_loss import Wasserstein2Loss
from .tetramesh import tetramesh, MeshLoss
def parse_args():
parser = argparse.ArgumentParser(description = 'CryoTRANS: Predicting high-resolution maps of rare conformations using neural ODEs in cryo-EM.')
basic_group = parser.add_argument_group('Basic arguments')
basic_group.add_argument('-i0', '--initial-map', type = str, help = 'Path of initial map.')
basic_group.add_argument('-t0', '--initial-threshold', type = float, help = 'Threshold for the initial map.')
basic_group.add_argument('-i1', '--target-map', type = str, help = 'Path of target map.')
basic_group.add_argument('-t1', '--target-threshold', type = float, help = 'Threshold for the target map.')
basic_group.add_argument('-d', '--directory', type = str, help = 'Working directory.')
basic_group.add_argument('-g', '--gpu', type = int, default = 0, help = 'Which gpu to use, 0 by default.')
basic_group.add_argument('-w', '--weight', type = str, required = False, help = 'Path of network weight file as initial model.')
basic_group.add_argument('-b', '--binning', type = int, default = 1, help = 'Binning level, 1 by default.')
basic_group.add_argument('-n', '--n_steps', type = int, help = 'Number of training steps.')
basic_group.add_argument('-p', '--period', type = int, help = 'For periodic report.')
advanced_group = parser.add_argument_group('Advanced arguments')
advanced_group.add_argument('--depth', type = int, default = 3, help = 'Depth of velocity net (MLP).')
advanced_group.add_argument('--width', type = int, default = 100, help = 'Width of velocity net (MLP).')
advanced_group.add_argument('--w2_eps', type = float, default = 1e-4, help = 'Entropic regularisation parameter for W2 loss.')
advanced_group.add_argument('--w2_iter', type = int, default = 5, help = 'Number of Sinkhorn iteration for computing W2 loss.')
advanced_group.add_argument('--l2', action = 'store_true', help = 'Use L2 refine instead of W2.')
advanced_group.add_argument('--lr', type = float, default = 1e-3, help = 'Learning rate. Suggest 1e-4 when doing L2 refine.')
advanced_group.add_argument('--mu_mesh', type = float, help = 'Regularization parameter for tetrahedral mesh loss.')
if len(sys.argv) == 1:
parser.print_help()
exit()
return parser.parse_args()
def train(
a0 : NDArray[np.float32],
a1 : NDArray[np.float32],
odenet : ODENet,
n_steps : int,
period : int,
directory : Path,
w2_eps : float = 1e-4,
w2_iter : int = 5,
l2 : bool = False,
lr : float = 1e-3,
mu_mesh : Optional[float] = None,
):
rho, xs = particles(a0)
if mu_mesh is not None:
mesh = tetramesh(xs, a0.shape[0])
mesh = torch.tensor(mesh, dtype = torch.float32, device = 'cuda')
mesh = MeshLoss(mesh)
rho = torch.tensor(rho, dtype = torch.float32, device = 'cuda')
xs = torch.tensor(xs, dtype = torch.float32, device = 'cuda')
a1 = torch.tensor(a1, dtype = torch.float32, device = 'cuda')
grd = Gridding(a0.shape)
if not l2:
w2 = Wasserstein2Loss(eps = w2_eps, maxiter = w2_iter)
optimizer = torch.optim.Adam(odenet.parameters(), lr = lr)
losses = []
loss_file = directory.joinpath('loss.log')
loss_file.write_text('')
time0 = time()
print('|-----------------------------------------|')
print('| iter | loss | time |')
print('|----------|------------------|-----------|')
i_period = 0
for i_step in range(n_steps + 1):
optimizer.zero_grad()
b1 = grd(rho, odenet(xs))
loss = torch.sum(torch.square(b1 - a1)) if l2 else w2(b1, a1)
if mu_mesh is not None:
loss += mu_mesh * mesh(odenet)
losses.append(loss.item())
if i_step % period == 0:
print(f'|{i_step:^10d}|{loss.item():^18.9e}|{time() - time0:^11.3e}|')
with loss_file.open('+a') as loss_out:
np.savetxt(loss_out, losses)
losses.clear()
torch.save(odenet.state_dict(), directory.joinpath(f'net_{i_period}.pt'))
i_period += 1
if i_step != n_steps:
loss.backward()
optimizer.step()
print('|-----------------------------------------|')
print('Save network ...', end = ' ')
torch.save(odenet.state_dict(), directory.joinpath('net.pt'))
print('done.')
def main():
args = parse_args()
a0 : NDArray[np.float32] = mrcfile.read(args.initial_map)
a1 : NDArray[np.float32] = mrcfile.read(args.target_map)
n = a0.shape[0]
assert a0.dtype == a1.dtype == np.float32
assert a0.shape == a1.shape == (n, n, n)
t0 = args.initial_threshold
t1 = args.target_threshold
a0 = np.where(a0 < t0, 0, a0)
a1 = np.where(a1 < t1, 0, a1)
a0 /= a0.max()
a1 *= a0.sum() / a1.sum()
directory = Path(args.directory).absolute()
directory.mkdir(parents = True, exist_ok = True)
if not directory.is_dir():
raise RuntimeError(f'Invalid working directory: {args.directory}.')
torch.cuda.set_device(torch.device('cuda', args.gpu))
odenet = ODENet(depth = args.depth, width = args.width)
if args.weight is not None:
try:
print(f'Try loading input weight file {args.weight} ...', end = ' ')
odenet.load_state_dict(torch.load(args.weight))
print('succeeded!')
except:
print('failed!')
print('Random initialization applied.')
odenet.to('cuda')
binning = args.binning
if binning <= 0:
        raise ValueError('Binning level should be a positive integer.')
    if n % binning != 0:
        raise ValueError(f'Binning level {binning} does not divide the box size.')
if binning > 1:
print('Bin maps ...', end = ' ')
a0 = downscale_local_mean(a0, (binning, binning, binning))
a1 = downscale_local_mean(a1, (binning, binning, binning))
print('done.')
train(
a0, a1, odenet, args.n_steps, args.period, directory,
w2_eps = args.w2_eps,
w2_iter = args.w2_iter,
l2 = args.l2,
lr = args.lr,
mu_mesh = args.mu_mesh
)
| 6,840 | Python | .py | 140 | 42.607143 | 148 | 0.591977 | mxhulab/cryotrans | 8 | 1 | 0 | GPL-3.0 | 9/5/2024, 10:48:43 PM (Europe/Amsterdam) |
2,290,036 | wasserstein2_loss.py | mxhulab_cryotrans/src/cryotrans/wasserstein2_loss.py | __all__ = [
'Wasserstein2Loss'
]
import cupy as cp
import torch
from typing import Dict, Tuple
from .utility import torch_to_cupy, cupy_to_torch, BLOCKDIM, BLOCKSIZE
ker_blur = cp.RawKernel(r'''
extern "C" __global__ void blur(
const float* src,
int size,
int n,
float B,
float* dst)
{
int tid = blockDim.x * blockIdx.x + threadIdx.x;
if (tid < size) {
int offset = tid % n;
int base = tid - offset;
float w = src[tid];
for (int i = 0; i < n; ++i)
w = fmaxf(w, src[base + i] - (i - offset) * (i - offset) / B);
float v = 0;
for (int i = 0; i < n; ++i)
v += expf(src[base + i] - (i - offset) * (i - offset) / B - w);
dst[tid] = logf(v) + w;
}
}
''', 'blur')
def _blur(b, x):
tmp = cp.empty_like(x, dtype = cp.float32)
for i in range(x.ndim):
# move i-th axis to last.
for j in range(i, x.ndim - 1): x = x.swapaxes(j, j + 1).copy()
tmp = tmp.reshape(x.shape)
# blur along last axis.
ker_blur((BLOCKDIM(x.size), ), (BLOCKSIZE, ), (x, x.size, x.shape[-1], cp.float32(b), tmp))
x, tmp = tmp, x
# move last axis back to i.
for j in range(x.ndim - 1, i, -1): x = x.swapaxes(j - 1, j).copy()
return x
class Wasserstein2LossFunction(torch.autograd.Function):
@staticmethod
def forward(ctx, a0 : torch.Tensor, a1 : torch.Tensor, paras : Dict) -> torch.Tensor:
assert torch.is_tensor(a0) and torch.is_tensor(a1)
assert a0.shape == a1.shape
assert a0.is_cuda and a1.is_cuda and a0.device == a1.device
eps = paras.get('eps', 1e-3)
delta = paras.get('delta', 1e-3)
maxiter = paras.get('maxiter', 1000)
batch = paras.get('batch', 100)
pixel_size = 1 / max(a0.shape)
a0 = torch_to_cupy(a0)
a1 = torch_to_cupy(a1)
with a0.device:
b = eps / (pixel_size * pixel_size)
la0 = cp.log(cp.fmax(a0, 1e-20))
la1 = cp.log(cp.fmax(a1, 1e-20))
phi0 = cp.zeros_like(a0, dtype = cp.float32)
phi1 = cp.zeros_like(a1, dtype = cp.float32)
norm_1 = lambda x : cp.sum(cp.abs(x)).item()
na0 = norm_1(a0)
for rd in range(maxiter):
phi0 = la0 - _blur(b, phi1)
phi1 = la1 - _blur(b, phi0)
if rd == 0 or (rd + 1) % batch == 0:
err = norm_1(a0 - cp.exp(phi0 + _blur(b, phi1))) / na0
# print(f' Round {rd + 1}, |i0 - P1|_1 / |i0|_1 = {err:.6f}.')
if err < delta: break
loss = -eps * cp.sum(cp.exp(phi0 + _blur(b, phi1)))
phi0 *= eps
phi1 *= eps
loss += cp.vdot(phi0, a0) + cp.vdot(phi1, a1)
ctx.phi0 = cupy_to_torch(phi0)
ctx.phi1 = cupy_to_torch(phi1)
return cupy_to_torch(loss)
@staticmethod
def backward(ctx, grad_output : torch.Tensor) -> Tuple:
return grad_output * ctx.phi0, grad_output * ctx.phi1, None
class Wasserstein2Loss(torch.nn.Module):
def __init__(self, **paras):
super().__init__()
self.paras = paras
def forward(self, a : torch.Tensor, b : torch.Tensor) -> torch.Tensor:
return Wasserstein2LossFunction.apply(a, b, self.paras)
| 3,331 | Python | .py | 85 | 30.8 | 99 | 0.535913 | mxhulab/cryotrans | 8 | 1 | 0 | GPL-3.0 | 9/5/2024, 10:48:43 PM (Europe/Amsterdam) |
2,290,037 | utility.py | mxhulab_cryotrans/src/cryotrans/utility.py | import cupy as cp
import torch
from torch.utils.dlpack import to_dlpack, from_dlpack
BLOCKSIZE = 1024
BLOCKDIM = lambda x : (x - 1) // BLOCKSIZE + 1
def cupy_to_torch(x : cp.ndarray) -> torch.Tensor:
return from_dlpack(x.toDlpack())
def torch_to_cupy(x : torch.Tensor) -> cp.ndarray:
return cp.fromDlpack(to_dlpack(x))
| 330 | Python | .py | 9 | 34.444444 | 53 | 0.726415 | mxhulab/cryotrans | 8 | 1 | 0 | GPL-3.0 | 9/5/2024, 10:48:43 PM (Europe/Amsterdam) |
2,290,038 | logging.py | nyaoouo_NyLib2/nylib/logging.py | import io
import logging
import os.path
import pathlib
import sys
import threading
import time
import typing
import zipfile
Verbose1 = 9
Verbose2 = 8
Verbose3 = 7
class AsciiFormat:
end = 0
bold = 1
italic = 2
underline = 4
strikethrough = 9
grey = 90
red = 91
green = 92
yellow = 93
blue = 94
purple = 95
cyan = 96
light_grey = 97
grey_f = 100
red_f = 101
green_f = 102
yellow_f = 103
blue_f = 104
purple_f = 105
cyan_f = 106
light_grey_f = 107
_control_map = [
f'\x1b[{AsciiFormat.light_grey}m', # logging.NOTSET
f'\x1b[{AsciiFormat.grey}m', # logging.DEBUG
f'\x1b[{AsciiFormat.green}m', # logging.INFO
f'\x1b[{AsciiFormat.yellow}m', # logging.WARNING
f'\x1b[{AsciiFormat.red}m', # logging.ERROR
f'\x1b[{AsciiFormat.red}m\x1b[{AsciiFormat.bold}m', # logging.CRITICAL
]
_end_line = f'\x1b[{AsciiFormat.end}m'
def install(
level=logging.DEBUG,
format='[%(asctime)s]\t[%(levelname)s]\t[%(name)s]\t%(message)s',
use_color=True,
multiline_process=True,
std_out=True,
file_name=None,
file_size=1024 * 1024 * 10,
archive_zip=None,
):
logging.addLevelName(Verbose1, 'Verbose1')
logging.addLevelName(Verbose2, 'Verbose2')
logging.addLevelName(Verbose3, 'Verbose3')
if use_color:
import platform
if platform.system() == 'Windows':
# enable windows console color
import ctypes
kernel32 = ctypes.WinDLL('kernel32')
hStdOut = kernel32.GetStdHandle(-11)
mode = ctypes.c_ulong()
kernel32.GetConsoleMode(hStdOut, ctypes.byref(mode))
mode.value |= 4
kernel32.SetConsoleMode(hStdOut, mode)
old_stream_handler_format = logging.StreamHandler.format
logging.StreamHandler.format = lambda obj, record: _control_map[min(record.levelno // 10, 5)] + old_stream_handler_format(obj, record) + _end_line
if multiline_process:
old_formatter_format = logging.Formatter.format
def new_formatter_format(self, record: logging.LogRecord):
exc_text = None
if o_exc_info := record.exc_info:
exc_text = self.formatException(o_exc_info)
record.exc_info = None
s_text = None
if o_stack_info := record.stack_info:
s_text = self.formatStack(o_stack_info)
record.stack_info = None
o_msg = record.msg
res = ''
i = 0
to_loop = str(o_msg).split('\n')
if exc_text: to_loop += exc_text.split('\n')
if s_text: to_loop += s_text.split('\n')
for i, line in enumerate(to_loop):
record.msg = line
if i:
res += '\n' + old_formatter_format(self, record)
else:
res += old_formatter_format(self, record)
if i:
record.msg = '----------------------------------------'
s = old_formatter_format(self, record)
res = s + '\n' + res + '\n' + s
record.msg = o_msg
record.exc_info = o_exc_info
record.stack_info = o_stack_info
return res
logging.Formatter.format = new_formatter_format
handlers = []
if std_out:
std_handler = logging.StreamHandler(sys.stdout)
std_handler.setLevel(level)
handlers.append(std_handler)
if file_name:
file_handler = logging.StreamHandler(_Std2FileWriter(file_name, max_size=file_size, archive_zip=archive_zip))
file_handler.setLevel(level)
handlers.append(file_handler)
logging.basicConfig(level=level, format=format, handlers=handlers)
STDERR = 1
STDOUT = 2
STDALL = 3
class _Std2FileWriter(io.IOBase):
logger = logging.getLogger('Std2FileWriter')
def __init__(self, file_name, max_size=1024 * 1024 * 10, archive_zip=None, another_output: typing.Iterable[io.IOBase] = None):
self.file_name = pathlib.Path(file_name) if isinstance(file_name, str) else file_name
self.file_name = self.file_name.absolute()
self.max_size = max_size
self.archive_zip = pathlib.Path(archive_zip) if isinstance(archive_zip, str) else archive_zip
self.another_output = another_output
self.archive_fmt = f'{self.file_name.stem}_%Y_%m_%d_%H_%M_%S{self.file_name.suffix}'
self.file = None
self._open()
self.lock = threading.Lock()
def __del__(self):
self._close()
def _open(self):
self._close()
self.file_name.parent.mkdir(parents=True, exist_ok=True)
self.file = open(self.file_name, 'a', encoding='utf-8', buffering=1)
def _close(self):
if self.file:
self.file.close()
self.file = None
return True
return False
def archive(self):
if not self.archive_zip: return
self._close()
if os.path.exists(self.file_name):
self.archive_zip.parent.mkdir(parents=True, exist_ok=True)
with zipfile.ZipFile(self.archive_zip, 'a') as zip_file:
zip_file.write(self.file_name, time.strftime(self.archive_fmt), compress_type=zipfile.ZIP_DEFLATED)
os.remove(self.file_name)
def write(self, s):
# with self.lock:
if not self.file: self._open()
if self.file.tell() > self.max_size:
self.archive()
self._open()
if self.another_output:
for another in self.another_output:
another.write(s)
self.file.write(s)
def std2file(file_name, max_size=1024 * 1024 * 10, archive_zip=None, select_type=0):
writer = _Std2FileWriter(file_name, max_size, archive_zip)
if select_type & STDERR:
sys.stderr = writer
if select_type & STDOUT:
sys.stdout = writer
| 5,976 | Python | .py | 162 | 28.419753 | 154 | 0.597856 | nyaoouo/NyLib2 | 8 | 0 | 0 | GPL-3.0 | 9/5/2024, 10:48:51 PM (Europe/Amsterdam) |
2,290,039 | pattern.py | nyaoouo_NyLib2/nylib/pattern.py | # xx xx xx xx 空格分割
# [xx:yy]          a single byte from xx to yy
# [xx|yy|zz]       a single byte equal to xx, yy or zz
# ? ? ? ?          treated as a variable (not stored by default)
# ^ ^ ^ ^          treated as a byte string (stored by default)
# * * * *          treated as a jump offset (stored by default)
# ?{n} / *{n}      match n times
# ?{n:m} / *{n:m}  match n to m times
# (xx xx xx xx)    group that is not stored
# <xx xx xx xx>    group that is stored
# <* * * *: yy yy yy yy>  second-level match on the group's data
# <* * * *: yy yy yy yy <* * * *:zz zz zz zz>>  multi-level match on group data, only valid for jumps
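# Example (a sketch): the pattern "48 8b 05 <* * * *>" matches `mov rax, [rip+disp32]` and stores
# the rip-relative target; compile_pattern(...).finditer(data) then yields (offset, [target]) pairs.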
import io
import re
import typing
if typing.TYPE_CHECKING:
from .process import Process
fl_is_ref = 1 << 0
fl_is_byes = 1 << 1
fl_store = 1 << 2
hex_chars = set(b'0123456789abcdefABCDEF')
dec_chars = set(b'0123456789')
special_chars_map = {i for i in b'()[]{}?*+-|^$\\.&~# \t\n\r\v\f'}
def take_dec_number(pattern: str, i: int):
assert i < len(pattern) and ord(pattern[i]) in dec_chars
j = i + 1
while j < len(pattern) and ord(pattern[j]) in dec_chars:
j += 1
return int(pattern[i:j]), j
def take_cnt(pattern: str, i: int, regex_pattern: bytearray):
if i < len(pattern) and pattern[i] == '{':
regex_pattern.append(123) # {
n1, i = take_dec_number(pattern, i + 1)
regex_pattern.extend(str(n1).encode())
if pattern[i] == ':':
n2, i = take_dec_number(pattern, i + 1)
assert n1 <= n2
regex_pattern.append(44) # ,
regex_pattern.extend(str(n2).encode())
assert pattern[i] == '}'
regex_pattern.append(125) # }
i += 1
return i
def take_byte(pattern: str, i: int, regex_pattern: bytearray):
assert i + 2 <= len(pattern)
next_byte = int(pattern[i:i + 2], 16)
if next_byte in special_chars_map:
regex_pattern.append(92) # \
regex_pattern.append(next_byte)
return i + 2
def _take_unk(pattern: str, i: int):
start_chr = pattern[i]
assert start_chr in ('?', '*', '^')
if i + 1 < len(pattern) and pattern[i + 1] == start_chr:
i += 1
return start_chr, i + 1
def take_unk(pattern: str, i: int, regex_pattern: bytearray):
start_unk, i = _take_unk(pattern, i)
regex_pattern.append(46)
i = take_cnt(pattern, i, regex_pattern)
while i < len(pattern):
match pattern[i]:
case ' ':
i += 1
case c if c == start_unk:
start_unk, i = _take_unk(pattern, i)
regex_pattern.append(46)
i = take_cnt(pattern, i, regex_pattern)
case _:
break
return start_unk, i
def _compile_pattern(pattern: str, i=0, ret_at=None):
_i = i
regex_pattern = bytearray()
sub_matches = []
group_flags = []
while i < len(pattern):
match pattern[i]:
case ' ':
i += 1
case '[':
regex_pattern.append(91) # [
i += 1
i = take_byte(pattern, i, regex_pattern)
while True:
match pattern[i]:
case ' ':
i += 1
case ']':
regex_pattern.append(93) # ]
i += 1
break
case '|':
i = take_byte(pattern, i + 1, regex_pattern)
case ':':
regex_pattern.append(45) # -
i = take_byte(pattern, i + 1, regex_pattern)
case c:
raise ValueError(f'Invalid character {c} in pattern {pattern!r} at {i}')
case '(':
base_flag = 0 # not fl_store
regex_pattern.append(40) # (
unk_type, i = take_unk(pattern, i + 1, regex_pattern)
if unk_type == '*':
base_flag |= fl_is_ref
elif unk_type == '^':
base_flag |= fl_is_byes
sub_pattern = None
while True:
match pattern[i]:
case ' ':
i += 1
case ')':
regex_pattern.append(41) # )
i += 1
break
case ':':
sub_pattern, i = _compile_pattern(pattern, i + 1, ret_at=')')
assert pattern[i] == ')', f'Expected ) get {pattern[i]} at {i} in pattern {pattern!r}'
regex_pattern.append(41)
i += 1
break
case c:
raise ValueError(f'Invalid character {c} in pattern {pattern!r} at {i}')
group_flags.append(base_flag)
sub_matches.append(sub_pattern)
case '<':
base_flag = fl_store
regex_pattern.append(40)
unk_type, i = take_unk(pattern, i + 1, regex_pattern)
if unk_type == '*':
base_flag |= fl_is_ref
elif unk_type == '^':
base_flag |= fl_is_byes
sub_pattern = None
while True:
match pattern[i]:
case ' ':
i += 1
case '>':
regex_pattern.append(41)
i += 1
break
case ':':
sub_pattern, i = _compile_pattern(pattern, i + 1, ret_at='>')
assert pattern[i] == '>', f'Expected > get {pattern[i]} at {i} in pattern {pattern!r}'
regex_pattern.append(41)
i += 1
break
case c:
raise ValueError(f'Invalid character {c} in pattern {pattern!r} at {i}')
group_flags.append(base_flag)
sub_matches.append(sub_pattern)
case '?' | '*' | '^' as c:
regex_pattern.append(40)
unk_type, i = take_unk(pattern, i, regex_pattern)
regex_pattern.append(41)
if c == '?':
group_flags.append(0)
elif c == '*':
group_flags.append(fl_is_ref | fl_store)
elif c == '^':
group_flags.append(fl_is_byes | fl_store)
else:
raise ValueError(f'Invalid character {c} in pattern {pattern!r} at {i}')
sub_matches.append(None)
case c if ord(c) in hex_chars:
i = take_byte(pattern, i, regex_pattern)
i = take_cnt(pattern, i, regex_pattern)
case c if c == ret_at:
break
case c:
fmt_pattern = pattern[:i] + '_' + pattern[i] + '_' + pattern[i + 1:]
raise ValueError(f'Invalid character {c} in pattern {fmt_pattern!r} at {i} (ret_at={ret_at})')
try:
regex = re.compile(bytes(regex_pattern), re.DOTALL)
except re.error as e:
raise ValueError(f'{e}: ({pattern!r}, {_i}, {ret_at!r}) -> {bytes(regex_pattern)}')
return Pattern(regex, sub_matches, group_flags, pattern), i
def compile_pattern(pattern: str):
return _compile_pattern(pattern)[0]
class Pattern:
def __init__(self, regex: re.Pattern, sub_matches: 'typing.List[None | Pattern]', group_flags, pattern: str):
self.regex = regex
self.sub_matches = sub_matches
self.group_flags = group_flags
self.pattern = pattern
self.res_is_ref = []
for i, (sub, flag) in enumerate(zip(sub_matches, group_flags)):
if flag & fl_store:
self.res_is_ref.append(flag & fl_is_ref)
if sub is not None:
self.res_is_ref.extend(sub.res_is_ref)
def finditer(self, _data: bytes | bytearray | memoryview, ref_base=0):
data = _data if isinstance(_data, memoryview) else memoryview(_data)
for match in self.regex.finditer(data):
res = []
if self._parse_match(data, match, res, ref_base):
yield match.start(0), res
def _parse_match(self, data: memoryview, match: re.Match, res: list, ref_base=0):
for i, (sub_match, flag) in enumerate(zip(self.sub_matches, self.group_flags)):
if flag & fl_is_byes:
res.append(match.group(i + 1))
else:
val = int.from_bytes(match.group(i + 1), 'little', signed=True)
if flag & fl_is_ref:
val += match.end(i + 1)
if flag & fl_store:
res.append(val)
if sub_match is not None:
start = val if flag & fl_is_ref else val - ref_base
if start < 0 or start >= len(data):
return False
if not sub_match._match(data, start, res, ref_base):
return False
return True
def _match(self, _data: memoryview, start_at: int, res: list, ref_base=0):
if not (match := self.regex.match(_data, start_at)): return False
return self._parse_match(_data, match, res, ref_base)
def fmt(self, ind: str | int = ' ', _ind=0):
if isinstance(ind, int): ind = ' ' * ind
s = io.StringIO()
s.write(ind * _ind)
s.write(fmt_bytes_regex_pattern(self.regex.pattern))
s.write('\n')
s.write(ind * _ind)
s.write('res is ref:')
for flag in self.res_is_ref:
s.write(' ref' if flag else ' val')
s.write('\n')
for i, (sub, flag) in enumerate(zip(self.sub_matches, self.group_flags)):
s.write(ind * _ind)
s.write(f'{i}:{"ref" if flag & fl_is_ref else "val"}{" store" if flag & fl_store else ""}\n')
if sub is not None:
s.write(sub.fmt(ind, _ind + 1))
s.write('\n')
return s.getvalue().rstrip()
def fmt_bytes_regex_pattern(pat: bytes):
s = ''
is_escape = False
is_in_bracket = 0
for b in pat:
if is_escape:
is_escape = False
s += f'\\x{b:02x}'
elif b == 92: # \
is_escape = True
elif b in special_chars_map:
if b == 123: # {
is_in_bracket += 1
elif b == 125: # }
is_in_bracket -= 1
s += chr(b)
elif is_in_bracket:
s += chr(b)
else:
s += f'\\x{b:02x}'
return s
class IPatternScanner:
def search(self, pattern: str | Pattern) -> typing.Generator[tuple[int, list[int]], None, None]:
raise NotImplementedError
def search_unique(self, pattern: str | Pattern) -> tuple[int, list[int]]:
s = self.search(pattern)
try:
res = next(s)
except StopIteration:
raise KeyError('pattern not found')
try:
next(s)
except StopIteration:
return res
raise KeyError('pattern is not unique, at least 2 is found')
def find_addresses(self, pattern: str | Pattern):
for address, _ in self.search(pattern):
yield address
def find_vals(self, pattern: str | Pattern):
for address, args in self.search(pattern):
yield args
def find_address(self, pattern: str | Pattern):
return self.search_unique(pattern)[0]
def find_val(self, pattern: str | Pattern):
return self.search_unique(pattern)[1]
class StaticPatternSearcher(IPatternScanner):
def __init__(self, pe, base_address=0):
from .utils.pip import required
required('pefile')
import pefile
self.pe = pe if isinstance(pe, pefile.PE) else pefile.PE(pe, fast_load=True)
self.text_sections = [sect for sect in self.pe.sections if sect.Name.rstrip(b'\0') == b'.text']
self.section_datas = [sect.get_data() for sect in self.text_sections]
self.section_virtual_addresses = [sect.VirtualAddress for sect in self.text_sections]
self.base_address = base_address
def get_original_text(self, address, size):
i = 0
for i, a in enumerate(self.section_virtual_addresses):
if a > address: break
i -= 1
section_address = address - self.base_address - self.section_virtual_addresses[i]
return self.section_datas[i][section_address:section_address + size]
def search(self, pattern: str | Pattern) -> typing.Generator[tuple[int, list[int]], None, None]:
if isinstance(pattern, str): pattern = compile_pattern(pattern)
for i in range(len(self.text_sections)):
sect_off = self.base_address + self.section_virtual_addresses[i]
for offset, args in pattern.finditer(self.section_datas[i]):
yield sect_off + offset, [a + sect_off if r else a for a, r in zip(args, pattern.res_is_ref)]
class MemoryPatternScanner(IPatternScanner):
def __init__(self, process: 'Process', region_address, region_size):
self.process = process
self.region_address = region_address
self.region_size = region_size
def get_original_text(self, address, size):
start = address - self.region_address
assert size > 0 and start >= 0
return self.get_raw()[start:start + size]
def get_raw(self):
return self.process.read(self.region_address, self.region_size)
def search(self, pattern: str | Pattern) -> typing.Generator[tuple[int, list[int]], None, None]:
if isinstance(pattern, str): pattern = compile_pattern(pattern)
for offset, args in pattern.finditer(self.get_raw()):
yield self.region_address + offset, [a + self.region_address if r else a for a, r in zip(args, pattern.res_is_ref)]
class CachedRawMemoryPatternScanner(MemoryPatternScanner):
def __init__(self, *a):
super().__init__(*a)
self._cached_raw = None
def get_raw(self):
if self._cached_raw is None:
self._cached_raw = super().get_raw()
return self._cached_raw
| 14,375 | Python | .py | 328 | 30.777439 | 127 | 0.5127 | nyaoouo/NyLib2 | 8 | 0 | 0 | GPL-3.0 | 9/5/2024, 10:48:51 PM (Europe/Amsterdam) |
2,290,040 | ntdll.py | nyaoouo_NyLib2/nylib/winapi/ntdll.py | from .utils import *
_dll = ctypes.WinDLL('ntdll.dll')
NtQueryInformationProcess = def_win_api(_dll.NtQueryInformationProcess, ctypes.c_long, (ctypes.c_void_p, ctypes.c_ulong, ctypes.c_void_p, ctypes.c_ulong, ctypes.c_void_p), error_nt=True)
NtOpenFile = def_win_api(_dll.NtOpenFile, ctypes.c_long, (ctypes.c_void_p, ctypes.c_ulong, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_ulong, ctypes.c_ulong), error_nt=True)
NtSetValueKey = def_win_api(_dll.NtSetValueKey, ctypes.c_long, (ctypes.c_void_p, ctypes.c_void_p, ctypes.c_ulong, ctypes.c_ulong, ctypes.c_void_p, ctypes.c_ulong), error_nt=True)
RtlOpenCurrentUser = def_win_api(_dll.RtlOpenCurrentUser, ctypes.c_long, (ctypes.c_ulong, ctypes.c_void_p), error_nt=True)
| 718 | Python | .py | 6 | 118.5 | 186 | 0.751055 | nyaoouo/NyLib2 | 8 | 0 | 0 | GPL-3.0 | 9/5/2024, 10:48:51 PM (Europe/Amsterdam) |
2,290,041 | user32.py | nyaoouo_NyLib2/nylib/winapi/user32.py | from .utils import *
_dll = ctypes.WinDLL('user32.dll')
OpenClipboard = def_win_api(_dll.OpenClipboard, ctypes.c_void_p, (ctypes.c_void_p,), error_zero=True)
EmptyClipboard = def_win_api(_dll.EmptyClipboard, ctypes.c_bool, (), error_zero=True)
SetClipboardData = def_win_api(_dll.SetClipboardData, ctypes.c_void_p, (ctypes.c_uint, ctypes.c_void_p), error_zero=True)
CloseClipboard = def_win_api(_dll.CloseClipboard, ctypes.c_bool, (), error_zero=True)
| 453 | Python | .py | 6 | 74.333333 | 121 | 0.748879 | nyaoouo/NyLib2 | 8 | 0 | 0 | GPL-3.0 | 9/5/2024, 10:48:51 PM (Europe/Amsterdam) |
2,290,042 | msvcrt.py | nyaoouo_NyLib2/nylib/winapi/msvcrt.py | from .utils import *
_dll = ctypes.CDLL('msvcrt.dll')
memcpy = def_win_api(_dll.memcpy, ctypes.c_void_p, (ctypes.c_void_p, ctypes.c_void_p, ctypes.c_size_t), error_zero=True)
| 176 | Python | .py | 3 | 57.333333 | 120 | 0.709302 | nyaoouo/NyLib2 | 8 | 0 | 0 | GPL-3.0 | 9/5/2024, 10:48:51 PM (Europe/Amsterdam) |
2,290,043 | shell32.py | nyaoouo_NyLib2/nylib/winapi/shell32.py | from .utils import *
_dll = ctypes.WinDLL('Shell32.dll')
SHGetPropertyStoreFromParsingName = def_win_api(_dll.SHGetPropertyStoreFromParsingName, ctypes.c_long, (ctypes.c_wchar_p, ctypes.c_void_p, ctypes.c_ulong, ctypes.c_void_p, ctypes.c_void_p), error_nonzero=True)
| 269 | Python | .py | 3 | 88 | 209 | 0.780303 | nyaoouo/NyLib2 | 8 | 0 | 0 | GPL-3.0 | 9/5/2024, 10:48:51 PM (Europe/Amsterdam) |
2,290,044 | advapi32.py | nyaoouo_NyLib2/nylib/winapi/advapi32.py | from .utils import *
_dll = ctypes.WinDLL('advapi32.dll')
OpenProcessToken = def_win_api(_dll.OpenProcessToken, ctypes.c_long, (ctypes.c_void_p, ctypes.c_ulong, ctypes.c_void_p), error_zero=True)
LookupPrivilegeName = def_win_api(_dll.LookupPrivilegeNameW, ctypes.c_long, (ctypes.c_wchar_p, ctypes.c_void_p, ctypes.c_wchar_p, ctypes.c_void_p), error_zero=True)
LookupPrivilegeValue = def_win_api(_dll.LookupPrivilegeValueW, ctypes.c_long, (ctypes.c_wchar_p, ctypes.c_wchar_p, ctypes.c_void_p), error_zero=True)
AdjustTokenPrivileges = def_win_api(_dll.AdjustTokenPrivileges, ctypes.c_long, (ctypes.c_void_p, ctypes.c_long, ctypes.c_void_p, ctypes.c_ulong, ctypes.c_void_p, ctypes.c_void_p), error_zero=True)
OpenSCManagerW = def_win_api(_dll.OpenSCManagerW, ctypes.c_void_p, (ctypes.c_wchar_p, ctypes.c_wchar_p, ctypes.c_uint32), error_zero=True)
CloseServiceHandle = def_win_api(_dll.CloseServiceHandle, ctypes.c_void_p, (ctypes.c_void_p,), error_zero=True)
CreateServiceW = def_win_api(_dll.CreateServiceW, ctypes.c_void_p, (
ctypes.c_void_p, # hSCManager
ctypes.c_wchar_p, # lpServiceName
ctypes.c_wchar_p, # lpDisplayName
ctypes.c_ulong, # dwDesiredAccess
ctypes.c_ulong, # dwServiceType
ctypes.c_ulong, # dwStartType
ctypes.c_ulong, # dwErrorControl
ctypes.c_wchar_p, # lpBinaryPathName
ctypes.c_wchar_p, # lpLoadOrderGroup
ctypes.c_void_p, # lpdwTagId
ctypes.c_wchar_p, # lpDependencies
ctypes.c_wchar_p, # lpServiceStartName
ctypes.c_wchar_p, # lpPassword
), error_zero=True)
CreateServiceA = def_win_api(_dll.CreateServiceA, ctypes.c_void_p, (
ctypes.c_void_p, # hSCManager
ctypes.c_char_p, # lpServiceName
ctypes.c_char_p, # lpDisplayName
ctypes.c_ulong, # dwDesiredAccess
ctypes.c_ulong, # dwServiceType
ctypes.c_ulong, # dwStartType
ctypes.c_ulong, # dwErrorControl
ctypes.c_char_p, # lpBinaryPathName
ctypes.c_char_p, # lpLoadOrderGroup
ctypes.c_void_p, # lpdwTagId
ctypes.c_char_p, # lpDependencies
ctypes.c_char_p, # lpServiceStartName
ctypes.c_char_p, # lpPassword
), error_zero=True)
ChangeServiceConfigW = def_win_api(_dll.ChangeServiceConfigW, ctypes.c_bool, (
ctypes.c_void_p, # hService
ctypes.c_ulong, # dwServiceType
ctypes.c_ulong, # dwStartType
ctypes.c_ulong, # dwErrorControl
ctypes.c_wchar_p, # lpBinaryPathName
ctypes.c_wchar_p, # lpLoadOrderGroup
ctypes.c_void_p, # lpdwTagId
ctypes.c_wchar_p, # lpDependencies
ctypes.c_wchar_p, # lpServiceStartName
ctypes.c_wchar_p, # lpPassword
ctypes.c_wchar_p, # lpDisplayName
), error_zero=True)
OpenServiceW = def_win_api(_dll.OpenServiceW, ctypes.c_void_p, (ctypes.c_void_p, ctypes.c_wchar_p, ctypes.c_ulong), error_zero=True)
ControlService = def_win_api(_dll.ControlService, ctypes.c_bool, (ctypes.c_void_p, ctypes.c_ulong, ctypes.c_void_p), error_zero=True)
StartService = def_win_api(_dll.StartServiceW, ctypes.c_bool, (ctypes.c_void_p, ctypes.c_ulong, ctypes.c_void_p), error_zero=True)
DeleteService = def_win_api(_dll.DeleteService, ctypes.c_bool, (ctypes.c_void_p,), error_zero=True)
| 3,148 | Python | .py | 55 | 53.527273 | 196 | 0.724774 | nyaoouo/NyLib2 | 8 | 0 | 0 | GPL-3.0 | 9/5/2024, 10:48:51 PM (Europe/Amsterdam) |
2,290,045 | kernel32.py | nyaoouo_NyLib2/nylib/winapi/kernel32.py | from .utils import *
_dll = ctypes.WinDLL('kernel32.dll')
GetCurrentProcess = def_win_api(_dll.GetCurrentProcess, ctypes.c_void_p, (), error_zero=True)
CreateToolhelp32Snapshot = def_win_api(_dll.CreateToolhelp32Snapshot, HANDLE, (ctypes.c_ulong, ctypes.c_ulong), error_val=INVALID_HANDLE_VALUE)
Process32First = def_win_api(_dll.Process32First, ctypes.c_bool, (ctypes.c_void_p, ctypes.c_void_p), error_zero=True)
Process32Next = def_win_api(_dll.Process32Next, ctypes.c_bool, (ctypes.c_void_p, ctypes.c_void_p), error_zero=True)
CloseHandle = def_win_api(_dll.CloseHandle, ctypes.c_bool, (ctypes.c_void_p,), error_zero=True)
OpenProcess = def_win_api(_dll.OpenProcess, ctypes.c_void_p, (ctypes.c_ulong, ctypes.c_bool, ctypes.c_ulong), error_zero=True)
CreateRemoteThread = def_win_api(_dll.CreateRemoteThread, ctypes.c_void_p, (ctypes.c_void_p, ctypes.c_void_p, ctypes.c_size_t, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_ulong, ctypes.c_void_p), error_zero=True)
ReadProcessMemory = def_win_api(_dll.ReadProcessMemory, ctypes.c_bool, (ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_size_t, ctypes.c_void_p), error_zero=True)
WriteProcessMemory = def_win_api(_dll.WriteProcessMemory, ctypes.c_bool, (ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_size_t, ctypes.c_void_p), error_zero=True)
VirtualAllocEx = def_win_api(_dll.VirtualAllocEx, ctypes.c_void_p, (ctypes.c_void_p, ctypes.c_void_p, ctypes.c_size_t, ctypes.c_ulong, ctypes.c_ulong), error_zero=True)
VirtualFreeEx = def_win_api(_dll.VirtualFreeEx, ctypes.c_bool, (ctypes.c_void_p, ctypes.c_void_p, ctypes.c_size_t, ctypes.c_ulong), error_zero=True)
VirtualProtectEx = def_win_api(_dll.VirtualProtectEx, ctypes.c_bool, (ctypes.c_void_p, ctypes.c_void_p, ctypes.c_size_t, ctypes.c_ulong, ctypes.c_void_p), error_zero=True)
VirtualQueryEx = def_win_api(_dll.VirtualQueryEx, ctypes.c_size_t, (ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_size_t), error_zero=True)
GetProcAddress = def_win_api(_dll.GetProcAddress, ctypes.c_void_p, (ctypes.c_void_p, ctypes.c_char_p), error_zero=True)
GetModuleHandle = def_win_api(_dll.GetModuleHandleW, ctypes.c_size_t, (ctypes.c_wchar_p,), error_zero=True)
GetCurrentProcessId = def_win_api(_dll.GetCurrentProcessId, ctypes.c_ulong, (), error_zero=True)
WaitForSingleObject = def_win_api(_dll.WaitForSingleObject, ctypes.c_ulong, (ctypes.c_void_p, ctypes.c_ulong), error_val=0xFFFFFFFF)  # returns a DWORD; failure is WAIT_FAILED, not INVALID_HANDLE_VALUE
CreateEvent = def_win_api(_dll.CreateEventW, HANDLE, (ctypes.c_void_p, ctypes.c_bool, ctypes.c_bool, ctypes.c_wchar_p), error_zero=True)  # CreateEventW returns NULL on failure
WriteFile = def_win_api(_dll.WriteFile, ctypes.c_bool, (ctypes.c_void_p, ctypes.c_char_p, ctypes.c_ulong, ctypes.c_void_p, ctypes.c_void_p), error_zero=True)
ReadFile = def_win_api(_dll.ReadFile, ctypes.c_bool, (ctypes.c_void_p, ctypes.c_void_p, ctypes.c_ulong, ctypes.c_void_p, ctypes.c_void_p), error_zero=True)
GetOverlappedResult = def_win_api(_dll.GetOverlappedResult, ctypes.c_bool, (ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_bool), error_zero=True)
CreateNamedPipe = def_win_api(_dll.CreateNamedPipeW, ctypes.c_void_p, (ctypes.c_wchar_p, ctypes.c_ulong, ctypes.c_ulong, ctypes.c_ulong, ctypes.c_ulong, ctypes.c_ulong, ctypes.c_ulong, ctypes.c_void_p), error_val=INVALID_HANDLE_VALUE)
ConnectNamedPipe = def_win_api(_dll.ConnectNamedPipe, ctypes.c_bool, (ctypes.c_void_p, ctypes.c_void_p), error_zero=True)
CreateFile = def_win_api(_dll.CreateFileW, ctypes.c_void_p, (ctypes.c_wchar_p, ctypes.c_ulong, ctypes.c_ulong, ctypes.c_void_p, ctypes.c_ulong, ctypes.c_ulong, ctypes.c_void_p), error_val=INVALID_HANDLE_VALUE)
SetNamedPipeHandleState = def_win_api(_dll.SetNamedPipeHandleState, ctypes.c_bool, (ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p), error_zero=True)
DeviceIoControl = def_win_api(_dll.DeviceIoControl, ctypes.c_bool, (ctypes.c_void_p, ctypes.c_ulong, ctypes.c_void_p, ctypes.c_ulong, ctypes.c_void_p, ctypes.c_ulong, ctypes.c_void_p, ctypes.c_void_p,), error_zero=True)
GetFileAttributesW = def_win_api(_dll.GetFileAttributesW, ctypes.c_ulong, (ctypes.c_wchar_p,), error_val=0xFFFFFFFF)
GetExitCodeProcess = def_win_api(_dll.GetExitCodeProcess, ctypes.c_bool, (ctypes.c_void_p, ctypes.c_void_p), error_zero=True)
GlobalAlloc = def_win_api(_dll.GlobalAlloc, ctypes.c_void_p, (ctypes.c_uint, ctypes.c_size_t), error_zero=True)
GlobalLock = def_win_api(_dll.GlobalLock, ctypes.c_void_p, (ctypes.c_void_p,), error_zero=True)
GlobalUnlock = def_win_api(_dll.GlobalUnlock, ctypes.c_bool, (ctypes.c_void_p,))
TerminateProcess = def_win_api(_dll.TerminateProcess, ctypes.c_bool, (ctypes.c_void_p, ctypes.c_uint), error_zero=True)
ExpandEnvironmentStringsW = def_win_api(_dll.ExpandEnvironmentStringsW, ctypes.c_ulong, (ctypes.c_wchar_p, ctypes.c_wchar_p, ctypes.c_ulong), error_zero=True)
CreateProcessA = def_win_api(_dll.CreateProcessA, ctypes.c_bool, (ctypes.c_char_p, ctypes.c_char_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_bool, ctypes.c_ulong, ctypes.c_void_p, ctypes.c_char_p, ctypes.c_void_p, ctypes.c_void_p), error_zero=True)
ResumeThread = def_win_api(_dll.ResumeThread, ctypes.c_ulong, (ctypes.c_void_p,), error_val=0xFFFFFFFF)  # (DWORD)-1 signals failure
| 5,149 | Python | .py | 37 | 138.135135 | 250 | 0.75269 | nyaoouo/NyLib2 | 8 | 0 | 0 | GPL-3.0 | 9/5/2024, 10:48:51 PM (Europe/Amsterdam) |
2,290,046 | utils.py | nyaoouo_NyLib2/nylib/winapi/utils.py | import typing
from .defs import *
_NULL = type('NULL', (), {})
_SetLastError = ctypes.windll.kernel32.SetLastError
def def_win_api(func, res_type: typing.Any = ctypes.c_void_p, arg_types=(), error_zero=False, error_nonzero=False, error_val: typing.Any = _NULL, error_nt=False):
    """Attach argtypes/restype to a ctypes function and wrap it with the requested error convention:
    raise WinError when the result is zero (error_zero) / non-zero (error_nonzero) / equal to error_val,
    or OSError when an NTSTATUS result is not NT_SUCCESS (error_nt)."""
    func.argtypes = arg_types
    func.restype = res_type
if error_zero:
def wrapper(*args, **kwargs):
_SetLastError(0)
res = func(*args, **kwargs)
if not res:
raise ctypes.WinError()
return res
return wrapper
if error_nonzero:
def wrapper(*args, **kwargs):
_SetLastError(0)
res = func(*args, **kwargs)
if res:
raise ctypes.WinError()
return res
return wrapper
if error_val is not _NULL:
def wrapper(*args, **kwargs):
_SetLastError(0)
res = func(*args, **kwargs)
if res == error_val:
raise ctypes.WinError()
return res
return wrapper
if error_nt:
def wrapper(*args, **kwargs):
res = func(*args, **kwargs)
if not NT_SUCCESS(res):
raise OSError(f'NtStatus: {res:#x}')
return res
return wrapper
return func
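# A minimal sketch of how def_win_api is meant to be used (GetTickCount64 is just an
# illustrative export; any function with a matching signature works the same way):
#
#   _kernel32 = ctypes.WinDLL('kernel32.dll')
#   GetTickCount64 = def_win_api(_kernel32.GetTickCount64, ctypes.c_uint64, (), error_zero=True)
#   uptime_ms = GetTickCount64()  # raises WinError instead of silently returning 0 on failure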
| 1,321 | Python | .py | 39 | 23.974359 | 162 | 0.549961 | nyaoouo/NyLib2 | 8 | 0 | 0 | GPL-3.0 | 9/5/2024, 10:48:51 PM (Europe/Amsterdam) |
2,290,047 | __init__.py | nyaoouo_NyLib2/nylib/winapi/__init__.py | from .kernel32 import *
from .advapi32 import *
from .ntdll import *
from .user32 import *
from .msvcrt import *
from .shell32 import *
| 136 | Python | .py | 6 | 21.666667 | 23 | 0.769231 | nyaoouo/NyLib2 | 8 | 0 | 0 | GPL-3.0 | 9/5/2024, 10:48:51 PM (Europe/Amsterdam) |
2,290,048 | defs.py | nyaoouo_NyLib2/nylib/winapi/defs.py | import ctypes
import locale
import ctypes.wintypes
IS_64BIT = ctypes.sizeof(ctypes.c_void_p) == 8
DEFAULT_ENCODING = locale.getpreferredencoding()
NT_SUCCESS = lambda res: 0 <= res < 0x80000000
NT_STATUS = ctypes.c_ulong
HANDLE = ctypes.c_uint64 if IS_64BIT else ctypes.c_uint32
INVALID_HANDLE_VALUE = (1 << 64) - 1 if IS_64BIT else (1 << 32) - 1
class SERVICE_STATUS(ctypes.Structure):
_fields_ = [
('dwServiceType', ctypes.c_ulong),
('dwCurrentState', ctypes.c_ulong),
('dwControlsAccepted', ctypes.c_ulong),
('dwWin32ExitCode', ctypes.c_ulong),
('dwServiceSpecificExitCode', ctypes.c_ulong),
('dwCheckPoint', ctypes.c_ulong),
('dwWaitHint', ctypes.c_ulong),
]
class OBJECT_ATTRIBUTES(ctypes.Structure):
_fields_ = [
('Length', ctypes.c_ulong),
('RootDirectory', ctypes.c_void_p),
('ObjectName', ctypes.c_void_p),
('Attributes', ctypes.c_ulong),
('SecurityDescriptor', ctypes.c_void_p),
('SecurityQualityOfService', ctypes.c_void_p),
]
class IO_STATUS_BLOCK(ctypes.Structure):
_fields_ = [
('Status', ctypes.c_ulong),
('Information', ctypes.c_void_p),
]
class MEMORY_BASIC_INFORMATION(ctypes.Structure):
_fields_ = [
("BaseAddress", ctypes.c_ulonglong),
("AllocationBase", ctypes.c_ulonglong),
("AllocationProtect", ctypes.c_ulong),
("RegionSize", ctypes.c_ulonglong),
("State", ctypes.c_ulong),
("Protect", ctypes.c_ulong),
("Type", ctypes.c_ulong)
]
class LUID(ctypes.Structure):
_fields_ = [
("LowPart", ctypes.c_ulong),
("HighPart", ctypes.c_long)
]
class LUID_AND_ATTRIBUTES(ctypes.Structure):
_fields_ = [
("Luid", LUID),
("Attributes", ctypes.c_ulong),
]
class TOKEN_PRIVILEGES(ctypes.Structure):
_fields_ = [
("count", ctypes.c_ulong),
("Privileges", LUID_AND_ATTRIBUTES * 1)
]
class LIST_ENTRY(ctypes.Structure):
_fields_ = [
("Flink", ctypes.c_size_t),
("Blink", ctypes.c_size_t),
]
class UNICODE_STRING(ctypes.Structure):
_fields_ = [
('Length', ctypes.c_ushort),
('MaximumLength', ctypes.c_ushort),
('Buffer', ctypes.c_size_t),
]
@classmethod
def from_str(cls, s: str):
length = len(s) * 2
_s = cls(length, length + 2, ctypes.addressof(_buf := ctypes.create_unicode_buffer(s)))
        setattr(_s, '_buf', _buf)  # keep a reference so the unicode buffer is not garbage collected while the struct lives
return _s
@property
def value(self):
return ctypes.cast(self.Buffer, ctypes.c_wchar_p).value
def remote_value(self, process: 'Process'):
return process.read(self.Buffer, self.Length).decode('utf-16-le', 'ignore')
class LDR_DATA_TABLE_ENTRY(LIST_ENTRY):
_fields_ = [
("InLoadOrderLinks", LIST_ENTRY),
("InMemoryOrderLinks", LIST_ENTRY),
("InInitializationOrderLinks", LIST_ENTRY),
("DllBase", ctypes.c_void_p),
("EntryPoint", ctypes.c_void_p),
("SizeOfImage", ctypes.c_uint32),
("FullDllName", UNICODE_STRING),
("BaseDllName", UNICODE_STRING),
("Flags", ctypes.c_uint32),
("LoadCount", ctypes.c_uint16),
("TlsIndex", ctypes.c_uint16),
("HashLinks", LIST_ENTRY),
("SectionPointer", ctypes.c_void_p),
("CheckSum", ctypes.c_uint32),
("TimeDateStamp", ctypes.c_uint32),
("LoadedImports", ctypes.c_void_p),
("EntryPointActivationContext", ctypes.c_void_p),
("PatchInformation", ctypes.c_void_p),
]
class PEB_LDR_DATA(ctypes.Structure):
_fields_ = [
("Length", ctypes.c_uint32),
("Initialized", ctypes.c_uint8),
("SsHandle", ctypes.c_void_p),
("InLoadOrderModuleList", LIST_ENTRY),
("InMemoryOrderModuleList", LIST_ENTRY),
("InInitializationOrderModuleList", LIST_ENTRY),
("EntryInProgress", ctypes.c_void_p),
]
class PROCESS_BASIC_INFORMATION(ctypes.Structure):
_fields_ = [
("ExitStatus", ctypes.c_ulong),
("PebBaseAddress", ctypes.c_void_p),
("AffinityMask", ctypes.c_void_p),
("BasePriority", ctypes.c_void_p),
("UniqueProcessId", ctypes.c_void_p),
("InheritedFromUniqueProcessId", ctypes.c_void_p)
]
class PEB(ctypes.Structure):
_fields_ = [
("InheritedAddressSpace", ctypes.c_uint8),
("ReadImageFileExecOptions", ctypes.c_uint8),
("BeingDebugged", ctypes.c_uint8),
("SpareBool", ctypes.c_uint8),
("Mutant", ctypes.c_void_p),
("ImageBaseAddress", ctypes.c_void_p),
("Ldr", ctypes.c_void_p),
# ...
]
class OVERLAPPED(ctypes.Structure):
_fields_ = [
("Internal", ctypes.c_void_p),
("InternalHigh", ctypes.c_void_p),
("Offset", ctypes.c_ulong),
("OffsetHigh", ctypes.c_ulong),
("hEvent", ctypes.c_void_p)
]
class ProcessEntry32(ctypes.Structure):
_fields_ = [
('dwSize', ctypes.c_ulong),
('cntUsage', ctypes.c_ulong),
('th32ProcessID', ctypes.c_ulong),
('th32DefaultHeapID', ctypes.POINTER(ctypes.c_ulong)),
('th32ModuleID', ctypes.c_ulong),
('cntThreads', ctypes.c_ulong),
('th32ParentProcessID', ctypes.c_ulong),
('pcPriClassBase', ctypes.c_ulong),
('dwFlags', ctypes.c_ulong),
('szExeFile', ctypes.c_char * ctypes.wintypes.MAX_PATH)
]
class STARTUPINFOA(ctypes.Structure):
_fields_ = [
('cb', ctypes.c_ulong),
('lpReserved', ctypes.c_char_p),
('lpDesktop', ctypes.c_char_p),
('lpTitle', ctypes.c_char_p),
('dwX', ctypes.c_ulong),
('dwY', ctypes.c_ulong),
('dwXSize', ctypes.c_ulong),
('dwYSize', ctypes.c_ulong),
('dwXCountChars', ctypes.c_ulong),
('dwYCountChars', ctypes.c_ulong),
('dwFillAttribute', ctypes.c_ulong),
('dwFlags', ctypes.c_ulong),
('wShowWindow', ctypes.wintypes.WORD),
('cbReserved2', ctypes.wintypes.WORD),
('lpReserved2', ctypes.c_char_p),
('hStdInput', ctypes.c_void_p),
('hStdOutput', ctypes.c_void_p),
('hStdError', ctypes.c_void_p),
]
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.cb = ctypes.sizeof(self)
class PROCESS_INFORMATION(ctypes.Structure):
_fields_ = [
('hProcess', ctypes.c_void_p),
('hThread', ctypes.c_void_p),
('dwProcessId', ctypes.c_ulong),
('dwThreadId', ctypes.c_ulong),
]
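if __name__ == '__main__':
    # Quick self-check of the UNICODE_STRING helper (purely illustrative): from_str keeps
    # the backing buffer alive on the struct, so .value can read the string back.
    s = UNICODE_STRING.from_str('hello')
    assert s.Length == 10 and s.value == 'hello'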
| 6,600 | Python | .py | 183 | 28.852459 | 95 | 0.599906 | nyaoouo/NyLib2 | 8 | 0 | 0 | GPL-3.0 | 9/5/2024, 10:48:51 PM (Europe/Amsterdam) |
2,290,049 | __init__.py | nyaoouo_NyLib2/nylib/ctype/__init__.py | """
A module to access memory in a ctypes-like way; unlike ctypes, it allows
plugging in custom native accessors, so it can also be used to access the memory of another process.
"""
import ctypes
import functools
import operator
import struct
import typing
from .memory_manage import MemoryManager
from .. import winapi
from ..utils import mv_from_mem
if typing.TYPE_CHECKING:
from ..process import Process
_T = typing.TypeVar("_T")
struct_u64 = struct.Struct("Q")
struct_ptr = struct_u64
def size_padded(size: int, pad_size: int) -> int:
if pad_size < 2: return size
v = pad_size - 1
return (size + v) & ~v
class CDataMeta(type):
def __mul__(cls: typing.Type[_T], n: int) -> 'typing.Type[Array[_T]]':
return Array[cls, n]
class CData(metaclass=CDataMeta):
_accessor_: 'CAccessor'
_address_: int
_size_: int
_pad_size_: int
_can_self_handle_: bool = False
_is_self_allocated_: bool = False
def __del__(self):
if self._is_self_allocated_:
self._accessor_.free(self._address_)
def __init__(self, *args, _address_=None, _accessor_=None, **kwargs):
self._accessor_ = _accessor_ or CAccessorLocal.get_instance()
if _address_ is not None:
self._address_ = _address_
elif self._can_self_handle_:
self._address_ = self._accessor_.alloc(self._size_)
self._is_self_allocated_ = True
else:
raise ValueError("Can't self handle")
def check_finalize(t: typing.Type[CData] | CData):
if not isinstance(t, type):
t = type(t)
if issubclass(t, Struct):
if not hasattr(t, "_fields_"):
finalize_struct(t)
def sizeof(t: typing.Type[CData] | CData) -> int:
check_finalize(t)
return t._size_
def padsizeof(t: typing.Type[CData] | CData) -> int:
check_finalize(t)
return t._pad_size_
_CData_T = typing.TypeVar("_CData_T", bound=CData)
class SimpleCData(CData, typing.Generic[_T]):
_can_self_handle_ = True
_struct_: struct.Struct
_struct__: str
_ctype_: typing.Type
_is_xmm_: bool = False
def __init_subclass__(cls, **kwargs):
if s := getattr(cls, "_struct__", None):
cls._struct_ = struct.Struct(s)
if getattr(cls, "_struct_", None):
cls._pad_size_ = cls._size_ = cls._struct_.size
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
if args: self.value = args[0]
@property
def value(self) -> _T:
return self._struct_.unpack(self._accessor_.read(self._address_, self._struct_.size))[0]
@value.setter
def value(self, value: _T):
if isinstance(value, bytes):
pass
else:
if isinstance(value, SimpleCData): value = value.value
value = self._struct_.pack(value)
self._accessor_.write(self._address_, value)
def _op_(self, other, op):
return op(self.value, other.value if isinstance(other, SimpleCData) else other)
def _iop_(self, other, op):
self.value = op(self.value, other.value if isinstance(other, SimpleCData) else other)
return self
__eq__ = lambda self, other: self._op_(other, operator.eq)
__ne__ = lambda self, other: self._op_(other, operator.ne)
__lt__ = lambda self, other: self._op_(other, operator.lt)
__le__ = lambda self, other: self._op_(other, operator.le)
__gt__ = lambda self, other: self._op_(other, operator.gt)
__ge__ = lambda self, other: self._op_(other, operator.ge)
__add__ = lambda self, other: self._op_(other, operator.add)
__sub__ = lambda self, other: self._op_(other, operator.sub)
__mul__ = lambda self, other: self._op_(other, operator.mul)
__truediv__ = lambda self, other: self._op_(other, operator.truediv)
__floordiv__ = lambda self, other: self._op_(other, operator.floordiv)
__mod__ = lambda self, other: self._op_(other, operator.mod)
__pow__ = lambda self, other: self._op_(other, operator.pow)
__lshift__ = lambda self, other: self._op_(other, operator.lshift)
__rshift__ = lambda self, other: self._op_(other, operator.rshift)
__and__ = lambda self, other: self._op_(other, operator.and_)
__xor__ = lambda self, other: self._op_(other, operator.xor)
__or__ = lambda self, other: self._op_(other, operator.or_)
__iadd__ = lambda self, other: self._iop_(other, operator.iadd)
__isub__ = lambda self, other: self._iop_(other, operator.isub)
__imul__ = lambda self, other: self._iop_(other, operator.imul)
__itruediv__ = lambda self, other: self._iop_(other, operator.itruediv)
__ifloordiv__ = lambda self, other: self._iop_(other, operator.ifloordiv)
__imod__ = lambda self, other: self._iop_(other, operator.imod)
__ipow__ = lambda self, other: self._iop_(other, operator.ipow)
__ilshift__ = lambda self, other: self._iop_(other, operator.ilshift)
__irshift__ = lambda self, other: self._iop_(other, operator.irshift)
__iand__ = lambda self, other: self._iop_(other, operator.iand)
__ixor__ = lambda self, other: self._iop_(other, operator.ixor)
__ior__ = lambda self, other: self._iop_(other, operator.ior)
class c_uint8(SimpleCData[int]):
_struct__ = "B"
_ctype_ = ctypes.c_uint8
class c_uint16(SimpleCData[int]):
_struct__ = "H"
_ctype_ = ctypes.c_uint16
class c_uint32(SimpleCData[int]):
_struct__ = "I"
_ctype_ = ctypes.c_uint32
class c_uint64(SimpleCData[int]):
_struct__ = "Q"
_ctype_ = ctypes.c_uint64
class c_int8(SimpleCData[int]):
_struct__ = "b"
_ctype_ = ctypes.c_int8
class c_int16(SimpleCData[int]):
_struct__ = "h"
_ctype_ = ctypes.c_int16
class c_int32(SimpleCData[int]):
_struct__ = "i"
_ctype_ = ctypes.c_int32
class c_int64(SimpleCData[int]):
_struct__ = "q"
_ctype_ = ctypes.c_int64
class c_float(SimpleCData[float]):
_struct__ = "f"
_ctype_ = ctypes.c_float
_is_xmm_ = True
class c_double(SimpleCData[float]):
_struct__ = "d"
_ctype_ = ctypes.c_double
_is_xmm_ = True
class c_char(SimpleCData[bytes]):
_pad_size_ = _size_ = 1
def __class_getitem__(cls, size: int):
return type(f'c_char_{size}', (c_char,), {"_size_": size})
@property
def value(self) -> bytes:
return self._accessor_.read(self._address_, self._size_)
@value.setter
def value(self, value: bytes):
if len(value) >= self._size_:
value = value[:self._size_]
self._accessor_.write(self._address_, value)
class c_char_zt(SimpleCData[bytes]):
_pad_size_ = _size_ = 1
def __class_getitem__(cls, size: int):
return type(f'c_char_zt_{size}', (c_char_zt,), {"_size_": size})
@property
def value(self) -> bytes:
res = self._accessor_.read(self._address_, self._size_)
if (i := res.find(0)) >= 0:
res = res[:i]
return res
@value.setter
def value(self, value: bytes):
if len(value) >= self._size_:
value = value[:self._size_]
self._accessor_.write(self._address_, value)
class c_wchar(SimpleCData[str]):
encoding = "utf-16-le"
_pad_size_ = _size_ = 2
def __class_getitem__(cls, item: int | tuple[int, str]):
if isinstance(item, tuple):
size, encoding = item
return type(f'c_wchar_{size}', (c_wchar,), {"_size_": size, "encoding": encoding})
return type(f'c_wchar_{item}', (c_wchar,), {"_size_": item})
@property
def value(self) -> str:
return self._accessor_.read(self._address_, self._size_).decode(self.encoding)
@value.setter
    def value(self, value: str):
        # _size_ counts bytes, so truncate the encoded form rather than the character count
        data = value.encode(self.encoding)
        if len(data) > self._size_:
            data = data[:self._size_]
        self._accessor_.write(self._address_, data)
class c_wchar_zt(SimpleCData[str]):
encoding = "utf-16-le"
_pad_size_ = _size_ = 2
def __class_getitem__(cls, item: int | tuple[int, str]):
if isinstance(item, tuple):
size, encoding = item
return type(f'c_wchar_zt_{size}', (c_wchar_zt,), {"_size_": size, "encoding": encoding})
return type(f'c_wchar_zt_{item}', (c_wchar_zt,), {"_size_": item})
@property
def value(self) -> str:
res = self._accessor_.read(self._address_, self._size_)
for i in range(0, len(res), 2):
if res[i:i + 2] == b"\x00\x00":
res = res[:i]
break
return res.decode(self.encoding)
@value.setter
    def value(self, value: str):
        # _size_ counts bytes, so truncate the encoded form rather than the character count
        data = value.encode(self.encoding)
        if len(data) > self._size_:
            data = data[:self._size_]
        self._accessor_.write(self._address_, data)
c_size_t = c_uint64
c_longlong = c_int64
c_ulonglong = c_uint64
c_long = c_int32
c_ulong = c_uint32
c_int = c_int32
c_uint = c_uint32
c_short = c_int16
c_ushort = c_uint16
c_void_p = c_uint64
class Pointer(CData, typing.Generic[_CData_T]):
_pad_size_ = _size_ = c_size_t._size_
_type_: typing.Type[_CData_T]
_can_self_handle_ = True
def __init__(self, value=None, *args, **kwargs):
super().__init__(*args, **kwargs)
if value: self.value = value
@property
    def value(self) -> int:
return struct_ptr.unpack(self._accessor_.read(self._address_, struct_ptr.size))[0]
@value.setter
def value(self, value: _CData_T):
if isinstance(value, bytes):
pass
else:
if isinstance(value, SimpleCData): value = value.value
value = struct_ptr.pack(value)
self._accessor_.write(self._address_, value)
@property
def content(self) -> _CData_T:
return self[0]
@functools.cached_property
def element_size_padded(self) -> int:
return size_padded(sizeof(self._type_), padsizeof(self._type_))
def __getitem__(self, item: int) -> _CData_T:
return self._type_(_address_=self.value + item * self.element_size_padded, _accessor_=self._accessor_)
def __class_getitem__(cls, t: typing.Type[_CData_T]) -> 'Pointer[_CData_T]':
return type(f'p_{t.__name__}', (cls,), {"_type_": t})
class Array(CData, typing.Generic[_CData_T]):
_type_: typing.Type[_CData_T]
_length_: int
def __getitem__(self, item: int) -> _CData_T:
if self._length_ >= 0 and item >= self._length_: raise IndexError
return self._type_(_address_=self._address_ + item * self.element_size_padded, _accessor_=self._accessor_)
def __iter__(self) -> typing.Iterator[_CData_T]:
ptr = self._address_
ps = self.element_size_padded
if self._length_ < 0:
while True:
yield self._type_(_address_=ptr, _accessor_=self._accessor_)
ptr += ps
else:
for _ in range(self._length_):
yield self._type_(_address_=ptr, _accessor_=self._accessor_)
ptr += ps
@functools.cached_property
def element_size_padded(self) -> int:
return size_padded(sizeof(self._type_), padsizeof(self._type_))
def __class_getitem__(cls, t: typing.Type[_CData_T] | tuple[typing.Type[_CData_T], int]) -> 'typing.Type[Array[_CData_T]]':
if isinstance(t, tuple):
t, length = t
size = size_padded(sizeof(t), padsizeof(t)) * length
can_self_handle = t._can_self_handle_
else:
length = -1
size = 0
can_self_handle = False
return type(f'a_{t.__name__}', (cls,), {
"_type_": t,
"_length_": length,
"_size_": size,
"_pad_size_": t._pad_size_,
"_can_self_handle_": can_self_handle
})
def finalize_struct(cls):
    size = 0
    fields = []
    pad_size = 1
    bit_unit_type = None  # storage type of the bit-field unit currently being filled
    bit_unit_offset = 0  # byte offset of that unit
    bit_offset = 0  # next free bit inside that unit
    for name, t in cls.__dict__.items():
        if not isinstance(t, Field): continue
        assert not hasattr(t, "name"), "Field name is reserved"
        t.name = name
        if isinstance(t, BField):
            if t.offset >= 0 and t.bit_offset >= 0:  # fully explicit placement
                size = max(t.offset + sizeof(t.t), size)
            else:
                # sequential packing: reuse the current unit while the storage type
                # matches and the bits fit, otherwise open a new unit
                if bit_unit_type is not t.t or bit_offset + t.bit_size > sizeof(t.t) * 8:
                    bit_unit_type = t.t
                    bit_offset = 0
                    bit_unit_offset = size = size_padded(size, padsizeof(t.t))
                    size += sizeof(t.t)
                t.offset = bit_unit_offset
                t.bit_offset = bit_offset
                bit_offset += t.bit_size
            t.rev_mask = ~(t.bit_mask << t.bit_offset)
        else:
            bit_unit_type = None  # a plain field terminates the current bit run
            if t.offset < 0:
                t.offset = size = size_padded(size, padsizeof(t.t))
                size += sizeof(t.t)
            else:
                size = max(t.offset + sizeof(t.t), size)
        pad_size = max(pad_size, padsizeof(t.t))
        fields.append(t)
    cls._fields_ = fields
    cls._size_ = max(size, getattr(cls, "_size_", 0))
    cls._pad_size_ = pad_size
class Struct(CData):
_fields_: 'list[Field]'
_can_self_handle_ = True
class Field(typing.Generic[_T]):
name: str
def __init__(self, t: typing.Type[_T], offset: int = -1):
assert issubclass(t, CData), "Field type must be subclass of CData"
self.t = t
self.offset = offset
def __get__(self, instance: Struct, owner) -> _T:
if self.offset < 0: finalize_struct(owner)
return self.t(_address_=instance._address_ + self.offset, _accessor_=instance._accessor_)
class SField(Field[_T]):
def __init__(self, t: typing.Type[SimpleCData[_T]], offset: int = -1):
assert issubclass(t, SimpleCData), "Field type must be subclass of SimpleCData"
super().__init__(t, offset)
def __get__(self, instance: Struct, owner) -> _T:
return super().__get__(instance, owner).value
def __set__(self, instance: Struct, value: _T):
super().__get__(instance, instance.__class__).value = value
class BField(Field[int]):
    rev_mask: int = -1  # filled by finalize_struct (or __init__ when bit_offset is explicit): ~(bit_mask << bit_offset)
    def __init__(self, t: typing.Type[SimpleCData[int]], bit_size: int = 1, offset: int = -1, bit_offset: int = -1):
        assert issubclass(t, SimpleCData), "Field type must be subclass of SimpleCData"
        super().__init__(t, offset)
        self.bit_size = bit_size
        self.bit_offset = bit_offset
        self.bit_mask = (1 << bit_size) - 1
        if bit_offset >= 0:
            self.rev_mask = ~(self.bit_mask << bit_offset)
    def __get__(self, instance: Struct, owner) -> int:
        if self.bit_offset < 0: finalize_struct(owner)
        return (super().__get__(instance, owner).value >> self.bit_offset) & self.bit_mask
    def __set__(self, instance: Struct, value: int):
        if self.bit_offset < 0: finalize_struct(instance.__class__)
        _value = super().__get__(instance, instance.__class__)
        _value.value = (_value.value & self.rev_mask) | ((value & self.bit_mask) << self.bit_offset)
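# Layout sketch for the descriptors above (following the sequential bit packing
# implemented in finalize_struct; purely illustrative):
#
#   class Flags(Struct):
#       a = BField(c_uint8, 1)   # bit 0 of byte 0
#       b = BField(c_uint8, 3)   # bits 1..3 of the same byte
#       c = SField(c_uint8)      # next byte
#
#   f = Flags()
#   f.b = 5
#   assert f.b == 5 and sizeof(Flags) == 2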
class FuncDecl:
shell: bytes
def __init__(self, restype, *argtypes):
self.restype = restype
self.argtypes = argtypes
def make_param(self, address, *args):
raise NotImplementedError
def __call__(self, address, *args, **kwargs): # TODO: add support for expose python function?
return CFunction(self, _address_=address, *args, **kwargs)
class FastCall(FuncDecl):
# shell = keystone.Ks(keystone.KS_ARCH_X86, keystone.KS_MODE_64).asm(
# "push rbp;"
# "mov rbp, rsp;"
# "push rsi;"
# "push rdi;"
# "mov rbx, rcx;"
# "mov rcx, [rbx + 0x48];"
# "lea rsi, [rbx + 0x50]"
# "sub rsp, rcx;"
# "mov rdi, rsp;"
# "rep movsb;"
# "sub rsp, 0x20;"
# "mov rcx, [rbx + 0x8];"
# "mov rdx, [rbx + 0x10];"
# "mov r8, [rbx + 0x18];"
# "mov r9, [rbx + 0x20];"
# "movq xmm0, [rbx + 0x28];"
# "movq xmm1, [rbx + 0x30];"
# "movq xmm2, [rbx + 0x38];"
# "movq xmm3, [rbx + 0x40];"
# "call [rbx];"
# "pop rdi;"
# "pop rsi;"
# "mov rsp, rbp;"
# "pop rbp;"
# "ret;", as_bytes=True)[0]
shell = bytes.fromhex("554889e556574889cb488b4b48488d73504829cc4889e7f3a44883ec20488b4b08488b53104c8b43184c8b4b20f30f7e4328f30f7e4b30f30f7e5338f30f7e5b40ff135f5e4889ec5dc3")
def __init__(self, restype, *argtypes):
super().__init__(restype, *argtypes)
self.stack_size = 0
if len(argtypes) > 4:
self.stack_size = len(argtypes) * 8
self.stack_size = (self.stack_size + 0xf) & ~0xf
    def make_param(self, address, *args):
        assert len(args) == len(self.argtypes)
        # buffer layout consumed by the shell above: 0x00 target address,
        # 0x08..0x20 rcx/rdx/r8/r9, 0x28..0x40 xmm0..xmm3, 0x48 stack size,
        # 0x50.. stack arguments (fifth argument onward)
        buf = bytearray(0x50 + self.stack_size)
        struct_u64.pack_into(buf, 0, address)
        if self.stack_size:
            struct_u64.pack_into(buf, 0x48, self.stack_size)
        for i, (arg, t) in enumerate(zip(args, self.argtypes)):
            if i < 4:
                if t._is_xmm_:
                    t._struct_.pack_into(buf, 0x28 + i * 8, arg)
                else:
                    t._struct_.pack_into(buf, 0x8 + i * 8, arg)
            else:
                t._struct_.pack_into(buf, (0x50 - 0x20) + i * 8, arg)  # == 0x50 + (i - 4) * 8
        return buf
class CFunction(CData): # TODO: pointer? or shell?
_size_: int = c_size_t._size_
_pad_size_: int = c_size_t._size_
def __init__(self, func_decl: FuncDecl, *args, **kwargs):
super().__init__(*args, **kwargs)
self.func_decl = func_decl
def __call__(self, *args):
return self._accessor_.call(self.func_decl, self._address_, *args)
class CAccessor:
def read(self, address: int, size: int) -> bytes:
raise NotImplementedError
def write(self, address: int, value: bytes):
raise NotImplementedError
def call(self, func_decl, address: int, *args):
raise NotImplementedError
def alloc(self, size: int) -> int:
raise NotImplementedError
def free(self, address: int):
raise NotImplementedError
def alloc_exec(self, size: int) -> int:
raise NotImplementedError
def free_exec(self, address: int):
raise NotImplementedError
class CAccessorLocal(CAccessor):
@classmethod
def get_instance(cls):
if not hasattr(cls, "_instance"):
cls._instance = cls()
return cls._instance
def __init__(self):
self._alloc = {}
self._alloc_exec = {}
self._shells = {}
self.shell_buffer = MemoryManager(self.alloc_exec, self.free_exec)
def __del__(self):
while self._alloc_exec:
self.free_exec(next(iter(self._alloc_exec)))
def read(self, address: int, size: int) -> bytes:
return bytes(mv_from_mem(address, size, 0x100))
def write(self, address: int, value: bytes):
mv_from_mem(address, len(value), 0x200)[:] = value
def call(self, func_decl: FuncDecl, address: int, *args):
param = func_decl.make_param(address, *args)
buf = (ctypes.c_char * len(param)).from_buffer(param)
func_decl_t = type(func_decl)
key = id(func_decl_t)
if not (ptr := self._shells.get(key)):
shell = func_decl_t.shell
self._shells[key] = ptr = self.shell_buffer.alloc(len(shell))
self.write(ptr, shell)
res_t = func_decl.restype
if res_t is c_float:
return ctypes.CFUNCTYPE(ctypes.c_float, ctypes.c_char_p)(ptr)(buf)
elif res_t is c_double:
return ctypes.CFUNCTYPE(ctypes.c_double, ctypes.c_char_p)(ptr)(buf)
else:
res = ctypes.CFUNCTYPE(ctypes.c_uint64, ctypes.c_char_p)(ptr)(buf)
if issubclass(res_t, SimpleCData):
return res_t._struct_.unpack(struct_u64.pack(res))[0]
return res_t(_address_=res, _accessor_=self)
def alloc(self, size: int) -> int:
buf = ctypes.create_string_buffer(size)
address = ctypes.addressof(buf)
self._alloc[address] = buf
return address
def free(self, address: int):
del self._alloc[address]
def alloc_exec(self, size: int) -> int:
address = winapi.VirtualAllocEx(-1, 0, size, 0x3000, 0x40)
self._alloc_exec[address] = size
return address
def free_exec(self, address: int):
del self._alloc_exec[address]
winapi.VirtualFreeEx(-1, address, 0, 0x8000)
class CAccessorProcess(CAccessor):
def __init__(self, process: 'Process'):
self.process = process
self._shells = {}
self.shell_buffer = MemoryManager(self.alloc_exec, self.free_exec)
def read(self, address: int, size: int) -> bytes:
return self.process.read(address, size)
def write(self, address: int, value: bytes):
self.process.write(address, value)
def call(self, func_decl: FuncDecl, address: int, *args):
func_decl_t = type(func_decl)
key = id(func_decl_t)
if not (ptr := self._shells.get(key)):
shell = func_decl_t.shell
self._shells[key] = ptr = self.shell_buffer.alloc(len(shell))
self.write(ptr, shell)
param = func_decl.make_param(address, *args)
        self.write(buf := self.alloc(len(param)), param)
        res_t = func_decl.restype
        res_is_xmm = res_t is c_float or res_t is c_double
        # TODO: use better shell
        try:
            res = self.process.call(ptr, buf, read_xmm=res_is_xmm, get_bytes=True)
        finally:
            self.free(buf)  # the parameter block is only needed for the duration of the call
        if issubclass(res_t, SimpleCData):
            return res_t._struct_.unpack_from(res)[0]
        return res_t(_address_=struct_u64.unpack_from(res)[0], _accessor_=self)  # get_bytes=True yields the raw return value as bytes
def alloc(self, size: int) -> int:
return self.process.alloc(size, protect=0x04)
def free(self, address: int):
self.process.free(address)
def alloc_exec(self, size: int) -> int:
return self.process.alloc(size, protect=0x40)
def free_exec(self, address: int):
self.process.free(address)
| 21,333 | Python | .py | 505 | 34.865347 | 177 | 0.596257 | nyaoouo/NyLib2 | 8 | 0 | 0 | GPL-3.0 | 9/5/2024, 10:48:51 PM (Europe/Amsterdam) |
2,290,050 | memory_manage.py | nyaoouo_NyLib2/nylib/ctype/memory_manage.py | import bisect
import io
class ChunkManager:
    """Free-list allocator over a single memory block: free chunks are kept sorted by offset and by size, and are coalesced with their neighbours on free."""
    def __init__(self, ptr, size):
self.ptr = ptr
self.size = size
init_chunk = (0, size) # (offset, size)
self.chunks_by_offset = [init_chunk]
self.chunks_by_size = [init_chunk]
self.allocated = {} # (offset, size)[]
def can_allocate(self, size):
size = 8 if size <= 8 else ((size + 0xf) & ~0xf)
return size if self.chunks_by_size and size <= self.chunks_by_size[-1][1] else 0
def alloc(self, size):
if not (size := self.can_allocate(size)): return None
offset, chunk_size = self.chunks_by_size.pop(bisect.bisect_left(self.chunks_by_size, size, key=lambda x: x[1]))
self.chunks_by_offset.pop(bisect.bisect_left(self.chunks_by_offset, offset, key=lambda x: x[0]))
self.allocated[offset] = size
if size < chunk_size:
            new_off = offset + size
            new_size = chunk_size - size
            item = (new_off, new_size)
self.chunks_by_offset.insert(bisect.bisect_left(self.chunks_by_offset, new_off, key=lambda x: x[0]), item)
self.chunks_by_size.insert(bisect.bisect_left(self.chunks_by_size, new_size, key=lambda x: x[1]), item)
return self.ptr + offset
def free(self, ptr):
offset = ptr - self.ptr
if offset not in self.allocated: return False
size = self.allocated.pop(offset)
i = bisect.bisect_left(self.chunks_by_offset, offset, key=lambda x: x[0])
if i < len(self.chunks_by_offset):
_offset, _size = self.chunks_by_offset[i]
if offset + size == _offset:
self.chunks_by_offset.pop(i)
self.chunks_by_size.pop(bisect.bisect_left(self.chunks_by_size, _size, key=lambda x: x[1]))
size += _size
if i > 0:
_offset, _size = self.chunks_by_offset[i - 1]
if _offset + _size == offset:
i -= 1
self.chunks_by_offset.pop(i)
self.chunks_by_size.pop(bisect.bisect_left(self.chunks_by_size, _size, key=lambda x: x[1]))
offset = _offset
size += _size
item = (offset, size)
self.chunks_by_offset.insert(i, item)
self.chunks_by_size.insert(bisect.bisect_left(self.chunks_by_size, size, key=lambda x: x[1]), item)
return True
def fmt_chunks(self):
s = io.StringIO()
s.write(f'=== {len(self.chunks_by_offset)} chunks ===')
for i, (offset, size) in enumerate(self.chunks_by_offset):
s.write(f'\n[{i}] {self.ptr + offset:04x} - {self.ptr + offset + size:04x} ({size:04x})')
return s.getvalue()
class MemoryManager:
def __init__(self, alloc, free):
self._alloc = alloc
self._free = free
self.chunks = []
def __del__(self):
for cm in self.chunks:
self._free(cm.ptr)
def create_chunk(self, size):
size = (size + 0xfffff) & ~0xfffff
ptr = self._alloc(size)
cm = ChunkManager(ptr, size)
i = bisect.bisect_left(self.chunks, ptr, key=lambda x: x.ptr)
self.chunks.insert(i, cm)
return cm
def alloc(self, size):
cm = next((cm for cm in self.chunks if cm.can_allocate(size)), None) or self.create_chunk(size)
return cm.alloc(size)
def free(self, ptr):
        # bisect_right - 1 selects the chunk with the greatest base address <= ptr,
        # including the case where ptr is exactly a chunk's base address
        i = bisect.bisect_right(self.chunks, ptr, key=lambda x: x.ptr) - 1
        if i < 0: return False
        return self.chunks[i].free(ptr)
def fmt_chunks(self):
s = io.StringIO()
cnt = 0
for cm in self.chunks:
for i, (offset, size) in enumerate(cm.chunks_by_offset):
s.write(f'\n[{cnt}] {cm.ptr + offset:04x} - {cm.ptr + offset + size:04x} ({size:04x})')
cnt += 1
return f"=== {cnt} chunks ===" + s.getvalue()
def test_chunk():
from nylib.process import Process
cm = MemoryManager(Process.current.alloc, Process.current.free)
# cm = ChunkManager(0x100000, 0x1000)
a1 = cm.alloc(0x10)
print(f"{a1=:x}")
a2 = cm.alloc(0x20)
print(f"{a2=:x}")
a3 = cm.alloc(0x30)
print(f"{a3=:x}")
a4 = cm.alloc(0x30)
print(f"{a4=:x}")
print(cm.fmt_chunks())
assert cm.free(a1)
print(cm.fmt_chunks())
assert cm.free(a3)
print(cm.fmt_chunks())
assert cm.free(a2)
print(cm.fmt_chunks())
assert cm.free(a4)
print(cm.fmt_chunks())
if __name__ == '__main__':
test_chunk()
test_chunk()
| 4,539 | Python | .py | 108 | 33.268519 | 119 | 0.578888 | nyaoouo/NyLib2 | 8 | 0 | 0 | GPL-3.0 | 9/5/2024, 10:48:51 PM (Europe/Amsterdam) |
2,290,051 | pipe.py | nyaoouo_NyLib2/nylib/winutils/pipe.py | import ctypes
import threading
import time
import typing
from .. import winapi
_T = typing.TypeVar('_T')
class PipeHandlerBase:
active_pipe_handler = {}
buf_size = 64 * 1024
handle = None
period = .001
def __init__(self):
self.serve_thread = threading.Thread(target=self.serve, daemon=True)
self.work = False
self.is_connected = threading.Event()
    def send(self, s: bytes):
        # keep the OVERLAPPED alive until the write completes; ERROR_IO_PENDING (997) is the normal overlapped path
        overlapped = winapi.OVERLAPPED()
        try:
            winapi.WriteFile(self.handle, s, len(s), None, ctypes.byref(overlapped))
        except WindowsError as e:
            if e.winerror != 997: raise
        size = ctypes.c_ulong()
        winapi.GetOverlappedResult(self.handle, ctypes.byref(overlapped), ctypes.byref(size), True)
def _serve(self):
tid = threading.get_ident()
PipeHandlerBase.active_pipe_handler[tid] = self
try:
self.is_connected.set()
self.work = True
buf = ctypes.create_string_buffer(self.buf_size + 0x10)
size = ctypes.c_ulong()
overlapped = winapi.OVERLAPPED()
overlapped.hEvent = winapi.CreateEvent(None, True, False, None)
while self.work:
try:
winapi.ReadFile(self.handle, buf, self.buf_size, 0, ctypes.byref(overlapped))
except WindowsError as e:
if e.winerror != 997: raise
winapi.WaitForSingleObject(overlapped.hEvent, -1)
winapi.GetOverlappedResult(self.handle, ctypes.byref(overlapped), ctypes.byref(size), True)
self.on_data_received(bytes(buf[:size.value]))
finally:
if PipeHandlerBase.active_pipe_handler[tid] is self:
PipeHandlerBase.active_pipe_handler.pop(tid, None)
def serve(self):
try:
self.on_connect()
self._serve()
except Exception as e:
self.on_close(e)
else:
self.on_close(None)
finally:
try:
winapi.CloseHandle(self.handle)
except Exception:
pass
def close(self, block=True):
self.work = False
winapi.CloseHandle(self.handle)
if block: self.serve_thread.join()
def on_connect(self):
pass
def on_close(self, e: Exception | None):
pass
def on_data_received(self, data: bytes):
pass
class PipeServerHandler(PipeHandlerBase):
def __init__(self, server: 'PipeServer', handle, client_id):
self.server = server
self.handle = handle
self.client_id = client_id
self.buf_size = server.buf_size
super().__init__()
def serve(self):
self.server.handlers[self.client_id] = self
super().serve()
self.server.handlers.pop(self.client_id, None)
class PipeServer(typing.Generic[_T]):
handlers: typing.Dict[int, _T]
def __init__(self, name, buf_size=64 * 1024, handler_class=PipeServerHandler):
self.name = name
self.buf_size = buf_size
self.handler_class = handler_class
self.serve_thread = threading.Thread(target=self.serve)
self.client_counter = 0
self.handlers = {}
self.work = False
def serve(self):
self.work = True
while self.work:
handle = winapi.CreateNamedPipe(
self.name,
0x3 | 0x40000000, # PIPE_ACCESS_DUPLEX | FILE_FLAG_OVERLAPPED
0x4 | 0x2 | 0x0, # PIPE_TYPE_MESSAGE | PIPE_READMODE_MESSAGE | PIPE_WAIT
255, # PIPE_UNLIMITED_INSTANCES
self.buf_size, self.buf_size, 0, None
)
winapi.ConnectNamedPipe(handle, None)
c = self.handler_class(self, handle, self.client_counter)
c.buf_size = self.buf_size
c.serve_thread.start()
self.client_counter += 1
def close(self):
self.work = False
while self.handlers:
next_key = next(iter(self.handlers.keys()))
self.handlers.pop(next_key).close(False)
try:
_FlushClient(self.name, timeout=1).serve()
except TimeoutError:
pass
def send_all(self, s):
for c in self.handlers.values():
c.send(s)
class PipeClient(PipeHandlerBase):
def __init__(self, name: str, buf_size=64 * 1024, timeout=0):
self.name = name
self.buf_size = buf_size
self.timeout = timeout
super().__init__()
def _connect(self):
start = time.perf_counter()
while True:
if self.timeout and time.perf_counter() - start > self.timeout:
raise TimeoutError()
try:
self.handle = winapi.CreateFile(
self.name,
0x80000000 | 0x40000000, # GENERIC_READ | GENERIC_WRITE
0, # 0x1 | 0x2, # FILE_SHARE_READ | FILE_SHARE_WRITE
None,
0x3, # OPEN_EXISTING
0x40000000, # FILE_FLAG_OVERLAPPED
None
)
except WindowsError as e:
if e.winerror == 0xe7: # ERROR_PIPE_BUSY
time.sleep(1)
continue
if e.winerror == 0x2: # ERROR_FILE_NOT_FOUND
time.sleep(1)
continue
raise
else:
break
mode = ctypes.c_ulong(0x2) # PIPE_READMODE_MESSAGE
winapi.SetNamedPipeHandleState(self.handle, ctypes.byref(mode), None, None)
def serve(self):
self._connect()
super().serve()
def connect(self):
self.serve_thread.start()
self.is_connected.wait()
def __enter__(self):
if not self.is_connected.is_set():
self.connect()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
class _FlushClient(PipeClient):
def on_connect(self):
self.close()
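if __name__ == '__main__':
    # Minimal echo round trip (run as a module so the relative imports resolve);
    # the pipe name is an arbitrary choice for this demo, not something the classes mandate.
    _got = threading.Event()
    class _EchoHandler(PipeServerHandler):
        def on_data_received(self, data: bytes):
            self.send(data)
    class _DemoClient(PipeClient):
        def on_data_received(self, data: bytes):
            print('echoed:', data)
            _got.set()
    _name = r'\\.\pipe\nylib-pipe-demo'
    _server = PipeServer(_name, handler_class=_EchoHandler)
    _server.serve_thread.start()
    with _DemoClient(_name, timeout=5) as _client:
        _client.send(b'hello')
        _got.wait(5)
    _server.close()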
| 5,828 | Python | .py | 157 | 26.318471 | 107 | 0.563475 | nyaoouo/NyLib2 | 8 | 0 | 0 | GPL-3.0 | 9/5/2024, 10:48:51 PM (Europe/Amsterdam) |
2,290,052 | pipe_rpc.py | nyaoouo_NyLib2/nylib/winutils/pipe_rpc.py | import logging
import pickle
import threading
import traceback
import types
import typing
from .pipe import PipeServer, PipeServerHandler, PipeClient
from ..utils.simple import Counter
from ..utils.threading import ResEventList
_T = typing.TypeVar('_T')
CLIENT_CALL = 0
CLIENT_SUBSCRIBE = 1
CLIENT_UNSUBSCRIBE = 2
SERVER_RETURN = 0
SERVER_EVENT = 1
RETURN_NORMAL = 0
RETURN_EXCEPTION = 1
RETURN_GENERATOR = 2
RETURN_GENERATOR_END = 3
REMOTE_TRACE_KEY = '_remote_trace'
def format_exc(e):
return getattr(e, REMOTE_TRACE_KEY, None) or traceback.format_exc()
def set_exc(e, tb):
setattr(e, REMOTE_TRACE_KEY, tb)
return e
class RpcHandler(PipeServerHandler):
server: 'RpcServer'
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.subscribed = set()
def on_data_received(self, data: bytes):
cmd, *arg = pickle.loads(data)
if cmd == CLIENT_CALL: # call
threading.Thread(target=self.handle_call, args=arg).start()
elif cmd == CLIENT_SUBSCRIBE: # subscribe
key, = arg
if key not in self.subscribed:
self.subscribed.add(key)
self.server.add_subscribe(key, self.client_id)
elif cmd == CLIENT_UNSUBSCRIBE: # unsubscribe
key, = arg
if key in self.subscribed:
self.subscribed.remove(key)
self.server.remove_subscribe(key, self.client_id)
def on_close(self, e: Exception | None):
for k in self.subscribed:
self.server.remove_subscribe(k, self.client_id)
def handle_call(self, reply_id, key, arg, kwargs):
try:
            res = self.server.call_map[key](*arg, **kwargs)
except Exception as e:
self.reply_call_exc(reply_id, e)
else:
if isinstance(res, types.GeneratorType):
self.reply_call_gen(reply_id, res)
else:
self.reply_call_normal(reply_id, res)
def reply_call_normal(self, reply_id, res):
self.send(pickle.dumps((SERVER_RETURN, reply_id, RETURN_NORMAL, res)))
def reply_call_exc(self, reply_id, exc):
self.send(pickle.dumps((SERVER_RETURN, reply_id, RETURN_EXCEPTION, (exc, traceback.format_exc()))))
def reply_call_gen(self, reply_id, gen):
try:
for res in gen:
self.send(pickle.dumps((SERVER_RETURN, reply_id, RETURN_GENERATOR, res)))
self.send(pickle.dumps((SERVER_RETURN, reply_id, RETURN_GENERATOR_END, None)))
except Exception as e:
self.reply_call_exc(reply_id, e)
def send_event(self, event_id, event):
self.send(pickle.dumps((SERVER_EVENT, event_id, event)))
class RpcServer(PipeServer[RpcHandler]):
def __init__(self, name, call_map, *args, **kwargs):
super().__init__(name, *args, handler_class=RpcHandler, **kwargs)
self.subscribe_map = {}
if isinstance(call_map, (tuple, list,)):
call_map = {i.__name__: i for i in call_map}
self.call_map = call_map
def push_event(self, event_id, data):
cids = self.subscribe_map.get(event_id, set())
for cid in list(cids):
if client := self.handlers.get(cid):
client.send_event(event_id, data)
else:
try:
cids.remove(cid)
except KeyError:
pass
def add_subscribe(self, key, cid):
if not (s := self.subscribe_map.get(key)):
self.subscribe_map[key] = s = set()
s.add(cid)
def remove_subscribe(self, key, cid):
if s := self.subscribe_map.get(key):
try:
s.remove(cid)
except KeyError:
pass
if not s:
self.subscribe_map.pop(key, None)
class RpcClient(PipeClient):
reply_map: typing.Dict[int, ResEventList]
logger = logging.getLogger('RpcClient')
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.reply_map = {}
self.subscribe_map = {}
self.counter = Counter()
class Rpc:
def __getattr__(_self, item):
def func(*_args, **_kwargs):
return self.remote_call(item, _args, _kwargs)
func.__name__ = item
return func
self.rpc = Rpc()
def on_data_received(self, data: bytes):
cmd, *args = pickle.loads(data)
if cmd == SERVER_RETURN:
reply_id, reply_type, res = args
if l := self.reply_map.get(reply_id):
l.put((reply_type, res))
elif cmd == SERVER_EVENT:
key, data = args
s = self.subscribe_map.get(key, set())
if s:
for c in s:
try:
c(key, data)
except Exception as e:
self.logger.error(f'error in rpc client [{self.name}] event', exc_info=e)
else:
self.send(pickle.dumps((CLIENT_UNSUBSCRIBE, key)))
def subscribe(self, key, call):
if key not in self.subscribe_map:
self.subscribe_map[key] = set()
self.send(pickle.dumps((CLIENT_SUBSCRIBE, key)))
self.subscribe_map[key].add(call)
def unsubscribe(self, key, call):
s = self.subscribe_map.get(key, set())
try:
s.remove(call)
except KeyError:
pass
if not s:
self.subscribe_map.pop(key, None)
self.send(pickle.dumps((CLIENT_UNSUBSCRIBE, key)))
def res_iterator(self, reply_id, evt_list, first_res):
try:
yield first_res
while True:
reply_type, res = evt_list.get()
if reply_type == RETURN_EXCEPTION: raise set_exc(*res)
if reply_type == RETURN_GENERATOR_END: break
yield res
finally:
self.reply_map.pop(reply_id, None)
def remote_call(self, key, args, kwargs):
if not self.is_connected.is_set():
self.connect()
reply_id = self.counter.get()
self.reply_map[reply_id] = evt_list = ResEventList()
self.send(pickle.dumps((CLIENT_CALL, reply_id, key, args, kwargs)))
reply_type, res = evt_list.get()
if reply_type == RETURN_NORMAL: # normal
self.reply_map.pop(reply_id, None)
return res
if reply_type == RETURN_EXCEPTION: # exc
self.reply_map.pop(reply_id, None)
raise set_exc(*res)
if reply_type == RETURN_GENERATOR: # generator
return self.res_iterator(reply_id, evt_list, res)
if reply_type == RETURN_GENERATOR_END: # end of generator
self.reply_map.pop(reply_id, None)
def empty_iterator(): yield from ()
return empty_iterator()
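if __name__ == '__main__':
    # Minimal RPC round trip (run as a module so the relative imports resolve);
    # the pipe name and the exposed function are illustrative choices, not part of the protocol.
    def add(a, b):
        return a + b
    _name = r'\\.\pipe\nylib-rpc-demo'
    _server = RpcServer(_name, (add,))
    _server.serve_thread.start()
    _client = RpcClient(_name, timeout=5)
    print(_client.rpc.add(1, 2))  # -> 3
    _client.close()
    _server.close()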
| 6,938 | Python | .py | 174 | 29.729885 | 107 | 0.575167 | nyaoouo/NyLib2 | 8 | 0 | 0 | GPL-3.0 | 9/5/2024, 10:48:51 PM (Europe/Amsterdam) |
2,290,053 | inline_hook.py | nyaoouo_NyLib2/nylib/winutils/inline_hook.py | from ..utils.pip import required
required('setuptools', 'keystone-engine', 'capstone')
import re
import struct
import typing
import capstone
import keystone
if typing.TYPE_CHECKING:
from ..process import Process
ks_ = keystone.Ks(keystone.KS_ARCH_X86, keystone.KS_MODE_64)
cs_ = capstone.Cs(capstone.CS_ARCH_X86, capstone.CS_MODE_64)
def read_code(p: 'Process', a, min_size=1, min_line=0, cs=None) -> tuple[list[capstone.CsInsn], int]:
cs = cs or cs_
ret = []
proceed = 0
disasm = cs.disasm(p.read(a, max(min_size + 0x10, 0x100)), a)
out_of_code = False
while proceed < min_size or len(ret) < min_line:
if (i := next(disasm, None)) is None:
disasm = cs.disasm(p.read(a + proceed, 0x100), a + proceed)
continue
if not out_of_code:
if i.mnemonic == 'int3':
out_of_code = True
else:
if i.mnemonic != 'int3':
raise ValueError(f'Code at {a + proceed:#x} cross to another function')
ret.append(i)
proceed += i.size
return ret, proceed
def create_inline_hook(p: 'Process', a, hook_bytes, entrance_offset=0, skip_original=0):
try:
alloc = p.alloc_near(0x1000, a)
code_start = alloc + entrance_offset
jump_code = b'\xe9' + struct.pack('<i', code_start - a - 5) # jmp alloc
except ValueError:
alloc = p.alloc(0x1000)
code_start = alloc + entrance_offset
jump_code = b'\xff\x25\x00\x00\x00\x00' + struct.pack('<Q', code_start) # jmp qword ptr [rip];dq alloc
orig_codes, orig_size = read_code(p, a, min_size=len(jump_code), min_line=skip_original)
if (pad := orig_size - len(jump_code)) > 0: jump_code += b'\x90' * pad
return_at = a + len(jump_code)
hook_bytes_ = hook_bytes
for i in orig_codes[skip_original:]:
# todo: rebuild relative address
hook_bytes_ += i.bytes
hook_bytes_ += b'\xff\x25\x00\x00\x00\x00' + struct.pack('<Q', return_at) # jmp qword ptr [rip];dq return_at
p.write(alloc, b'\0' * entrance_offset + hook_bytes_)
return alloc, jump_code, b''.join(i.bytes for i in orig_codes)
def inline_hook(p: 'Process', a, hook_bytes, entrance_offset=0, skip_original=0):
alloc, jump_code, orig_code = create_inline_hook(p, a, hook_bytes, entrance_offset, skip_original)
p.write(a, jump_code)
# to restore, free(alloc) and write orig_code to a
return alloc, orig_code
def asm(code, addr=0, data_size=0, ks=None, cs=None):
codes = [l_ for l_ in (l.strip() for l in re.split(r'[\n;\r]', code)) if l_]
counter = 0
inst_count = 0
inst2lbl = {}
for i in range(len(codes)):
line = codes[i]
if m := re.match(r"(\w+):", line): # label
inst2lbl.setdefault(inst_count, []).append(m.group(1))
continue
inst_count += 1
if m := re.search(r'\W(__data__)\W', line):
counter = (id_ := counter) + 1
prepend_lbl = f'__read_data_{id_}__'
# replace __data__ with rip-prepend_lbl+start-data_size
codes[i] = line[:m.start(1)] + f'(rip-{prepend_lbl}+__start__-{data_size:#x})' + line[m.end(1):] + f';{prepend_lbl}:'
bytecode = (ks or ks_).asm('__start__:;' + ';'.join(codes), addr, True)[0]
labels = {}
if inst2lbl:
for i, inst in enumerate((cs or cs_).disasm(bytecode, addr)):
if i in inst2lbl:
for lbl in inst2lbl.pop(i):
labels[lbl] = inst.address
if not inst2lbl:
break
return bytecode, labels
class InlineHook:
alloc_at = 0
hook_code = b''
orig_code = b''
def __init__(self, process: 'Process', code, addr, data_size=0, skip_original=0):
self.process = process
if isinstance(code, bytes):
self.code = code
self.labels = {}
else:
self.code, self.labels = asm(code, addr, data_size)
self.addr = addr
self.data_size = data_size
self.skip_original = skip_original
self._enabled = False
@property
def data_at(self):
self.alloc()
return self.alloc_at
@property
def code_at(self):
self.alloc()
return self.alloc_at + self.data_size
@property
def enabled(self):
return self._enabled
@enabled.setter
def enabled(self, value):
if (value := bool(value)) == self._enabled: return
self.alloc()
if value:
self.process.write(self.addr, self.hook_code)
else:
self.process.write(self.addr, self.orig_code)
self._enabled = value
def alloc(self):
if not self.alloc_at:
self.alloc_at, self.hook_code, self.orig_code = create_inline_hook(self.process, self.addr, self.code, self.data_size, self.skip_original)
def free(self):
if self.alloc_at:
if self._enabled:
self.process.write(self.addr, self.orig_code)
self._enabled = False
self.process.free(self.alloc_at, 0x1000)
self.alloc_at = 0
self.hook_code = b''
self.orig_code = b''
def __del__(self):
self.free()
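if __name__ == '__main__':
    # Self-contained check of asm() (run as a module so the relative imports resolve):
    # assemble a stub and resolve the "done" label; no process is touched, the address is arbitrary.
    _code, _labels = asm('mov eax, 1; done:; ret', addr=0x1000)
    print(_code.hex(), _labels)  # the label should point at the ret instruction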
| 5,232 | Python | .py | 129 | 32.48062 | 150 | 0.587436 | nyaoouo/NyLib2 | 8 | 0 | 0 | GPL-3.0 | 9/5/2024, 10:48:51 PM (Europe/Amsterdam) |
2,290,054 | process.py | nyaoouo_NyLib2/nylib/winutils/process.py | import ctypes
import sys
import shlex
from .. import winapi
def iter_processes():
hSnap = winapi.CreateToolhelp32Snapshot(0x00000002, 0)
process_entry = winapi.ProcessEntry32()
process_entry.dwSize = ctypes.sizeof(process_entry)
winapi.Process32First(hSnap, ctypes.byref(process_entry))
    try:
        while 1:
            yield process_entry
            winapi.Process32Next(hSnap, ctypes.byref(process_entry))
except WindowsError as e:
if e.winerror != 18:
raise
finally:
winapi.CloseHandle(hSnap)
def pid_by_executable(executable_name: bytes | str):
if isinstance(executable_name, str):
executable_name = executable_name.encode(winapi.DEFAULT_ENCODING)
for process in iter_processes():
if process.szExeFile == executable_name:
yield process.th32ProcessID
def run_admin():
try:
if ctypes.windll.shell32.IsUserAnAdmin(): return
except:
pass
ctypes.windll.shell32.ShellExecuteW(None, "runas", sys.executable, " ".join(sys.argv), None, 1)
raise PermissionError("Need admin permission, a new process should be started, if not, please run it as admin manually")
def enable_privilege():
hProcess = ctypes.c_void_p(winapi.GetCurrentProcess())
if winapi.OpenProcessToken(hProcess, 32, ctypes.byref(hProcess)):
tkp = winapi.TOKEN_PRIVILEGES()
winapi.LookupPrivilegeValue(None, "SeDebugPrivilege", ctypes.byref(tkp.Privileges[0].Luid))
tkp.count = 1
tkp.Privileges[0].Attributes = 2
winapi.AdjustTokenPrivileges(hProcess, 0, ctypes.byref(tkp), 0, None, None)
class create_suspend_process:
def __init__(self, cmd, **kwargs):
if isinstance(cmd, (list, tuple)):
cmd = shlex.join(cmd)
if isinstance(cmd, str):
cmd = cmd.encode(winapi.DEFAULT_ENCODING)
assert isinstance(cmd, bytes), type(cmd)
self.cmd = cmd
self.process_information = None
self.startup_info = winapi.STARTUPINFOA(**kwargs)
def start(self):
assert not self.process_information, "Process already started"
self.process_information = winapi.PROCESS_INFORMATION()
winapi.CreateProcessA(
None, self.cmd,
None, None, 0,
4 | 8, # CREATE_SUSPENDED | DETACHED_PROCESS
None, None,
ctypes.byref(self.startup_info), ctypes.byref(self.process_information)
)
return self
def resume(self):
assert self.process_information, "Process not started"
winapi.ResumeThread(self.process_information.hThread)
def wait(self):
assert self.process_information, "Process not started"
winapi.WaitForSingleObject(self.process_information.hProcess, -1)
def __del__(self):
if self.process_information:
winapi.CloseHandle(self.process_information.hProcess)
winapi.CloseHandle(self.process_information.hThread)
def __enter__(self):
return self.start()
def __exit__(self, exc_type, exc_val, exc_tb):
# self.wait()
self.resume()
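if __name__ == '__main__':
    # Illustrative only (run as a module so the relative imports resolve): start a
    # harmless command suspended, let the context manager resume it on exit, then wait.
    with create_suspend_process('cmd /c exit') as _proc:
        print('suspended pid:', _proc.process_information.dwProcessId)
    _proc.wait()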
| 3,150 | Python | .py | 76 | 33.644737 | 124 | 0.670592 | nyaoouo/NyLib2 | 8 | 0 | 0 | GPL-3.0 | 9/5/2024, 10:48:51 PM (Europe/Amsterdam) |
2,290,055 | __init__.py | nyaoouo_NyLib2/nylib/winutils/__init__.py | from .process import *
from .. import winapi
def write_to_clipboard(text: str):
text = text.encode('utf-16le') + b'\0\0'
if not winapi.OpenClipboard(0):
raise ctypes.WinError()
winapi.EmptyClipboard()
try:
        if not (h := winapi.GlobalAlloc(0x0042, len(text))):  # GMEM_MOVEABLE | GMEM_ZEROINIT
raise ctypes.WinError()
if not (p := winapi.GlobalLock(h)):
raise ctypes.WinError()
winapi.memcpy(p, text, len(text))
winapi.GlobalUnlock(h)
winapi.SetClipboardData(13, h)
return True
finally:
winapi.CloseClipboard()
| 586 | Python | .py | 18 | 25.444444 | 60 | 0.621908 | nyaoouo/NyLib2 | 8 | 0 | 0 | GPL-3.0 | 9/5/2024, 10:48:51 PM (Europe/Amsterdam) |
2,290,056 | msvc.py | nyaoouo_NyLib2/nylib/winutils/msvc.py | import contextlib
import functools
import itertools
import os
import os.path
import subprocess
import winreg
@functools.cache
def msvc14_find_vc2017():
if not (root := os.environ.get("ProgramFiles(x86)") or os.environ.get("ProgramFiles")): return None, None
for component in (
"Microsoft.VisualStudio.Component.VC.Tools.x86.x64",
"Microsoft.VisualStudio.Workload.WDExpress",
):
with contextlib.suppress(subprocess.CalledProcessError, OSError, UnicodeDecodeError):
path = subprocess.check_output([
os.path.join(root, "Microsoft Visual Studio", "Installer", "vswhere.exe"),
"-latest", "-prerelease", "-requires", component, "-property", "installationPath", "-products", "*",
]).decode(encoding="mbcs", errors="strict").strip()
path = os.path.join(path, "VC", "Auxiliary", "Build")
if os.path.isdir(path): return 15, path
path = os.path.join(os.getenv('SystemDrive'), 'BuildTools', 'VC', 'Auxiliary', 'Build') # default path for BuildTools
if os.path.isdir(path): return 15, path
return None, None
@functools.cache
def msvc14_find_vc2015():
try:
key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, r"Software\Microsoft\VisualStudio\SxS\VC7", 0, winreg.KEY_READ | winreg.KEY_WOW64_32KEY, )
except OSError:
return None, None
best_version = 0
best_dir = None
with key:
for i in itertools.count():
try:
v, vc_dir, vt = winreg.EnumValue(key, i)
except OSError:
break
if v and vt == winreg.REG_SZ and os.path.isdir(vc_dir):
try:
version = int(float(v))
except (ValueError, TypeError):
continue
if version >= 14 and version > best_version:
best_version, best_dir = version, vc_dir
return best_version, best_dir
@functools.cache
def msvc14_find_vcvarsall(plat_spec):
vcruntime = None
vcruntime_plat = {
'x86': 'x86',
'x86_amd64': 'x64',
'x86_arm': 'arm',
'x86_arm64': 'arm64',
}.get(plat_spec, 'x64' if 'amd64' in plat_spec else 'x86')
_, best_dir = msvc14_find_vc2017()
if best_dir:
vcredist = os.path.join(best_dir, "..", "..", "redist", "MSVC", "**", vcruntime_plat, "Microsoft.VC14*.CRT", "vcruntime140.dll", )
vcredist = os.path.normpath(vcredist)
try:
import glob
vcruntime = glob.glob(vcredist, recursive=True)[-1]
except (ImportError, OSError, LookupError):
vcruntime = None
else:
best_version, best_dir = msvc14_find_vc2015()
if best_version:
vcruntime = os.path.join(best_dir, 'redist', vcruntime_plat, "Microsoft.VC140.CRT", "vcruntime140.dll", )
if not best_dir:
return None, None
vcvarsall = os.path.join(best_dir, "vcvarsall.bat")
if not os.path.isfile(vcvarsall): return None, None
if not vcruntime or not os.path.isfile(vcruntime): vcruntime = None
return vcvarsall, vcruntime
@functools.cache
def load_vcvarsall(plat_spec):
vcvarsall, _ = msvc14_find_vcvarsall(plat_spec)
if not vcvarsall:
raise FileNotFoundError("vcvarsall.bat not found")
try:
out = subprocess.check_output(f'cmd /u /c "{vcvarsall}" {plat_spec} && set').decode('utf-16le', errors='replace')
except subprocess.CalledProcessError as exc:
raise RuntimeError(f"Error executing {exc.cmd}") from exc
return {
key: value
for key, _, value in (line.partition('=') for line in out.splitlines())
if key and value
}
def where(exe, plat_spec):
paths = load_vcvarsall(plat_spec).get("Path", "").split(os.pathsep)
for path in paths:
if os.path.exists(exe_path := os.path.join(os.path.abspath(path), exe)):
return exe_path
raise FileNotFoundError(f"{exe} not found in PATH")
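if __name__ == '__main__':
    # Quick probe with no side effects: report the vcvarsall.bat that would be used
    # for 64-bit builds; 'x86_amd64' is one of the standard vcvarsall platform specs.
    print(msvc14_find_vcvarsall('x86_amd64'))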
| 3,989 | Python | .py | 94 | 34.446809 | 146 | 0.626996 | nyaoouo/NyLib2 | 8 | 0 | 0 | GPL-3.0 | 9/5/2024, 10:48:51 PM (Europe/Amsterdam) |
2,290,057 | ensure_env.py | nyaoouo_NyLib2/nylib/winutils/ensure_env.py | import atexit
import ctypes
import itertools
import logging
import os
import os.path
import pathlib
import shlex
import shutil
import subprocess
import tarfile
import tempfile
import time
import winreg
from . import msvc
from ..utils.web import download
logger = logging.getLogger(__name__)
def get_tmpdir():
if hasattr(get_tmpdir, 'path'): return get_tmpdir.path
get_tmpdir.path = tempfile.mkdtemp()
atexit.register(shutil.rmtree, get_tmpdir.path, ignore_errors=True)
return get_tmpdir.path
def get_sys_env(name):
return winreg.QueryValueEx(winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, r"System\CurrentControlSet\Control\Session Manager\Environment", 0, winreg.KEY_READ), name)[0]
def set_sys_env(name, value):
winreg.SetValueEx(winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, r"System\CurrentControlSet\Control\Session Manager\Environment", 0, winreg.KEY_SET_VALUE), name, 0, winreg.REG_EXPAND_SZ, value)
def get_user_env(name):
return winreg.QueryValueEx(winreg.OpenKey(winreg.HKEY_CURRENT_USER, r"Environment"), name)[0]
def set_user_env(name, value):
winreg.SetValueEx(winreg.CreateKey(winreg.HKEY_CURRENT_USER, r"Environment"), name, 0, winreg.REG_EXPAND_SZ, value)
def reload_env_path():
time.sleep(1) # wait for the environment to update
sys_path = get_sys_env('Path')
user_path = get_user_env('Path')
old_env = os.environ['Path'].split(os.pathsep)
for p in itertools.chain(sys_path.split(os.pathsep), user_path.split(os.pathsep)):
if not p: continue
p = os.path.expandvars(p)
if p not in old_env:
old_env.append(p)
os.environ['Path'] = os.pathsep.join(old_env)
return os.environ['Path']
def find_by_uninstall(name):
key = winreg.OpenKey(
winreg.HKEY_CURRENT_USER,
r"SOFTWARE\Microsoft\Windows\CurrentVersion\Uninstall",
0, winreg.KEY_READ
)
try:
i = 0
while True:
try:
subkey = winreg.OpenKey(key, winreg.EnumKey(key, i))
try:
display_name = winreg.QueryValueEx(subkey, "DisplayName")[0]
if name in display_name:
return winreg.QueryValueEx(subkey, "InstallLocation")[0]
except FileNotFoundError:
pass
finally:
winreg.CloseKey(subkey)
except OSError:
break
finally:
i += 1
finally:
winreg.CloseKey(key)
def ensure_winget(tmp_dir=None, shell=True):
if p := shutil.which('winget'): return p
tmp_dir = pathlib.Path(tmp_dir or get_tmpdir())
download('https://aka.ms/getwinget', tmp_dir / 'Microsoft.DesktopAppInstaller_8wekyb3d8bbwe.msixbundle', show_progress=shell)
download('https://aka.ms/Microsoft.VCLibs.x64.14.00.Desktop.appx', tmp_dir / 'Microsoft.VCLibs.x64.14.00.Desktop.appx', show_progress=shell)
download('https://github.com/microsoft/microsoft-ui-xaml/releases/download/v2.8.6/Microsoft.UI.Xaml.2.8.x64.appx', tmp_dir / 'Microsoft.UI.Xaml.2.8.x64.appx', show_progress=shell)
subprocess.check_call(["powershell", "-Command", "Add-AppxPackage", tmp_dir / 'Microsoft.VCLibs.x64.14.00.Desktop.appx'], shell=shell)
subprocess.check_call(["powershell", "-Command", "Add-AppxPackage", tmp_dir / 'Microsoft.UI.Xaml.2.8.x64.appx'], shell=shell)
subprocess.check_call(["powershell", "-Command", "Add-AppxPackage", tmp_dir / 'Microsoft.DesktopAppInstaller_8wekyb3d8bbwe.msixbundle'], shell=shell)
if p := shutil.which('winget'): return p
raise FileNotFoundError('winget not found')
def ensure_git(shell=True):
if p := shutil.which('git'): return p
winget = ensure_winget(shell=shell)
subprocess.check_call([winget, 'install', '--id', 'Git.Git', '-e', '--source', 'winget'], shell=shell)
reload_env_path()
if p := shutil.which('git'): return p
raise FileNotFoundError('git not found')
def ensure_cmake(tmp_dir=None, shell=True):
if p := shutil.which('cmake'): return p
tmp_dir = pathlib.Path(tmp_dir or get_tmpdir())
download(
r'https://github.com/Kitware/CMake/releases/download/v3.29.3/cmake-3.29.3-windows-x86_64.msi',
tmp_dir / 'cmake-3.29.3-windows-x86_64.msi',
show_progress=shell
)
subprocess.check_call([
"msiexec", "/i", tmp_dir / 'cmake-3.29.3-windows-x86_64.msi', "/passive", "/norestart"
], shell=shell)
    # the msi installer does not update PATH when invoked in passive mode
pg_files = os.environ.get("ProgramFiles") or os.environ.get("ProgramFiles(x86)")
if not pg_files: raise FileNotFoundError('ProgramFiles not found')
cmake_dir = pathlib.Path(pg_files) / 'CMake' / 'bin'
    if not cmake_dir.exists(): raise FileNotFoundError('cmake not found')
if ctypes.windll.shell32.IsUserAnAdmin():
set_sys_env('Path', f'{get_sys_env("Path")};{cmake_dir}')
else:
set_user_env('Path', f'{get_user_env("Path")};{cmake_dir}')
reload_env_path()
if p := shutil.which('cmake'): return p
raise FileNotFoundError('cmake not found')
def ensure_msvc(tmp_dir=None, shell=True):
# https://aka.ms/vs/17/release/vs_buildtools.exe
# vs_buildtools.exe --quiet --wait --norestart --installPath C:\BuildTools --add Microsoft.VisualStudio.Workload.VCTools --add Microsoft.VisualStudio.Component.Windows10SDK
_, p = msvc.msvc14_find_vc2017()
if p: return p
tmp_dir = pathlib.Path(tmp_dir or get_tmpdir())
download('https://aka.ms/vs/17/release/vs_buildtools.exe', tmp_dir / 'vs_buildtools.exe', show_progress=shell)
subprocess.check_call([
tmp_dir / 'vs_buildtools.exe',
'--wait', '--norestart', '--passive',
'--add', 'Microsoft.VisualStudio.Workload.VCTools',
'--add', 'Microsoft.VisualStudio.Component.Windows10SDK.19041',
'--add', 'Microsoft.VisualStudio.Component.Windows11SDK.22000',
'--add', 'Microsoft.VisualStudio.Component.VC.CMake.Project',
], shell=shell)
msvc.msvc14_find_vc2017.cache_clear()
reload_env_path()
_, p = msvc.msvc14_find_vc2017()
if p: return p
raise FileNotFoundError('msvc not found')
def _find_cygwin_dir():
try:
reg = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, r"SOFTWARE\Cygwin\setup", 0, winreg.KEY_READ)
path, _ = winreg.QueryValueEx(reg, 'rootdir')
except FileNotFoundError:
return None
else:
path = pathlib.Path(path) / 'bin'
if (path / 'cygcheck.exe').is_file(): return path
def ensure_cygwin_dir(tmp_dir=None, shell=True):
# https://www.cygwin.com/setup-x86_64.exe
# at SOFTWARE\Cygwin\setup
if path := _find_cygwin_dir(): return path
tmp_dir = pathlib.Path(tmp_dir or get_tmpdir())
download('https://www.cygwin.com/setup-x86_64.exe', tmp_dir / 'cygwin-setup-x86_64.exe', show_progress=shell)
p = pathlib.Path(f'{os.getenv("SystemDrive")}\\MyBuildTools\\cygwin64')
assert not p.exists()
install_root = p / 'Cygwin64'
local_root = p / 'local'
local_root.mkdir(parents=True, exist_ok=True)
subprocess.check_call([
tmp_dir / 'cygwin-setup-x86_64.exe',
'--quiet-mode', '--wait',
# allow user input?
'--root', install_root,
'--local-package-dir', local_root,
'--site', 'http://mirrors.kernel.org/sourceware/cygwin/',
], shell=shell)
if path := _find_cygwin_dir(): return path
raise FileNotFoundError('cygwin not found')
def _find_msys2_dir():
return find_by_uninstall('MSYS2')
def ensure_msys2(tmp_dir=None, shell=True):
if p := _find_msys2_dir(): return p
tmp_dir = pathlib.Path(tmp_dir or get_tmpdir())
download(
r'https://github.com/msys2/msys2-installer/releases/download/2024-05-07/msys2-x86_64-20240507.exe',
tmp_dir / 'msys2-x86_64-20240507.exe', show_progress=shell
)
p = pathlib.Path(f'{os.getenv("SystemDrive")}\\MyBuildTools\\msys2')
# msys2-x86_64-20240507.exe -t "C:\path\to\installation\location" --al --am --da -c
subprocess.check_call([
tmp_dir / 'msys2-x86_64-20240507.exe',
'--al', '--da', '-c', 'install', '-t', p,
], shell=shell)
if p := _find_msys2_dir(): return p
raise FileNotFoundError('msys2 not found')
def make_msys2_shell(args):
p = _find_msys2_dir()
if not p: raise FileNotFoundError('msys2 not found')
return [pathlib.Path(p) / 'msys2_shell.cmd', '-defterm', '-no-start', '-c', shlex.join(map(str, args))]
def ensure_msys2_file(fp, shell=True):
if not hasattr(ensure_msys2_file, 'cache'):
ensure_msys2_file.cache = {}
elif fp in ensure_msys2_file.cache:
return ensure_msys2_file.cache[fp]
    fp_ = os.path.join(_find_msys2_dir() or ensure_msys2(), fp.lstrip('/\\'))
if not os.path.exists(fp_):
ensure_msys2()
if not hasattr(ensure_msys2_file, 'db_loaded'):
subprocess.check_output(make_msys2_shell(['pacman', '-Fy']), shell=shell)
ensure_msys2_file.db_loaded = True
package_ = subprocess.check_output(make_msys2_shell(['pacman', '-F', '-q', fp]))
        repo, package = package_.split()[0].decode('utf-8').split('/', 1)
ensure_msys2_package(package, shell=shell)
ensure_msys2_file.cache[fp] = fp_
return fp_
def ensure_msys2_package(pkg, shell=True):
if not hasattr(ensure_msys2_package, 'cache'):
ensure_msys2_package.cache = {}
elif pkg in ensure_msys2_package.cache:
return
ensure_msys2()
try:
subprocess.check_output(make_msys2_shell(['pacman', '-Q', pkg]), shell=shell)
except subprocess.CalledProcessError:
pass
else:
ensure_msys2_package.cache[pkg] = True
return
subprocess.check_output(make_msys2_shell(['pacman', '-S', '--noconfirm', '--needed', pkg]), shell=shell)
ensure_msys2_package.cache[pkg] = True
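# Usage sketch (package and file names are illustrative): resolve gcc inside
# the MSYS2 tree, installing MSYS2 and the owning package on demand.
def _demo_ensure_gcc():
    ensure_msys2_package('mingw-w64-x86_64-gcc')
    return ensure_msys2_file('/mingw64/bin/gcc.exe')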
| 9,873 | Python | .py | 210 | 40.52381 | 192 | 0.664724 | nyaoouo/NyLib2 | 8 | 0 | 0 | GPL-3.0 | 9/5/2024, 10:48:51 PM (Europe/Amsterdam) |
2,290,058 | __init__.py | nyaoouo_NyLib2/nylib/winutils/python_loader/__init__.py | import ctypes
import os
import pathlib
import shutil
import subprocess
import sys
import tempfile
import time
from ...process import Process
def build_loader(dst, build_dir=None):
from .. import msvc, ensure_env
ensure_env.ensure_msvc()
plat_spec = 'x86_amd64' # TODO: check
build_env = msvc.load_vcvarsall(plat_spec)
dst = pathlib.Path(dst).absolute()
assert not dst.exists(), f"File already exists: {dst}"
if build_dir is None:
tmp_dir = pathlib.Path(tempfile.mkdtemp())
else:
tmp_dir = pathlib.Path(build_dir)
tmp_dir.mkdir(exist_ok=True, parents=True)
src_file = pathlib.Path(__file__).parent / 'python_loader.cpp'
try:
subprocess.run([
msvc.where('cl.exe', plat_spec),
'/D_WINDLL', '/std:c++20', '/EHsc', # '/DEBUG', '/Zi',
src_file,
'/link', '/DLL', '/OUT:' + str(dst),
], cwd=tmp_dir, env=build_env, check=True, shell=True)
finally:
if build_dir is None:
shutil.rmtree(tmp_dir, ignore_errors=True)
for file in dst.with_suffix('.exp'), dst.with_suffix('.obj'), dst.with_suffix('.lib'):
if file.exists():
file.unlink()
def run_script(process: Process, main_script, python_dll=None, python_paths=None, loader=None):
if loader is None:
loader = pathlib.Path(__file__).parent / 'python_loader.dll'
if not loader.exists():
build_loader(loader)
loader = process.load_library(loader)
pLoadPython = process.get_proc_address(loader, "LoadPython")
if python_dll is None:
dll_name = f"python{sys.version_info.major}{sys.version_info.minor}.dll"
python_dll = Process.current.get_ldr_data(dll_name).FullDllName.value
else:
python_dll = os.path.abspath(python_dll)
if python_paths is None:
python_paths = os.pathsep.join(sys.path)
main_script = os.path.abspath(main_script)
ws_pyDll = python_dll.encode('utf-16-le') + b'\0\0'
ws_pyMain = main_script.encode('utf-16-le') + b'\0\0'
ws_pyPaths = python_paths.encode('utf-16-le') + b'\0\0'
p_config = process.alloc(3 * 8 + len(ws_pyDll) + len(ws_pyMain) + len(ws_pyPaths))
process.write_ptr(p_config, p_config + 3 * 8)
process.write_ptr(p_config + 8, p_config + 3 * 8 + len(ws_pyDll))
process.write_ptr(p_config + 16, p_config + 3 * 8 + len(ws_pyDll) + len(ws_pyMain))
process.write(p_config + 24, ws_pyDll)
process.write(p_config + 24 + len(ws_pyDll), ws_pyMain)
process.write(p_config + 24 + len(ws_pyDll) + len(ws_pyMain), ws_pyPaths)
process.call(pLoadPython, p_config)
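# Usage sketch (target name and script path are illustrative): build the loader
# DLL if needed, then run a script with this interpreter's python DLL inside
# another 64-bit process.
def _demo_run_script():
    target = Process.from_name('notepad.exe')
    run_script(target, 'inject_main.py')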
| 2,623 | Python | .py | 61 | 36.934426 | 95 | 0.642773 | nyaoouo/NyLib2 | 8 | 0 | 0 | GPL-3.0 | 9/5/2024, 10:48:51 PM (Europe/Amsterdam) |
2,290,059 | __init__.py | nyaoouo_NyLib2/nylib/winutils/python_hijack/__init__.py | import configparser
import os
import pathlib
import shutil
import subprocess
import sys
import sysconfig
import tempfile
DLLMAIN_TEMPLATE = '''
'''
def iter_pe_exported(pe_path):
from ...utils.pip import required
required('pefile')
import pefile
pe = pefile.PE(pe_path)
for exp in pe.DIRECTORY_ENTRY_EXPORT.symbols:
yield exp.name.decode('utf-8'), exp.address, exp.ordinal
pe.close()
def create_src(pe_path, dst_dir, default_config):
names = [(name, ordinal) for name, _, ordinal in iter_pe_exported(pe_path)]
dst = pathlib.Path(dst_dir)
shutil.rmtree(dst, ignore_errors=True)
dst.mkdir(parents=True, exist_ok=True)
addr_var = lambda n: '_pyhijack_val_' + n
func_asm = lambda n: '_pyhijack_func_' + n
current_dir = pathlib.Path(sys.executable if getattr(sys, "frozen", False) else __file__).parent
if (dllmain_template_path := current_dir / 'dllmain.template.cpp').exists():
dllmain_text = dllmain_template_path.read_text('utf-8')
else:
dllmain_text = DLLMAIN_TEMPLATE
dllmain_text = dllmain_text.replace("/*REPLACE_ORIG_DLL_HERE*/", default_config['orig'].replace('\\', '\\\\'))
dllmain_text = dllmain_text.replace("/*REPLACE_PY_DLL_HERE*/", default_config['python_dll'].replace('\\', '\\\\'))
dllmain_text = dllmain_text.replace("/*REPLACE_PY_MAIN_HERE*/", default_config['python_main'].replace('\\', '\\\\'))
buf = ''
for name, ordinal in names:
buf += f'#pragma comment(linker, "/EXPORT:{name}={func_asm(name)},@{ordinal}")\n'
buf += '\nextern "C" {\n'
for name, ordinal in names:
buf += f' PVOID {addr_var(name)};\n'
buf += '}\n'
dllmain_text = dllmain_text.replace("/*REPLACE_DEF_EXPORT_HERE*/", buf)
buf = ''
for name, ordinal in names:
buf += (f' {addr_var(name)} = (PVOID)GetProcAddress(hOrig, "{name}");\n'
f' if ({addr_var(name)} == NULL) {{\n'
f' HANDLE_ERROR(L"GetProcAddress({name}) failed: %d", GetLastError());\n'
f' }}\n')
dllmain_text = dllmain_text.replace("/*REPLACE_SET_EXPORT_HERE*/", buf)
(dst / 'dllmain.cpp').write_text(dllmain_text, 'utf-8')
dllasm_text = '.Data\n'
for name, ordinal in names:
dllasm_text += f'EXTERN {addr_var(name)}:dq;\n'
dllasm_text += '\n.Code\n'
for name, ordinal in names:
dllasm_text += (f'{func_asm(name)} PROC\n'
f' jmp {addr_var(name)}\n'
f'{func_asm(name)} ENDP\n\n')
dllasm_text += '\nEND\n'
(dst / 'dllasm.asm').write_text(dllasm_text, 'utf-8')
def hijack(pe_path, build_dir=None, default_config=None, dst_dir=None):
from .. import msvc, ensure_env
from ...process import Process
ensure_env.ensure_msvc()
need_move_orig = False
orig_path = pe_path = pathlib.Path(pe_path).absolute()
dst_dir = pathlib.Path(dst_dir) if dst_dir is not None else pe_path.parent
dst_path = dst_dir / pe_path.name
if dst_path == orig_path:
need_move_orig = True
orig_path = orig_path.with_suffix('.pyHijack' + orig_path.suffix)
plat_spec = 'x86_amd64' # TODO: check
build_env = msvc.load_vcvarsall(plat_spec)
py_dll = f"python{sys.version_info.major}{sys.version_info.minor}.dll"
if default_config is None:
default_config = {}
default_config = {
'orig': str(orig_path),
'create_console': '1', # empty string to hide console
'python_dll': Process.current.get_ldr_data(py_dll).FullDllName.value,
'python_main': '.\\main.py',
} | default_config
if build_dir is None:
tmp_dir = tempfile.mkdtemp()
else:
tmp_dir = build_dir
try:
tmp_dir = pathlib.Path(tmp_dir)
create_src(pe_path, tmp_dir, default_config)
ml = msvc.where('ml64.exe', plat_spec)
cl = msvc.where('cl.exe', plat_spec)
include_path = sysconfig.get_paths()['include']
libs_path = str(pathlib.Path(include_path).with_name('libs'))
subprocess.run([ml, '/c', '/Fo', tmp_dir / 'dllasm.obj', tmp_dir / 'dllasm.asm'], cwd=tmp_dir, env=build_env, check=True, shell=True)
subprocess.run([
cl,
'/D_USRDLL', '/D_WINDLL',
# '/I', sysconfig.get_paths()['include'], f'/LIBPATH:"{libs_path}"',
tmp_dir / 'dllmain.cpp', tmp_dir / 'dllasm.obj',
'/link', '/DLL', '/OUT:' + str(tmp_dir / 'hijacked.dll')
], cwd=tmp_dir, env=build_env, check=True, shell=True)
if need_move_orig:
pe_path.rename(orig_path)
# (tmp_dir / 'hijacked.dll').rename(dst_path)
with open(dst_path, 'wb') as f:
f.write((tmp_dir / 'hijacked.dll').read_bytes())
finally:
if build_dir is None:
shutil.rmtree(tmp_dir, ignore_errors=True)
cgh_path = dst_path.with_name('pyHijack.ini')
config = configparser.ConfigParser()
config['Hijack'] = default_config
config['Python'] = {
'path': os.pathsep.join(sys.path),
}
with open(cgh_path, 'w') as f:
config.write(f)
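# Usage sketch (the DLL path is hypothetical): build a proxy DLL in place so the
# host application loads Python and runs the configured main script at startup.
def _demo_hijack():
    hijack(r'C:\SomeApp\version.dll', default_config={'python_main': '.\\main.py'})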
| 5,209 | Python | .py | 116 | 37.137931 | 141 | 0.596688 | nyaoouo/NyLib2 | 8 | 0 | 0 | GPL-3.0 | 9/5/2024, 10:48:51 PM (Europe/Amsterdam) |
2,290,060 | external.py | nyaoouo_NyLib2/nylib/structs/external.py | import ctypes
import typing
from nylib.process import Process
_T = typing.TypeVar('_T')
class ExStruct:
_size_: int
_need_free_: bool = False
def __new__(cls, process: Process, address: int = None):
self = object.__new__(cls)
self._process_ = process
if address is None:
self._address_ = process.alloc(self._size_)
self._need_free_ = True
else:
self._address_ = address
return self
def __init__(self, process: Process, address: int = None):
self._process_ = process
self._address_ = address
def _free_(self):
if self._need_free_:
self._process_.free(self._address_, self._size_)
self._need_free_ = False
def __del__(self):
self._free_()
def __repr__(self):
return f'<{self.__class__.__name__} at process={self._process_.process_id} address={self._address_:#X}>'
class ExField:
def __init__(self, type: typing.Type[_T], offset: int | str = None):
self.type = type
self._is_exstruct = issubclass(type, ExStruct)
if isinstance(offset, str):
self._offset = None
self._name = offset
else:
self._offset = offset
self._name = None
def __set_name__(self, owner, name):
if self._name is None:
self._name = name
def get_offset(self, instance: ExStruct):
if self._offset is None:
return getattr(getattr(instance, '_offset_'), self._name)
return self._offset
def __get__(self, instance: ExStruct, owner=None) -> _T:
offset = self.get_offset(instance)
address = instance._address_ + offset
if self._is_exstruct:
return self.type(instance._process_, address)
return instance._process_.read(address, self.type)
def __set__(self, instance: ExStruct, value: _T):
offset = self.get_offset(instance)
address = instance._address_ + offset
        if self._is_exstruct:
            instance._process_.write(address, value._process_.read(value._address_, value._size_))
        else:
            instance._process_.write(address, value)
class ExFieldSimp(ExField):
def __get__(self, instance: ExStruct, owner=None) -> typing.Any:
return super().__get__(instance, owner).value
class _ExIterable(ExStruct, typing.Generic[_T]):
_type_: typing.Type[_T]
_length_: int
@classmethod
def _type_size_(cls):
return cls._type_._size_ if issubclass(cls._type_, ExStruct) else ctypes.sizeof(cls._type_)
@typing.overload
def __getitem__(self, index: slice) -> tuple[_T, ...]:
...
@typing.overload
def __getitem__(self, index: int) -> _T:
...
def _first_at_(self) -> int:
raise NotImplementedError
def _item_at_(self, index) -> int:
if not (addr := self._first_at_()):
return 0
if index < 0:
raise IndexError('Index out of range')
if index > 0:
addr += index * self._type_size_()
return addr
def __getitem__(self, index) -> _T:
if isinstance(index, slice):
return tuple(self[i] for i in range(*index.indices(self._length_)))
elif isinstance(index, int):
if not (addr := self._item_at_(index)): return None
if issubclass(self._type_, ExStruct):
return self._type_(self._process_, addr)
return self._process_.read(addr, self._type_)
else:
raise TypeError(f'Invalid index type:{type(index)}')
def __setitem__(self, index, value):
if isinstance(index, slice):
for i, v in zip(range(*index.indices(self._length_)), value):
self[i] = v
elif isinstance(index, int):
if not (addr := self._item_at_(index)): raise IndexError('Writing to invalid address')
if issubclass(self._type_, ExStruct):
self._process_.write(addr, value._process_.read(value._address_, value._size_))
else:
self._process_.write(addr, value)
else:
raise TypeError(f'Invalid index type:{type(index)}')
def __iter__(self):
if hasattr(self, '_length_'):
for i in range(self._length_):
yield self[i]
else:
i = 0
while True:
yield self[i]
i += 1
class ExPtr(_ExIterable[_T]):
_size_ = ctypes.sizeof(ctypes.c_void_p)
def __class_getitem__(cls, item) -> 'ExPtr[_T]':
assert not hasattr(cls, '_type_')
return type(f'ExPtr[{item.__name__}]', (cls,), {'_type_': item})
def _first_at_(self) -> int:
return self._process_.read_ptr(self._address_)
def __bool__(self):
return bool(self._first_at_())
@property
def value(self) -> _T:
return self[0]
@value.setter
def value(self, value: _T):
self[0] = value
class ExArr(_ExIterable[_T]):
def __class_getitem__(cls, item) -> 'ExArr[_T]':
assert not hasattr(cls, '_type_')
if isinstance(item, tuple):
item, length = item
return type(f'ExArr[{item.__name__}]', (cls,), {'_type_': item, '_length_': length})
return type(f'ExArr[{item.__name__}]', (cls,), {'_type_': item})
@property
def _size_(self) -> int:
return self._type_size_() * self._length_
    def _first_at_(self) -> int:
        return self._address_
class ExStringPointer(ExStruct):
_encoding_: str = 'utf-8'
@property
def value(self):
return self._process_.read_string(self._process_.read_ptr(self._address_), encoding=self._encoding_)
class ExVfunc:
def __init__(self, idx: int):
self.idx = idx
def __get__(self, instance, owner):
p_vtbl = instance._process_.read_ptr(instance._address_)
p_func = instance._process_.read_ptr(p_vtbl + self.idx * ctypes.sizeof(ctypes.c_void_p))
return lambda *a: instance._process_.call(p_func, instance._address_, *a)
class ExStaticFunc:
def __init__(self, address_getter):
self.address_getter = address_getter
def get_address(self, process: Process):
if not hasattr(process, '_ex_static_func_cache_'):
process._ex_static_func_cache_ = c = {}
else:
c = process._ex_static_func_cache_
if not id(self) in c:
c[id(self)] = res = self.address_getter(process)
else:
res = c[id(self)]
return res
def __get__(self, instance, owner):
return lambda *a: instance._process_.call(self.get_address(instance._process_), instance._address_, *a)
| 6,731 | Python | .py | 166 | 31.807229 | 112 | 0.574739 | nyaoouo/NyLib2 | 8 | 0 | 0 | GPL-3.0 | 9/5/2024, 10:48:51 PM (Europe/Amsterdam) |
2,290,061 | pkg_archive.py | nyaoouo_NyLib2/nylib/utils/pkg_archive.py | import base64
import pathlib
import pickle
import zlib
finder_identifier = '_IsNyPkgArchiveFinder0001'
template_main = f'''
def __pkg_loader__(archive_code):
import sys
for _finder in sys.meta_path:
if hasattr(_finder, {finder_identifier!r}):
finder = _finder
break
else:
import base64,importlib.abc,importlib.machinery,importlib.util,inspect,pickle,zlib
class _NyPkgArchiveLoader(importlib.abc.Loader):
def __init__(self, code): self.code = code
def create_module(self, spec): return None
def exec_module(self, module): exec(self.code, module.__dict__)
class _NyPkgArchiveFinder(importlib.abc.MetaPathFinder):
def __init__(self): self.archive = {{}}
def reg(self, name, archive):
prefix = '' if name == '__main__' else name + '.'
for _name, data in pickle.loads(zlib.decompress(base64.b85decode(archive))).items(): self.archive[prefix + _name] = data
def exec_pkg(self, name, globals_):
try:
f = inspect.currentframe().f_back
name = f.f_globals['__name__']
while f.f_code.co_name != 'exec_module': f = f.f_back
module = f.f_locals['module']
assert module.__name__ == name
except Exception as e:
pass
else:
module.submodule_search_locations = []
module.__path__ = ''
if _data := self.archive.get('__main__' if name == '__main__' else name + '.__init__'):
exec(_data[0], globals_)
def find_spec(self, fullname, path, target=None):
if fullname in self.archive:
code, is_pkg = self.archive[fullname]
return importlib.util.spec_from_loader(fullname, _NyPkgArchiveLoader(code), is_package=is_pkg)
return None
setattr(_NyPkgArchiveFinder, {finder_identifier!r}, True)
sys.meta_path.append(finder := _NyPkgArchiveFinder())
finder.reg(__name__, archive_code)
globals().pop('__pkg_loader__', None)
finder.exec_pkg(__name__, globals())
'''.strip()
template = f'''
def __pkg_loader__(archive_code):
import sys
for finder in sys.meta_path:
if hasattr(finder, {finder_identifier!r}):
finder.reg(__name__, archive_code)
globals().pop('__pkg_loader__', None)
finder.exec_pkg(__name__, globals())
return
raise Exception('finder not found')
'''.strip()
def pack(p: str | pathlib.Path, o=None, is_main: bool = True):
o = o or (lambda x: x)
p = p if isinstance(p, pathlib.Path) else pathlib.Path(p)
assert p.exists(), 'path not exists'
if p.is_file():
return o(p.read_bytes())
data = {}
for _p in p.iterdir():
if _p.is_file():
if _p.suffix == '.py' or _p.suffix == '.pyw':
data[_p.stem] = o(_p.read_bytes()), False
elif _p.name != '__pycache__' and (_p / '__init__.py').exists():
data[_p.stem] = pack(_p, o, False), True
    encoded = base64.b85encode(zlib.compress(pickle.dumps(data))).decode('utf-8')
    return (template_main if is_main else template) + f'\n__pkg_loader__({encoded!r})\n'
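# Usage sketch (assumes './mypkg' is a package directory with __init__.py):
# emit a single self-loading script that re-creates the package tree on import.
def _demo_pack():
    pathlib.Path('mypkg_bundle.py').write_text(pack('./mypkg'), 'utf-8')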
| 3,362 | Python | .py | 74 | 35.013514 | 136 | 0.560768 | nyaoouo/NyLib2 | 8 | 0 | 0 | GPL-3.0 | 9/5/2024, 10:48:51 PM (Europe/Amsterdam) |
2,290,062 | preprocessor.py | nyaoouo_NyLib2/nylib/utils/preprocessor.py | # $RAISE "Preprocessor is already imported, dont reload it"
# default support macros
# - $SYM_FROM [Name] [Module] [Expr] : define a symbol from a module value
# - $SYM [Name] [Expr] : define a symbol
# - $INCLUDE [Name]: include a file with predefined symbols, absolute name only
# - $IF [Expr] / $ELSE / $ELIF [Expr] / $ENDIF: conditional compilation
# - $RAISE [Message]: throw an exception
# - $REQUIRE [Pip Modules...]: check if the module is available, if not, automatically install it
# - use `__SYM_[Name]` in code to use the symbol
import builtins
import importlib
import re
import sys
old_exec = builtins.exec
old_compile = builtins.compile
_re_is_comment = re.compile(r'^\s*#')
_re_symbol = re.compile(r'__SYM_([a-zA-Z_][a-zA-Z0-9_]*)')
class IProcessors:
symbols: dict
enable = False
_reged = []
def __init_subclass__(cls, **kwargs):
cls._reged.append(cls)
def __init__(self, symbols):
self.symbols = symbols
def process_code(self, code):
return code
def process_comment(self, comment):
return comment
class BasicProcessor(IProcessors):
enable = True
def __init__(self, *a):
super().__init__(*a)
self.cond_stack = []
def process_code(self, code):
if self.cond_stack and not self.cond_stack[-1][0]:
return
return _re_symbol.sub(lambda m: repr(self.symbols.get(m.group(1), m.group(0))), code)
def process_comment(self, comment):
args = comment.strip().split()
if len(args) == 0: return
match args[0]:
case '$IF':
if len(args) < 2:
raise SyntaxError('Invalid $IF statement, expected $IF [Expr]')
is_true = bool(eval(' '.join(args[1:]), None, self.symbols))
self.cond_stack.append((is_true, is_true))
raise StopIteration
case '$ELSE':
if not self.cond_stack:
raise SyntaxError('Invalid $ELSE statement, no matching $IF')
_, is_processed = self.cond_stack[-1]
is_true = not is_processed
self.cond_stack[-1] = (is_true, is_processed)
raise StopIteration
case '$ELIF':
if len(args) < 2:
raise SyntaxError('Invalid $ELIF statement, expected $ELIF [Expr]')
if not self.cond_stack:
raise SyntaxError('Invalid $ELIF statement, no matching $IF')
_, is_processed = self.cond_stack[-1]
if is_processed:
self.cond_stack[-1] = (False, True)
raise StopIteration
is_true = bool(eval(' '.join(args[1:]), None, self.symbols))
self.cond_stack[-1] = (is_true, is_true)
raise StopIteration
case '$ENDIF':
if not self.cond_stack:
raise SyntaxError('Invalid $ENDIF statement, no matching $IF')
self.cond_stack.pop()
raise StopIteration
if self.cond_stack and not self.cond_stack[-1][0]:
raise StopIteration
match args[0]:
case '$SYM_FROM':
if len(args) < 4:
                    raise SyntaxError('Invalid $SYM_FROM statement, expected $SYM_FROM [Name] [Module] [Expr]')
self.symbols[args[1]] = eval(' '.join(args[3:]), None, importlib.import_module(args[2]).__dict__)
raise StopIteration
case '$SYM':
if len(args) < 2:
                    raise SyntaxError('Invalid $SYM statement, expected $SYM [Name] [Expr?]')
if len(args) > 2:
val = eval(' '.join(args[2:]), None, self.symbols)
else:
val = None
self.symbols[args[1]] = val
raise StopIteration
case '$INCLUDE':
if len(args) < 2:
raise SyntaxError('Invalid $INCLUDE statement, expected $INCLUDE [Name]')
module = importlib.import_module(args[1])
self.symbols.update(getattr(module, '__preprocess_symbols__', {}))
raise StopIteration
case '$RAISE':
if len(args) < 2:
raise SyntaxError('Invalid $RAISE statement, expected $RAISE [Message]')
raise Exception(' '.join(args[1:]))
case '$REQUIRE':
from . import pip
if len(args) < 2:
raise SyntaxError('Invalid $REQUIRE statement, expected $REQUIRE [Pip Modules...]')
if not pip.is_installed(*args[1:]):
pip.install(*args[1:])
raise StopIteration
return comment
def preprocess(code):
codes = []
prev = ''
is_code = True
for line in code.splitlines():
if m := _re_is_comment.match(line):
if is_code and prev:
codes.append((prev, is_code))
prev = ''
is_code = False
if prev: prev += ' '
prev += line[m.end():].strip()
if prev.endswith('\\'):
prev = prev[:-1]
else:
codes.append((prev, is_code))
prev = ''
else:
if not is_code and prev:
codes.append((prev, is_code))
prev = ''
is_code = True
if prev: prev += '\n'
prev += line
if prev:
codes.append((prev, is_code))
# print(codes)
symbols = {}
processors = [c(symbols) for c in IProcessors._reged if c.enable]
res = ''
for c, is_code in codes:
if is_code:
try:
for p in processors:
if not (c := p.process_code(c)):
raise StopIteration
except StopIteration:
continue
res += c + '\n'
else:
try:
for p in processors:
if not (c := p.process_comment(c)):
raise StopIteration
except StopIteration:
continue
res += '# ' + c + '\n'
res += '__preprocess_symbols__ = ' + repr(symbols)
# print(res)
return res
def new_exec(code, __globals=None, __locals=None):
if isinstance(code, bytes):
code = preprocess(code.decode('utf-8'))
elif isinstance(code, str):
code = preprocess(code)
return old_exec(code, __globals, __locals)
def new_compile(source, filename, mode, flags=0, dont_inherit=False, optimize=-1):
if mode == 'exec':
if isinstance(source, bytes):
source = preprocess(source.decode('utf-8'))
elif isinstance(source, str):
source = preprocess(source)
return old_compile(source, filename, mode, flags, dont_inherit, optimize)
def new_cache_from_source(*a, **kw):
raise ValueError # disable cache
builtins.exec = new_exec
builtins.compile = new_compile
sys.dont_write_bytecode = True
def test():
code = """
# $SYM_FROM OS os name
def test():
# $IF OS == "nt"
print('Windows')
# $ELIF OS == "posix"
print('Linux')
# $ELSE
print('Unknown')
# $ENDIF
print(__SYM_OS)
test()
"""
# print(preprocess(code))
exec(code)
if __name__ == '__main__':
test()
| 7,433 | Python | .py | 193 | 27.704663 | 115 | 0.534064 | nyaoouo/NyLib2 | 8 | 0 | 0 | GPL-3.0 | 9/5/2024, 10:48:51 PM (Europe/Amsterdam) |
2,290,063 | prime.py | nyaoouo_NyLib2/nylib/utils/prime.py | import io
rand_engine = 'secrets'
match rand_engine:
case 'os':
import os
randint = lambda a, b: a + int.from_bytes(os.urandom(((b - a).bit_length() + 7) // 8), 'little') % (b - a)
randbytes = os.urandom
case 'secrets':
import secrets
randint = lambda a, b: secrets.randbelow(b - a) + a
randbytes = lambda n: secrets.token_bytes(n)
case 'random':
import random
randint = random.randint
randbytes = random.randbytes
case _:
raise ValueError('invalid rand engine')
def miller_rabin(p):
if p == 1: return False
if p == 2: return True
if p % 2 == 0: return False
m, k, = p - 1, 0
while m % 2 == 0:
m, k = m // 2, k + 1
a = randint(2, p - 1)
x = pow(a, m, p)
if x == 1 or x == p - 1: return True
while k > 1:
x = pow(x, 2, p)
if x == 1: return False
if x == p - 1: return True
k = k - 1
return False
def is_prime(p, r=40):
for i in range(r):
if not miller_rabin(p):
return False
return True
def get_prime_by_max(_max):
s_num = num = randint(_max // 2, _max)
while True:
if is_prime(num):
return num
elif num + 1 >= _max:
break
else:
num += 1
while True:
if is_prime(s_num): return s_num
s_num -= 1
def get_prime(bit_size):
return get_prime_by_max(1 << bit_size)
class SimpRsa:
def __init__(self, n=0, e=0, d=0):
self.n, self.e, self.d = n, e, d
self.default_size = (n.bit_length() + 7) // 8
    def encrypt(self, v: int):
assert v < self.n, f'v={v:#x}, n={self.n:#x}'
return pow(v, self.e, self.n)
    def decrypt(self, v: int):
assert v < self.n, f'v={v:#x}, n={self.n:#x}'
return pow(v, self.d, self.n)
def encrypt_bytes(self, v: bytes, to_size=0):
return self.encrypt(int.from_bytes(v, 'little')).to_bytes(to_size or self.default_size, 'little')
def decrypt_bytes(self, v: bytes, to_size=0):
return self.decrypt(int.from_bytes(v, 'little')).to_bytes(to_size or self.default_size, 'little')
class SimpleChipper:
def __init__(self, n: int, e: int = -1, d: int = -1):
self.n = n
self.e = e
self.d = d
self.size = (n.bit_length() + 7) // 8
self.check_size = self.size - 1
if self.size >> 32 != 0:
raise ValueError('n is too large')
elif self.size >> 16 != 0:
self.p_size = 4
elif self.size >> 8 != 0:
self.p_size = 2
else:
self.p_size = 1
def pad(self, src: bytes):
to_pad = self.check_size - len(src) % self.check_size
if to_pad < self.p_size: to_pad += self.check_size
return src + randbytes(to_pad - self.p_size) + to_pad.to_bytes(self.p_size, 'little')
def unpad(self, src: bytes):
return src[:-int.from_bytes(src[-self.p_size:], 'little')]
def dec(self, src: bytes):
if self.d == -1: raise ValueError('private key is not available')
src_ = io.BytesIO(src)
res = io.BytesIO()
while data := src_.read(self.size):
res.write(pow(int.from_bytes(data, 'little'), self.d, self.n).to_bytes(self.check_size, 'little'))
return self.unpad(res.getvalue())
def enc(self, src: bytes):
if self.e == -1: raise ValueError('public key is not available')
src_ = io.BytesIO(self.pad(src))
res = io.BytesIO()
while data := src_.read(self.check_size):
res.write(pow(int.from_bytes(data, 'little'), self.e, self.n).to_bytes(self.size, 'little'))
return res.getvalue()
def make_rsa_key(bit_size):
p1, p2 = get_prime(bit_size), get_prime(bit_size)
n = p1 * p2
o = (p1 - 1) * (p2 - 1)
e = get_prime_by_max(o)
d = pow(e, -1, o)
return n, e, d
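# Minimal sketch: round-trip a message through the padded block cipher above
# with a freshly generated (demo-sized, not production-strength) RSA key.
def _chipper_test():
    n, e, d = make_rsa_key(64)
    chipper = SimpleChipper(n, e, d)
    assert chipper.dec(chipper.enc(b'hello world')) == b'hello world'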
def _rsa_test():
p1, p2 = get_prime(64), get_prime(64)
n = p1 * p2
o = (p1 - 1) * (p2 - 1)
e = get_prime_by_max(o)
d = pow(e, -1, o)
test_rsa = SimpRsa(n, e, d)
print(f'n={n:#x},')
print(f'e={e:#x},')
print(f'd={d:#x},')
print(hex(encr := test_rsa.encrypt(9)))
print(hex(test_rsa.decrypt(encr)))
print((encr := test_rsa.encrypt_bytes(b'test')).hex(' '))
print(test_rsa.decrypt_bytes(encr))
if __name__ == '__main__':
_rsa_test()
| 4,419 | Python | .py | 124 | 28.451613 | 114 | 0.545497 | nyaoouo/NyLib2 | 8 | 0 | 0 | GPL-3.0 | 9/5/2024, 10:48:51 PM (Europe/Amsterdam) |
2,290,064 | mutex.py | nyaoouo_NyLib2/nylib/utils/mutex.py | import os
import pathlib
if os.name == "nt":
import msvcrt
def portable_lock(fp):
fp.seek(0)
msvcrt.locking(fp.fileno(), msvcrt.LK_LOCK, 1)
def is_lock(fp):
fp.seek(0)
try:
msvcrt.locking(fp.fileno(), msvcrt.LK_NBLCK, 1)
except OSError:
return True
else:
msvcrt.locking(fp.fileno(), msvcrt.LK_UNLCK, 1)
return False
def portable_unlock(fp):
fp.seek(0)
msvcrt.locking(fp.fileno(), msvcrt.LK_UNLCK, 1)
else:
import fcntl
def portable_lock(fp):
fcntl.flock(fp.fileno(), fcntl.LOCK_EX)
def is_lock(fp):
try:
fcntl.flock(fp.fileno(), fcntl.LOCK_EX | fcntl.LOCK_NB)
except OSError:
return True
else:
fcntl.flock(fp.fileno(), fcntl.LOCK_UN)
return False
def portable_unlock(fp):
fcntl.flock(fp.fileno(), fcntl.LOCK_UN)
class Mutex:
fp = None
def __init__(self, name):
self.name = pathlib.Path(name).absolute()
def is_lock(self):
if not self.name.exists(): return False
with open(self.name, 'wb') as tmp:
return is_lock(tmp)
def acquire(self):
self.fp = open(self.name, 'wb')
portable_lock(self.fp)
def release(self):
portable_unlock(self.fp)
self.fp.close()
self.name.unlink()
def __enter__(self):
self.acquire()
return self
def __exit__(self, _type, value, tb):
self.release()
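# Usage sketch (lock-file name is illustrative): only one process at a time
# enters the with-block; others block in acquire() until the file is unlocked.
def _demo_mutex():
    with Mutex('demo.app.lock'):
        pass  # do work that must not run concurrently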
| 1,554 | Python | .py | 53 | 21.09434 | 67 | 0.569012 | nyaoouo/NyLib2 | 8 | 0 | 0 | GPL-3.0 | 9/5/2024, 10:48:51 PM (Europe/Amsterdam) |
2,290,065 | simple.py | nyaoouo_NyLib2/nylib/utils/simple.py | import functools
import threading
aligned4 = lambda v: (v + 0x3) & (~0x3)
aligned8 = lambda v: (v + 0x7) & (~0x7)
aligned16 = lambda v: (v + 0xf) & (~0xf)
class Counter:
def __init__(self, start=0):
self.value = start
self.lock = threading.Lock()
def get(self):
with self.lock:
self.value += 1
return self.value
def clean_cached_property(instance):
for k in dir(instance.__class__):
v = getattr(instance.__class__, k)
if isinstance(v, functools.cached_property):
instance.__dict__.pop(k, None)
| 588 | Python | .py | 18 | 26.444444 | 52 | 0.599291 | nyaoouo/NyLib2 | 8 | 0 | 0 | GPL-3.0 | 9/5/2024, 10:48:51 PM (Europe/Amsterdam) |
2,290,066 | __init__.py | nyaoouo_NyLib2/nylib/utils/__init__.py | import ast
import collections
import contextlib
import ctypes
import functools
import inspect
import struct
import threading
import pathlib
import time
import typing
_T = typing.TypeVar('_T')
_T2 = typing.TypeVar('_T2')
def count_func_time(func):
import time
def wrapper(*args, **kwargs):
start = time.perf_counter()
return func(*args, **kwargs), time.perf_counter() - start
return wrapper
def num_arr_to_bytes(arr):
return bytes(arr).split(b'\0', 1)[0]
def is_iterable(v):
try:
iter(v)
except TypeError:
return False
else:
return True
class Counter:
def __init__(self, start=0):
self.count = start - 1
self.lock = threading.Lock()
def get(self):
with self.lock:
self.count += 1
return self.count
def iter_rm(p: pathlib.Path):
if p.exists():
if p.is_file():
p.unlink()
else:
for f in p.iterdir():
iter_rm(f)
p.rmdir()
def safe(func: typing.Callable[..., _T], *args, _handle=BaseException, _default: _T2 = None, **kwargs) -> _T | _T2:
try:
return func(*args, **kwargs)
except _handle:
return _default
def safe_lazy(func: typing.Callable[..., _T], *args, _handle=BaseException, _default: typing.Callable[..., _T2] = None,
              **kwargs) -> _T | _T2:
try:
return func(*args, **kwargs)
except _handle:
return _default(*args, **kwargs)
time_units = [
(1e-13, "Sv"),
(1e-12, "ps"),
(1e-9, "ns"),
(1e-6, "μs"),
(1e-3, "ms"),
(1, "s"),
(60, "min"),
(60 * 60, "hour"),
(60 * 60 * 24, "day"),
(60 * 60 * 24 * 7, "week"),
]
def fmt_sec(sec: float):
size, name = 1e-13, "Sv"
for _size, _name in time_units:
if sec < _size:
return f'{sec / size:.3f}{name}'
size = _size
name = _name
return f'{sec / size:.3f}{name}'
def test_time(func, cb=None):
if cb is None: return lambda _func: test_time(_func, func)
@functools.wraps(func)
def foo(*args, **kwargs):
start = time.perf_counter()
try:
return func(*args, **kwargs)
finally:
cb(func, args, kwargs, time.perf_counter() - start)
return foo
def extend_list(l: list, size: int, el=None):
if (s := len(l)) < size:
l.extend(el for _ in range(size - s))
def dict_find_key(d: dict, val, strict=False):
try:
if strict:
return next(k for k, v in d.items() if v == val)
else:
return next(k for k, v in d.items() if v is val)
except StopIteration:
raise ValueError(val)
def try_run(try_count, exception_type=Exception, exc_cb=None):
def dec(func):
def wrapper(*args, **kwargs):
_try_count = try_count
while _try_count > 0:
try:
return func(*args, **kwargs)
except exception_type as e:
if _try_count <= 1:
raise e
_try_count -= 1
if exc_cb:
exc_cb(e)
return wrapper
return dec
def wait_until(func, timeout=-1, interval=0.1, *args, **kwargs):
start = time.perf_counter()
while not func(*args, **kwargs):
if 0 < timeout < time.perf_counter() - start:
raise TimeoutError
time.sleep(interval)
def named_tuple_by_struct(t: typing.Type[_T], s: struct.Struct, buffer: bytearray | memoryview | bytes,
offset: int = 0) -> _T:
return t._make(s.unpack_from(buffer, offset))
def dataclass_by_struct(t: typing.Type[_T], s: struct.Struct, buffer: bytearray | memoryview | bytes,
offset: int = 0) -> _T:
return t(*s.unpack_from(buffer, offset))
def wrap_error(cb, exc_type=Exception, default_rtn=None):
def dec(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
try:
return func(*args, **kwargs)
except exc_type as e:
cb(e, *args, **kwargs)
return default_rtn
return wrapper
return dec
mv_from_mem = ctypes.pythonapi.PyMemoryView_FromMemory
mv_from_mem.argtypes = (ctypes.c_void_p, ctypes.c_ssize_t, ctypes.c_int)
mv_from_mem.restype = ctypes.py_object
def callable_arg_count(func):
return len(inspect.signature(func).parameters)
class LRU(collections.OrderedDict[_T, _T2]):
def __init__(self, *args, _maxsize=128, _getter: typing.Callable[[_T], _T2] = None, _validate: typing.Callable[[_T, _T2], bool] = None, _threadsafe=False, **kwds):
self.__maxsize = _maxsize
self.__validate = _validate
self.__getter = _getter
self.__lock = (threading.Lock if _threadsafe else contextlib.nullcontext)()
super().__init__(*args, **kwds)
def __missing__(self, key):
if self.__getter:
self.__setitem(key, value := self.__getter(key))
return value
raise KeyError(key)
def __validate__(self, key, value):
if self.__validate:
return self.__validate(key, value)
return True
def __call__(self, key):
return self.__getitem__(key)
def __getitem__(self, key) -> _T2:
with self.__lock:
value = super().__getitem__(key)
if self.__validate__(key, value):
self.move_to_end(key)
return value
else:
del self[key]
value = self.__missing__(key)
self.__setitem(key, value)
return value
@property
def maxsize(self):
return self.__maxsize
@maxsize.setter
def maxsize(self, value):
with self.__lock:
if value < self.__maxsize:
for k, _ in list(zip(self.keys(), range(value))):
del self[k]
self.__maxsize = value
@property
def thread_safe(self):
return not isinstance(self.__lock, contextlib.nullcontext)
@thread_safe.setter
def thread_safe(self, value):
value = bool(value)
if value != self.thread_safe:
self.__lock = (threading.Lock if value else contextlib.nullcontext)()
def __setitem(self, key, value):
super().__setitem__(key, value)
if len(self) > self.__maxsize:
oldest = next(iter(self))
del self[oldest]
def __setitem__(self, key, value):
with self.__lock:
self.__setitem(key, value)
def exec_ret(script, globals=None, locals=None, *, filename="<string>"):
'''Execute a script and return the value of the last expression'''
stmts = list(ast.iter_child_nodes(ast.parse(script)))
if not stmts:
return None
if isinstance(stmts[-1], ast.Expr):
# the last one is an expression and we will try to return the results
# so we first execute the previous statements
if len(stmts) > 1:
            exec(compile(ast.Module(body=stmts[:-1], type_ignores=[]), filename=filename, mode="exec"), globals, locals)
# then we eval the last one
return eval(compile(ast.Expression(body=stmts[-1].value), filename=filename, mode="eval"), globals, locals)
else:
# otherwise we just execute the entire code
return exec(compile(script, filename=filename, mode='exec'), globals, locals)
def repeat_add(start, times, step=1):
for _ in range(times):
yield start
start = start + step
def iter_repeat_add(d, add=0):
start, times, step = d
if isinstance(start, int):
for i in range(times):
yield start + add
add += step
else:
for i in range(times):
yield iter_repeat_add(start, add)
add += step
def seq_dif(seq):
if len(seq) < 2:
raise ValueError()
_n = next(it := iter(seq))
dif = (n := next(it)) - _n
while 1:
try:
if (_n := next(it)) - n != dif:
raise ValueError()
except StopIteration:
return dif
n = _n
def seq_to_repeat_add(seq):
return seq[0], len(seq), seq_dif(seq)
def seq_to_range(seq):
if (dif := seq_dif(seq)) == 0:
raise ValueError()
return seq[0], seq[-1] + (1 if dif > 0 else -1), dif
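# Minimal sketch of the LRU above: lazily compute values via _getter and keep
# at most two entries, evicting in least-recently-used order.
def _demo_lru():
    cache = LRU(_maxsize=2, _getter=lambda k: k * k)
    assert cache[3] == 9 and cache[4] == 16
    cache[5]  # evicts key 3, the least recently used entry
    assert 3 not in cache and 4 in cache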
| 8,358 | Python | .py | 241 | 26.427386 | 167 | 0.565817 | nyaoouo/NyLib2 | 8 | 0 | 0 | GPL-3.0 | 9/5/2024, 10:48:51 PM (Europe/Amsterdam) |
2,290,067 | threading.py | nyaoouo_NyLib2/nylib/utils/threading.py | import threading
import typing
import ctypes
_T = typing.TypeVar('_T')
def terminate_thread(t: threading.Thread | int, exc_type=SystemExit):
if isinstance(t, threading.Thread):
if not t.is_alive(): return
try:
t = next(tid for tid, tobj in threading._active.items() if tobj is t)
except StopIteration:
raise ValueError("tid not found")
if ctypes.pythonapi.PyThreadState_SetAsyncExc(t, ctypes.py_object(exc_type)) != 1:
raise SystemError("PyThreadState_SetAsyncExc failed")
class ResEvent(threading.Event, typing.Generic[_T]):
def __init__(self):
super().__init__()
self.res = None
self.is_exc = False
self.is_waiting = False
def set(self, data: _T = None) -> None:
assert not self.is_set()
self.res = data
self.is_exc = False
super().set()
def set_exception(self, exc) -> None:
assert not self.is_set()
self.res = exc
self.is_exc = True
super().set()
def wait(self, timeout: float | None = None) -> _T:
self.is_waiting = True
try:
if super().wait(timeout):
if self.is_exc:
raise self.res
else:
return self.res
else:
raise TimeoutError()
finally:
self.is_waiting = False
class ResEventList(typing.Generic[_T]):
queue: typing.List[ResEvent[_T]]
def __init__(self):
self.queue = [ResEvent()]
self.lock = threading.Lock()
def put(self, data: _T):
with self.lock:
if not self.queue or self.queue[-1].is_set():
self.queue.append(ResEvent())
self.queue[-1].set(data)
def get(self) -> _T:
with self.lock:
if not self.queue:
self.queue.append(ResEvent())
evt = self.queue[0]
res = evt.wait()
with self.lock:
if self.queue and self.queue[0] is evt:
self.queue.pop(0)
return res
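# Minimal sketch: ResEvent hands a result (or an exception) from a worker
# thread to a waiter, with wait() raising TimeoutError when nothing arrives.
def _demo_res_event():
    evt = ResEvent()
    threading.Thread(target=lambda: evt.set(42)).start()
    assert evt.wait(timeout=1) == 42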
| 2,085 | Python | .py | 61 | 24.704918 | 86 | 0.55644 | nyaoouo/NyLib2 | 8 | 0 | 0 | GPL-3.0 | 9/5/2024, 10:48:51 PM (Europe/Amsterdam) |
2,290,068 | web.py | nyaoouo_NyLib2/nylib/utils/web.py | import pathlib
import typing
from .pip import required
if typing.TYPE_CHECKING:
from tqdm import tqdm
def download(url, dst, *requests_args, unlink_if_exists=True, chunk_size=1024 * 1024, show_progress=False, **requests_kwargs):
required('requests')
import requests
dst = pathlib.Path(dst)
if dst.exists():
if unlink_if_exists:
dst.unlink()
else:
raise FileExistsError(dst)
if show_progress:
required('tqdm')
from tqdm import tqdm
else:
tqdm = None
tmp_file = pathlib.Path(dst.parent / (dst.name + '.tmp'))
_i = 0
while tmp_file.exists():
_i += 1
tmp_file = pathlib.Path(dst.parent / (dst.name + f'.tmp.{_i}'))
with requests.get(url, stream=True, *requests_args, **requests_kwargs) as r:
r.raise_for_status()
if show_progress:
total = int(r.headers.get('content-length', 0))
print(f'Downloading {url}')
pbar = tqdm(total=total, unit='B', unit_scale=True, unit_divisor=1024)
else:
pbar = None
with tmp_file.open('wb') as f:
for chunk in r.iter_content(chunk_size):
f.write(chunk)
if pbar:
pbar.update(len(chunk))
tmp_file.rename(dst)
return dst
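# Usage sketch (URL and destination are illustrative): stream a download to a
# .tmp sibling first, with a tqdm progress bar, then rename it into place.
def _demo_download():
    return download('https://example.com/big.bin', 'big.bin', show_progress=True)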
| 1,331 | Python | .py | 39 | 25.769231 | 126 | 0.590164 | nyaoouo/NyLib2 | 8 | 0 | 0 | GPL-3.0 | 9/5/2024, 10:48:51 PM (Europe/Amsterdam) |
2,290,069 | pip.py | nyaoouo_NyLib2/nylib/utils/pip.py | import logging
import socket
import urllib.request
import urllib.error
import urllib.parse
import warnings
warnings.filterwarnings("ignore", category=DeprecationWarning, module=".*packaging\\.version")
PIP_SOURCE = {
'PYPI': 'https://pypi.python.org/simple',
# mirror sites in China...
    'Aliyun': 'https://mirrors.aliyun.com/pypi/simple/',
    'Tencent Cloud': 'https://mirrors.cloud.tencent.com/pypi/simple/',
    'BFSU': 'https://mirrors.bfsu.edu.cn/pypi/web/simple',
    'Tsinghua': 'https://pypi.tuna.tsinghua.edu.cn/simple',
    'NetEase': 'https://mirrors.163.com/pypi/simple/',
}
_logger = logging.getLogger(__name__)
def boostrap_pip():
"""
If pip is not installed, it uses the `ensurepip` module to bootstrap it.
"""
try:
import pip
except ImportError:
import ensurepip
ensurepip.bootstrap()
import pip
def _test_pip_source(url):
try:
return urllib.request.urlopen(url, timeout=5).getcode() == 200
except (urllib.error.HTTPError, urllib.error.URLError, socket.timeout):
return False
def _set_pip_default_index(url: str):
import pip._internal.cli.cmdoptions as cmdoptions
options: list = cmdoptions.general_group["options"]
if not hasattr(cmdoptions, '_config_default_index'):
cmdoptions._original_trust_host_getter = cmdoptions.trusted_host
cmdoptions._original_trust_host_index = options.index(cmdoptions._original_trust_host_getter)
cmdoptions._config_default_index = True
cmdoptions.index_url.keywords['default'] = url
def new_trust_host_getter():
res = cmdoptions._original_trust_host_getter()
res.default.append(urllib.parse.urlparse(url).netloc)
return res
options[cmdoptions._original_trust_host_index] = new_trust_host_getter
def set_pip_default_index(manual_url: str = None):
"""
This function sets the default pip index.
If `manual_url` is not None, it will use the specified URL.
Otherwise, it will test the URLs in `PIP_SOURCE` and use the first available URL.
:param manual_url:
:return:
"""
boostrap_pip()
if manual_url is not None:
_set_pip_default_index(manual_url)
set_pip_default_index.is_set = True
return True
if hasattr(set_pip_default_index, 'is_set'): return True
for name, url in PIP_SOURCE.items():
if _test_pip_source(url):
_logger.info(f'Usable pip source: {name} {url}')
_set_pip_default_index(url)
set_pip_default_index.is_set = True
return True
raise Exception('No usable pip source found')
def install(*_a):
"""
This function installs the specified packages using pip.
Use arguments same as `pip install`.
:param _a:
:return: True if the installation is successful.
"""
boostrap_pip()
import pip._internal.commands
import pip._internal.cli.status_codes
set_pip_default_index()
# pip._internal.commands.install.InstallCommand
try:
if pip._internal.commands.create_command('install').main(list(_a)) == pip._internal.cli.status_codes.SUCCESS:
return True
    except SystemExit:
        pass
raise RuntimeError('Failed to install requirements, read the log for more information')
def is_installed(*_a):
"""
This function checks if the specified packages are installed.
Use arguments same as `pip show`.
:param _a:
:return: True if all packages are installed.
"""
boostrap_pip()
import pip._internal.commands.show
required = len(_a)
for _ in pip._internal.commands.show.search_packages_info(_a):
required -= 1
if required == 0: return True
return False
def required(*_a):
"""
This function checks if the specified packages are installed.
If not, it installs them.
Use arguments same as `pip install`.
:param _a:
:return: True if all packages are installed.
"""
if not is_installed(*_a):
return install(*_a)
return True
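# Usage sketch: typical bootstrap at program start; installs only the packages
# that are missing, using the first reachable index from PIP_SOURCE.
def _demo_required():
    required('requests', 'tqdm')
    import requests  # safe to import once required() returns
    return requests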
| 4,063 | Python | .py | 109 | 31.183486 | 117 | 0.678489 | nyaoouo/NyLib2 | 8 | 0 | 0 | GPL-3.0 | 9/5/2024, 10:48:51 PM (Europe/Amsterdam) |
2,290,070 | ks_asm.py | nyaoouo_NyLib2/nylib/utils/ks_asm.py | from . import pip
pip.required('setuptools', 'keystone-engine', 'capstone')
import keystone
import capstone
def comp(code, address, data=None, resolves=None, arch=keystone.KS_ARCH_X86, mode=keystone.KS_MODE_64):
_resolves = {}
if resolves:
for k, v in resolves.items():
if isinstance(k, str): k = k.encode('ascii')
assert isinstance(v, int)
_resolves[k] = v
ks = keystone.Ks(arch, mode)
def resolver(key, p_value):
if key in _resolves:
p_value[0] = _resolves[key]
return True
return False
ks.sym_resolver = resolver
return ks.asm(code, address, True)[0]
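# Minimal sketch: assemble two x64 instructions at 0x1000 while the resolver
# substitutes the otherwise-unknown 'target' symbol with a concrete address.
def _demo_comp():
    return comp(b'mov rax, target; jmp rax', 0x1000, resolves={'target': 0x2000})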
| 669 | Python | .py | 19 | 28.157895 | 103 | 0.625194 | nyaoouo/NyLib2 | 8 | 0 | 0 | GPL-3.0 | 9/5/2024, 10:48:51 PM (Europe/Amsterdam) |
2,290,071 | namespace.py | nyaoouo_NyLib2/nylib/process/namespace.py | import typing
if typing.TYPE_CHECKING:
from . import Process
_aligned4 = lambda v: (v + 0x3) & (~0x3)
_aligned16 = lambda v: (v + 0xf) & (~0xf)
class Namespace:
chunk_size = 0x10000
def __init__(self, process: 'Process'):
self.process = process
self.res = []
self.ptr = 0
self.remain = 0
self._protection = 0x40 # PAGE_EXECUTE_READWRITE
@property
def protection(self):
return self._protection
@protection.setter
def protection(self, v):
self._protection = v
for alloc_addr, alloc_size in self.res:
self.process.virtual_protect(alloc_addr, alloc_size, v)
def store(self, data: bytes):
self.process.write(p_buf := self.take(len(data)), data)
return p_buf
def take(self, size):
size = _aligned16(size)
if self.remain < size:
alloc_size = max(self.chunk_size, size)
alloc_addr = self.process.alloc(alloc_size)
self.res.append((alloc_addr, alloc_size))
self.process.virtual_protect(alloc_addr, alloc_size, self.protection)
self.remain = alloc_size - size
self.ptr = alloc_addr + size
return alloc_addr
else:
self.remain -= size
res = self.ptr
self.ptr += size
return res
def free(self):
while self.res:
self.process.free(*self.res.pop())
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.free()
| 1,570 | Python | .py | 46 | 25.652174 | 81 | 0.582672 | nyaoouo/NyLib2 | 8 | 0 | 0 | GPL-3.0 | 9/5/2024, 10:48:51 PM (Europe/Amsterdam) |
2,290,072 | __init__.py | nyaoouo_NyLib2/nylib/process/__init__.py | import ctypes
import functools
import pathlib
import struct
import typing
from .namespace import Namespace
from ..pattern import CachedRawMemoryPatternScanner, StaticPatternSearcher
from .. import ctype as m_ctype
from .. import winapi, winutils
_T = typing.TypeVar('_T')
class Process:
current: 'Process'
def __init__(self, process_id: int, handle=None, da=0x1F0FFF):
self.process_id = process_id
self.handle = winapi.OpenProcess(da, False, process_id) if handle is None else handle
self._cached_scanners = {}
self._ldr_cache = {}
self.ctype_accessor = m_ctype.CAccessorProcess(self)
@classmethod
def from_name(cls, name: str | bytes):
if (pid := next(winutils.pid_by_executable(name), None)) is None:
raise ValueError(f'Process {name!r} not found')
return cls(pid)
def alloc(self, size: int, protect=0x40, address=0):
return winapi.VirtualAllocEx(self.handle, address, size, 0x1000 | 0x2000, protect) # MEM_COMMIT|MEM_RESERVE
def free(self, address: int, size: int = 0):
return winapi.VirtualFreeEx(self.handle, address, size, 0x4000) # MEM_DECOMMIT
def virtual_query(self, address: int):
winapi.VirtualQueryEx(self.handle, address, ctypes.byref(mbi := winapi.MEMORY_BASIC_INFORMATION()), ctypes.sizeof(mbi))
return mbi
def virtual_protect(self, address: int, size: int, protect: int):
return winapi.VirtualProtectEx(self.handle, address, size, protect, ctypes.byref(ctypes.c_ulong()))
def iter_memory_region(self, start=0, end=None):
pos = start
while mbi := self.virtual_query(pos):
yield mbi
next_addr = mbi.BaseAddress + mbi.RegionSize
if pos >= next_addr or end is not None and end < next_addr: break
pos = next_addr
def alloc_near(self, size: int, address, protect=0x40):
for mbi in self.iter_memory_region(max(address - 0x7fff0000, 0), address + 0x7fff0000):
if mbi.State & 0x10000: # MEM_FREE
pos = (mbi.BaseAddress + 0xffff) & ~0xffff
if mbi.RegionSize - (pos - mbi.BaseAddress) >= size:
return self.alloc(size, protect, pos)
raise ValueError("No suitable memory region")
def read(self, address, type_: typing.Type[_T] | int) -> _T:
if isinstance(type_, int):
value = (ctypes.c_ubyte * type_)()
try:
winapi.ReadProcessMemory(self.handle, address, ctypes.byref(value), type_, None)
except WindowsError as e:
if e.winerror != 299: raise
return bytes(value)
if issubclass(type_, m_ctype.CData):
return type_(_accessor_=self.ctype_accessor, _address_=address)
value = type_()
success_size = ctypes.c_size_t()
winapi.ReadProcessMemory(self.handle, address, ctypes.byref(value), ctypes.sizeof(value), ctypes.byref(success_size))
return value
def write(self, address, value):
if isinstance(value, (bytes, bytearray)):
if isinstance(value, bytes): value = bytearray(value)
size = len(value)
value = (ctypes.c_ubyte * size).from_buffer(value)
size = ctypes.sizeof(value)
winapi.WriteProcessMemory(self.handle, address, ctypes.byref(value), size, None)
return size
def read_i8(self, address: int) -> int:
return self.read(address, ctypes.c_byte).value
def read_i16(self, address: int) -> int:
return self.read(address, ctypes.c_short).value
def read_i32(self, address: int) -> int:
return self.read(address, ctypes.c_int).value
def read_i64(self, address: int) -> int:
return self.read(address, ctypes.c_longlong).value
def read_i128(self, address: int) -> int:
return int.from_bytes(self.read(address, 16), 'little')
def read_u8(self, address: int) -> int:
return self.read(address, ctypes.c_ubyte).value
def read_u16(self, address: int) -> int:
return self.read(address, ctypes.c_ushort).value
def read_u32(self, address: int) -> int:
return self.read(address, ctypes.c_uint).value
def read_u64(self, address: int) -> int:
return self.read(address, ctypes.c_ulonglong).value
def read_u128(self, address: int) -> int:
return int.from_bytes(self.read(address, 16), 'little')
def read_float(self, address: int) -> float:
return self.read(address, ctypes.c_float).value
def read_double(self, address: int) -> float:
return self.read(address, ctypes.c_double).value
def write_i8(self, address: int, value: int):
return self.write(address, ctypes.c_byte(value))
def write_i16(self, address: int, value: int):
return self.write(address, ctypes.c_short(value))
def write_i32(self, address: int, value: int):
return self.write(address, ctypes.c_int(value))
def write_i64(self, address: int, value: int):
return self.write(address, ctypes.c_longlong(value))
def write_i128(self, address: int, value: int):
return self.write(address, value.to_bytes(16, 'little'))
def write_u8(self, address: int, value: int):
return self.write(address, ctypes.c_ubyte(value))
def write_u16(self, address: int, value: int):
return self.write(address, ctypes.c_ushort(value))
def write_u32(self, address: int, value: int):
return self.write(address, ctypes.c_uint(value))
def write_u64(self, address: int, value: int):
return self.write(address, ctypes.c_ulonglong(value))
def write_u128(self, address: int, value: int):
return self.write(address, value.to_bytes(16, 'little'))
def write_float(self, address: int, value: float):
return self.write(address, ctypes.c_float(value))
def write_double(self, address: int, value: float):
return self.write(address, ctypes.c_double(value))
def read_ptr(self, address: int):
return self.read(address, ctypes.c_size_t).value
def write_ptr(self, address: int, value: int):
return self.write(address, ctypes.c_size_t(value))
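    # Usage sketch (process name and addresses are illustrative): the typed
    # helpers above are thin wrappers over read()/write() for fixed widths:
    #   proc = Process.from_name('notepad.exe')
    #   value = proc.read_u64(0x7FF600001000)
    #   proc.write_float(0x7FF600001008, 1.0)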
def still_alive(self):
exit_code = ctypes.c_ulong()
winapi.GetExitCodeProcess(self.handle, ctypes.byref(exit_code))
return exit_code.value == 259
def read_bytes_zero_trim_unk_size(self, address: int, chunk_size=0x100):
mbi = self.virtual_query(address)
max_addr = mbi.BaseAddress + mbi.RegionSize
buf = bytearray()
while address < max_addr:
read_size = min(chunk_size, max_addr - address)
_buf = self.read(address, read_size)
if (sep := _buf.find(b'\0')) >= 0:
buf.extend(_buf[:sep])
break
buf.extend(_buf)
address += read_size
return bytes(buf)
def read_bytes_zero_trim(self, address: int, max_size: int = 0):
if max_size == 0:
return self.read_bytes_zero_trim_unk_size(address)
res = self.read(address, max_size)
if (sep := res.find(b'\0')) >= 0:
return res[:sep]
return res
def read_string(self, address: int, max_size: int = 0, encoding='utf-8', errors='ignore'):
return self.read_bytes_zero_trim(address, max_size).decode(encoding, errors)
def write_string(self, address: int, value: str | bytes, encoding='utf-8'):
if isinstance(value, str): value = value.encode(encoding)
return self.write(address, value)
@property
def process_basic_information(self):
if not hasattr(self, '_process_basic_information'):
self._process_basic_information = winapi.PROCESS_BASIC_INFORMATION()
winapi.NtQueryInformationProcess(self.handle, 0, ctypes.byref(self._process_basic_information), ctypes.sizeof(self._process_basic_information), None)
return self._process_basic_information
@property
def peb(self):
return self.read(self.process_basic_information.PebBaseAddress, winapi.PEB)
def enum_ldr_data(self):
ldr = self.read(self.peb.Ldr, winapi.PEB_LDR_DATA)
p_data = p_end = ldr.InMemoryOrderModuleList.Flink
offset = winapi.LDR_DATA_TABLE_ENTRY.InMemoryOrderLinks.offset
while True:
data = self.read(p_data - offset, winapi.LDR_DATA_TABLE_ENTRY)
if data.DllBase:
yield data
p_data = data.InMemoryOrderLinks.Flink
if p_data == p_end: break
@functools.cached_property
def base_ldr_data(self):
return next(self.enum_ldr_data())
def get_ldr_data(self, dll_name: str, rescan=False):
dll_name = dll_name.lower()
if dll_name in self._ldr_cache and not rescan:
return self._ldr_cache[dll_name]
self._ldr_cache.pop(dll_name, None)
for data in self.enum_ldr_data():
if data.BaseDllName.remote_value(self).lower() == dll_name:
self._ldr_cache[dll_name] = data
return data
raise KeyError(f'dll {dll_name!r} not found')
def scanner(self, dll_name: str, force_new=False) -> 'CachedRawMemoryPatternScanner':
if dll_name not in self._cached_scanners or force_new:
for data in self.enum_ldr_data():
if data.BaseDllName.remote_value(self) == dll_name:
self._cached_scanners[dll_name] = CachedRawMemoryPatternScanner(self, data.DllBase, data.SizeOfImage)
break
else:
raise KeyError(f'dll {dll_name!r} not found')
return self._cached_scanners[dll_name]
def static_scanner(self, dll_name) -> 'StaticPatternSearcher':
for data in self.enum_ldr_data():
if data.BaseDllName.remote_value(self) == dll_name:
return StaticPatternSearcher(data.FullDllName.remote_value(self), data.DllBase)
raise KeyError(f'dll {dll_name!r} not found')
def base_scanner(self, force_new=False):
return self.scanner(self.base_ldr_data.BaseDllName.remote_value(self), force_new)
def base_static_scanner(self):
return self.static_scanner(self.base_ldr_data.BaseDllName.remote_value(self))
def get_proc_address(self, dll: str | int, func_name: str):
if not hasattr(Process.get_proc_address, 'pGetProcAddress'):
Process.get_proc_address.pGetProcAddress = winapi.GetProcAddress(
self.get_ldr_data('kernel32.dll').DllBase,
b"GetProcAddress"
)
if isinstance(func_name, str): func_name = func_name.encode('utf-8')
if isinstance(dll, str): dll = self.get_ldr_data(dll).DllBase
return self.call(Process.get_proc_address.pGetProcAddress, dll, func_name)
def name_space(self):
return Namespace(self)
def load_library(self, filepath):
if isinstance(filepath, pathlib.Path): filepath = str(filepath)
if isinstance(filepath, str): filepath = filepath.encode(winapi.DEFAULT_ENCODING)
with self.name_space() as name_space:
result_at = name_space.take(0x10)
shell = (
b"\x55" # push rbp
b"\x48\x89\xe5" # mov rbp, rsp
b"\x48\x83\xec\x28" # sub rsp, 0x28
b"\x53" # push rbx
b"\x48\xbb" + struct.pack('<Q', result_at) + # movabs rbx, result_at
b"\x48\xb8" + struct.pack('<Q', self.get_proc_address('kernel32.dll', "LoadLibraryA")) + # movabs rax, LoadLibraryA
b"\x48\xb9" + struct.pack('<Q', name_space.store(filepath + b'\0')) + # movabs rcx, filepath
b"\xff\xd0" # call rax
b"\x48\x85\xc0" # test rax, rax
b"\x74\x0c" # je fail
b"\x48\x89\x43\x08" # mov qword ptr [rbx + 8], rax
b"\x48\x31\xc0" # xor rax, rax
b"\x48\x89\x03" # mov qword ptr [rbx], rax
b"\xeb\x16" # jmp end
# fail:
b"\x48\xb8" + struct.pack('<Q', self.get_proc_address('kernel32.dll', "GetLastError")) + # movabs rax, GetLastError
b"\xff\xd0" # call rax
b"\x48\x89\x03" # mov qword ptr [rbx], rax
b"\x48\x31\xc0" # xor rax, rax
b"\x48\x89\x43\x08" # mov qword ptr [rbx + 8], rax
# end:
b"\x5b" # pop rbx
b"\x48\x83\xc4\x28" # add rsp, 0x28
b"\x5d" # pop rbp
b"\xc3" # ret
)
self._call(name_space.store(shell), block=True)
if err := self.read_u32(result_at): raise ctypes.WinError(err)
return self.read_u64(result_at + 8)
def _call(self, call_address, params=None, block=True):
params = params or 0
thread_h = winapi.CreateRemoteThread(self.handle, None, 0, call_address, params, 0, None)
        if block: winapi.WaitForSingleObject(thread_h, -1)  # -1 == INFINITE
return thread_h
def call(self, func_ptr, *args: int | float | bytes | bool, push_stack_depth=0x28, block=True, read_xmm=False, get_bytes=False):
_MOV_RBX = b'\x48\xBB' # MOV rbx, n
_INT_ARG = (
b'\x48\xB9', # MOV rcx, n
b'\x48\xBA', # MOV rdx, n
b'\x49\xB8', # MOV r8, n
b'\x49\xB9', # MOV r9, n
)
_FLOAT_ARG = (
b'\xF3\x0F\x10\x03', # MOVSS xmm0, [rbx]
b'\xF3\x0F\x10\x0B', # MOVSS xmm1, [rbx]
b'\xF3\x0F\x10\x13', # MOVSS xmm2, [rbx]
b'\xF3\x0F\x10\x1B', # MOVSS xmm3, [rbx]
)
if len(args) > 4:
            raise ValueError('cannot handle more than 4 arguments yet (only register-passed args are supported)')
with self.name_space() as name_space:
return_address = name_space.take(8)
shell = (
b"\x55" # PUSH rbp
b"\x48\x89\xE5" # MOV rbp, rsp
b"\x48\x83\xec" + struct.pack('B', push_stack_depth) + # SUB rsp, push_stack_depth
b"\x53" # PUSH rbx
b"\x48\x31\xDB" # XOR rbx, rbx
)
for i, a in enumerate(args):
if isinstance(a, bytes):
a = name_space.store(a)
elif isinstance(a, bool):
a = int(a)
if isinstance(a, int):
shell += _INT_ARG[i] + struct.pack('q', a)
elif isinstance(a, float):
shell += _MOV_RBX + struct.pack('f', a) + bytes(4) + _FLOAT_ARG[i]
else:
                    raise TypeError(f'unsupported arg type {type(a)} at pos {i}')
shell += (
b"\x48\xBB" + struct.pack('q', func_ptr) + # MOV rbx, func_ptr
b"\xFF\xD3" # CALL rbx
b"\x48\xBB" + struct.pack('q', return_address) # MOV rbx, return_address
) + (
b"\xf2\x0f\x11\x03" # MOVSD [rbx], xmm0
if read_xmm else
b"\x48\x89\x03" # MOV [rbx], rax
) + (
b"\x5B" # POP rbx
b"\x48\x83\xc4" + struct.pack('B', push_stack_depth) + # ADD rsp, 0x28
b"\x48\x89\xEC" # MOV rsp, rbp
b"\x5D" # POP rbp
b"\xC3" # RET
)
code_address = name_space.store(shell)
self._call(code_address, block=block)
if get_bytes:
return self.read(return_address, 8)
return self.read_u64(return_address)
def _local_call(func_ptr, *args: int | float | bytes | bool, push_stack_depth=0x28, block=True):  # kwargs kept for signature parity with Process.call; unused for in-process calls
a = [ctypes.c_size_t]
for v in args:
if isinstance(v, int):
a.append(ctypes.c_size_t)
elif isinstance(v, float):
a.append(ctypes.c_float)
elif isinstance(v, bytes):
a.append(ctypes.c_char_p)
elif isinstance(v, bool):
a.append(ctypes.c_bool)
else:
            raise TypeError(f'unsupported arg type {type(v)}')
return ctypes.CFUNCTYPE(*a)(func_ptr)(*args)
def _local_read(address, type_: typing.Type[_T] | int) -> _T:
if isinstance(type_, int):
return ctypes.string_at(address, type_)
return type_.from_address(address)
Process.current = Process(winapi.GetCurrentProcessId(), winapi.GetCurrentProcess())
Process.current.read = _local_read
Process.current.call = _local_call
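# --- hedged usage sketch (not part of the original module) ---
# Shows the typed read helpers and the call path on the one target guaranteed to
# exist, the current process; attaching to another pid via Process(pid) follows
# the same flow. That kernel32.dll is loaded and exports GetTickCount is assumed.
if __name__ == '__main__':
    p = Process.current
    buf = ctypes.create_string_buffer(b'hello\0world')
    print(p.read_string(ctypes.addressof(buf), 32))  # -> 'hello' (zero-trimmed)
    p_tick = p.get_proc_address('kernel32.dll', 'GetTickCount')
    print(p.call(p_tick))  # milliseconds since boot, via the local fast path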
| 16,743 | Python | .py | 327 | 39.75841 | 157 | 0.587854 | nyaoouo/NyLib2 | 8 | 0 | 0 | GPL-3.0 | 9/5/2024, 10:48:51 PM (Europe/Amsterdam) |
2,290,073 | imgui_inspect.py | nyaoouo_NyLib2/nylib/mono/imgui_inspect.py | import typing
from nylib import imguiutils
from nylib.pyimgui import imgui
from . import *
_T = typing.TypeVar('_T')
class _MonoInspector(imguiutils.Inspector[_T]):
def item_name(self, item):
return item[0]
def on_item_selected(self, item):
super().on_item_selected(item)
self.selected_inspector = None
if item is None: return
_, t = item
if isinstance(t, str):
imgui.SetClipboardText(t)
elif isinstance(t, MonoType):
if cls := t.cls or t.cls_from_ptr:
self.selected_inspector = MonoClassInspector(cls)
elif isinstance(t, MonoField):
self.selected_inspector = MonoFieldInspector(t)
elif isinstance(t, MonoMethod):
self.selected_inspector = MonoMethodInspector(t)
elif isinstance(t, MonoClass_):
self.selected_inspector = MonoClassInspector(t)
elif isinstance(t, MonoImage):
self.selected_inspector = MonoImageInspector(t)
class MonoMethodInspector(_MonoInspector[MonoMethod]):
def init_items(self):
return [
[
*[
(f"{i}({MonoTypeEnum(param.type.type).name}): {param.type.name} {param.name}", param.type)
for i, param in enumerate(self.target.params)
],
(f"=> ({MonoTypeEnum(self.target.return_type.type)}) {self.target.return_type.name}", self.target.return_type),
]
]
class MonoFieldInspector(_MonoInspector[MonoField]):
def init_items(self):
return [
[
(f"name: {self.target.name}", self.target.name),
(f"type: {self.target.type.name}", self.target.type),
(f"offset: {self.target.offset:#X}", f"{self.target.offset:#x}"),
(f"flags: {self.target.flags:#X}", f"{self.target.flags:#x}")
]
]
class MonoClassInspector(_MonoInspector[MonoClass_]):
def init_items(self):
return [
[(f"{field.type.name} {field.name}", field) for field in self.target.fields],
[(method.name + f"({','.join(p.type.name for p in method.params)})", method) for method in self.target.methods]
]
class MonoImageInspector(_MonoInspector[MonoImage]):
def init_items(self):
return [
[(f"{cls.namespace}::{cls.name}", cls) for cls in self.target.clss]
]
class MonoInspector(_MonoInspector[Mono]):
def init_items(self):
return [
[(asm.image.name, asm.image) for asm in self.target.assemblies]
]
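# --- hedged usage sketch (not part of the original module) ---
# Intended wiring: build a MonoInspector over the runtime singleton once, then
# draw it every imgui frame. The render() entry point on imguiutils.Inspector is
# an assumption here; substitute whatever draw method your imguiutils version exposes.
_example_inspector: MonoInspector | None = None
def _example_draw():
    global _example_inspector
    if _example_inspector is None:
        _example_inspector = MonoInspector(Mono.get_instance())
    _example_inspector.render()  # assumed Inspector draw call, inside an active imgui frame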
| 2,608 | Python | .py | 63 | 31.714286 | 127 | 0.599209 | nyaoouo/NyLib2 | 8 | 0 | 0 | GPL-3.0 | 9/5/2024, 10:48:51 PM (Europe/Amsterdam) |
2,290,074 | type_cast.py | nyaoouo_NyLib2/nylib/mono/type_cast.py | import typing
from .defines import *
from .defines import _MonoObj
_Mono2Py = {}
_Py2Mono = {}
# ctypes does not export its private base classes, so walk c_void_p's MRO to grab them for isinstance checks
_SimpleCData: typing.Type = next(t for t in ctypes.c_void_p.__mro__ if '_SimpleCData' in t.__name__)
_CData: typing.Type = next(t for t in ctypes.c_void_p.__mro__ if '_CData' in t.__name__)
def _mono2py(t: MonoTypeEnum):
def _wrapper(func):
_Mono2Py[t] = func
return func
return _wrapper
def _py2mono(t: MonoTypeEnum):
def _wrapper(func):
_Py2Mono[t] = func
return func
return _wrapper
def py2mono(t: MonoTypeEnum | int, v, keeper):
return _Py2Mono[t](v, keeper)
def mono2py(t: MonoTypeEnum | int, v):
return _Mono2Py[t](v) if t in _Mono2Py else v
def _simple_map(t, ct):
@_py2mono(t)
def _(v, keeper):
keeper.append(_v := ct(v))
return ctypes.addressof(_v)
@_mono2py(t)
def _(v):
return ct.from_address(MonoApi.get_instance().mono_object_unbox(v)).value
_simple_map(MonoTypeEnum.BOOLEAN, ctypes.c_bool)
_simple_map(MonoTypeEnum.I1, ctypes.c_int8)
_simple_map(MonoTypeEnum.U1, ctypes.c_uint8)
_simple_map(MonoTypeEnum.I2, ctypes.c_int16)
_simple_map(MonoTypeEnum.U2, ctypes.c_uint16)
_simple_map(MonoTypeEnum.I4, ctypes.c_int32)
_simple_map(MonoTypeEnum.U4, ctypes.c_uint32)
_simple_map(MonoTypeEnum.I8, ctypes.c_int64)
_simple_map(MonoTypeEnum.U8, ctypes.c_uint64)
_simple_map(MonoTypeEnum.R4, ctypes.c_float)
_simple_map(MonoTypeEnum.R8, ctypes.c_double)
@_py2mono(MonoTypeEnum.VOID)
@_py2mono(MonoTypeEnum.OBJECT)
@_py2mono(MonoTypeEnum.PTR)
@_py2mono(MonoTypeEnum.FNPTR)
@_py2mono(MonoTypeEnum.CLASS)
def _(v, keeper):
if isinstance(v, _CData):
return ctypes.addressof(v)
if isinstance(v, _MonoObj):
return v.ptr
keeper.append(_v := ctypes.c_size_t(v))
return ctypes.addressof(_v)
# mono -> py: pointer-like results pass through unchanged
@_mono2py(MonoTypeEnum.VOID)
@_mono2py(MonoTypeEnum.OBJECT)
@_mono2py(MonoTypeEnum.PTR)
@_mono2py(MonoTypeEnum.FNPTR)
@_mono2py(MonoTypeEnum.CLASS)
def _(v):
return v
@_py2mono(MonoTypeEnum.CHAR)
def _(v, keeper):
if isinstance(v, _CData):
return ctypes.addressof(v)
if isinstance(v, str):
v = v.encode('utf-8')
assert isinstance(v, bytes)
keeper.append(_v := ctypes.create_string_buffer(v))
return ctypes.addressof(_v)
@_mono2py(MonoTypeEnum.CHAR)
def _(v):
return ctypes.string_at(v)
@_py2mono(MonoTypeEnum.STRING)
def _(v, keeper):
if isinstance(v, str):
v = v.encode('utf-8')
assert isinstance(v, bytes)
api = MonoApi.get_instance()
keeper.append(_v := ctypes.create_string_buffer(v))
return api.mono_string_new(api.mono_get_root_domain(), _v)
@_mono2py(MonoTypeEnum.STRING)
def _(v):
api = MonoApi.get_instance()
if api.is_il2cpp:
return api.il2cpp_string_chars(v)
else:
return api.mono_string_to_utf8(v).decode('utf-8')
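# --- hedged usage sketch (not part of the original module) ---
# The keeper list is what keeps boxed ctypes temporaries alive until the native
# call consumes them; dropping it early would leave mono a dangling address.
def _example_roundtrip(value: int) -> None:
    keeper = []
    addr = py2mono(MonoTypeEnum.I4, value, keeper)  # boxes value, returns the address of the temporary
    assert ctypes.c_int32.from_address(addr).value == value
    # keeper must stay referenced for as long as the native side may read addr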
| 2,863 | Python | .py | 86 | 29.302326 | 100 | 0.69214 | nyaoouo/NyLib2 | 8 | 0 | 0 | GPL-3.0 | 9/5/2024, 10:48:51 PM (Europe/Amsterdam) |
2,290,075 | __init__.py | nyaoouo_NyLib2/nylib/mono/__init__.py | import ctypes
import enum
import functools
import typing
from .defines import *
from .defines import _MonoObj
from .type_cast import py2mono, mono2py
class MonoObject_(_MonoObj):
def init(self):
MonoApi.get_instance().mono_runtime_object_init(self.ptr)
@functools.cached_property
def cls(self):
return MonoClass_(MonoApi.get_instance().mono_object_get_class(self.ptr))
def unbox(self):
return MonoApi.get_instance().mono_object_unbox(self.ptr)
class MonoMethodHeader(_MonoObj):
@functools.cached_property
def il_code(self):
res = MonoApi.get_instance().mono_method_header_get_code(self.ptr, ctypes.byref(code := ctypes.c_uint32()), None)
return res, code.value
class MonoReflectionType_(_MonoObj):
pass
class MonoType(_MonoObj):
@functools.cached_property
def name(self) -> str:
return MonoApi.get_instance().mono_type_get_name(self.ptr).decode('utf-8')
@functools.cached_property
def cls(self):
# mono_class_from_mono_type?
return MonoClass_(MonoApi.get_instance().mono_type_get_class(self.ptr))
@functools.cached_property
def cls_from_ptr(self):
return MonoClass_(MonoApi.get_instance().mono_ptr_class_get(self.ptr))
@functools.cached_property
def type_from_ptr(self):
return MonoType(MonoApi.get_instance().mono_type_get_ptr_type(self.ptr))
@functools.cached_property
    def type(self) -> MonoTypeEnum:
return MonoTypeEnum(MonoApi.get_instance().mono_type_get_type(self.ptr))
@functools.cached_property
def reflect_type(self):
api = MonoApi.get_instance()
if api.is_il2cpp:
return MonoReflectionType_(api.il2cpp_type_get_object(self.ptr))
else:
return MonoReflectionType_(api.mono_type_get_object(api.mono_get_root_domain(), self.ptr))
class MonoField(_MonoObj):
@functools.cached_property
def type(self):
return MonoType(MonoApi.get_instance().mono_field_get_type(self.ptr))
@functools.cached_property
def name(self) -> str:
return MonoApi.get_instance().mono_field_get_name(self.ptr).decode('utf-8')
@functools.cached_property
def parent(self):
return MonoClass_(MonoApi.get_instance().mono_field_get_parent(self.ptr))
@functools.cached_property
def offset(self):
return MonoApi.get_instance().mono_field_get_offset(self.ptr)
@functools.cached_property
def flags(self):
return MonoApi.get_instance().mono_field_get_flags(self.ptr)
def get_value_addr(self, instance: 'MonoObject_' = None):
if self.flags & MONO_FIELD_ATTR_STATIC:
assert self.offset >= 0, "special static field not supported"
return self.parent.static_field_data + self.offset
else:
if instance is None:
raise ValueError('instance required')
return instance.ptr + self.offset
class MonoProperty(_MonoObj):
@functools.cached_property
def name(self) -> str:
return MonoApi.get_instance().mono_property_get_name(self.ptr).decode('utf-8')
@functools.cached_property
def flags(self):
return MonoApi.get_instance().mono_property_get_flags(self.ptr)
@functools.cached_property
def get_method(self):
return MonoMethod(MonoApi.get_instance().mono_property_get_get_method(self.ptr))
@functools.cached_property
def set_method(self):
return MonoMethod(MonoApi.get_instance().mono_property_get_set_method(self.ptr))
@functools.cached_property
def parent(self):
return MonoClass_(MonoApi.get_instance().mono_property_get_parent(self.ptr))
class MonoReflectionMethod(_MonoObj):
pass
class MonoMethod(_MonoObj):
class ParamType(typing.NamedTuple):
name: str
type: MonoType
def __repr__(self):
return f"{self.type.name} {self.name}"
@functools.cached_property
def name(self) -> str:
return MonoApi.get_instance().mono_method_get_name(self.ptr).decode('utf-8')
@functools.cached_property
def full_name(self) -> str:
return MonoApi.get_instance().mono_method_get_full_name(self.ptr).decode('utf-8')
@functools.cached_property
def flags(self):
return MonoApi.get_instance().mono_method_get_flags(self.ptr, None)
@functools.cached_property
def cls(self):
return MonoClass_(MonoApi.get_instance().mono_method_get_class(self.ptr))
@functools.cached_property
def header(self):
return MonoMethodHeader(MonoApi.get_instance().mono_method_get_header(self.ptr))
@functools.cached_property
def param_count(self):
api = MonoApi.get_instance()
if api.is_il2cpp:
return api.il2cpp_method_get_param_count(self.ptr)
sign = api.mono_method_signature(self.ptr)
return api.mono_signature_get_param_count(sign)
@functools.cached_property
def params(self) -> tuple[ParamType, ...]:
api = MonoApi.get_instance()
res = []
if api.is_il2cpp:
for i in range(api.il2cpp_method_get_param_count(self.ptr)):
res.append(self.ParamType(
name=api.il2cpp_method_get_param_name(self.ptr, i).decode('utf-8'),
type=MonoType(api.il2cpp_method_get_param(self.ptr, i))
))
else:
sign = api.mono_method_signature(self.ptr)
param_count = api.mono_signature_get_param_count(sign)
names = (ctypes.c_char_p * param_count)()
api.mono_method_get_param_names(self.ptr, names)
it = ctypes.c_void_p(0)
for i in range(param_count):
res.append(self.ParamType(
name=names[i].decode('utf-8'),
type=MonoType(api.mono_signature_get_params(sign, ctypes.byref(it)))
))
return tuple(res)
@functools.cached_property
def return_type(self):
api = MonoApi.get_instance()
if api.is_il2cpp:
return MonoType(api.il2cpp_method_get_return_type(self.ptr))
return MonoType(api.mono_signature_get_return_type(api.mono_method_signature(self.ptr)))
@functools.cached_property
def signature(self):
s_ret = self.return_type.name
s_params = ', '.join(param.type.name for param in self.params)
return f"{s_ret} {self.name}({s_params})"
def get_reflection_method(self, cls: 'MonoClass_' = None):
api = MonoApi.get_instance()
if api.is_il2cpp:
return api.il2cpp_method_get_object(self.ptr, cls.ptr if cls else None)
return MonoReflectionMethod(api.mono_method_get_object(api.mono_get_root_domain(), self.ptr, cls.ptr if cls else None))
def compile(self):
api = MonoApi.get_instance()
if api.is_il2cpp:
return self.ptr
cls = self.cls
if api.mono_class_is_generic(cls.ptr):
return api.mono_compile_method(self.ptr)
return None
def free(self):
MonoApi.get_instance().mono_free_method(self.ptr)
def disasm(self):
api = MonoApi.get_instance()
if api.is_il2cpp:
raise NotImplementedError('il2cpp disasm not implemented')
il_code, code = self.header.il_code
disassembly = api.mono_disasm_code(None, self.ptr, il_code, il_code + code)
return disassembly.decode('utf-8')
def invoke(self, this: int | MonoObject_ = None, *args):
if this is None:
this = 0
elif isinstance(this, MonoObject_):
this = this.ptr
param_count = self.param_count
if len(args) != param_count:
            raise ValueError(f'argument count mismatch: expected {param_count}, got {len(args)}')
p_params = None
if args:
keeper = []
params = (ctypes.c_size_t * (param_count + 1))()
for i, (param, arg) in enumerate(zip(self.params, args)):
params[i] = py2mono(param.type.type, arg, keeper)
p_params = ctypes.cast(params, ctypes.c_void_p)
c_exception = ctypes.c_void_p(0)
api = MonoApi.get_instance()
raw_res = api.mono_runtime_invoke(self.ptr, this, p_params, c_exception)
if c_exception.value:
if exc := api.mono_object_to_string(c_exception, ctypes.byref(c_exception)):
exc = api.mono_string_to_utf8(exc)
raise RuntimeError(exc.decode('utf-8'))
raise RuntimeError('unknown exception')
return mono2py(self.return_type.type, raw_res)
class MonoVtable(_MonoObj):
@functools.cached_property
def static_field_data(self):
return MonoApi.get_instance().mono_vtable_get_static_field_data(self.ptr)
class MonoClass_(_MonoObj):
def new_object(self):
api = MonoApi.get_instance()
if api.is_il2cpp:
return MonoObject_(api.mono_object_new(self.ptr, self.ptr))
else:
domain = (api.mono_get_root_domain or api.mono_domain_get)()
return MonoObject_(api.mono_object_new(domain, self.ptr))
@functools.cached_property
def namespace(self) -> str:
return MonoApi.get_instance().mono_class_get_namespace(self.ptr).decode('utf-8')
@functools.cached_property
def name(self) -> str:
return MonoApi.get_instance().mono_class_get_name(self.ptr).decode('utf-8')
@functools.cached_property
def image(self):
return MonoImage(MonoApi.get_instance().mono_class_get_image(self.ptr))
@functools.cached_property
def type(self):
return MonoType(MonoApi.get_instance().mono_class_get_type(self.ptr))
@functools.cached_property
def parent(self):
return MonoClass_(MonoApi.get_instance().mono_class_get_parent(self.ptr))
@functools.cached_property
def vtable(self):
return self.get_vtable()
def get_vtable(self, domain: "MonoDomain" = None):
api = MonoApi.get_instance()
return MonoVtable(api.mono_class_vtable(domain.ptr if domain else api.mono_get_root_domain(), self.ptr))
@functools.cached_property
def static_field_data(self):
return self.get_static_field_data()
def get_static_field_data(self, domain: "MonoDomain" = None):
return self.get_vtable(domain).static_field_data
@functools.cached_property
def nesting_type(self):
return MonoClass_(MonoApi.get_instance().mono_class_get_nesting_type(self.ptr))
@functools.cached_property
def rank(self):
return MonoApi.get_instance().mono_class_get_rank(self.ptr)
@functools.cached_property
def element_class(self):
if self.rank:
return MonoClass_(MonoApi.get_instance().mono_class_get_element_class(self.ptr))
@functools.cached_property
def nested_types(self):
api = MonoApi.get_instance()
it = ctypes.c_size_t(0)
res = []
while nested := api.mono_class_get_nested_types(self.ptr, ctypes.byref(it)):
res.append(nested)
return tuple(res)
@functools.cached_property
def fields(self) -> tuple[MonoField, ...]:
api = MonoApi.get_instance()
it = ctypes.c_size_t(0)
res = []
while field := api.mono_class_get_fields(self.ptr, ctypes.byref(it)):
res.append(MonoField(field))
return tuple(res)
@functools.cached_property
def implemented_interfaces(self) -> tuple:
api = MonoApi.get_instance()
it = ctypes.c_size_t(0)
res = []
while interface := api.mono_class_get_interfaces(self.ptr, ctypes.byref(it)):
res.append(interface)
return tuple(res)
@functools.cached_property
def methods(self) -> tuple[MonoMethod, ...]:
api = MonoApi.get_instance()
it = ctypes.c_size_t(0)
res = []
while method := api.mono_class_get_methods(self.ptr, ctypes.byref(it)):
res.append(MonoMethod(method))
return tuple(res)
def find_method(self, methodname: str, param_count: int = -1) -> MonoMethod | None:
return MonoMethod(MonoApi.get_instance().mono_class_get_method_from_name(
self.ptr, methodname.encode('utf-8'), param_count
))
class MonoJitInfo(_MonoObj):
@functools.cached_property
def method(self):
return MonoMethod(MonoApi.get_instance().mono_jit_info_get_method(self.ptr))
@functools.cached_property
def code_start(self):
return MonoApi.get_instance().mono_jit_info_get_code_start(self.ptr)
@functools.cached_property
def code_size(self):
return MonoApi.get_instance().mono_jit_info_get_code_size(self.ptr)
class MonoDomain(_MonoObj):
def set(self):
api = MonoApi.get_instance()
        return api.mono_domain_set(self.ptr, False) if api.mono_domain_set else 0
def get_jit_info(self, address):
return MonoJitInfo(MonoApi.get_instance().mono_jit_info_table_find(self.ptr, address))
class MonoAssembly(_MonoObj):
@functools.cached_property
def image(self):
return MonoImage(MonoApi.get_instance().mono_assembly_get_image(self.ptr))
class MonoImage(_MonoObj):
@functools.cached_property
def name(self) -> str:
return MonoApi.get_instance().mono_image_get_name(self.ptr).decode('utf-8')
@functools.cached_property
def filename(self) -> str:
return MonoApi.get_instance().mono_image_get_filename(self.ptr).decode('utf-8')
def get_rva_map(self, i):
return MonoApi.get_instance().mono_image_rva_map(self.ptr, i)
@functools.cached_property
def clss(self) -> tuple[MonoClass_, ...]:
api = MonoApi.get_instance()
res = []
if api.is_il2cpp:
for i in range(api.il2cpp_image_get_class_count(self.ptr)):
res.append(MonoClass_(api.il2cpp_image_get_class(self.ptr, i)))
else:
tdef = api.mono_image_get_table_info(self.ptr, MONO_TABLE_TYPEDEF)
for i in range(api.mono_table_info_get_rows(tdef)):
                res.append(MonoClass_(api.mono_class_get(self.ptr, MONO_TOKEN_TYPE_DEF | (i + 1))))  # tokens pack the table id in the high byte, 1-based row in the low 24 bits
return tuple(res)
def find_class(self, classname: str, namespace: str = '') -> MonoClass_ | None:
api = MonoApi.get_instance()
return MonoClass_((api.mono_class_from_name_case or api.mono_class_from_name)(
self.ptr, namespace.encode('utf-8'), classname.encode('utf-8')
))
def find_method_by_desc(self, fqMethodName: str):
api = MonoApi.get_instance()
mmd = api.mono_method_desc_new(fqMethodName.encode('utf-8'), 1)
return MonoMethod(api.mono_method_desc_search_in_image(mmd, self.ptr))
class Mono:
instance: 'Mono'
@classmethod
def get_instance(cls):
if not hasattr(cls, 'instance'):
cls.instance = cls()
return cls.instance
def __new__(cls, *args, **kwargs):
if not hasattr(cls, 'instance'):
cls.instance = super().__new__(cls)
return cls.instance
def __init__(self, mono_handle=None, is_il2cpp=False):
if hasattr(self, 'api'): return
self.api = MonoApi(mono_handle, is_il2cpp)
self.is_il2cpp = self.api.is_il2cpp
self.domain = self.api.mono_get_root_domain()
# self.mono_selfthread = None
# self.is_attached = False
# self.uwp_mode = False
def connect_thread_to_mono_runtime(self):
return self.api.mono_thread_attach(self.api.mono_get_root_domain())
@functools.cached_property
def domains(self) -> tuple[MonoDomain, ...]:
if self.is_il2cpp:
return MonoDomain(self.api.mono_domain_get()),
domains = []
        c_iterator = ctypes.CFUNCTYPE(None, ctypes.c_void_p, ctypes.c_void_p)(lambda domain, _user_data: domains.append(MonoDomain(domain)))
        self.api.mono_domain_foreach(c_iterator, None)  # MonoDomainFunc takes (domain, user_data); user_data is unused here
return tuple(domains)
@functools.cached_property
def assemblies(self) -> tuple[MonoAssembly, ...]:
res = []
if self.is_il2cpp:
ptr = self.api.il2cpp_domain_get_assemblies(self.api.mono_domain_get(), ctypes.byref(cnt := ctypes.c_size_t()))
for i in range(cnt.value):
res.append(MonoAssembly(ptr[i]))
else:
            c_iterator = ctypes.CFUNCTYPE(None, ctypes.c_void_p, ctypes.c_void_p)(lambda assembly, _user_data: res.append(MonoAssembly(assembly)))
self.api.mono_assembly_foreach(c_iterator, None)
return tuple(res)
def find_class(self, classname: str, namespace: str = '') -> MonoClass_ | None:
for assembly in self.assemblies:
if cls := assembly.image.find_class(classname, namespace):
return cls
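# --- hedged usage sketch (not part of the original module) ---
# Typical flow: attach the calling thread, resolve a class, then invoke a static
# method. 'UnityEngine.Application.get_version' is a placeholder target; any
# loaded class/method pair works the same way through find_class/find_method.
def _example_invoke():
    mono = Mono.get_instance()
    mono.connect_thread_to_mono_runtime()  # required before touching the runtime from a foreign thread
    cls = mono.find_class('Application', 'UnityEngine')
    method = cls.find_method('get_version', 0)
    return method.invoke(None)  # static call: no instance, no args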
| 16,720 | Python | .py | 373 | 36.495979 | 128 | 0.650317 | nyaoouo/NyLib2 | 8 | 0 | 0 | GPL-3.0 | 9/5/2024, 10:48:51 PM (Europe/Amsterdam) |
2,290,076 | enums.py | nyaoouo_NyLib2/nylib/mono/defines/enums.py | import enum
class MonoTypeEnum(enum.IntEnum):
END = 0x00 # End of List
VOID = 0x01
BOOLEAN = 0x02
CHAR = 0x03
I1 = 0x04
U1 = 0x05
I2 = 0x06
U2 = 0x07
I4 = 0x08
U4 = 0x09
I8 = 0x0a
U8 = 0x0b
R4 = 0x0c
R8 = 0x0d
STRING = 0x0e
PTR = 0x0f # arg: <type> token
BYREF = 0x10 # arg: <type> token
VALUETYPE = 0x11 # arg: <type> token
CLASS = 0x12 # arg: <type> token
VAR = 0x13 # number
ARRAY = 0x14 # type, rank, boundsCount, bound1, loCount, lo1
    GENERICINST = 0x15 # <type> <type-arg-count> <type-1> ... <type-n>
TYPEDBYREF = 0x16
I = 0x18
U = 0x19
FNPTR = 0x1b # arg: full method signature
OBJECT = 0x1c
SZARRAY = 0x1d # 0-based one-dim-array
MVAR = 0x1e # number
CMOD_REQD = 0x1f # arg: typedef or typeref token
CMOD_OPT = 0x20 # optional arg: typedef or typref token
INTERNAL = 0x21 # CLR internal type
MODIFIER = 0x40 # Or with the following types
SENTINEL = 0x41 # Sentinel for varargs method signature
PINNED = 0x45 # Local var that points to pinned object
ENUM = 0x55 # an enumeration
MONO_TYPE_END = MonoTypeEnum.END
MONO_TYPE_VOID = MonoTypeEnum.VOID
MONO_TYPE_BOOLEAN = MonoTypeEnum.BOOLEAN
MONO_TYPE_CHAR = MonoTypeEnum.CHAR
MONO_TYPE_I1 = MonoTypeEnum.I1
MONO_TYPE_U1 = MonoTypeEnum.U1
MONO_TYPE_I2 = MonoTypeEnum.I2
MONO_TYPE_U2 = MonoTypeEnum.U2
MONO_TYPE_I4 = MonoTypeEnum.I4
MONO_TYPE_U4 = MonoTypeEnum.U4
MONO_TYPE_I8 = MonoTypeEnum.I8
MONO_TYPE_U8 = MonoTypeEnum.U8
MONO_TYPE_R4 = MonoTypeEnum.R4
MONO_TYPE_R8 = MonoTypeEnum.R8
MONO_TYPE_STRING = MonoTypeEnum.STRING
MONO_TYPE_PTR = MonoTypeEnum.PTR
MONO_TYPE_BYREF = MonoTypeEnum.BYREF
MONO_TYPE_VALUETYPE = MonoTypeEnum.VALUETYPE
MONO_TYPE_CLASS = MonoTypeEnum.CLASS
MONO_TYPE_VAR = MonoTypeEnum.VAR
MONO_TYPE_ARRAY = MonoTypeEnum.ARRAY
MONO_TYPE_GENERICINST = MonoTypeEnum.GENERICINST
MONO_TYPE_TYPEDBYREF = MonoTypeEnum.TYPEDBYREF
MONO_TYPE_I = MonoTypeEnum.I
MONO_TYPE_U = MonoTypeEnum.U
MONO_TYPE_FNPTR = MonoTypeEnum.FNPTR
MONO_TYPE_OBJECT = MonoTypeEnum.OBJECT
MONO_TYPE_SZARRAY = MonoTypeEnum.SZARRAY
MONO_TYPE_MVAR = MonoTypeEnum.MVAR
MONO_TYPE_CMOD_REQD = MonoTypeEnum.CMOD_REQD
MONO_TYPE_CMOD_OPT = MonoTypeEnum.CMOD_OPT
MONO_TYPE_INTERNAL = MonoTypeEnum.INTERNAL
MONO_TYPE_MODIFIER = MonoTypeEnum.MODIFIER
MONO_TYPE_SENTINEL = MonoTypeEnum.SENTINEL
MONO_TYPE_PINNED = MonoTypeEnum.PINNED
MONO_TYPE_ENUM = MonoTypeEnum.ENUM
class MonoMetaTableEnum(enum.IntEnum):
MODULE = 0
TYPEREF = enum.auto()
TYPEDEF = enum.auto()
FIELD_POINTER = enum.auto()
FIELD = enum.auto()
METHOD_POINTER = enum.auto()
METHOD = enum.auto()
PARAM_POINTER = enum.auto()
PARAM = enum.auto()
INTERFACEIMPL = enum.auto()
MEMBERREF = enum.auto() # 0xa
CONSTANT = enum.auto()
CUSTOMATTRIBUTE = enum.auto()
FIELDMARSHAL = enum.auto()
DECLSECURITY = enum.auto()
CLASSLAYOUT = enum.auto()
FIELDLAYOUT = enum.auto() # 0x10
STANDALONESIG = enum.auto()
EVENTMAP = enum.auto()
EVENT_POINTER = enum.auto()
EVENT = enum.auto()
PROPERTYMAP = enum.auto()
PROPERTY_POINTER = enum.auto()
PROPERTY = enum.auto()
METHODSEMANTICS = enum.auto()
METHODIMPL = enum.auto()
MODULEREF = enum.auto() # 0x1a
TYPESPEC = enum.auto()
IMPLMAP = enum.auto()
FIELDRVA = enum.auto()
ENCLOG = enum.auto()
ENCMAP = enum.auto()
ASSEMBLY = enum.auto() # 0x20
ASSEMBLYPROCESSOR = enum.auto()
ASSEMBLYOS = enum.auto()
ASSEMBLYREF = enum.auto()
ASSEMBLYREFPROCESSOR = enum.auto()
ASSEMBLYREFOS = enum.auto()
FILE = enum.auto()
EXPORTEDTYPE = enum.auto()
MANIFESTRESOURCE = enum.auto()
NESTEDCLASS = enum.auto()
GENERICPARAM = enum.auto() # 0x2a
METHODSPEC = enum.auto()
GENERICPARAMCONSTRAINT = enum.auto()
UNUSED8 = enum.auto()
UNUSED9 = enum.auto()
UNUSED10 = enum.auto()
# Portable PDB tables
DOCUMENT = enum.auto() # 0x30
METHODBODY = enum.auto()
LOCALSCOPE = enum.auto()
LOCALVARIABLE = enum.auto()
LOCALCONSTANT = enum.auto()
IMPORTSCOPE = enum.auto()
STATEMACHINEMETHOD = enum.auto()
CUSTOMDEBUGINFORMATION = enum.auto()
MONO_TABLE_MODULE = MonoMetaTableEnum.MODULE
MONO_TABLE_TYPEREF = MonoMetaTableEnum.TYPEREF
MONO_TABLE_TYPEDEF = MonoMetaTableEnum.TYPEDEF
MONO_TABLE_FIELD_POINTER = MonoMetaTableEnum.FIELD_POINTER
MONO_TABLE_FIELD = MonoMetaTableEnum.FIELD
MONO_TABLE_METHOD_POINTER = MonoMetaTableEnum.METHOD_POINTER
MONO_TABLE_METHOD = MonoMetaTableEnum.METHOD
MONO_TABLE_PARAM_POINTER = MonoMetaTableEnum.PARAM_POINTER
MONO_TABLE_PARAM = MonoMetaTableEnum.PARAM
MONO_TABLE_INTERFACEIMPL = MonoMetaTableEnum.INTERFACEIMPL
MONO_TABLE_MEMBERREF = MonoMetaTableEnum.MEMBERREF
MONO_TABLE_CONSTANT = MonoMetaTableEnum.CONSTANT
MONO_TABLE_CUSTOMATTRIBUTE = MonoMetaTableEnum.CUSTOMATTRIBUTE
MONO_TABLE_FIELDMARSHAL = MonoMetaTableEnum.FIELDMARSHAL
MONO_TABLE_DECLSECURITY = MonoMetaTableEnum.DECLSECURITY
MONO_TABLE_CLASSLAYOUT = MonoMetaTableEnum.CLASSLAYOUT
MONO_TABLE_FIELDLAYOUT = MonoMetaTableEnum.FIELDLAYOUT
MONO_TABLE_STANDALONESIG = MonoMetaTableEnum.STANDALONESIG
MONO_TABLE_EVENTMAP = MonoMetaTableEnum.EVENTMAP
MONO_TABLE_EVENT_POINTER = MonoMetaTableEnum.EVENT_POINTER
MONO_TABLE_EVENT = MonoMetaTableEnum.EVENT
MONO_TABLE_PROPERTYMAP = MonoMetaTableEnum.PROPERTYMAP
MONO_TABLE_PROPERTY_POINTER = MonoMetaTableEnum.PROPERTY_POINTER
MONO_TABLE_PROPERTY = MonoMetaTableEnum.PROPERTY
MONO_TABLE_METHODSEMANTICS = MonoMetaTableEnum.METHODSEMANTICS
MONO_TABLE_METHODIMPL = MonoMetaTableEnum.METHODIMPL
MONO_TABLE_MODULEREF = MonoMetaTableEnum.MODULEREF
MONO_TABLE_TYPESPEC = MonoMetaTableEnum.TYPESPEC
MONO_TABLE_IMPLMAP = MonoMetaTableEnum.IMPLMAP
MONO_TABLE_FIELDRVA = MonoMetaTableEnum.FIELDRVA
MONO_TABLE_ENCLOG = MonoMetaTableEnum.ENCLOG
MONO_TABLE_ENCMAP = MonoMetaTableEnum.ENCMAP
MONO_TABLE_ASSEMBLY = MonoMetaTableEnum.ASSEMBLY
MONO_TABLE_ASSEMBLYPROCESSOR = MonoMetaTableEnum.ASSEMBLYPROCESSOR
MONO_TABLE_ASSEMBLYOS = MonoMetaTableEnum.ASSEMBLYOS
MONO_TABLE_ASSEMBLYREF = MonoMetaTableEnum.ASSEMBLYREF
MONO_TABLE_ASSEMBLYREFPROCESSOR = MonoMetaTableEnum.ASSEMBLYREFPROCESSOR
MONO_TABLE_ASSEMBLYREFOS = MonoMetaTableEnum.ASSEMBLYREFOS
MONO_TABLE_FILE = MonoMetaTableEnum.FILE
MONO_TABLE_EXPORTEDTYPE = MonoMetaTableEnum.EXPORTEDTYPE
MONO_TABLE_MANIFESTRESOURCE = MonoMetaTableEnum.MANIFESTRESOURCE
MONO_TABLE_NESTEDCLASS = MonoMetaTableEnum.NESTEDCLASS
MONO_TABLE_GENERICPARAM = MonoMetaTableEnum.GENERICPARAM
MONO_TABLE_METHODSPEC = MonoMetaTableEnum.METHODSPEC
MONO_TABLE_GENERICPARAMCONSTRAINT = MonoMetaTableEnum.GENERICPARAMCONSTRAINT
MONO_TABLE_UNUSED8 = MonoMetaTableEnum.UNUSED8
MONO_TABLE_UNUSED9 = MonoMetaTableEnum.UNUSED9
MONO_TABLE_UNUSED10 = MonoMetaTableEnum.UNUSED10
MONO_TABLE_DOCUMENT = MonoMetaTableEnum.DOCUMENT
MONO_TABLE_METHODBODY = MonoMetaTableEnum.METHODBODY
MONO_TABLE_LOCALSCOPE = MonoMetaTableEnum.LOCALSCOPE
MONO_TABLE_LOCALVARIABLE = MonoMetaTableEnum.LOCALVARIABLE
MONO_TABLE_LOCALCONSTANT = MonoMetaTableEnum.LOCALCONSTANT
MONO_TABLE_IMPORTSCOPE = MonoMetaTableEnum.IMPORTSCOPE
MONO_TABLE_STATEMACHINEMETHOD = MonoMetaTableEnum.STATEMACHINEMETHOD
MONO_TABLE_CUSTOMDEBUGINFORMATION = MonoMetaTableEnum.CUSTOMDEBUGINFORMATION
class MONO_TYPEDEF_(enum.IntEnum):
FLAGS = 0
NAME = enum.auto()
NAMESPACE = enum.auto()
EXTENDS = enum.auto()
FIELD_LIST = enum.auto()
METHOD_LIST = enum.auto()
SIZE = enum.auto()
MONO_TYPEDEF_FLAGS = MONO_TYPEDEF_.FLAGS
MONO_TYPEDEF_NAME = MONO_TYPEDEF_.NAME
MONO_TYPEDEF_NAMESPACE = MONO_TYPEDEF_.NAMESPACE
MONO_TYPEDEF_EXTENDS = MONO_TYPEDEF_.EXTENDS
MONO_TYPEDEF_FIELD_LIST = MONO_TYPEDEF_.FIELD_LIST
MONO_TYPEDEF_METHOD_LIST = MONO_TYPEDEF_.METHOD_LIST
MONO_TYPEDEF_SIZE = MONO_TYPEDEF_.SIZE
class MONO_METHOD_(enum.IntEnum):
RVA = 0
IMPLFLAGS = enum.auto()
FLAGS = enum.auto()
NAME = enum.auto()
SIGNATURE = enum.auto()
PARAMLIST = enum.auto()
SIZE = enum.auto()
MONO_METHOD_RVA = MONO_METHOD_.RVA
MONO_METHOD_IMPLFLAGS = MONO_METHOD_.IMPLFLAGS
MONO_METHOD_FLAGS = MONO_METHOD_.FLAGS
MONO_METHOD_NAME = MONO_METHOD_.NAME
MONO_METHOD_SIGNATURE = MONO_METHOD_.SIGNATURE
MONO_METHOD_PARAMLIST = MONO_METHOD_.PARAMLIST
MONO_METHOD_SIZE = MONO_METHOD_.SIZE
class MonoTokenType(enum.IntEnum):
MODULE = 0x00000000
TYPE_REF = 0x01000000
TYPE_DEF = 0x02000000
FIELD_DEF = 0x04000000
METHOD_DEF = 0x06000000
PARAM_DEF = 0x08000000
INTERFACE_IMPL = 0x09000000
MEMBER_REF = 0x0a000000
CUSTOM_ATTRIBUTE = 0x0c000000
PERMISSION = 0x0e000000
SIGNATURE = 0x11000000
EVENT = 0x14000000
PROPERTY = 0x17000000
MODULE_REF = 0x1a000000
TYPE_SPEC = 0x1b000000
ASSEMBLY = 0x20000000
ASSEMBLY_REF = 0x23000000
FILE = 0x26000000
EXPORTED_TYPE = 0x27000000
MANIFEST_RESOURCE = 0x28000000
GENERIC_PARAM = 0x2a000000
METHOD_SPEC = 0x2b000000
# These do not match metadata tables directly
STRING = 0x70000000
NAME = 0x71000000
BASE_TYPE = 0x72000000
MONO_TOKEN_MODULE = MonoTokenType.MODULE
MONO_TOKEN_TYPE_REF = MonoTokenType.TYPE_REF
MONO_TOKEN_TYPE_DEF = MonoTokenType.TYPE_DEF
MONO_TOKEN_FIELD_DEF = MonoTokenType.FIELD_DEF
MONO_TOKEN_METHOD_DEF = MonoTokenType.METHOD_DEF
MONO_TOKEN_PARAM_DEF = MonoTokenType.PARAM_DEF
MONO_TOKEN_INTERFACE_IMPL = MonoTokenType.INTERFACE_IMPL
MONO_TOKEN_MEMBER_REF = MonoTokenType.MEMBER_REF
MONO_TOKEN_CUSTOM_ATTRIBUTE = MonoTokenType.CUSTOM_ATTRIBUTE
MONO_TOKEN_PERMISSION = MonoTokenType.PERMISSION
MONO_TOKEN_SIGNATURE = MonoTokenType.SIGNATURE
MONO_TOKEN_EVENT = MonoTokenType.EVENT
MONO_TOKEN_PROPERTY = MonoTokenType.PROPERTY
MONO_TOKEN_MODULE_REF = MonoTokenType.MODULE_REF
MONO_TOKEN_TYPE_SPEC = MonoTokenType.TYPE_SPEC
MONO_TOKEN_ASSEMBLY = MonoTokenType.ASSEMBLY
MONO_TOKEN_ASSEMBLY_REF = MonoTokenType.ASSEMBLY_REF
MONO_TOKEN_FILE = MonoTokenType.FILE
MONO_TOKEN_EXPORTED_TYPE = MonoTokenType.EXPORTED_TYPE
MONO_TOKEN_MANIFEST_RESOURCE = MonoTokenType.MANIFEST_RESOURCE
MONO_TOKEN_GENERIC_PARAM = MonoTokenType.GENERIC_PARAM
MONO_TOKEN_METHOD_SPEC = MonoTokenType.METHOD_SPEC
MONO_TOKEN_STRING = MonoTokenType.STRING
MONO_TOKEN_NAME = MonoTokenType.NAME
MONO_TOKEN_BASE_TYPE = MonoTokenType.BASE_TYPE
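# --- illustrative helper (not part of the original module) ---
# Metadata tokens pack the table id in the top byte and a 1-based row index in
# the low 24 bits, which is why MONO_TOKEN_TYPE_DEF == MONO_TABLE_TYPEDEF << 24.
def _make_typedef_token(row: int) -> int:
    assert 1 <= row <= 0xFFFFFF, 'row indices are 1-based and at most 24 bits'
    return MONO_TOKEN_TYPE_DEF | row
assert _make_typedef_token(1) == 0x02000001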
class MONO_FIELD_ATTR_(enum.IntEnum):
FIELD_ACCESS_MASK = 0x0007
COMPILER_CONTROLLED = 0x0000
PRIVATE = 0x0001
FAM_AND_ASSEM = 0x0002
ASSEMBLY = 0x0003
FAMILY = 0x0004
FAM_OR_ASSEM = 0x0005
PUBLIC = 0x0006
STATIC = 0x0010
INIT_ONLY = 0x0020
LITERAL = 0x0040
NOT_SERIALIZED = 0x0080
SPECIAL_NAME = 0x0200
PINVOKE_IMPL = 0x2000
# For runtime use only
RESERVED_MASK = 0x9500
RT_SPECIAL_NAME = 0x0400
HAS_MARSHAL = 0x1000
HAS_DEFAULT = 0x8000
HAS_RVA = 0x0100
MONO_FIELD_ATTR_FIELD_ACCESS_MASK = MONO_FIELD_ATTR_.FIELD_ACCESS_MASK
MONO_FIELD_ATTR_COMPILER_CONTROLLED = MONO_FIELD_ATTR_.COMPILER_CONTROLLED
MONO_FIELD_ATTR_PRIVATE = MONO_FIELD_ATTR_.PRIVATE
MONO_FIELD_ATTR_FAM_AND_ASSEM = MONO_FIELD_ATTR_.FAM_AND_ASSEM
MONO_FIELD_ATTR_ASSEMBLY = MONO_FIELD_ATTR_.ASSEMBLY
MONO_FIELD_ATTR_FAMILY = MONO_FIELD_ATTR_.FAMILY
MONO_FIELD_ATTR_FAM_OR_ASSEM = MONO_FIELD_ATTR_.FAM_OR_ASSEM
MONO_FIELD_ATTR_PUBLIC = MONO_FIELD_ATTR_.PUBLIC
MONO_FIELD_ATTR_STATIC = MONO_FIELD_ATTR_.STATIC
MONO_FIELD_ATTR_INIT_ONLY = MONO_FIELD_ATTR_.INIT_ONLY
MONO_FIELD_ATTR_LITERAL = MONO_FIELD_ATTR_.LITERAL
MONO_FIELD_ATTR_NOT_SERIALIZED = MONO_FIELD_ATTR_.NOT_SERIALIZED
MONO_FIELD_ATTR_SPECIAL_NAME = MONO_FIELD_ATTR_.SPECIAL_NAME
MONO_FIELD_ATTR_PINVOKE_IMPL = MONO_FIELD_ATTR_.PINVOKE_IMPL
MONO_FIELD_ATTR_RESERVED_MASK = MONO_FIELD_ATTR_.RESERVED_MASK
MONO_FIELD_ATTR_RT_SPECIAL_NAME = MONO_FIELD_ATTR_.RT_SPECIAL_NAME
MONO_FIELD_ATTR_HAS_MARSHAL = MONO_FIELD_ATTR_.HAS_MARSHAL
MONO_FIELD_ATTR_HAS_DEFAULT = MONO_FIELD_ATTR_.HAS_DEFAULT
MONO_FIELD_ATTR_HAS_RVA = MONO_FIELD_ATTR_.HAS_RVA
| 11,950 | Python | .py | 310 | 35.490323 | 76 | 0.773463 | nyaoouo/NyLib2 | 8 | 0 | 0 | GPL-3.0 | 9/5/2024, 10:48:51 PM (Europe/Amsterdam) |
2,290,077 | __init__.py | nyaoouo_NyLib2/nylib/mono/defines/__init__.py | import ctypes
from nylib import winapi
from .enums import *
class _MonoObj:
def __new__(cls, ptr):
if not ptr:
return None
return super().__new__(cls)
def __init__(self, ptr):
self.ptr = ptr
def __eq__(self, other):
if isinstance(other, _MonoObj):
return self.ptr == other.ptr
return False
def __str__(self):
if hasattr(self, 'name'):
return f"{self.__class__.__name__}({self.name})"
return f"{self.__class__.__name__}({self.ptr:#x})"
class _MonoApi:
_cached_ = {}
def __new__(cls, mono_handle, function, argtypes=None, restype=None):
key = (mono_handle, function)
if key in cls._cached_:
return cls._cached_[key]
else:
obj = super().__new__(cls)
cls._cached_[key] = obj
return obj
def __init__(self, mono_handle, function, argtypes=None, restype=None):
if hasattr(self, "mono_handle"): return
self.mono_handle = mono_handle
self.function = function
if argtypes is None: argtypes = ()
try:
self.func_ptr = winapi.GetProcAddress(mono_handle, function)
except OSError:
self.func_ptr = None
self.c_func = None
else:
if restype is ctypes.c_void_p: restype = ctypes.c_size_t # auto cast to pyint
self.c_func = ctypes.CFUNCTYPE(restype, *argtypes)(self.func_ptr)
def __bool__(self):
return bool(self.func_ptr)
def __call__(self, *args):
if self.c_func is None:
raise OSError(f"Function {self.function} not found in mono")
return self.c_func(*args)
MonoDomainFunc = lambda h, f: _MonoApi(h, f, [ctypes.c_void_p, ctypes.c_void_p], None)
GFunc = lambda h, f: _MonoApi(h, f, [ctypes.c_void_p, ctypes.c_void_p], None)
G_FREE = lambda h, f: _MonoApi(h, f, [ctypes.c_void_p], None)
MONO_GET_ROOT_DOMAIN = lambda h, f: _MonoApi(h, f, [], ctypes.c_void_p)
MONO_THREAD_ATTACH = lambda h, f: _MonoApi(h, f, [ctypes.c_void_p], ctypes.c_void_p)
MONO_THREAD_DETACH = lambda h, f: _MonoApi(h, f, [ctypes.c_void_p], None)
MONO_THREAD_CLEANUP = lambda h, f: _MonoApi(h, f, [], None)
MONO_OBJECT_GET_CLASS = lambda h, f: _MonoApi(h, f, [ctypes.c_void_p], ctypes.c_void_p)
MONO_DOMAIN_FOREACH = lambda h, f: _MonoApi(h, f, [ctypes.c_void_p, ctypes.c_void_p], None)
MONO_DOMAIN_SET = lambda h, f: _MonoApi(h, f, [ctypes.c_void_p, ctypes.c_bool], ctypes.c_int)
MONO_DOMAIN_GET = lambda h, f: _MonoApi(h, f, [], ctypes.c_void_p)
MONO_ASSEMBLY_FOREACH = lambda h, f: _MonoApi(h, f, [ctypes.c_void_p, ctypes.c_void_p], ctypes.c_int)
MONO_ASSEMBLY_GET_IMAGE = lambda h, f: _MonoApi(h, f, [ctypes.c_void_p], ctypes.c_void_p)
MONO_ASSEMBLY_OPEN = lambda h, f: _MonoApi(h, f, [ctypes.c_void_p, ctypes.POINTER(ctypes.c_int)], ctypes.c_void_p)
MONO_IMAGE_GET_ASSEMBLY = lambda h, f: _MonoApi(h, f, [ctypes.c_void_p], ctypes.c_void_p)
MONO_IMAGE_GET_NAME = lambda h, f: _MonoApi(h, f, [ctypes.c_void_p], ctypes.c_char_p)
MONO_IMAGE_OPEN = lambda h, f: _MonoApi(h, f, [ctypes.c_char_p, ctypes.POINTER(ctypes.c_int)], ctypes.c_void_p)
MONO_IMAGE_GET_FILENAME = lambda h, f: _MonoApi(h, f, [ctypes.c_void_p], ctypes.c_char_p)
MONO_IMAGE_GET_TABLE_INFO = lambda h, f: _MonoApi(h, f, [ctypes.c_void_p, ctypes.c_int], ctypes.c_void_p)
MONO_TABLE_INFO_GET_ROWS = lambda h, f: _MonoApi(h, f, [ctypes.c_void_p], ctypes.c_int)
MONO_METADATA_DECODE_ROW_COL = lambda h, f: _MonoApi(h, f, [ctypes.c_void_p, ctypes.c_int, ctypes.c_uint], ctypes.c_int)
MONO_METADATA_STRING_HEAP = lambda h, f: _MonoApi(h, f, [ctypes.c_void_p, ctypes.c_uint], ctypes.c_char_p)
MONO_CLASS_FROM_NAME_CASE = lambda h, f: _MonoApi(h, f, [ctypes.c_void_p, ctypes.c_char_p, ctypes.c_char_p], ctypes.c_void_p)
MONO_CLASS_FROM_NAME = lambda h, f: _MonoApi(h, f, [ctypes.c_void_p, ctypes.c_char_p, ctypes.c_char_p], ctypes.c_void_p)
MONO_CLASS_GET_NAME = lambda h, f: _MonoApi(h, f, [ctypes.c_void_p], ctypes.c_char_p)
MONO_CLASS_GET_NAMESPACE = lambda h, f: _MonoApi(h, f, [ctypes.c_void_p], ctypes.c_char_p)
MONO_CLASS_GET = lambda h, f: _MonoApi(h, f, [ctypes.c_void_p, ctypes.c_uint], ctypes.c_void_p)
MONO_CLASS_FROM_TYPEREF = lambda h, f: _MonoApi(h, f, [ctypes.c_void_p, ctypes.c_uint], ctypes.c_void_p)
MONO_CLASS_NAME_FROM_TOKEN = lambda h, f: _MonoApi(h, f, [ctypes.c_void_p, ctypes.c_uint], ctypes.c_char_p)
MONO_CLASS_GET_METHODS = lambda h, f: _MonoApi(h, f, [ctypes.c_void_p, ctypes.c_void_p], ctypes.c_void_p)
MONO_CLASS_GET_METHOD_FROM_NAME = lambda h, f: _MonoApi(h, f, [ctypes.c_void_p, ctypes.c_char_p, ctypes.c_int], ctypes.c_void_p)
MONO_CLASS_GET_FIELDS = lambda h, f: _MonoApi(h, f, [ctypes.c_void_p, ctypes.c_void_p], ctypes.c_void_p)
MONO_CLASS_GET_FIELD_FROM_NAME = lambda h, f: _MonoApi(h, f, [ctypes.c_void_p, ctypes.c_char_p], ctypes.c_void_p)
MONO_CLASS_GET_INTERFACES = lambda h, f: _MonoApi(h, f, [ctypes.c_void_p, ctypes.c_void_p], ctypes.c_void_p)
MONO_CLASS_GET_PROPERTIES = lambda h, f: _MonoApi(h, f, [ctypes.c_void_p, ctypes.c_void_p], ctypes.c_void_p)
MONO_CLASS_GET_PROPERTY_FROM_NAME = lambda h, f: _MonoApi(h, f, [ctypes.c_void_p, ctypes.c_char_p], ctypes.c_void_p)
MONO_CLASS_GET_EVENTS = lambda h, f: _MonoApi(h, f, [ctypes.c_void_p, ctypes.c_void_p], ctypes.c_void_p)
MONO_CLASS_GET_PARENT = lambda h, f: _MonoApi(h, f, [ctypes.c_void_p], ctypes.c_void_p)
MONO_CLASS_GET_IMAGE = lambda h, f: _MonoApi(h, f, [ctypes.c_void_p], ctypes.c_void_p)
MONO_CLASS_VTABLE = lambda h, f: _MonoApi(h, f, [ctypes.c_void_p, ctypes.c_void_p], ctypes.c_void_p)
MONO_CLASS_INSTANCE_SIZE = lambda h, f: _MonoApi(h, f, [ctypes.c_void_p], ctypes.c_int)
MONO_CLASS_FROM_MONO_TYPE = lambda h, f: _MonoApi(h, f, [ctypes.c_void_p], ctypes.c_void_p)
MONO_CLASS_GET_ELEMENT_CLASS = lambda h, f: _MonoApi(h, f, [ctypes.c_void_p], ctypes.c_void_p)
MONO_CLASS_IS_GENERIC = lambda h, f: _MonoApi(h, f, [ctypes.c_void_p], ctypes.c_int)
MONO_CLASS_IS_ENUM = lambda h, f: _MonoApi(h, f, [ctypes.c_void_p], ctypes.c_bool)
MONO_CLASS_IS_VALUETYPE = lambda h, f: _MonoApi(h, f, [ctypes.c_void_p], ctypes.c_bool)
MONO_CLASS_IS_SUBCLASS_OF = lambda h, f: _MonoApi(h, f, [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_bool], ctypes.c_bool)
MONO_CLASS_NUM_FIELDS = lambda h, f: _MonoApi(h, f, [ctypes.c_void_p], ctypes.c_int)
MONO_CLASS_NUM_METHODS = lambda h, f: _MonoApi(h, f, [ctypes.c_void_p], ctypes.c_int)
MONO_FIELD_GET_NAME = lambda h, f: _MonoApi(h, f, [ctypes.c_void_p], ctypes.c_char_p)
MONO_FIELD_GET_TYPE = lambda h, f: _MonoApi(h, f, [ctypes.c_void_p], ctypes.c_void_p)
MONO_FIELD_GET_PARENT = lambda h, f: _MonoApi(h, f, [ctypes.c_void_p], ctypes.c_void_p)
MONO_FIELD_GET_OFFSET = lambda h, f: _MonoApi(h, f, [ctypes.c_void_p], ctypes.c_int)
MONO_FIELD_GET_FLAGS = lambda h, f: _MonoApi(h, f, [ctypes.c_void_p], ctypes.c_int)
MONO_FIELD_GET_VALUE_OBJECT = lambda h, f: _MonoApi(h, f, [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p], ctypes.c_void_p)
MONO_PROPERTY_GET_NAME = lambda h, f: _MonoApi(h, f, [ctypes.c_void_p], ctypes.c_char_p)
MONO_PROPERTY_GET_GET_METHOD = lambda h, f: _MonoApi(h, f, [ctypes.c_void_p], ctypes.c_void_p)
MONO_PROPERTY_GET_SET_METHOD = lambda h, f: _MonoApi(h, f, [ctypes.c_void_p], ctypes.c_void_p)
MONO_PROPERTY_GET_PARENT = lambda h, f: _MonoApi(h, f, [ctypes.c_void_p], ctypes.c_void_p)
MONO_PROPERTY_GET_FLAGS = lambda h, f: _MonoApi(h, f, [ctypes.c_void_p], ctypes.c_int)
MONO_TYPE_GET_NAME = lambda h, f: _MonoApi(h, f, [ctypes.c_void_p], ctypes.c_char_p)
MONO_TYPE_GET_CLASS = lambda h, f: _MonoApi(h, f, [ctypes.c_void_p], ctypes.c_void_p)
MONO_TYPE_GET_TYPE = lambda h, f: _MonoApi(h, f, [ctypes.c_void_p], ctypes.c_int)
MONO_TYPE_IS_BYREF = lambda h, f: _MonoApi(h, f, [ctypes.c_void_p], ctypes.c_int)
MONO_TYPE_GET_OBJECT = lambda h, f: _MonoApi(h, f, [ctypes.c_void_p, ctypes.c_void_p], ctypes.c_void_p)
IL2CPP_TYPE_GET_OBJECT = lambda h, f: _MonoApi(h, f, [ctypes.c_void_p], ctypes.c_void_p)
MONO_METHOD_GET_OBJECT = lambda h, f: _MonoApi(h, f, [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p], ctypes.c_void_p)
IL2CPP_METHOD_GET_OBJECT = lambda h, f: _MonoApi(h, f, [ctypes.c_void_p, ctypes.c_void_p], ctypes.c_void_p)
MONO_PTR_GET_CLASS = lambda h, f: _MonoApi(h, f, [ctypes.c_void_p], ctypes.c_void_p)
MONO_TYPE_GET_PTR_TYPE = lambda h, f: _MonoApi(h, f, [ctypes.c_void_p], ctypes.c_void_p)
MONO_TYPE_GET_NAME_FULL = lambda h, f: _MonoApi(h, f, [ctypes.c_void_p, ctypes.c_int], ctypes.c_char_p)
MONO_TYPE_IS_STRUCT = lambda h, f: _MonoApi(h, f, [ctypes.c_void_p], ctypes.c_bool)
MONO_METHOD_GET_NAME = lambda h, f: _MonoApi(h, f, [ctypes.c_void_p], ctypes.c_char_p)
MONO_METHOD_GET_FULL_NAME = lambda h, f: _MonoApi(h, f, [ctypes.c_void_p], ctypes.c_char_p)
MONO_COMPILE_METHOD = lambda h, f: _MonoApi(h, f, [ctypes.c_void_p], ctypes.c_void_p)
MONO_FREE_METHOD = lambda h, f: _MonoApi(h, f, [ctypes.c_void_p], None)
MONO_JIT_INFO_TABLE_FIND = lambda h, f: _MonoApi(h, f, [ctypes.c_void_p, ctypes.c_void_p], ctypes.c_void_p)
MONO_JIT_INFO_GET_METHOD = lambda h, f: _MonoApi(h, f, [ctypes.c_void_p], ctypes.c_void_p)
MONO_JIT_INFO_GET_CODE_START = lambda h, f: _MonoApi(h, f, [ctypes.c_void_p], ctypes.c_void_p)
MONO_JIT_INFO_GET_CODE_SIZE = lambda h, f: _MonoApi(h, f, [ctypes.c_void_p], ctypes.c_int)
MONO_JIT_EXEC = lambda h, f: _MonoApi(h, f, [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_int, ctypes.POINTER(ctypes.c_char_p)], ctypes.c_int)
MONO_METHOD_GET_FLAGS = lambda h, f: _MonoApi(h, f, [ctypes.c_void_p, ctypes.POINTER(ctypes.c_uint32)], ctypes.c_uint32)
MONO_METHOD_GET_HEADER = lambda h, f: _MonoApi(h, f, [ctypes.c_void_p], ctypes.c_void_p)
MONO_METHOD_GET_CLASS = lambda h, f: _MonoApi(h, f, [ctypes.c_void_p], ctypes.c_void_p)
MONO_METHOD_SIG = lambda h, f: _MonoApi(h, f, [ctypes.c_void_p], ctypes.c_void_p)
MONO_METHOD_GET_PARAM_NAMES = lambda h, f: _MonoApi(h, f, [ctypes.c_void_p, ctypes.POINTER(ctypes.c_char_p)], ctypes.c_void_p)
MONO_METHOD_HEADER_GET_CODE = lambda h, f: _MonoApi(h, f, [ctypes.c_void_p, ctypes.POINTER(ctypes.c_uint32), ctypes.POINTER(ctypes.c_uint32)], ctypes.c_void_p)
MONO_DISASM_CODE = lambda h, f: _MonoApi(h, f, [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p], ctypes.c_char_p)
MONO_SIGNATURE_GET_DESC = lambda h, f: _MonoApi(h, f, [ctypes.c_void_p, ctypes.c_int], ctypes.c_char_p)
MONO_SIGNATURE_GET_PARAMS = lambda h, f: _MonoApi(h, f, [ctypes.c_void_p, ctypes.POINTER(ctypes.c_void_p)], ctypes.c_void_p)
MONO_SIGNATURE_GET_PARAM_COUNT = lambda h, f: _MonoApi(h, f, [ctypes.c_void_p], ctypes.c_int)
MONO_SIGNATURE_GET_RETURN_TYPE = lambda h, f: _MonoApi(h, f, [ctypes.c_void_p], ctypes.c_void_p)
MONO_IMAGE_RVA_MAP = lambda h, f: _MonoApi(h, f, [ctypes.c_void_p, ctypes.c_uint32], ctypes.c_void_p)
MONO_VTABLE_GET_STATIC_FIELD_DATA = lambda h, f: _MonoApi(h, f, [ctypes.c_void_p], ctypes.c_void_p)
MONO_METHOD_DESC_NEW = lambda h, f: _MonoApi(h, f, [ctypes.c_char_p, ctypes.c_int], ctypes.c_void_p)
MONO_METHOD_DESC_FROM_METHOD = lambda h, f: _MonoApi(h, f, [ctypes.c_void_p], ctypes.c_void_p)
MONO_METHOD_DESC_FREE = lambda h, f: _MonoApi(h, f, [ctypes.c_void_p], None)
MONO_ASSEMBLY_NAME_NEW = lambda h, f: _MonoApi(h, f, [ctypes.c_char_p], ctypes.c_void_p)
MONO_ASSEMBLY_LOADED = lambda h, f: _MonoApi(h, f, [ctypes.c_void_p], ctypes.c_void_p)
MONO_IMAGE_LOADED = lambda h, f: _MonoApi(h, f, [ctypes.c_void_p], ctypes.c_void_p)
MONO_STRING_NEW = lambda h, f: _MonoApi(h, f, [ctypes.c_void_p, ctypes.c_char_p], ctypes.c_void_p)
MONO_STRING_TO_UTF8 = lambda h, f: _MonoApi(h, f, [ctypes.c_void_p], ctypes.c_char_p)
MONO_ARRAY_NEW = lambda h, f: _MonoApi(h, f, [ctypes.c_void_p, ctypes.c_void_p, ctypes.POINTER(ctypes.c_uint)], ctypes.c_void_p)
IL2CPP_ARRAY_NEW = lambda h, f: _MonoApi(h, f, [ctypes.c_void_p, ctypes.POINTER(ctypes.c_uint)], ctypes.c_void_p)
MONO_ARRAY_ELEMENT_SIZE = lambda h, f: _MonoApi(h, f, [ctypes.c_void_p], ctypes.c_int)
MONO_CLASS_GET_RANK = lambda h, f: _MonoApi(h, f, [ctypes.c_void_p], ctypes.c_int)
MONO_OBJECT_TO_STRING = lambda h, f: _MonoApi(h, f, [ctypes.c_void_p, ctypes.POINTER(ctypes.c_void_p)], ctypes.c_void_p)
MONO_OBJECT_NEW = lambda h, f: _MonoApi(h, f, [ctypes.c_void_p, ctypes.c_void_p], ctypes.c_void_p)
MONO_FREE = lambda h, f: _MonoApi(h, f, [ctypes.c_void_p], None)
MONO_METHOD_DESC_SEARCH_IN_IMAGE = lambda h, f: _MonoApi(h, f, [ctypes.c_void_p, ctypes.c_void_p], ctypes.c_void_p)
MONO_RUNTIME_INVOKE = lambda h, f: _MonoApi(h, f, [ctypes.c_void_p, ctypes.c_void_p, ctypes.POINTER(ctypes.c_void_p), ctypes.POINTER(ctypes.c_void_p)], ctypes.c_void_p)
MONO_RUNTIME_INVOKE_ARRAY = lambda h, f: _MonoApi(h, f, [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.POINTER(ctypes.c_void_p)], ctypes.c_void_p)
MONO_RUNTIME_OBJECT_INIT = lambda h, f: _MonoApi(h, f, [ctypes.c_void_p], ctypes.c_void_p)
MONO_FIELD_STATIC_GET_VALUE = lambda h, f: _MonoApi(h, f, [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p], ctypes.c_void_p)
MONO_FIELD_STATIC_SET_VALUE = lambda h, f: _MonoApi(h, f, [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p], ctypes.c_void_p)
IL2CPP_FIELD_STATIC_GET_VALUE = lambda h, f: _MonoApi(h, f, [ctypes.c_void_p, ctypes.c_void_p], ctypes.c_void_p)
IL2CPP_FIELD_STATIC_SET_VALUE = lambda h, f: _MonoApi(h, f, [ctypes.c_void_p, ctypes.c_void_p], ctypes.c_void_p)
MONO_VALUE_BOX = lambda h, f: _MonoApi(h, f, [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p], ctypes.c_void_p)
MONO_OBJECT_UNBOX = lambda h, f: _MonoApi(h, f, [ctypes.c_void_p], ctypes.c_void_p)
MONO_OBJECT_ISINST = lambda h, f: _MonoApi(h, f, [ctypes.c_void_p, ctypes.c_void_p], ctypes.c_void_p)
MONO_GET_ENUM_CLASS = lambda h, f: _MonoApi(h, f, [], ctypes.c_void_p)
MONO_CLASS_GET_TYPE = lambda h, f: _MonoApi(h, f, [ctypes.c_void_p], ctypes.c_void_p)
MONO_CLASS_GET_NESTING_TYPE = lambda h, f: _MonoApi(h, f, [ctypes.c_void_p], ctypes.c_void_p)
MONO_CLASS_GET_NESTED_TYPES = lambda h, f: _MonoApi(h, f, [ctypes.c_void_p, ctypes.c_void_p], ctypes.c_void_p)
MONO_RUNTIME_IS_SHUTTING_DOWN = lambda h, f: _MonoApi(h, f, [], ctypes.c_int)
# il2cpp:
IL2CPP_DOMAIN_GET_ASSEMBLIES = lambda h, f: _MonoApi(h, f, [ctypes.c_void_p, ctypes.POINTER(ctypes.c_size_t)], ctypes.POINTER(ctypes.POINTER(ctypes.c_uint)))
IL2CPP_IMAGE_GET_CLASS_COUNT = lambda h, f: _MonoApi(h, f, [ctypes.c_void_p], ctypes.c_int)
IL2CPP_IMAGE_GET_CLASS = lambda h, f: _MonoApi(h, f, [ctypes.c_void_p, ctypes.c_int], ctypes.c_void_p)
IL2CPP_TYPE_GET_NAME = lambda h, f: _MonoApi(h, f, [ctypes.c_void_p], ctypes.c_char_p)
IL2CPP_TYPE_GET_ASSEMBLY_QUALIFIED_NAME = lambda h, f: _MonoApi(h, f, [ctypes.c_void_p], ctypes.c_char_p)
IL2CPP_METHOD_GET_PARAM_COUNT = lambda h, f: _MonoApi(h, f, [ctypes.c_void_p], ctypes.c_int)
IL2CPP_METHOD_GET_PARAM_NAME = lambda h, f: _MonoApi(h, f, [ctypes.c_void_p, ctypes.c_int], ctypes.c_char_p)
IL2CPP_METHOD_GET_PARAM = lambda h, f: _MonoApi(h, f, [ctypes.c_void_p, ctypes.c_int], ctypes.c_void_p)
IL2CPP_METHOD_GET_RETURN_TYPE = lambda h, f: _MonoApi(h, f, [ctypes.c_void_p], ctypes.c_void_p)
IL2CPP_CLASS_FROM_TYPE = lambda h, f: _MonoApi(h, f, [ctypes.c_void_p], ctypes.c_void_p)
IL2CPP_STRING_CHARS = lambda h, f: _MonoApi(h, f, [ctypes.c_void_p], ctypes.c_wchar_p)
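# --- hedged sketch (not part of the original module) ---
# Every UPPER_CASE name above is a binding factory: call it with a module handle
# and the exported symbol name to get a cached _MonoApi whose truthiness reports
# whether that export exists. A new binding follows the same shape (the export
# name below is made up for illustration):
# MONO_EXAMPLE = lambda h, f: _MonoApi(h, f, [ctypes.c_void_p], ctypes.c_int)
# api = MONO_EXAMPLE(handle, b"mono_example")  # falsy if "mono_example" is absent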
def _find_mono():
from ...process import Process
for ldr in Process.current.enum_ldr_data():
handle = ldr.DllBase
try:
if winapi.GetProcAddress(handle, b"mono_thread_attach"):
return handle, False
except OSError:
pass
try:
if winapi.GetProcAddress(handle, b"il2cpp_thread_attach"):
return handle, True
except OSError:
pass
else:
raise OSError("mono.dll not found in this process")
class MonoApi:
instance: 'MonoApi'
@classmethod
def get_instance(cls):
if not hasattr(cls, 'instance'):
cls.instance = cls()
return cls.instance
def __new__(cls, *args, **kwargs):
if not hasattr(cls, 'instance'):
cls.instance = super().__new__(cls)
return cls.instance
def __init__(self, mono_handle=None, is_il2cpp=False):
if mono_handle is None:
try:
self.mono_handle = winapi.GetModuleHandle("mono.dll")
except OSError:
self.mono_handle, self.is_il2cpp = _find_mono()
else:
self.is_il2cpp = False
else:
self.mono_handle = mono_handle
self.is_il2cpp = is_il2cpp
if self.is_il2cpp:
self._init_il2cpp()
else:
self._init_mono()
def _init_il2cpp(self):
self.g_free = (G_FREE(self.mono_handle, b"g_free") or
G_FREE(self.mono_handle, b"il2cpp_free") or
(lambda ptr: None)) # if all else fails, do nothing
self.mono_free = MONO_FREE(self.mono_handle, b"il2cpp_free")
self.mono_get_root_domain = (MONO_GET_ROOT_DOMAIN(self.mono_handle, b"il2cpp_get_root_domain") or
MONO_GET_ROOT_DOMAIN(self.mono_handle, b"mono_get_root_domain"))
self.mono_thread_attach = MONO_THREAD_ATTACH(self.mono_handle, b"il2cpp_thread_attach")
self.mono_thread_detach = MONO_THREAD_DETACH(self.mono_handle, b"il2cpp_thread_detach")
self.mono_object_get_class = MONO_OBJECT_GET_CLASS(self.mono_handle, b"il2cpp_object_get_class")
self.mono_domain_foreach = MONO_DOMAIN_FOREACH(self.mono_handle, b"il2cpp_domain_foreach")
self.mono_domain_set = MONO_DOMAIN_SET(self.mono_handle, b"il2cpp_domain_set")
self.mono_domain_get = MONO_DOMAIN_GET(self.mono_handle, b"il2cpp_domain_get")
self.mono_assembly_foreach = MONO_ASSEMBLY_FOREACH(self.mono_handle, b"il2cpp_assembly_foreach")
self.mono_assembly_get_image = MONO_ASSEMBLY_GET_IMAGE(self.mono_handle, b"il2cpp_assembly_get_image")
self.mono_image_get_assembly = MONO_IMAGE_GET_ASSEMBLY(self.mono_handle, b"il2cpp_image_get_assembly")
self.mono_image_get_name = MONO_IMAGE_GET_NAME(self.mono_handle, b"il2cpp_image_get_name")
self.mono_image_get_table_info = MONO_IMAGE_GET_TABLE_INFO(self.mono_handle, b"il2cpp_image_get_table_info")
self.mono_image_rva_map = MONO_IMAGE_RVA_MAP(self.mono_handle, b"il2cpp_image_rva_map")
self.mono_table_info_get_rows = MONO_TABLE_INFO_GET_ROWS(self.mono_handle, b"il2cpp_table_info_get_rows")
self.mono_metadata_decode_row_col = MONO_METADATA_DECODE_ROW_COL(self.mono_handle, b"il2cpp_metadata_decode_row_col")
self.mono_metadata_string_heap = MONO_METADATA_STRING_HEAP(self.mono_handle, b"il2cpp_metadata_string_heap")
self.mono_class_get = MONO_CLASS_GET(self.mono_handle, b"il2cpp_class_get")
self.mono_class_from_typeref = MONO_CLASS_FROM_TYPEREF(self.mono_handle, b"il2cpp_class_from_typeref")
self.mono_class_name_from_token = MONO_CLASS_NAME_FROM_TOKEN(self.mono_handle, b"il2cpp_class_name_from_token")
self.mono_class_from_name_case = MONO_CLASS_FROM_NAME_CASE(self.mono_handle, b"il2cpp_class_from_name_case")
self.mono_class_from_name = MONO_CLASS_FROM_NAME_CASE(self.mono_handle, b"il2cpp_class_from_name")
self.mono_class_get_name = MONO_CLASS_GET_NAME(self.mono_handle, b"il2cpp_class_get_name")
self.mono_class_get_namespace = MONO_CLASS_GET_NAMESPACE(self.mono_handle, b"il2cpp_class_get_namespace")
self.mono_class_get_methods = MONO_CLASS_GET_METHODS(self.mono_handle, b"il2cpp_class_get_methods")
self.mono_class_get_method_from_name = MONO_CLASS_GET_METHOD_FROM_NAME(self.mono_handle, b"il2cpp_class_get_method_from_name")
self.mono_class_get_fields = MONO_CLASS_GET_FIELDS(self.mono_handle, b"il2cpp_class_get_fields")
self.mono_class_get_field_from_name = MONO_CLASS_GET_FIELD_FROM_NAME(self.mono_handle, b"il2cpp_class_get_field_from_name")
self.mono_class_get_interfaces = MONO_CLASS_GET_INTERFACES(self.mono_handle, b"il2cpp_class_get_interfaces")
self.mono_class_get_properties = MONO_CLASS_GET_PROPERTIES(self.mono_handle, b"il2cpp_class_get_properties")
self.mono_class_get_property_from_name = MONO_CLASS_GET_PROPERTY_FROM_NAME(self.mono_handle, b"il2cpp_class_get_property_from_name")
self.mono_class_get_parent = MONO_CLASS_GET_PARENT(self.mono_handle, b"il2cpp_class_get_parent")
self.mono_class_get_image = MONO_CLASS_GET_IMAGE(self.mono_handle, b"il2cpp_class_get_image")
self.mono_class_is_generic = MONO_CLASS_IS_GENERIC(self.mono_handle, b"il2cpp_class_is_generic")
self.mono_class_is_enum = MONO_CLASS_IS_ENUM(self.mono_handle, b"il2cpp_class_is_enum")
self.mono_class_is_valuetype = MONO_CLASS_IS_VALUETYPE(self.mono_handle, b"il2cpp_class_is_valuetype")
self.mono_class_is_subclass_of = MONO_CLASS_IS_SUBCLASS_OF(self.mono_handle, b"il2cpp_class_is_subclass_of")
self.mono_class_vtable = MONO_CLASS_VTABLE(self.mono_handle, b"il2cpp_class_vtable")
self.mono_class_from_mono_type = MONO_CLASS_FROM_MONO_TYPE(self.mono_handle, b"il2cpp_class_from_mono_type")
self.mono_class_get_element_class = MONO_CLASS_GET_ELEMENT_CLASS(self.mono_handle, b"il2cpp_class_get_element_class")
self.mono_class_instance_size = MONO_CLASS_INSTANCE_SIZE(self.mono_handle, b"il2cpp_class_instance_size")
self.mono_class_num_fields = MONO_CLASS_NUM_FIELDS(self.mono_handle, b"il2cpp_class_num_fields")
self.mono_class_num_methods = MONO_CLASS_NUM_METHODS(self.mono_handle, b"il2cpp_class_num_methods")
self.mono_field_get_name = MONO_FIELD_GET_NAME(self.mono_handle, b"il2cpp_field_get_name")
self.mono_field_get_type = MONO_FIELD_GET_TYPE(self.mono_handle, b"il2cpp_field_get_type")
self.mono_field_get_parent = MONO_FIELD_GET_PARENT(self.mono_handle, b"il2cpp_field_get_parent")
self.mono_field_get_offset = MONO_FIELD_GET_OFFSET(self.mono_handle, b"il2cpp_field_get_offset")
self.mono_field_get_flags = MONO_FIELD_GET_FLAGS(self.mono_handle, b"il2cpp_field_get_flags")
self.mono_field_get_value_object = MONO_FIELD_GET_VALUE_OBJECT(self.mono_handle, b"il2cpp_field_get_value_object")
self.mono_property_get_name = MONO_PROPERTY_GET_NAME(self.mono_handle, b"il2cpp_property_get_name")
self.mono_property_get_get_method = MONO_PROPERTY_GET_GET_METHOD(self.mono_handle, b"il2cpp_property_get_get_method")
self.mono_property_get_set_method = MONO_PROPERTY_GET_SET_METHOD(self.mono_handle, b"il2cpp_property_get_set_method")
self.mono_property_get_parent = MONO_PROPERTY_GET_PARENT(self.mono_handle, b"il2cpp_property_get_parent")
self.mono_property_get_flags = MONO_PROPERTY_GET_FLAGS(self.mono_handle, b"il2cpp_property_get_flags")
self.mono_type_get_name = MONO_TYPE_GET_NAME(self.mono_handle, b"il2cpp_type_get_name")
self.mono_type_get_type = MONO_TYPE_GET_TYPE(self.mono_handle, b"il2cpp_type_get_type")
self.mono_type_get_name_full = MONO_TYPE_GET_NAME_FULL(self.mono_handle, b"il2cpp_type_get_name_full")
self.mono_type_is_byref = MONO_TYPE_IS_BYREF(self.mono_handle, b"il2cpp_type_is_byref")
self.il2cpp_type_get_object = IL2CPP_TYPE_GET_OBJECT(self.mono_handle, b"il2cpp_type_get_object")
self.il2cpp_method_get_object = IL2CPP_METHOD_GET_OBJECT(self.mono_handle, b"il2cpp_method_get_object")
self.mono_method_get_name = MONO_METHOD_GET_NAME(self.mono_handle, b"il2cpp_method_get_name")
self.mono_method_get_full_name = (MONO_METHOD_GET_FULL_NAME(self.mono_handle, b"il2cpp_method_get_full_name") or
MONO_METHOD_GET_FULL_NAME(self.mono_handle, b"mono_method_full_name") or
MONO_METHOD_GET_FULL_NAME(self.mono_handle, b"mono_method_get_full_name"))
self.mono_method_get_class = MONO_METHOD_GET_CLASS(self.mono_handle, b"il2cpp_method_get_class")
self.mono_method_get_header = MONO_METHOD_GET_HEADER(self.mono_handle, b"il2cpp_method_get_header")
self.mono_method_get_flags = MONO_METHOD_GET_FLAGS(self.mono_handle, b"il2cpp_method_get_flags")
self.mono_method_signature = MONO_METHOD_SIG(self.mono_handle, b"il2cpp_method_signature")
self.mono_method_get_param_names = MONO_METHOD_GET_PARAM_NAMES(self.mono_handle, b"il2cpp_method_get_param_names")
self.mono_signature_get_desc = MONO_SIGNATURE_GET_DESC(self.mono_handle, b"il2cpp_signature_get_desc")
self.mono_signature_get_params = MONO_SIGNATURE_GET_PARAMS(self.mono_handle, b"il2cpp_signature_get_params")
self.mono_signature_get_param_count = MONO_SIGNATURE_GET_PARAM_COUNT(self.mono_handle, b"il2cpp_signature_get_param_count")
self.mono_signature_get_return_type = MONO_SIGNATURE_GET_RETURN_TYPE(self.mono_handle, b"il2cpp_signature_get_return_type")
self.mono_compile_method = MONO_COMPILE_METHOD(self.mono_handle, b"il2cpp_compile_method")
self.mono_free_method = MONO_FREE_METHOD(self.mono_handle, b"il2cpp_free_method")
self.mono_jit_info_table_find = MONO_JIT_INFO_TABLE_FIND(self.mono_handle, b"il2cpp_jit_info_table_find")
self.mono_jit_info_get_method = MONO_JIT_INFO_GET_METHOD(self.mono_handle, b"il2cpp_jit_info_get_method")
self.mono_jit_info_get_code_start = MONO_JIT_INFO_GET_CODE_START(self.mono_handle, b"il2cpp_jit_info_get_code_start")
self.mono_jit_info_get_code_size = MONO_JIT_INFO_GET_CODE_SIZE(self.mono_handle, b"il2cpp_jit_info_get_code_size")
self.mono_jit_exec = MONO_JIT_EXEC(self.mono_handle, b"il2cpp_jit_exec")
self.mono_method_header_get_code = MONO_METHOD_HEADER_GET_CODE(self.mono_handle, b"il2cpp_method_header_get_code")
self.mono_disasm_code = MONO_DISASM_CODE(self.mono_handle, b"il2cpp_disasm_code")
self.mono_vtable_get_static_field_data = MONO_VTABLE_GET_STATIC_FIELD_DATA(self.mono_handle, b"il2cpp_vtable_get_static_field_data")
self.mono_method_desc_new = MONO_METHOD_DESC_NEW(self.mono_handle, b"il2cpp_method_desc_new")
self.mono_method_desc_from_method = MONO_METHOD_DESC_FROM_METHOD(self.mono_handle, b"il2cpp_method_desc_from_method")
self.mono_method_desc_free = MONO_METHOD_DESC_FREE(self.mono_handle, b"il2cpp_method_desc_free")
        self.mono_string_new = MONO_STRING_NEW(self.mono_handle, b"mono_string_new") or MONO_STRING_NEW(self.mono_handle, b"il2cpp_string_new")  # il2cpp also exports "mono_string_new"; "il2cpp_string_new" is a different function, so it is only a fallback
self.mono_string_to_utf8 = MONO_STRING_TO_UTF8(self.mono_handle, b"il2cpp_string_to_utf8")
self.il2cpp_array_new = IL2CPP_ARRAY_NEW(self.mono_handle, b"il2cpp_array_new")
self.mono_array_element_size = MONO_ARRAY_ELEMENT_SIZE(self.mono_handle, b"il2cpp_array_element_size")
self.mono_class_get_rank = MONO_CLASS_GET_RANK(self.mono_handle, b"il2cpp_class_get_rank")
self.mono_value_box = MONO_VALUE_BOX(self.mono_handle, b"il2cpp_value_box")
self.mono_object_unbox = MONO_OBJECT_UNBOX(self.mono_handle, b"il2cpp_object_unbox")
self.mono_object_new = MONO_OBJECT_NEW(self.mono_handle, b"il2cpp_object_new")
self.mono_object_to_string = MONO_OBJECT_TO_STRING(self.mono_handle, b"il2cpp_object_to_string")
self.mono_class_get_type = MONO_CLASS_GET_TYPE(self.mono_handle, b"il2cpp_class_get_type")
self.mono_type_get_class = MONO_TYPE_GET_CLASS(self.mono_handle, b"il2cpp_type_get_class") or MONO_TYPE_GET_CLASS(self.mono_handle, b"il2cpp_type_get_class_or_element_class")
self.mono_method_desc_search_in_image = MONO_METHOD_DESC_SEARCH_IN_IMAGE(self.mono_handle, b"il2cpp_method_desc_search_in_image")
self.mono_runtime_invoke = MONO_RUNTIME_INVOKE(self.mono_handle, b"il2cpp_runtime_invoke")
self.mono_runtime_object_init = MONO_RUNTIME_OBJECT_INIT(self.mono_handle, b"il2cpp_runtime_object_init")
self.mono_ptr_class_get = MONO_PTR_GET_CLASS(self.mono_handle, b"il2cpp_ptr_class_get") or MONO_PTR_GET_CLASS(self.mono_handle, b"mono_ptr_class_get")
self.mono_type_get_ptr_type = MONO_PTR_GET_CLASS(self.mono_handle, b"il2cpp_type_get_ptr_type") or MONO_PTR_GET_CLASS(self.mono_handle, b"mono_type_get_ptr_type")
self.mono_assembly_name_new = MONO_ASSEMBLY_NAME_NEW(self.mono_handle, b"il2cpp_assembly_name_new")
self.mono_assembly_loaded = MONO_ASSEMBLY_LOADED(self.mono_handle, b"il2cpp_assembly_loaded")
self.mono_assembly_open = MONO_ASSEMBLY_OPEN(self.mono_handle, b"il2cpp_assembly_open")
self.mono_image_open = MONO_IMAGE_OPEN(self.mono_handle, b"il2cpp_image_open")
self.mono_image_get_filename = MONO_IMAGE_GET_FILENAME(self.mono_handle, b"il2cpp_image_get_filename")
self.mono_class_get_nesting_type = MONO_CLASS_GET_NESTING_TYPE(self.mono_handle, b"mono_class_get_nesting_type") or MONO_CLASS_GET_NESTING_TYPE(self.mono_handle, b"il2cpp_class_get_nesting_type")
        self.mono_class_get_nested_types = MONO_CLASS_GET_NESTED_TYPES(self.mono_handle, b"mono_class_get_nested_types") or MONO_CLASS_GET_NESTED_TYPES(self.mono_handle, b"il2cpp_class_get_nested_types")
self.il2cpp_field_static_get_value = IL2CPP_FIELD_STATIC_GET_VALUE(self.mono_handle, b"il2cpp_field_static_get_value")
self.il2cpp_field_static_set_value = IL2CPP_FIELD_STATIC_SET_VALUE(self.mono_handle, b"il2cpp_field_static_set_value")
self.il2cpp_domain_get_assemblies = IL2CPP_DOMAIN_GET_ASSEMBLIES(self.mono_handle, b"il2cpp_domain_get_assemblies")
self.il2cpp_image_get_class_count = IL2CPP_IMAGE_GET_CLASS_COUNT(self.mono_handle, b"il2cpp_image_get_class_count")
self.il2cpp_image_get_class = IL2CPP_IMAGE_GET_CLASS(self.mono_handle, b"il2cpp_image_get_class")
self.il2cpp_type_get_name = IL2CPP_TYPE_GET_NAME(self.mono_handle, b"il2cpp_type_get_name")
self.il2cpp_type_get_assembly_qualified_name = IL2CPP_TYPE_GET_ASSEMBLY_QUALIFIED_NAME(self.mono_handle, b"il2cpp_type_get_assembly_qualified_name")
self.il2cpp_method_get_param_count = IL2CPP_METHOD_GET_PARAM_COUNT(self.mono_handle, b"il2cpp_method_get_param_count")
self.il2cpp_method_get_param_name = IL2CPP_METHOD_GET_PARAM_NAME(self.mono_handle, b"il2cpp_method_get_param_name")
self.il2cpp_method_get_param = IL2CPP_METHOD_GET_PARAM(self.mono_handle, b"il2cpp_method_get_param")
self.il2cpp_method_get_return_type = IL2CPP_METHOD_GET_RETURN_TYPE(self.mono_handle, b"il2cpp_method_get_return_type")
self.il2cpp_class_from_type = IL2CPP_CLASS_FROM_TYPE(self.mono_handle, b"il2cpp_class_from_type")
self.il2cpp_string_chars = IL2CPP_STRING_CHARS(self.mono_handle, b"il2cpp_string_chars")
self.mono_runtime_is_shutting_down = MONO_RUNTIME_IS_SHUTTING_DOWN(self.mono_handle, b"il2cpp_runtime_is_shutting_down") or MONO_RUNTIME_IS_SHUTTING_DOWN(self.mono_handle, b"mono_runtime_is_shutting_down")
def _init_mono(self):
self.g_free = (G_FREE(self.mono_handle, b"g_free") or
G_FREE(self.mono_handle, b"mono_unity_g_free") or
(lambda ptr: None)) # if all else fails, do nothing
self.mono_free = MONO_FREE(self.mono_handle, b"mono_free")
self.mono_get_root_domain = MONO_GET_ROOT_DOMAIN(self.mono_handle, b"mono_get_root_domain")
self.mono_thread_attach = MONO_THREAD_ATTACH(self.mono_handle, b"mono_thread_attach")
self.mono_thread_detach = MONO_THREAD_DETACH(self.mono_handle, b"mono_thread_detach")
self.mono_thread_cleanup = MONO_THREAD_CLEANUP(self.mono_handle, b"mono_thread_cleanup")
self.mono_object_get_class = MONO_OBJECT_GET_CLASS(self.mono_handle, b"mono_object_get_class")
self.mono_domain_foreach = MONO_DOMAIN_FOREACH(self.mono_handle, b"mono_domain_foreach")
self.mono_domain_set = MONO_DOMAIN_SET(self.mono_handle, b"mono_domain_set")
self.mono_domain_get = MONO_DOMAIN_GET(self.mono_handle, b"mono_domain_get")
self.mono_assembly_foreach = MONO_ASSEMBLY_FOREACH(self.mono_handle, b"mono_assembly_foreach")
self.mono_assembly_get_image = MONO_ASSEMBLY_GET_IMAGE(self.mono_handle, b"mono_assembly_get_image")
self.mono_image_get_assembly = MONO_IMAGE_GET_ASSEMBLY(self.mono_handle, b"mono_image_get_assembly")
self.mono_image_get_name = MONO_IMAGE_GET_NAME(self.mono_handle, b"mono_image_get_name")
self.mono_image_get_filename = MONO_IMAGE_GET_FILENAME(self.mono_handle, b"mono_image_get_filename")
self.mono_image_get_table_info = MONO_IMAGE_GET_TABLE_INFO(self.mono_handle, b"mono_image_get_table_info")
self.mono_image_rva_map = MONO_IMAGE_RVA_MAP(self.mono_handle, b"mono_image_rva_map")
self.mono_table_info_get_rows = MONO_TABLE_INFO_GET_ROWS(self.mono_handle, b"mono_table_info_get_rows")
self.mono_metadata_decode_row_col = MONO_METADATA_DECODE_ROW_COL(self.mono_handle, b"mono_metadata_decode_row_col")
self.mono_metadata_string_heap = MONO_METADATA_STRING_HEAP(self.mono_handle, b"mono_metadata_string_heap")
self.mono_class_get = MONO_CLASS_GET(self.mono_handle, b"mono_class_get")
self.mono_class_from_typeref = MONO_CLASS_FROM_TYPEREF(self.mono_handle, b"mono_class_from_typeref")
self.mono_class_name_from_token = MONO_CLASS_NAME_FROM_TOKEN(self.mono_handle, b"mono_class_name_from_token")
self.mono_class_from_name_case = MONO_CLASS_FROM_NAME_CASE(self.mono_handle, b"mono_class_from_name_case")
        self.mono_class_from_name = MONO_CLASS_FROM_NAME_CASE(self.mono_handle, b"mono_class_from_name")  # same C signature, so the _CASE prototype is reused
self.mono_class_get_name = MONO_CLASS_GET_NAME(self.mono_handle, b"mono_class_get_name")
self.mono_class_get_namespace = MONO_CLASS_GET_NAMESPACE(self.mono_handle, b"mono_class_get_namespace")
self.mono_class_get_methods = MONO_CLASS_GET_METHODS(self.mono_handle, b"mono_class_get_methods")
self.mono_class_get_method_from_name = MONO_CLASS_GET_METHOD_FROM_NAME(self.mono_handle, b"mono_class_get_method_from_name")
self.mono_class_get_fields = MONO_CLASS_GET_FIELDS(self.mono_handle, b"mono_class_get_fields")
self.mono_class_get_field_from_name = MONO_CLASS_GET_FIELD_FROM_NAME(self.mono_handle, b"mono_class_get_field_from_name")
self.mono_class_get_interfaces = MONO_CLASS_GET_INTERFACES(self.mono_handle, b"mono_class_get_interfaces")
self.mono_class_get_properties = MONO_CLASS_GET_PROPERTIES(self.mono_handle, b"mono_class_get_properties")
self.mono_class_get_property_from_name = MONO_CLASS_GET_PROPERTY_FROM_NAME(self.mono_handle, b"mono_class_get_property_from_name")
self.mono_class_get_parent = MONO_CLASS_GET_PARENT(self.mono_handle, b"mono_class_get_parent")
self.mono_class_get_image = MONO_CLASS_GET_IMAGE(self.mono_handle, b"mono_class_get_image")
self.mono_class_is_generic = MONO_CLASS_IS_GENERIC(self.mono_handle, b"mono_class_is_generic")
self.mono_class_is_enum = MONO_CLASS_IS_ENUM(self.mono_handle, b"mono_class_is_enum")
self.mono_class_is_valuetype = MONO_CLASS_IS_VALUETYPE(self.mono_handle, b"mono_class_is_valuetype")
self.mono_class_is_subclass_of = MONO_CLASS_IS_SUBCLASS_OF(self.mono_handle, b"mono_class_is_subclass_of")
self.mono_class_vtable = MONO_CLASS_VTABLE(self.mono_handle, b"mono_class_vtable")
self.mono_class_from_mono_type = MONO_CLASS_FROM_MONO_TYPE(self.mono_handle, b"mono_class_from_mono_type")
self.mono_class_get_element_class = MONO_CLASS_GET_ELEMENT_CLASS(self.mono_handle, b"mono_class_get_element_class")
self.mono_class_instance_size = MONO_CLASS_INSTANCE_SIZE(self.mono_handle, b"mono_class_instance_size")
self.mono_class_num_fields = MONO_CLASS_NUM_FIELDS(self.mono_handle, b"mono_class_num_fields")
self.mono_class_num_methods = MONO_CLASS_NUM_METHODS(self.mono_handle, b"mono_class_num_methods")
self.mono_field_get_name = MONO_FIELD_GET_NAME(self.mono_handle, b"mono_field_get_name")
self.mono_field_get_type = MONO_FIELD_GET_TYPE(self.mono_handle, b"mono_field_get_type")
self.mono_field_get_parent = MONO_FIELD_GET_PARENT(self.mono_handle, b"mono_field_get_parent")
self.mono_field_get_offset = MONO_FIELD_GET_OFFSET(self.mono_handle, b"mono_field_get_offset")
self.mono_field_get_flags = MONO_FIELD_GET_FLAGS(self.mono_handle, b"mono_field_get_flags")
self.mono_field_get_value_object = MONO_FIELD_GET_VALUE_OBJECT(self.mono_handle, b"mono_field_get_value_object")
self.mono_property_get_name = MONO_PROPERTY_GET_NAME(self.mono_handle, b"mono_property_get_name")
self.mono_property_get_get_method = MONO_PROPERTY_GET_GET_METHOD(self.mono_handle, b"mono_property_get_get_method")
self.mono_property_get_set_method = MONO_PROPERTY_GET_SET_METHOD(self.mono_handle, b"mono_property_get_set_method")
self.mono_property_get_parent = MONO_PROPERTY_GET_PARENT(self.mono_handle, b"mono_property_get_parent")
self.mono_property_get_flags = MONO_PROPERTY_GET_FLAGS(self.mono_handle, b"mono_property_get_flags")
self.mono_type_get_name = MONO_TYPE_GET_NAME(self.mono_handle, b"mono_type_get_name")
self.mono_type_get_type = MONO_TYPE_GET_TYPE(self.mono_handle, b"mono_type_get_type")
self.mono_type_get_object = MONO_TYPE_GET_OBJECT(self.mono_handle, b"mono_type_get_object")
self.mono_type_get_name_full = MONO_TYPE_GET_NAME_FULL(self.mono_handle, b"mono_type_get_name_full")
self.mono_type_is_byref = MONO_TYPE_IS_BYREF(self.mono_handle, b"mono_type_is_byref")
self.mono_method_get_object = MONO_METHOD_GET_OBJECT(self.mono_handle, b"mono_method_get_object")
self.mono_method_get_name = MONO_METHOD_GET_NAME(self.mono_handle, b"mono_method_get_name")
self.mono_method_get_full_name = MONO_METHOD_GET_FULL_NAME(self.mono_handle, b"mono_method_get_full_name")
self.mono_method_get_class = MONO_METHOD_GET_CLASS(self.mono_handle, b"mono_method_get_class")
self.mono_method_get_header = MONO_METHOD_GET_HEADER(self.mono_handle, b"mono_method_get_header")
self.mono_method_get_flags = MONO_METHOD_GET_FLAGS(self.mono_handle, b"mono_method_get_flags")
self.mono_method_signature = MONO_METHOD_SIG(self.mono_handle, b"mono_method_signature")
self.mono_method_get_param_names = MONO_METHOD_GET_PARAM_NAMES(self.mono_handle, b"mono_method_get_param_names")
self.mono_signature_get_desc = MONO_SIGNATURE_GET_DESC(self.mono_handle, b"mono_signature_get_desc")
self.mono_signature_get_params = MONO_SIGNATURE_GET_PARAMS(self.mono_handle, b"mono_signature_get_params")
self.mono_signature_get_param_count = MONO_SIGNATURE_GET_PARAM_COUNT(self.mono_handle, b"mono_signature_get_param_count")
self.mono_signature_get_return_type = MONO_SIGNATURE_GET_RETURN_TYPE(self.mono_handle, b"mono_signature_get_return_type")
self.mono_compile_method = MONO_COMPILE_METHOD(self.mono_handle, b"mono_compile_method")
self.mono_free_method = MONO_FREE_METHOD(self.mono_handle, b"mono_free_method")
self.mono_jit_info_table_find = MONO_JIT_INFO_TABLE_FIND(self.mono_handle, b"mono_jit_info_table_find")
self.mono_jit_info_get_method = MONO_JIT_INFO_GET_METHOD(self.mono_handle, b"mono_jit_info_get_method")
self.mono_jit_info_get_code_start = MONO_JIT_INFO_GET_CODE_START(self.mono_handle, b"mono_jit_info_get_code_start")
self.mono_jit_info_get_code_size = MONO_JIT_INFO_GET_CODE_SIZE(self.mono_handle, b"mono_jit_info_get_code_size")
self.mono_jit_exec = MONO_JIT_EXEC(self.mono_handle, b"mono_jit_exec")
self.mono_method_header_get_code = MONO_METHOD_HEADER_GET_CODE(self.mono_handle, b"mono_method_header_get_code")
self.mono_disasm_code = MONO_DISASM_CODE(self.mono_handle, b"mono_disasm_code")
self.mono_vtable_get_static_field_data = MONO_VTABLE_GET_STATIC_FIELD_DATA(self.mono_handle, b"mono_vtable_get_static_field_data")
self.mono_method_desc_new = MONO_METHOD_DESC_NEW(self.mono_handle, b"mono_method_desc_new")
self.mono_method_desc_from_method = MONO_METHOD_DESC_FROM_METHOD(self.mono_handle, b"mono_method_desc_from_method")
self.mono_method_desc_free = MONO_METHOD_DESC_FREE(self.mono_handle, b"mono_method_desc_free")
self.mono_string_new = MONO_STRING_NEW(self.mono_handle, b"mono_string_new")
self.mono_string_to_utf8 = MONO_STRING_TO_UTF8(self.mono_handle, b"mono_string_to_utf8")
self.mono_array_new = MONO_ARRAY_NEW(self.mono_handle, b"mono_array_new")
self.mono_array_element_size = MONO_ARRAY_ELEMENT_SIZE(self.mono_handle, b"mono_array_element_size")
self.mono_class_get_rank = MONO_CLASS_GET_RANK(self.mono_handle, b"mono_class_get_rank")
self.mono_value_box = MONO_VALUE_BOX(self.mono_handle, b"mono_value_box")
self.mono_object_unbox = MONO_OBJECT_UNBOX(self.mono_handle, b"mono_object_unbox")
self.mono_object_new = MONO_OBJECT_NEW(self.mono_handle, b"mono_object_new")
self.mono_object_to_string = MONO_OBJECT_TO_STRING(self.mono_handle, b"mono_object_to_string")
self.mono_object_isinst = MONO_OBJECT_ISINST(self.mono_handle, b"mono_object_isinst")
self.mono_get_enum_class = MONO_GET_ENUM_CLASS(self.mono_handle, b"mono_get_enum_class")
self.mono_class_get_type = MONO_CLASS_GET_TYPE(self.mono_handle, b"mono_class_get_type")
self.mono_type_get_class = MONO_TYPE_GET_CLASS(self.mono_handle, b"mono_type_get_class")
self.mono_class_get_nesting_type = MONO_CLASS_GET_NESTING_TYPE(self.mono_handle, b"mono_class_get_nesting_type")
self.mono_class_get_nested_types = MONO_CLASS_GET_NESTED_TYPES(self.mono_handle, b"mono_class_get_nested_types")
self.mono_method_desc_search_in_image = MONO_METHOD_DESC_SEARCH_IN_IMAGE(self.mono_handle, b"mono_method_desc_search_in_image")
self.mono_runtime_invoke = MONO_RUNTIME_INVOKE(self.mono_handle, b"mono_runtime_invoke")
self.mono_runtime_object_init = MONO_RUNTIME_OBJECT_INIT(self.mono_handle, b"mono_runtime_object_init")
self.mono_ptr_class_get = MONO_PTR_GET_CLASS(self.mono_handle, b"mono_ptr_class_get")
self.mono_type_get_ptr_type = MONO_PTR_GET_CLASS(self.mono_handle, b"mono_type_get_ptr_type")
self.mono_assembly_name_new = MONO_ASSEMBLY_NAME_NEW(self.mono_handle, b"mono_assembly_name_new")
self.mono_assembly_loaded = MONO_ASSEMBLY_LOADED(self.mono_handle, b"mono_assembly_loaded")
self.mono_assembly_open = MONO_ASSEMBLY_OPEN(self.mono_handle, b"mono_assembly_open")
self.mono_image_open = MONO_IMAGE_OPEN(self.mono_handle, b"mono_image_open")
self.mono_field_static_get_value = MONO_FIELD_STATIC_GET_VALUE(self.mono_handle, b"mono_field_static_get_value")
self.mono_field_static_set_value = MONO_FIELD_STATIC_SET_VALUE(self.mono_handle, b"mono_field_static_set_value")
self.mono_runtime_is_shutting_down = MONO_RUNTIME_IS_SHUTTING_DOWN(self.mono_handle, b"mono_runtime_is_shutting_down")
def __getattr__(self, item):
        # __getattr__ is only invoked for missing attributes; return None so
        # unresolved runtime exports read as absent instead of raising
return None
def imgui_render_api_table(self):
if getattr(self, '_cached_api_table', None) is None:
self._cached_api_table = {}
for name in dir(self):
o = getattr(self, name)
if isinstance(o, _MonoApi):
self._cached_api_table[name] = o
from nylib.pyimgui import imgui
from nylib.pyimgui.imgui import ctx as imgui_ctx
with imgui_ctx.BeginTable("mono_api_table", 3) as show:
if show:
imgui.TableNextRow()
imgui.TableNextColumn()
imgui.Text("Is IL2CPP")
imgui.TableNextColumn()
imgui.Text(str(self.is_il2cpp))
imgui.TableNextRow()
imgui.TableNextColumn()
imgui.Text("Module Handle")
imgui.TableNextColumn()
imgui.Text(f"{self.mono_handle:X}")
for name, o in self._cached_api_table.items():
imgui.TableNextRow()
imgui.TableNextColumn()
imgui.Text(name)
imgui.TableNextColumn()
imgui.Text(o.function)
imgui.TableNextColumn()
imgui.Text(f"{o.func_ptr or 0:X}")
| 44,558 | Python | .py | 503 | 80.809145 | 228 | 0.691158 | nyaoouo/NyLib2 | 8 | 0 | 0 | GPL-3.0 | 9/5/2024, 10:48:51 PM (Europe/Amsterdam) |
2,290,078 | __init__.py | nyaoouo_NyLib2/nylib/imguiutils/__init__.py | import typing
from ..pyimgui import imgui
from ..pyimgui.imgui import ctx
_T = typing.TypeVar('_T')
def BeginFullScreenBackGround(name: str = '', open: bool = True, flags: int = 0):
viewport = imgui.GetMainViewport()
cls = imgui.ImGuiWindowClass()
cls.DockNodeFlagsOverrideSet = imgui.ImGuiDockNodeFlags_NoDocking
imgui.SetNextWindowClass(cls)
imgui.SetNextWindowPos(viewport.Pos)
imgui.SetNextWindowSize(viewport.Size)
return ctx.Begin(
name,
open,
flags | imgui.ImGuiWindowFlags_NoDecoration |
imgui.ImGuiWindowFlags_NoMove |
imgui.ImGuiWindowFlags_NoSavedSettings |
imgui.ImGuiWindowFlags_NoBringToFrontOnFocus
)
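# Illustrative usage sketch (added for clarity, never called by the library).
# It assumes the ctx.Begin wrapper yields a truthy flag while the window is
# open, matching how ctx.BeginTable is used with `as` elsewhere in the repo.
def _example_fullscreen_background():
    with BeginFullScreenBackGround('background') as shown:
        if shown:
            imgui.Text('rendered over the whole main viewport')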
class Inspector(typing.Generic[_T]):
selected_item = None
selected_inspector: 'Inspector|None' = None
def __init__(self, target: _T):
self.filter_string = ''
self.target = target
self.items = self.init_items()
self.displayed_items = self.items
def init_items(self):
return []
def item_name(self, item):
return str(item)
def on_item_selected(self, item):
self.selected_item = item
def is_item_match(self, item):
return self.filter_string.lower() in self.item_name(item).lower()
def update_displayed_items(self):
if not self.filter_string:
self.displayed_items = self.items
else:
self.displayed_items = [[item for item in items if self.is_item_match(item)] for items in self.items]
def render(self):
menu_width = 0
if self.selected_inspector is not None:
# menu_width = imgui.CalcTextSize(self.item_name(self.selected_item)).x + imgui.GetStyle().ItemSpacing.x * 2 + 10
# menu_width = max(menu_width, 200)
menu_width = 200
with ctx.BeginChild('left', imgui.ImVec2(menu_width, 0)):
changed, self.filter_string = imgui.InputText('Filter', self.filter_string)
if changed:
self.update_displayed_items()
if self.displayed_items:
each_height = imgui.GetContentRegionAvail().y // len(self.displayed_items) - imgui.GetStyle().ItemSpacing.y
for i, items in enumerate(self.displayed_items):
with ctx.BeginChild(
f'left_{i}', imgui.ImVec2(0, each_height),
window_flags=imgui.ImGuiWindowFlags_HorizontalScrollbar,
child_flags=imgui.ImGuiChildFlags_Border,
):
clipper = imgui.ImGuiListClipper()
clipper.Begin(len(items))
while clipper.Step():
for j in range(clipper.DisplayStart, clipper.DisplayEnd):
item = items[j]
is_selected = self.selected_item == item
if imgui.Selectable(f"{self.item_name(item)}##item_{j}", is_selected):
self.on_item_selected(None if is_selected else item)
if menu_width:
imgui.SameLine()
with ctx.BeginChild('right'):
if self.selected_inspector:
self.selected_inspector.render()
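# Hypothetical subclass sketch (illustrative only, never instantiated by the
# package): init_items must return a list of item *groups* (a list of lists),
# because render() splits the left pane into one child region per group.
class _ExampleInspector(Inspector[list]):
    def init_items(self):
        return [sorted(self.target, key=str)]
    def item_name(self, item):
        return repr(item)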
| 3,307 | Python | .py | 71 | 34.28169 | 125 | 0.595903 | nyaoouo/NyLib2 | 8 | 0 | 0 | GPL-3.0 | 9/5/2024, 10:48:51 PM (Europe/Amsterdam) |
2,290,079 | __init__.py | nyaoouo_NyLib2/nylib/hook/__init__.py | import ctypes
import typing
from . import detours
class Hook:
def __init__(self, at: int, hook_func, restype: typing.Type = ctypes.c_void_p, argtypes: typing.Iterable[typing.Type] = ()):
"""
        Create a hook; note that install() must be called manually afterwards.
        :param at: memory address of the target function
        :param hook_func: the hook callable
        :param restype: return type
        :param argtypes: argument types (iterable)
"""
self.at = at
self.interface = ctypes.CFUNCTYPE(restype, *argtypes)
self._hook_function = self.interface(lambda *args: hook_func(self, *args))
self.original = self.interface(at)
self.is_installed = False
def install(self):
if not self.is_installed:
with detours.DetourTransaction():
detours.DetourAttach(ctypes.byref(self.original), self._hook_function)
self.is_installed = True
return self
def uninstall(self):
if self.is_installed:
with detours.DetourTransaction():
detours.DetourDetach(ctypes.byref(self.original), self._hook_function)
self.is_installed = False
return self
    def __call__(self, *args):
        return self.interface(self.at)(*args)  # call through the target address and propagate the result
def create_hook(at: int, restype: typing.Type = ctypes.c_void_p, argtypes: typing.Iterable[typing.Type] = (), auto_install=False):
"""
    Create a hook via a decorator; install() must still be called manually unless auto_install is set.
    :param at: memory address of the target function
    :param restype: return type
    :param argtypes: argument types (iterable)
    :param auto_install: whether to call install() automatically
    :return: a decorator that wraps the function in a Hook
"""
if auto_install:
return lambda func: Hook(at, func, restype, argtypes).install()
else:
return lambda func: Hook(at, func, restype, argtypes)
def test():
import ctypes.wintypes
t_dll = ctypes.CDLL('User32.dll')
MessageBoxW = ctypes.CFUNCTYPE(ctypes.wintypes.INT, ctypes.wintypes.HWND, ctypes.wintypes.LPCWSTR, ctypes.wintypes.LPCWSTR, ctypes.wintypes.UINT)(t_dll.MessageBoxW)
@create_hook(at=ctypes.cast(MessageBoxW, ctypes.c_void_p).value, restype=ctypes.wintypes.INT, argtypes=[ctypes.wintypes.HWND, ctypes.wintypes.LPCWSTR, ctypes.wintypes.LPCWSTR, ctypes.wintypes.UINT], auto_install=True)
def message_box_hook(_hook, handle, title, message, flag):
res = _hook.original(handle, "hooked " + title, "hooked " + message, flag)
print(f"hooked: {title} - {message}, return {res}")
return res
MessageBoxW(None, 'hi content!', 'hi title!', 0)
message_box_hook.uninstall()
MessageBoxW(None, 'hi content!', 'hi title!', 0)
| 2,670 | Python | .py | 56 | 37.392857 | 221 | 0.659557 | nyaoouo/NyLib2 | 8 | 0 | 0 | GPL-3.0 | 9/5/2024, 10:48:51 PM (Europe/Amsterdam) |
2,290,080 | detours.py | nyaoouo_NyLib2/nylib/hook/detours/detours.py | import ctypes
import os.path
import sys
_dll = ctypes.cdll.LoadLibrary(os.path.join(os.path.dirname(sys.executable if getattr(sys, "frozen", False) else __file__), 'DetoursEx.dll'))
def _make_api(name, restype, argtypes):
if f := getattr(_dll, name, None):
f.restype = restype
f.argtypes = argtypes
return f
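# For instance, DetourAttach below resolves to a foreign function with
# restype=c_long and two void* argtypes; exports missing from the DLL simply
# come back as None (the walrus guard above returns nothing in that case).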
class _GUID(ctypes.Structure):
_fields_ = [
('Data1', ctypes.c_uint32),
('Data2', ctypes.c_uint16),
('Data3', ctypes.c_uint16),
('Data4', ctypes.c_uint8 * 8),
]
class _DETOUR_SECTION_HEADER(ctypes.Structure):
_fields_ = [
('cbHeaderSize', ctypes.c_uint32),
('nSignature', ctypes.c_uint32),
('nDataOffset', ctypes.c_uint32),
('cbDataSize', ctypes.c_uint32),
('nOriginalImportVirtualAddress', ctypes.c_uint32),
('nOriginalImportSize', ctypes.c_uint32),
('nOriginalBoundImportVirtualAddress', ctypes.c_uint32),
('nOriginalBoundImportSize', ctypes.c_uint32),
('nOriginalIatVirtualAddress', ctypes.c_uint32),
('nOriginalIatSize', ctypes.c_uint32),
('nOriginalSizeOfImage', ctypes.c_uint32),
('cbPrePE', ctypes.c_uint32),
('nOriginalClrFlags', ctypes.c_uint32),
('reserved1', ctypes.c_uint32),
('reserved2', ctypes.c_uint32),
('reserved3', ctypes.c_uint32),
]
class _DETOUR_SECTION_RECORD(ctypes.Structure):
_fields_ = [
('cbBytes', ctypes.c_uint32),
('nReserved', ctypes.c_uint32),
('guid', _GUID),
]
class _DETOUR_CLR_HEADER(ctypes.Structure):
_fields_ = [
('cb', ctypes.c_ulong),
('MajorRuntimeVersion', ctypes.c_uint16),
('MinorRuntimeVersion', ctypes.c_uint16),
('MetaData', ctypes.c_uint32 * 2),
('Flags', ctypes.c_ulong),
]
class _DETOUR_EXE_RESTORE(ctypes.Structure):
_fields_ = [
('cb', ctypes.c_uint32),
('cbidh', ctypes.c_uint32),
('cbinh', ctypes.c_uint32),
('cbclr', ctypes.c_uint32),
('pidh', ctypes.POINTER(ctypes.c_uint8)),
('pinh', ctypes.POINTER(ctypes.c_uint8)),
('pclr', ctypes.POINTER(ctypes.c_uint8)),
('idh', ctypes.c_uint32 * 25),
('clr', _DETOUR_CLR_HEADER),
]
class _DETOUR_EXE_HELPER(ctypes.Structure):
_fields_ = [
('cb', ctypes.c_uint32),
('pid', ctypes.c_uint32),
('nDlls', ctypes.c_uint32),
('rDlls', ctypes.c_char * 4),
]
DetourTransactionBegin = _make_api('DetourTransactionBegin', ctypes.c_long, (
))
DetourTransactionAbort = _make_api('DetourTransactionAbort', ctypes.c_long, (
))
DetourTransactionCommit = _make_api('DetourTransactionCommit', ctypes.c_long, (
))
DetourTransactionCommitEx = _make_api('DetourTransactionCommitEx', ctypes.c_long, (
ctypes.c_void_p, # pppFailedPointer
))
DetourUpdateThread = _make_api('DetourUpdateThread', ctypes.c_long, (
ctypes.c_void_p, # hThread
))
DetourAttach = _make_api('DetourAttach', ctypes.c_long, (
ctypes.c_void_p, # ppPointer
ctypes.c_void_p, # pDetour
))
DetourAttachEx = _make_api('DetourAttachEx', ctypes.c_long, (
ctypes.c_void_p, # ppPointer
ctypes.c_void_p, # pDetour
ctypes.c_void_p, # ppRealTrampoline
ctypes.c_void_p, # ppRealTarget
ctypes.c_void_p, # ppRealDetour
))
DetourDetach = _make_api('DetourDetach', ctypes.c_long, (
ctypes.c_void_p, # ppPointer
ctypes.c_void_p, # pDetour
))
DetourSetIgnoreTooSmall = _make_api('DetourSetIgnoreTooSmall', ctypes.c_bool, (
ctypes.c_bool, # fIgnore
))
DetourSetRetainRegions = _make_api('DetourSetRetainRegions', ctypes.c_bool, (
ctypes.c_bool, # fRetain
))
DetourSetSystemRegionLowerBound = _make_api('DetourSetSystemRegionLowerBound', ctypes.c_void_p, (
ctypes.c_void_p, # pSystemRegionLowerBound
))
DetourSetSystemRegionUpperBound = _make_api('DetourSetSystemRegionUpperBound', ctypes.c_void_p, (
ctypes.c_void_p, # pSystemRegionUpperBound
))
DetourFindFunction = _make_api('DetourFindFunction', ctypes.c_void_p, (
ctypes.c_char_p, # pszModule
ctypes.c_char_p, # pszFunction
))
DetourCodeFromPointer = _make_api('DetourCodeFromPointer', ctypes.c_void_p, (
ctypes.c_void_p, # pPointer
ctypes.c_void_p, # ppGlobals
))
DetourCopyInstruction = _make_api('DetourCopyInstruction', ctypes.c_void_p, (
ctypes.c_void_p, # pDst
ctypes.c_void_p, # ppDstPool
ctypes.c_void_p, # pSrc
ctypes.c_void_p, # ppTarget
ctypes.c_void_p, # plExtra
))
DetourSetCodeModule = _make_api('DetourSetCodeModule', ctypes.c_bool, (
ctypes.c_void_p, # hModule
ctypes.c_bool, # fLimitReferencesToModule
))
DetourAllocateRegionWithinJumpBounds = _make_api('DetourAllocateRegionWithinJumpBounds', ctypes.c_void_p, (
ctypes.c_void_p, # pbTarget
ctypes.POINTER(ctypes.c_uint32), # pcbAllocatedSize
))
DetourIsFunctionImported = _make_api('DetourIsFunctionImported', ctypes.c_bool, (
ctypes.POINTER(ctypes.c_uint8), # pbCode
ctypes.POINTER(ctypes.c_uint8), # pbAddress
))
DetourGetContainingModule = _make_api('DetourGetContainingModule', ctypes.c_void_p, (
ctypes.c_void_p, # pvAddr
))
DetourEnumerateModules = _make_api('DetourEnumerateModules', ctypes.c_void_p, (
ctypes.c_void_p, # hModuleLast
))
DetourGetEntryPoint = _make_api('DetourGetEntryPoint', ctypes.c_void_p, (
ctypes.c_void_p, # hModule
))
DetourGetModuleSize = _make_api('DetourGetModuleSize', ctypes.c_ulong, (
ctypes.c_void_p, # hModule
))
DetourEnumerateExports = _make_api('DetourEnumerateExports', ctypes.c_bool, (
ctypes.c_void_p, # hModule
ctypes.c_void_p, # pContext
ctypes.c_void_p, # pfExport
))
DetourEnumerateImports = _make_api('DetourEnumerateImports', ctypes.c_bool, (
ctypes.c_void_p, # hModule
ctypes.c_void_p, # pContext
ctypes.c_void_p, # pfImportFile
ctypes.c_void_p, # pfImportFunc
))
DetourEnumerateImportsEx = _make_api('DetourEnumerateImportsEx', ctypes.c_bool, (
ctypes.c_void_p, # hModule
ctypes.c_void_p, # pContext
ctypes.c_void_p, # pfImportFile
ctypes.c_void_p, # pfImportFuncEx
))
DetourFindPayload = _make_api('DetourFindPayload', ctypes.c_void_p, (
ctypes.c_void_p, # hModule
ctypes.c_void_p, # rguid
ctypes.c_void_p, # pcbData
))
DetourFindPayloadEx = _make_api('DetourFindPayloadEx', ctypes.c_void_p, (
ctypes.c_void_p, # rguid
ctypes.c_void_p, # pcbData
))
DetourGetSizeOfPayloads = _make_api('DetourGetSizeOfPayloads', ctypes.c_uint32, (
ctypes.c_void_p, # hModule
))
DetourFreePayload = _make_api('DetourFreePayload', ctypes.c_bool, (
ctypes.c_void_p, # pvData
))
DetourBinaryOpen = _make_api('DetourBinaryOpen', ctypes.POINTER(None), (
ctypes.c_void_p, # hFile
))
DetourBinaryEnumeratePayloads = _make_api('DetourBinaryEnumeratePayloads', ctypes.c_void_p, (
ctypes.POINTER(None), # pBinary
ctypes.c_void_p, # pGuid
ctypes.c_void_p, # pcbData
ctypes.c_void_p, # pnIterator
))
DetourBinaryFindPayload = _make_api('DetourBinaryFindPayload', ctypes.c_void_p, (
ctypes.POINTER(None), # pBinary
ctypes.c_void_p, # rguid
ctypes.c_void_p, # pcbData
))
DetourBinarySetPayload = _make_api('DetourBinarySetPayload', ctypes.c_void_p, (
ctypes.POINTER(None), # pBinary
ctypes.c_void_p, # rguid
ctypes.c_void_p, # pData
ctypes.c_uint32, # cbData
))
DetourBinaryDeletePayload = _make_api('DetourBinaryDeletePayload', ctypes.c_bool, (
ctypes.POINTER(None), # pBinary
ctypes.c_void_p, # rguid
))
DetourBinaryPurgePayloads = _make_api('DetourBinaryPurgePayloads', ctypes.c_bool, (
ctypes.POINTER(None), # pBinary
))
DetourBinaryResetImports = _make_api('DetourBinaryResetImports', ctypes.c_bool, (
ctypes.POINTER(None), # pBinary
))
DetourBinaryEditImports = _make_api('DetourBinaryEditImports', ctypes.c_bool, (
ctypes.POINTER(None), # pBinary
ctypes.c_void_p, # pContext
ctypes.c_void_p, # pfByway
ctypes.c_void_p, # pfFile
ctypes.c_void_p, # pfSymbol
ctypes.c_void_p, # pfCommit
))
DetourBinaryWrite = _make_api('DetourBinaryWrite', ctypes.c_bool, (
ctypes.POINTER(None), # pBinary
ctypes.c_void_p, # hFile
))
DetourBinaryClose = _make_api('DetourBinaryClose', ctypes.c_bool, (
ctypes.POINTER(None), # pBinary
))
DetourFindRemotePayload = _make_api('DetourFindRemotePayload', ctypes.c_void_p, (
ctypes.c_void_p, # hProcess
ctypes.c_void_p, # rguid
ctypes.c_void_p, # pcbData
))
DetourCreateProcessWithDllA = _make_api('DetourCreateProcessWithDllA', ctypes.c_bool, (
ctypes.c_char_p, # lpApplicationName
ctypes.c_char_p, # lpCommandLine
ctypes.c_void_p, # lpProcessAttributes
ctypes.c_void_p, # lpThreadAttributes
ctypes.c_bool, # bInheritHandles
ctypes.c_uint32, # dwCreationFlags
ctypes.c_void_p, # lpEnvironment
ctypes.c_char_p, # lpCurrentDirectory
ctypes.c_void_p, # lpStartupInfo
ctypes.c_void_p, # lpProcessInformation
ctypes.c_char_p, # lpDllName
ctypes.c_void_p, # pfCreateProcessA
))
DetourCreateProcessWithDllW = _make_api('DetourCreateProcessWithDllW', ctypes.c_bool, (
ctypes.c_wchar_p, # lpApplicationName
ctypes.c_wchar_p, # lpCommandLine
ctypes.c_void_p, # lpProcessAttributes
ctypes.c_void_p, # lpThreadAttributes
ctypes.c_bool, # bInheritHandles
ctypes.c_uint32, # dwCreationFlags
ctypes.c_void_p, # lpEnvironment
ctypes.c_wchar_p, # lpCurrentDirectory
ctypes.c_void_p, # lpStartupInfo
ctypes.c_void_p, # lpProcessInformation
ctypes.c_char_p, # lpDllName
ctypes.c_void_p, # pfCreateProcessW
))
DetourCreateProcessWithDllExA = _make_api('DetourCreateProcessWithDllExA', ctypes.c_bool, (
ctypes.c_char_p, # lpApplicationName
ctypes.c_char_p, # lpCommandLine
ctypes.c_void_p, # lpProcessAttributes
ctypes.c_void_p, # lpThreadAttributes
ctypes.c_bool, # bInheritHandles
ctypes.c_uint32, # dwCreationFlags
ctypes.c_void_p, # lpEnvironment
ctypes.c_char_p, # lpCurrentDirectory
ctypes.c_void_p, # lpStartupInfo
ctypes.c_void_p, # lpProcessInformation
ctypes.c_char_p, # lpDllName
ctypes.c_void_p, # pfCreateProcessA
))
DetourCreateProcessWithDllExW = _make_api('DetourCreateProcessWithDllExW', ctypes.c_bool, (
ctypes.c_wchar_p, # lpApplicationName
ctypes.c_wchar_p, # lpCommandLine
ctypes.c_void_p, # lpProcessAttributes
ctypes.c_void_p, # lpThreadAttributes
ctypes.c_bool, # bInheritHandles
ctypes.c_uint32, # dwCreationFlags
ctypes.c_void_p, # lpEnvironment
ctypes.c_wchar_p, # lpCurrentDirectory
ctypes.c_void_p, # lpStartupInfo
ctypes.c_void_p, # lpProcessInformation
ctypes.c_char_p, # lpDllName
ctypes.c_void_p, # pfCreateProcessW
))
DetourCreateProcessWithDllsA = _make_api('DetourCreateProcessWithDllsA', ctypes.c_bool, (
ctypes.c_char_p, # lpApplicationName
ctypes.c_char_p, # lpCommandLine
ctypes.c_void_p, # lpProcessAttributes
ctypes.c_void_p, # lpThreadAttributes
ctypes.c_bool, # bInheritHandles
ctypes.c_uint32, # dwCreationFlags
ctypes.c_void_p, # lpEnvironment
ctypes.c_char_p, # lpCurrentDirectory
ctypes.c_void_p, # lpStartupInfo
ctypes.c_void_p, # lpProcessInformation
ctypes.c_uint32, # nDlls
ctypes.c_void_p, # rlpDlls
ctypes.c_void_p, # pfCreateProcessA
))
DetourCreateProcessWithDllsW = _make_api('DetourCreateProcessWithDllsW', ctypes.c_bool, (
ctypes.c_wchar_p, # lpApplicationName
ctypes.c_wchar_p, # lpCommandLine
ctypes.c_void_p, # lpProcessAttributes
ctypes.c_void_p, # lpThreadAttributes
ctypes.c_bool, # bInheritHandles
ctypes.c_uint32, # dwCreationFlags
ctypes.c_void_p, # lpEnvironment
ctypes.c_wchar_p, # lpCurrentDirectory
ctypes.c_void_p, # lpStartupInfo
ctypes.c_void_p, # lpProcessInformation
ctypes.c_uint32, # nDlls
ctypes.c_void_p, # rlpDlls
ctypes.c_void_p, # pfCreateProcessW
))
DetourProcessViaHelperA = _make_api('DetourProcessViaHelperA', ctypes.c_bool, (
ctypes.c_uint32, # dwTargetPid
ctypes.c_char_p, # lpDllName
ctypes.c_void_p, # pfCreateProcessA
))
DetourProcessViaHelperW = _make_api('DetourProcessViaHelperW', ctypes.c_bool, (
ctypes.c_uint32, # dwTargetPid
ctypes.c_char_p, # lpDllName
ctypes.c_void_p, # pfCreateProcessW
))
DetourProcessViaHelperDllsA = _make_api('DetourProcessViaHelperDllsA', ctypes.c_bool, (
ctypes.c_uint32, # dwTargetPid
ctypes.c_uint32, # nDlls
ctypes.c_void_p, # rlpDlls
ctypes.c_void_p, # pfCreateProcessA
))
DetourProcessViaHelperDllsW = _make_api('DetourProcessViaHelperDllsW', ctypes.c_bool, (
ctypes.c_uint32, # dwTargetPid
ctypes.c_uint32, # nDlls
ctypes.c_void_p, # rlpDlls
ctypes.c_void_p, # pfCreateProcessW
))
DetourUpdateProcessWithDll = _make_api('DetourUpdateProcessWithDll', ctypes.c_bool, (
ctypes.c_void_p, # hProcess
ctypes.c_void_p, # rlpDlls
ctypes.c_uint32, # nDlls
))
DetourUpdateProcessWithDllEx = _make_api('DetourUpdateProcessWithDllEx', ctypes.c_bool, (
ctypes.c_void_p, # hProcess
ctypes.c_void_p, # hImage
ctypes.c_bool, # bIs32Bit
ctypes.c_void_p, # rlpDlls
ctypes.c_uint32, # nDlls
))
DetourCopyPayloadToProcess = _make_api('DetourCopyPayloadToProcess', ctypes.c_bool, (
ctypes.c_void_p, # hProcess
ctypes.c_void_p, # rguid
ctypes.c_void_p, # pvData
ctypes.c_uint32, # cbData
))
DetourCopyPayloadToProcessEx = _make_api('DetourCopyPayloadToProcessEx', ctypes.c_void_p, (
ctypes.c_void_p, # hProcess
ctypes.c_void_p, # rguid
ctypes.c_void_p, # pvData
ctypes.c_uint32, # cbData
))
DetourRestoreAfterWith = _make_api('DetourRestoreAfterWith', ctypes.c_bool, (
))
DetourRestoreAfterWithEx = _make_api('DetourRestoreAfterWithEx', ctypes.c_bool, (
ctypes.c_void_p, # pvData
ctypes.c_uint32, # cbData
))
DetourIsHelperProcess = _make_api('DetourIsHelperProcess', ctypes.c_bool, (
))
DetourFinishHelperProcess = _make_api('DetourFinishHelperProcess', None, (
ctypes.c_void_p, # a1
ctypes.c_void_p, # a2
ctypes.c_char_p, # a3
ctypes.c_int, # a4
))
| 14,279 | Python | .py | 374 | 33.868984 | 141 | 0.695467 | nyaoouo/NyLib2 | 8 | 0 | 0 | GPL-3.0 | 9/5/2024, 10:48:51 PM (Europe/Amsterdam) |
2,290,081 | __init__.py | nyaoouo_NyLib2/nylib/hook/detours/__init__.py | import ctypes
from .detours import *
def check(c):
if c: raise ctypes.WinError(c)
class DetourTransaction:
def __init__(self, set_thread=None):
self.set_thread = set_thread
def __enter__(self):
check(DetourTransactionBegin())
try:
check(DetourUpdateThread(self.set_thread or ctypes.windll.kernel32.GetCurrentThread()))
except:
check(DetourTransactionAbort())
raise
def __exit__(self, exc_type, exc_val, exc_tb):
if exc_type is None:
check(DetourTransactionCommit())
else:
check(DetourTransactionAbort())
return False
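# Illustrative sketch (never called by the package): the canonical attach
# pattern, assuming `original_ref` is a ctypes.byref(...) of the original
# function pointer and `detour` is a CFUNCTYPE instance, as in nylib.hook.Hook.install().
def _example_attach(original_ref, detour):
    with DetourTransaction():
        check(DetourAttach(original_ref, detour))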
| 660 | Python | .py | 20 | 24.85 | 99 | 0.627172 | nyaoouo/NyLib2 | 8 | 0 | 0 | GPL-3.0 | 9/5/2024, 10:48:51 PM (Europe/Amsterdam) |
2,290,082 | setup_llvm_dev.py | nyaoouo_NyLib2/scripts/setup_llvm_dev.py | import hashlib
import os
import pathlib
import shutil
import subprocess
from nylib.winutils import ensure_env, msvc
os.environ['Path'] += r';D:\tool\cmake-3.29.0-windows-x86_64\bin;'  # machine-specific CMake fallback; adjust to your environment
def file_last_modified(path: pathlib.Path) -> int:
return path.stat().st_mtime_ns
def file_md5(path: pathlib.Path) -> str:
md5 = hashlib.md5()
with open(path, 'rb') as f:
for chunk in iter(lambda: f.read(4096), b''):
md5.update(chunk)
return md5.hexdigest()
def main(llvm_path, mode='Debug', tool='msbuild', file_comp='last-modified'):
    os.system('chcp 65001')  # switch the Windows console codepage to UTF-8
llvm_path = pathlib.Path(llvm_path).resolve()
tmp_dir = pathlib.Path('.tmp').resolve()
tmp_dir.mkdir(exist_ok=True)
git_at = pathlib.Path(ensure_env.ensure_git(tmp_dir))
cmake_at = pathlib.Path(ensure_env.ensure_cmake(tmp_dir))
ensure_env.ensure_msvc(tmp_dir)
cygwin_dir = ensure_env.ensure_cygwin_dir(tmp_dir)
if not llvm_path.exists():
subprocess.check_call([
git_at, 'clone', '-c', 'core.autocrlf=false',
'https://github.com/llvm/llvm-project.git', llvm_path
], cwd=llvm_path.parent)
else:
assert (llvm_path / '.git').exists(), f'{llvm_path} is not a git repository'
# subprocess.check_call(['git', 'pull'], cwd=llvm_path)
env = msvc.load_vcvarsall('x86_amd64').copy() | {'CC': 'cl', 'CXX': 'cl'}
if tool == 'msbuild':
build_path = llvm_path / 'build'
bin_path = build_path / mode / 'bin'
dist_path = llvm_path / 'dist' / mode
elif tool == 'ninja':
build_path = llvm_path / 'build_ninja'
bin_path = build_path / mode / 'bin'
dist_path = llvm_path / 'dist_ninja' / mode
else:
raise ValueError(f'Unknown tool: {tool}')
# if build_path.exists(): shutil.rmtree(build_path)
if tool == 'msbuild':
msbuild_at = msvc.where('msbuild.exe', 'x86_amd64')
if not build_path.exists():
build_path.mkdir()
subprocess.check_call([
cmake_at, '-E', 'env', 'CXXFLAGS=/utf-8', 'CCFLAGS=/utf-8',
'--', cmake_at, '-G', "Visual Studio 17 2022", '-A', 'x64',
'-Thost=x64', '-DLLVM_ENABLE_PROJECTS=clang', '..\\llvm'
], cwd=build_path, env=env, shell=True)
subprocess.check_call([msbuild_at, build_path / 'ALL_BUILD.vcxproj', f'/p:Configuration={mode}'], cwd=build_path, env=env, shell=True)
elif tool == 'ninja':
ninja_at = msvc.where('ninja.exe', 'x86_amd64')
if not build_path.exists():
build_path.mkdir()
subprocess.check_call([
cmake_at, '-E', 'env', 'CXXFLAGS=/utf-8', 'CCFLAGS=/utf-8',
'--', cmake_at, '-GNinja', '-DLLVM_ENABLE_PROJECTS=clang',
'-DCMAKE_EXE_LINKER_FLAGS=/MAXILKSIZE:0x7FF00000',
'..\\llvm'
], cwd=build_path, env=env, shell=True)
subprocess.check_call([ninja_at, 'clang'], cwd=build_path, env=env, shell=True)
subprocess.check_call([ninja_at, 'check-clang'], cwd=build_path, env=env, shell=True)
else:
raise ValueError(f'Unknown tool: {tool}')
if file_comp == 'last-modified':
comp_func = file_last_modified
elif file_comp == 'md5':
comp_func = file_md5
else:
raise ValueError(f'Unknown file comparison method: {file_comp}')
if not dist_path.exists():
dist_path.mkdir()
update_count = 0
for bin_file in bin_path.iterdir():
dist_file = dist_path / bin_file.relative_to(bin_path)
if dist_file.exists() and comp_func(bin_file) == comp_func(dist_file): continue
print(f'Copy {bin_file} to {dist_file}')
update_count += 1
shutil.copy2(bin_file, dist_file)
print(f'Updated {update_count} files')
if __name__ == '__main__':
main(r'D:\projects\llvm', 'Debug')
| 3,886 | Python | .py | 86 | 37.325581 | 142 | 0.601798 | nyaoouo/NyLib2 | 8 | 0 | 0 | GPL-3.0 | 9/5/2024, 10:48:51 PM (Europe/Amsterdam) |
2,290,083 | pyimgui_generate.py | nyaoouo_NyLib2/scripts/pyimgui/pyimgui_generate.py | import io
import json
import os
import pathlib
import re
import shutil
import subprocess
import sys
from nylib.utils.pip import required
from nylib.winutils import ensure_env
from func_wrappers import wrappers as specified_wrappers
class CodeWriter:
class IndentPopper:
def __init__(self, writer, need_push=True):
self.writer = writer
self.need_push = need_push
def __enter__(self):
if self.need_push:
self.writer.push_indent()
def __exit__(self, exc_type, exc_val, exc_tb):
self.writer.pop_indent()
def __init__(self, init_ind=0, indent_size=4):
self.buf = io.StringIO()
self.indent = init_ind
self.indent_size = indent_size
def push_indent(self):
self.indent += 1
return self.IndentPopper(self, False)
def pop_indent(self):
self.indent -= 1
def write(self, s):
self.buf.write(s.replace('\n', '\n' + ' ' * self.indent_size * self.indent))
def getvalue(self):
return self.buf.getvalue()
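# Minimal sketch of CodeWriter's indentation model (illustrative, never called):
# write() applies the current indent to the newlines *inside* the string, so a
# pushed level affects subsequent "\n"-prefixed writes, not the current line.
def _example_code_writer():
    w = CodeWriter()
    w.write("void f() {")
    with w.push_indent():
        w.write("\nreturn;")
    w.write("\n}")
    return w.getvalue()  # -> "void f() {\n    return;\n}"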
def update_generated_files(source_dir, files):
to_remove = set()
if source_dir.is_dir():
for file in source_dir.iterdir():
if file.is_file() and file.name.endswith('.cpp'):
to_remove.add(file)
for file, data in files:
file.parent.mkdir(parents=True, exist_ok=True)
to_remove.discard(file)
old_data = None
if file.is_file():
with open(file, 'r', encoding='utf-8') as f:
old_data = f.read()
if old_data != data:
print(f"Updating {file}...")
with open(file, 'w', encoding='utf-8') as f:
f.write(data)
for file in to_remove:
print(f"Removing {file}...")
os.remove(file)
def generate_pyimgui(cimgui_dir, output_dir, backends):
def_dir = cimgui_dir / 'generator' / 'output'
with open(def_dir / 'structs_and_enums.json', 'r', encoding='utf-8') as f:
struct_and_enums = json.load(f)
with open(def_dir / 'definitions.json', 'r', encoding='utf-8') as f:
func_defs = json.load(f)
with open(def_dir / 'typedefs_dict.json', 'r', encoding='utf-8') as f:
typedefs_dict = json.load(f)
def solve_typedef(t):
while t in typedefs_dict:
t_ = typedefs_dict[t]
if t_.startswith('struct ') or re.search(r"\(\*\)\((.*)\)$", t_):
return t
t = t_
return t
typedefs_dict = {n: solve_typedef(t) for n, t in typedefs_dict.items() if not (t.startswith('struct ') or re.search(r"\(\*\)\((.*)\)$", t))}
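    # Illustration with assumed data: given {"ImGuiID": "ImU32", "ImU32": "unsigned int"},
    # solve_typedef("ImGuiID") resolves to "unsigned int"; aliases that directly name a
    # "struct ..." or function-pointer type are filtered out, and chains stop before
    # flattening into one.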
enum_defs = CodeWriter(1)
enum_casts = CodeWriter(0)
enum_defs.write("auto IntEnum = py::module_::import(\"enum\").attr(\"IntEnum\");\n")
enum_casts.write("""
#define ENUM_CAST(T) \\
namespace PYBIND11_NAMESPACE { namespace detail { \\
template <> struct type_caster<T> { \\
public: \\
PYBIND11_TYPE_CASTER(T, const_name("T")); \\
bool load(handle src, bool convert) { \\
PyObject *source = src.ptr(); \\
PyObject *tmp = PyNumber_Long(source); \\
if (!tmp) return false; \\
value = (T)PyLong_AsLong(tmp); \\
Py_DECREF(tmp); \\
return !PyErr_Occurred(); \\
} \\
static handle cast(T src, return_value_policy policy, handle parent) { \\
return PyLong_FromLong(src); \\
} \\
}; \\
}}\n
""")
for enum_type, enum_items in struct_and_enums['enums'].items():
# enum_defs.write(f"py::enum_<{enum_type}>(m, \"{enum_type}\")")
# with enum_defs.push_indent():
# for item in enum_items:
# enum_defs.write(f".value(\"{item['name']}\", {item['name']})")
# enum_defs.write(".export_values();")
enum_defs.write(f"m.attr(\"{enum_type}\") = IntEnum(\"{enum_type}\", py::dict(\n")
with enum_defs.push_indent():
for i, item in enumerate(enum_items):
enum_defs.write(f"py::arg(\"{item['name']}\") = {item['name']}")
enum_defs.write(",\n" if i < len(enum_items) - 1 else "\n")
# enum_defs.write(f"py::arg(\"__module__\") = m.attr(\"__name__\")\n")
enum_defs.write("\n));\n")
enum_defs.write(f"auto enum_{enum_type} = m.attr(\"{enum_type}\");\n")
for i, item in enumerate(enum_items):
enum_defs.write(f"m.attr(\"{item['name']}\") = enum_{enum_type}.attr(\"{item['name']}\");\n")
enum_casts.write(f"ENUM_CAST({enum_type});\n")
def export_template_create(writer, template_name):
writer.write(f"template <typename T>\n")
writer.write(f"void pybind_setup_template_cls_{template_name}(py::module &m, const char* type_name) {{")
with writer.push_indent():
writer.write(f"\npy::class_<T>(m, type_name, py::dynamic_attr())")
with writer.push_indent():
# for field in struct_and_enums['templated_structs'][template_name]:
# field_name = field['name']
# try:
# field_name = field_name[:field_name.index('[')]
# except ValueError:
# pass
# writer.write(f"\n.def_readwrite(\"{field_name}\", &T::{field_name})")
if s := specified_wrappers.get(f'_TCLS_EXTRA_{template_name}', ''):
writer.write('\n' + s)
writer.write(";\n")
writer.write("}\n")
template_defs = CodeWriter(0)
for template_name in struct_and_enums['templated_structs'].keys():
export_template_create(template_defs, template_name)
exported_types = set()
template_need_export = set()
def make_func_desc(func):
if (s := specified_wrappers.get(f"_GFUNC_:{func['ov_cimguiname']}")) is not None:
return s, set()
if func.get('isvararg'): return f"\n/* TODO:varg func {func['ov_cimguiname']}*/", set()
if any(arg['type'] == 'va_list' for arg in func['argsT']): return f"\n/* TODO:va_list func {func['ov_cimguiname']}*/", set()
real_arg_off = nonUDT = func.get("nonUDT", 0)
if not func.get('stname'):
func_type = 0 # global function
elif func.get("constructor"):
func_type = 11 # class constructor
elif func.get("destructor"):
func_type = 12 # class destructor
elif func.get("is_static_function"):
func_type = 2 # class static method
else:
func_type = 1 # class method
def_arg_map = {}
if func_type == 12 or func_type == 11:
desc = f"&{func['ov_cimguiname']}"
else:
func_args = []
for argt in func['argsT'][real_arg_off:]:
if re.search(r"\(\*\)\((.*)\)$", argt['type']):
return f"\n/* TODO:func pointer arg {func['ov_cimguiname']} {func.get('ret')} {func['signature']}*/", set()
a = "%s"
c = "%s"
if argt['type'].endswith('*') and (type_ := argt['type'][:-1]).strip().rsplit(' ', 1)[-1] in struct_and_enums['structs']:
if type_ == 'ImGuiKey':
type_ = 'int'
c = f"(ImGuiKey)(%s)"
if func.get("defaults", {}).get(argt['name']) in ('nullptr', 'NULL'):
a %= f"std::optional<{type_}>& {argt['name']}"
c %= f"({argt['name']} ? &*{argt['name']} : nullptr)"
def_arg_map[argt['name']] = "py::none()"
else:
a %= f"{type_}& {argt['name']}"
c %= f"&{argt['name']}"
else:
type_ = argt['type'].strip()
if type_ == 'ImGuiKey':
type_ = 'int'
c = f"(ImGuiKey)(%s)"
a %= f"{type_} {argt['name']}"
c %= argt['name']
func_args.append((a, c))
func_args_s = ', '.join(a for a, c in func_args)
has_return = int(func.get('ret', 'void') != 'void')
call_args = [f"&__out_{i}" for i in range(nonUDT)]
# if func_type == 1: call_args.append("&self")
call_args.extend(c for a, c in func_args)
ret_args = []
if has_return: ret_args.append("__ret")
ret_args.extend(f"__out_{i}" for i in range(nonUDT))
desc = f"[]({func_args_s}){{"
for i, argt in enumerate(func['argsT'][:nonUDT]):
desc += f"{argt['type'][:-1]} __out_{i} = {{}};"
if has_return:
desc += "auto __ret = "
desc += f"{func['ov_cimguiname']}({', '.join(call_args)});"
if len(ret_args) == 1:
desc += f"return {ret_args[0]};"
elif len(ret_args) > 1:
desc += f"return std::make_tuple({', '.join(ret_args)});"
desc += "}"
args = ''
extra_types = set()
if func_type == 1:
real_arg_off += 1
for argt in func['argsT'][real_arg_off:]:
args += f", py::arg(\"{argt['name']}\")"
if (d := def_arg_map.get(argt['name'])) or (d := func.get("defaults", {}).get(argt['name'])) is not None:
args += f" = {d}"
t = argt['type'].rstrip('*&')
if t.startswith("const "):
t = t[6:]
elif t.startswith("struct "):
t = t[7:]
elif t.startswith("enum "):
t = t[5:]
if t in struct_and_enums['structs']:
extra_types.add(t)
if nonUDT:
args += ", py::return_value_policy::move"
if (ret := func.get('ret', '')).endswith('*') and ret[:-1] in struct_and_enums['structs']:
args += ", py::return_value_policy::reference"
match func_type:
case 0: # global function
return f".def(\"{func['funcname']}\", {desc}{args})", extra_types
case 1: # class method
return f".def(\"{func['funcname']}\", {desc}{args})", extra_types
case 2: # class static method
return f".def_static(\"{func['funcname']}\", {desc}{args})", extra_types
case 11: # class constructor
return f".def(py::init({desc}){args})", extra_types
case 12: # class destructor
return None, extra_types
def export_field(writer, type_name, field):
field_name = field['name']
if size := field.get('size'):
field_name = field_name[:field_name.index('[')]
if (s := specified_wrappers.get(f'_CLS_FIELD_:{type_name}::{field_name}')) is not None:
writer.write('\n' + s)
return
field_type = field['type'].strip()
if not field_name and field_type.startswith('union '):
for line_ in field_type[7:-1].split(';'):
line = line_.strip()
if not line: continue
t, n = line.rsplit(' ', 1)
export_field(writer, type_name, {'name': n, 'type': t})
return
if field_type.startswith('const '):
field_type = field_type[6:].strip()
if "template_type" in field:
_template_base = field_type.split('_', 1)[0]
template_need_export.add((_template_base, field['type'].rstrip('&*')))
if m := re.search(r"\(\*\)\((.*)\)$", field_type):
writer.write(f"\n// {field['name']} {field['type']}")
elif size:
# if field['type'].endswith('*'):
# writer.write(f"\n// {field['name']}: {field['type']}[{size}] not support")
# else:
# writer.write(f"\n.def_property_readonly(\"{field_name}\", []({type_name}& self) {{ return self.{field_name}; }}) // {field['type']}[{size}]")
_type = field['type']
while _type in typedefs_dict:
_type = typedefs_dict[_type]
_typeN = _type.strip().replace(' ', '_')
_typeN = ('p_' + _typeN[:-1]) if _typeN.endswith('*') else _typeN
template_need_export.add((f"PyArrayWrapper<{_type}>::pybind_setup(m, \"Arr_{_typeN}\");", None))
writer.write(f"\n.def_property_readonly(\"{field_name}\", []({type_name}& self) {{ return PyArrayWrapper<{field['type']}>(self.{field_name}, {size}); }}) // {field['type']}[{size}]")
else:
writer.write(f"\n.def_property(\"{field_name}\", []({type_name}& self) {{ return self.{field_name}; }}, []({type_name}& self, {field_type} value) {{ self.{field_name} = value; }}) // {field['type']}")
def export_type(writer, type_name):
if type_name in exported_types: return
exported_types.add(type_name)
code = CodeWriter()
code.write(f"py::class_<{type_name}>(m, \"{type_name}\", py::dynamic_attr())")
with code.push_indent():
for field in struct_and_enums['structs'][type_name]:
export_field(code, type_name, field)
for func in struct_funcs.get(type_name, []):
desc, extra_types = make_func_desc(func)
if desc: code.write('\n' + desc)
for t in extra_types:
if t in struct_and_enums['structs']:
export_type(writer, t)
if s := specified_wrappers.get(f'_CLS_EXTRA_{type_name}', ''):
template_defs.write('\n' + s)
writer.write(code.getvalue().rstrip() + '\n;\n')
struct_funcs = {}
for overloads in func_defs.values():
for overload in overloads:
struct_funcs.setdefault(overload['stname'], []).append(overload)
cls_defs = CodeWriter(1)
for keys in struct_and_enums['structs'].keys():
export_type(cls_defs, keys)
cls_template_defs = CodeWriter(1)
for template_name, template_type in sorted(template_need_export):
if template_type is None:
cls_template_defs.write(f'\n{template_name};')
continue
if template_type.endswith('*'):
_template_type = 'p_' + template_type[:-1]
else:
_template_type = template_type
        # use the sanitized alias ("p_" prefix for pointer element types) as the exported name
        cls_template_defs.write(f'\npybind_setup_template_cls_{template_name}<{template_type}>(m, "{_template_type}");')
glob_defs = CodeWriter(1)
for func in struct_funcs.get('', []):
desc, extra_types = make_func_desc(func)
if desc:
if desc.startswith('/*') or desc.startswith('//'):
glob_defs.write(desc)
else:
glob_defs.write('\nm' + desc + ';')
# for t in extra_types:
# if t in struct_and_enums['structs']:
# export_type(cls_defs, t)
core_dir = output_dir / 'pyimgui_core'
update_generated_files(output_dir, [
(
core_dir / 'enums.h',
"#pragma once\n"
"#include \"gHeader.h\"\n"
"namespace mNameSpace{ namespace PyImguiCore{\n"
"void pybind_setup_pyimgui_enums(pybind11::module_ m);"
"}}\n"
),
(
core_dir / 'enums.cpp',
"#include \"./enums.h\"\n"
f"{enum_casts.getvalue()}\n"
"namespace mNameSpace{ namespace PyImguiCore{\n"
f"void pybind_setup_pyimgui_enums(pybind11::module_ m) {{ {enum_defs.getvalue()} }}"
"}}\n"
),
(
core_dir / 'structs.h',
"#pragma once\n"
"#include \"gHeader.h\"\n"
"namespace mNameSpace{ namespace PyImguiCore{\n"
"void pybind_setup_pyimgui_structs(pybind11::module_ m);\n"
"}}\n"
),
(
core_dir / 'structs.cpp',
"#include \"./structs.h\"\n"
"namespace mNameSpace{ namespace PyImguiCore{\n"
f"{specified_wrappers.get('__STRUCTS_EXTRA__', '')}\n"
f'{template_defs.getvalue()}\n'
"void pybind_setup_pyimgui_structs(pybind11::module_ m) {"
f"{specified_wrappers.get('__STRUCTS_DEF_EXTRA__', '')};\n"
f"{cls_template_defs.getvalue()};\n "
f"{cls_defs.getvalue()}\n"
"}}}\n"
),
(
core_dir / 'globals.h',
"#pragma once\n"
"#include \"gHeader.h\"\n"
"namespace mNameSpace{ namespace PyImguiCore{\n"
"void pybind_setup_pyimgui_globals(pybind11::module_ m);\n"
"}}\n"
),
(
core_dir / 'globals.cpp',
f"#include \"./globals.h\"\n"
"namespace mNameSpace{ namespace PyImguiCore{\n"
f"{specified_wrappers.get('__GLOBAL_EXTRA__', '')}\n"
"void pybind_setup_pyimgui_globals(pybind11::module_ m) {\n"
f"{specified_wrappers.get('__GLOBAL_DEF_EXTRA__', '')};\n"
f" {glob_defs.getvalue()}\n"
"}}}\n"
),
(
output_dir / 'pyimgui.h',
"""#include "gHeader.h"
#include "./pyimgui_core/enums.h"
#include "./pyimgui_core/structs.h"
#include "./pyimgui_core/globals.h"
#define PYIMGUI_CORE_NAMESPACE mNameSpace::PyImguiCore
namespace mNameSpace{ namespace PyImguiCore{
void pybind_setup_pyimgui_core(pybind11::module_ m);
}}
"""
),
(
output_dir / 'pyimgui.cpp',
"""#include "./pyimgui.h"
namespace mNameSpace{ namespace PyImguiCore{
void pybind_setup_pyimgui_core(pybind11::module_ m) {
pybind_setup_pyimgui_enums(m);
pybind_setup_pyimgui_structs(m);
pybind_setup_pyimgui_globals(m);
}
}}
"""
)
])
return [
core_dir / 'enums.cpp',
core_dir / 'structs.cpp',
core_dir / 'globals.cpp',
output_dir / 'pyimgui.cpp',
]
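# Clone and build LuaJIT, which is needed to run cimgui's lua-based generator.
# The build uses mingw32-make from the MSYS2 clang64 toolchain; luajit.exe,
# lua51.dll and the jit/ lua modules are then collected under bin/.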
def load_luajit(luajit_dir):
if not luajit_dir.is_dir():
subprocess.check_call([
ensure_env.ensure_git(), 'clone', 'https://luajit.org/git/luajit.git', luajit_dir
], cwd=luajit_dir.parent)
bin_dir = luajit_dir / 'bin'
src_dir = luajit_dir / 'src'
if not (bin_dir / 'luajit.exe').is_file():
if bin_dir.exists(): shutil.rmtree(bin_dir)
subprocess.check_call([ensure_env.ensure_msys2_file('/clang64/bin/mingw32-make.exe')], cwd=luajit_dir)
(bin_dir / 'lua').mkdir(parents=True)
shutil.copy(src_dir / 'luajit.exe', bin_dir / 'luajit.exe')
shutil.copy(src_dir / 'lua51.dll', bin_dir / 'lua51.dll')
shutil.copytree(src_dir / 'jit', bin_dir / 'lua' / 'jit')
return bin_dir / 'luajit.exe'
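# Fetch the third-party sources (cimgui with its imgui submodule, Microsoft
# Detours, stb) and run cimgui's generator.lua under LuaJIT to produce the
# C API definitions for the requested rendering backends.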
def load_requirements(auto_src_dir, backends):
auto_src_dir.mkdir(parents=True, exist_ok=True)
cimgui_dir = auto_src_dir / 'cimgui'
if not cimgui_dir.is_dir():
subprocess.check_call([ensure_env.ensure_git(), 'clone', 'https://github.com/cimgui/cimgui.git', cimgui_dir], cwd=auto_src_dir)
subprocess.check_call([ensure_env.ensure_git(), 'submodule', 'update', '--init', '--recursive'], cwd=cimgui_dir)
if not (auto_src_dir / 'detours').is_dir():
subprocess.check_call([ensure_env.ensure_git(), 'clone', 'https://github.com/microsoft/Detours.git', auto_src_dir / 'detours'], cwd=auto_src_dir)
if not (auto_src_dir / 'stb').is_dir():
subprocess.check_call([ensure_env.ensure_git(), 'clone', 'https://github.com/nothings/stb.git', auto_src_dir / 'stb'], cwd=auto_src_dir)
ensure_env.ensure_msys2_file('/clang64/bin/gcc.exe')
env = os.environ.copy()
env = {**env, 'PATH': f"{env['PATH']};{pathlib.Path(ensure_env.ensure_msys2_file('/clang64/bin/gcc.exe')).parent}"}
subprocess.check_call([
load_luajit(auto_src_dir / 'luajit'),
'./generator.lua',
'gcc', 'internal noimstrv',
*backends,
], cwd=cimgui_dir / 'generator', env=env)
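# Build a pybind11 extension in-place by driving setuptools' build_ext command
# object directly instead of going through a setup.py invocation; MSVC is
# ensured first, and pre-3.12 interpreters are pinned to the stdlib distutils.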
def pybind11_build(*a, debug=0, **kw):
ensure_env.ensure_msvc()
required('pybind11')
    required('setuptools')  # setuptools must be installed manually; it is not auto-provisioned on newer Pythons
if sys.version_info < (3, 12):
os.environ['SETUPTOOLS_USE_DISTUTILS'] = 'stdlib'
from setuptools import Distribution
from pybind11.setup_helpers import Pybind11Extension, build_ext
dist = Distribution({
'cmdclass': {'build_ext': build_ext},
'ext_modules': [Pybind11Extension(*a, **kw), ]
})
cmd_obj = dist.get_command_obj('build_ext')
cmd_obj.inplace = 1
cmd_obj.ensure_finalized()
cmd_obj.debug = debug
cmd_obj.run()
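# Generate .pyi type stubs for the freshly built module by invoking
# pybind11-stubgen programmatically through its own CLI argument parser.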
def stub_gen(module_name, output_dir):
required('pybind11-stubgen')
import pybind11_stubgen
args = pybind11_stubgen.arg_parser().parse_args(["-o", str(output_dir), module_name], namespace=pybind11_stubgen.CLIArgs())
out_dir, sub_dir = pybind11_stubgen.to_output_and_subdir(
output_dir=args.output_dir,
module_name=args.module_name,
root_suffix=args.root_suffix,
)
pybind11_stubgen.run(
pybind11_stubgen.stub_parser_from_args(args),
pybind11_stubgen.Printer(invalid_expr_as_ellipses=not args.print_invalid_expressions_as_is),
args.module_name,
out_dir,
sub_dir=sub_dir,
dry_run=args.dry_run,
writer=pybind11_stubgen.Writer(stub_ext=args.stub_extension),
)
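# Full pipeline: fetch sources, generate the pybind11 wrapper code, compile
# everything (cimgui, imgui, Detours, backend impls) with MSVC, emit stubs,
# then copy the built module and stub package into nylib and the
# release/debug output directory.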
def generate(backends, debug=0):
cwd = pathlib.Path(__file__).parent
src_dir = cwd / 'src'
auto_src_dir = cwd / 'auto_src'
cimgui_dir = auto_src_dir / 'cimgui'
imgui_dir = cimgui_dir / 'imgui'
detours_dir = auto_src_dir / 'detours'
load_requirements(auto_src_dir, backends)
pybind11_build(
name="pyimgui",
sources=sorted(map(str, [
*cimgui_dir.glob('*.cpp'),
*imgui_dir.glob('*.cpp'),
*generate_pyimgui(cimgui_dir, auto_src_dir / 'pyimgui', backends),
*src_dir.glob('*.cpp'),
*(src_dir / 'mImguiImpl').glob('*.cpp'),
*(f for f in (detours_dir / 'src').glob('*.cpp') if f.name != 'uimports.cpp'),
*(imgui_dir / 'backends' / f'imgui_impl_{backend}.cpp' for backend in backends),
])),
include_dirs=sorted(map(str, [
cimgui_dir,
cimgui_dir / 'generator' / 'output',
auto_src_dir / 'pyimgui',
imgui_dir,
imgui_dir / 'backends',
src_dir,
auto_src_dir,
auto_src_dir / 'stb',
])),
extra_objects=[],
extra_compile_args=[
*(f'/DCIMGUI_USE_{backend.upper()}=1' for backend in backends),
'/DIMGUI_DISABLE_OBSOLETE_FUNCTIONS=1',
'/DIMGUI_IMPL_API=extern \"C\"',
'/D_AMD64_=1',
'/DUNICODE',
'/D_UNICODE',
'/utf-8',
],
debug=debug,
)
stub_gen('pyimgui', str(cwd))
import pyimgui
pyimgui_file = pathlib.Path(pyimgui.__file__).resolve()
for dst_dir in (
cwd.parent.parent / 'nylib',
cwd / ('debug' if debug else 'release')
):
dst_dir.mkdir(parents=True, exist_ok=True)
shutil.copy(pyimgui_file, dst_dir / pyimgui_file.name)
if (dst_dir / 'pyimgui').is_dir():
shutil.rmtree(dst_dir / 'pyimgui')
shutil.copytree(cwd / 'pyimgui', dst_dir / 'pyimgui')
def main():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--debug', action='store_true')
parser.add_argument('--skip', action='store_true')
args = parser.parse_args()
generate([
'win32',
'dx9',
'dx10',
'dx11',
'dx12',
], debug=args.debug)
if not args.skip:
import pyimgui_test
pyimgui_test.test()
if __name__ == '__main__':
main()
| 23,761 | Python | .py | 536 | 34.115672 | 212 | 0.538282 | nyaoouo/NyLib2 | 8 | 0 | 0 | GPL-3.0 | 9/5/2024, 10:48:51 PM (Europe/Amsterdam) |
2,290,084 | pyimgui_test.py | nyaoouo_NyLib2/scripts/pyimgui/pyimgui_test.py | import contextlib
import io
import os
import pathlib
import threading
import time
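# Download a random cat picture from cataas.com into `dst`. The read chunk
# size adapts to observed latency (doubled when a chunk arrives in under
# 0.5s, halved when it takes over 2s), and `cb(bytes_read, total)` reports
# progress along the way.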
def get_cat_image(dst, cb=None):
import urllib.request
url = 'https://cataas.com/cat'
buffer = io.BytesIO()
with urllib.request.urlopen(url) as response:
chunk = 128
content_length = response.getheader('Content-Length')
content_length = int(content_length) if content_length else None
if cb:
cb(0, content_length)
while True:
read_chunk_start = time.time()
data = response.read(chunk)
if not data:
break
read_chunk_used = time.time() - read_chunk_start
if read_chunk_used < 0.5:
chunk *= 2
elif read_chunk_used > 2:
chunk //= 2
buffer.write(data)
if cb:
cb(buffer.tell(), content_length)
if content_length and buffer.tell() != content_length:
raise ValueError(f"Downloaded file size mismatch: {buffer.tell()} != {content_length}")
if cb:
cb(buffer.tell(), content_length)
buffer.seek(0)
with open(dst, 'wb') as f:
f.write(buffer.read())
return dst
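# Interactive smoke test: opens a DX11 window and exercises fonts, textures,
# tables, tabs, combos, input widgets, draw-list primitives and an optional
# cProfile-based profiler.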
def test():
import cProfile
import pstats
import pyimgui
import pyimgui.imgui as imgui
import pyimgui.imgui.ctx as imgui_ctx
class TestWindow:
wnd: pyimgui.Dx11Window | pyimgui.Dx12Window
last_io: imgui.ImGuiIO
def __init__(self):
self.show_about_window = False
self.show_debug_log_window = False
self.show_demo_window = False
self.show_id_stack_tool_window = False
self.show_metrics_window = False
self.profiler = None
self.profile_string = None
self.font = None
self.test_string = 'Hello, world!'
self.combo_items = ['item1', 'item2', 'item3']
self.combo_selected = '-'
self.test_image = None
self.test_image_path = None
self.is_init = False
self.load_progress = None
def get_test_image(self, force_reload=False):
self.test_image_path = './auto_src/cat.jpg'
self.load_progress = "0 (unknown total)"
try:
if force_reload or not os.path.isfile(self.test_image_path):
def _cb(cur, total):
if total:
self.load_progress = cur / total
else:
self.load_progress = f"(N/A) {cur}/?"
get_cat_image(self.test_image_path, _cb)
except Exception as e:
print(f"Failed to get cat image: {e}")
self.test_image_path = None
else:
self.wnd.CallBeforeFrameOnce(lambda: setattr(self, 'test_image', self.wnd.CreateTexture(self.test_image_path)))
finally:
self.load_progress = None
def do_init(self):
self.is_init = True
io = imgui.GetIO()
font_dir = pathlib.Path(os.environ['WINDIR']) / 'fonts'
if (font_file := font_dir / 'msyh.ttc').is_file():
self.font = io.Fonts.AddFontFromFileTTF(str(font_file), 16, None, io.Fonts.GetGlyphRangesChineseFull())
io.Fonts.Build()
self.wnd.InvalidateDeviceObjects()
threading.Thread(target=self.get_test_image).start()
def __call__(self, wnd: pyimgui.Dx11Window | pyimgui.Dx12Window):
self.wnd = wnd
self.last_io = imgui.GetIO()
if not self.is_init:
return self.wnd.CallBeforeFrameOnce(self.do_init)
func_last = []
with imgui_ctx.PushFont(self.font) if self.font else contextlib.nullcontext():
if self.show_about_window:
self.show_about_window = imgui.ShowAboutWindow()
if self.show_debug_log_window:
self.show_debug_log_window = imgui.ShowDebugLogWindow()
if self.show_demo_window:
self.show_demo_window = imgui.ShowDemoWindow()
if self.show_id_stack_tool_window:
self.show_id_stack_tool_window = imgui.ShowIDStackToolWindow()
if self.show_metrics_window:
self.show_metrics_window = imgui.ShowMetricsWindow()
imgui_io = imgui.GetIO()
if self.profile_string:
with imgui_ctx.Begin("Profiler result") as (show, window_open):
if not window_open:
self.profile_string = None
elif show:
imgui.Text(self.profile_string)
viewport = imgui.GetMainViewport()
cls = imgui.ImGuiWindowClass()
cls.DockNodeFlagsOverrideSet = imgui.ImGuiDockNodeFlags_NoDocking
imgui.SetNextWindowClass(cls)
imgui.SetNextWindowPos(viewport.Pos)
imgui.SetNextWindowSize(viewport.Size)
with imgui_ctx.Begin(
# f"Hello, world (fps: {imgui_io.Framerate:.1f}) ###HelloWorld",
"##FullWindow",
flags=imgui.ImGuiWindowFlags_NoDecoration | imgui.ImGuiWindowFlags_NoMove | imgui.ImGuiWindowFlags_NoSavedSettings | imgui.ImGuiWindowFlags_NoBringToFrontOnFocus
) as (show, window_open):
if not window_open:
self.wnd.Close()
if show:
if not self.profiler:
if imgui.Button("Start profiler"):
self.profiler = cProfile.Profile()
self.profiler.enable()
else:
if imgui.Button("Stop profiler"):
self.profiler.disable()
# self.profiler.print_stats()
buf = io.StringIO()
pstats.Stats(self.profiler, stream=buf).sort_stats(pstats.SortKey.CUMULATIVE).print_stats()
self.profile_string = buf.getvalue()
self.profiler = None
clicked = False
if self.test_image:
img_h = 200
img_w = self.test_image.width * img_h // self.test_image.height
clicked = imgui.ImageButton("##img_button", self.test_image.handle, imgui.ImVec2(img_w, img_h))
if self.load_progress is not None:
if isinstance(self.load_progress, float):
imgui.ProgressBar(self.load_progress, imgui.ImVec2(200, 0), f"Updating cat image: {self.load_progress:.2%}")
else:
imgui.Text(f"Updating cat image: {self.load_progress}")
elif imgui.Button("new cat image (or click image)") or clicked:
threading.Thread(target=self.get_test_image, args=(True,)).start()
imgui.Text("中文字符")
imgui.Text("This is another useful text.")
imgui.Text(f"{self.show_about_window=}")
window_size = imgui.GetWindowSize()
imgui.Text(f"Window size: {window_size.x}, {window_size.y}")
window_pos = imgui.GetWindowPos()
imgui.Text(f"Window pos: {window_pos.x}, {window_pos.y}")
if imgui.CollapsingHeader("Test"):
with imgui_ctx.BeginChild("TabTest", imgui.ImVec2(300, 100),child_flags=imgui.ImGuiChildFlags_Border) as show_child:
if show_child:
with imgui_ctx.BeginTabBar("##tabs") as show_tabbar:
if show_tabbar:
with imgui_ctx.BeginTabItem("Tab1") as (show_tab, _):
if show_tab:
imgui.Text("Tab1")
with imgui_ctx.BeginTabItem("Tab2") as (show_tab, _):
if show_tab:
imgui.Text("Tab2")
with imgui_ctx.BeginCombo("Combo", self.combo_selected) as show_combo:
if show_combo:
for item in self.combo_items:
if imgui.Selectable(item):
self.combo_selected = item
_, self.wnd.ClearColor = imgui.ColorEdit4("Clear color", self.wnd.ClearColor)
changed, self.test_string = imgui.InputText("Test string", self.test_string)
imgui.Text(f"Test string: ")
imgui.SameLine()
text_size = imgui.CalcTextSize(self.test_string)
draw_list = imgui.GetWindowDrawList()
pos = imgui.GetCursorScreenPos()
imgui.Text(self.test_string)
draw_list.AddRect(pos, imgui.ImVec2(pos.x + text_size.x, pos.y + text_size.y), imgui.GetColorU32(imgui.ImVec4(1, 0, 0, 1)))
imgui.Text(f"pos: {pos.x}, {pos.y} size: {text_size.x}, {text_size.y}")
changed, new_title = imgui.InputText("Window title", self.wnd.title)
if changed: self.wnd.title = new_title
with imgui_ctx.BeginTable(
"test_table",
2,
flags=imgui.ImGuiTableFlags_BordersInnerV | imgui.ImGuiTableFlags_BordersOuterV | imgui.ImGuiTableFlags_BordersOuterH | imgui.ImGuiTableFlags_RowBg
) as show:
if show:
for i in range(10):
imgui.TableNextRow()
for j in range(2):
imgui.TableNextColumn()
imgui.Text(f"Cell {i}, {j}")
changed, self.show_about_window = imgui.Checkbox("Show about window", self.show_about_window)
changed, self.show_debug_log_window = imgui.Checkbox("Show debug log window", self.show_debug_log_window)
changed, self.show_demo_window = imgui.Checkbox("Show demo window", self.show_demo_window)
changed, self.show_id_stack_tool_window = imgui.Checkbox("Show ID stack tool window", self.show_id_stack_tool_window)
changed, self.show_metrics_window = imgui.Checkbox("Show metrics window", self.show_metrics_window)
for f in func_last:
f()
    test_window = TestWindow()
    wnd = pyimgui.Dx11Window(test_window)
    wnd.title = "Hello, world!"
    wnd.Serve()
    return test_window  # return the instance that was actually served, not a fresh one
if __name__ == '__main__':
test()
| 11,557 | Python | .py | 210 | 34.77619 | 185 | 0.498099 | nyaoouo/NyLib2 | 8 | 0 | 0 | GPL-3.0 | 9/5/2024, 10:48:51 PM (Europe/Amsterdam) |
2,290,085 | func_wrappers.py | nyaoouo_NyLib2/scripts/pyimgui/func_wrappers.py | wrappers = {}
def gfunc_todo(func_name):
wrappers[f"_GFUNC_:{func_name}"] = f'/* TODO:{func_name} */'
def gfunc_not_support(func_name):
    wrappers[f"_GFUNC_:{func_name}"] = f'/* NotSupport:{func_name} */'
gfunc_todo("ImFont_CalcTextSizeA")
gfunc_todo("ImFontAtlas_GetTexDataAsAlpha8")
gfunc_todo("ImFontAtlas_GetTexDataAsRGBA32")
gfunc_todo("GetTexDataAsRGBA32")
gfunc_todo("igCombo_Str_arr")
gfunc_todo("igDebugNodeWindowsListByBeginStackParent")
gfunc_todo("igFindHoveredWindowEx")
gfunc_todo("igImFormatStringToTempBufferV")
gfunc_todo("igImTextStrFromUtf8")
gfunc_todo("igListBox_FnStrPtr")
gfunc_todo("igListBox_Str_arr")
gfunc_todo("igGetAllocatorFunctions")
gfunc_todo("ImGui_ImplDX12_RenderDrawData")
gfunc_todo("ImGui_ImplDX12_Init")
gfunc_todo("ImGui_ImplDX11_Init")
gfunc_todo("ImGui_ImplDX10_Init")
gfunc_todo("ImGui_ImplDX9_Init")
gfunc_not_support('igTextV')
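# Load hand-written wrapper overrides from func_wrappers.cpp. Blocks are
# delimited like this (illustrative sketch only, the names are examples):
#   /*START:igSomeFunc*/
#   ... custom wrapper code ...
#   /*END:igSomeFunc*/
# Each block body is stored in `wrappers`, keyed by the name after START.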
def _load_from_template():
import pathlib
with open(pathlib.Path(__file__).parent / 'func_wrappers.cpp') as f:
s = f.read()
# match /*START:funcname*/
import re
for match in re.finditer(r'/\*START:(.*)\*/(.*?)/\*END:\1\*/', s, re.DOTALL):
wrappers[match.group(1)] = match.group(2).strip()
_load_from_template()
if __name__ == '__main__':
for k, v in wrappers.items():
print(f'{k}: {v}')
| 1,323 | Python | .py | 35 | 34.828571 | 81 | 0.712157 | nyaoouo/NyLib2 | 8 | 0 | 0 | GPL-3.0 | 9/5/2024, 10:48:51 PM (Europe/Amsterdam) |
2,290,086 | inject_main.py | nyaoouo_NyLib2/scripts/test_inject/inject_main.py | import contextlib
import os
import pathlib
import sys
import threading
import traceback
import typing
from nylib.winutils.pipe_rpc import RpcServer
if typing.TYPE_CHECKING:
from nylib.pyimgui import imgui
_T = typing.TypeVar('_T')
class Gui(typing.Generic[_T]):
last_io: 'imgui.ImGuiIO' = None
    def __init__(self, wnd_T: typing.Type[_T]):
self.wnd = wnd_T(self.draw)
self.im_font = None
self.is_init = False
self.draw_funcs = {}
def init_draw(self):
print('init_draw')
from nylib.pyimgui import imgui
io = imgui.GetIO()
io.IniFilename = None
io.ConfigFlags = io.ConfigFlags & ~ imgui.ImGuiConfigFlags_ViewportsEnable # disable multi-window
font_dir = pathlib.Path(os.environ['WINDIR']) / 'fonts'
if (font_file := font_dir / 'msyh.ttc').is_file():
self.im_font = io.Fonts.AddFontFromFileTTF(str(font_file), 16, None, io.Fonts.GetGlyphRangesChineseFull())
io.Fonts.Build()
self.wnd.InvalidateDeviceObjects()
self.is_init = True
def draw(self):
if not self.is_init:
self.wnd.CallBeforeFrameOnce(self.init_draw)
from nylib.pyimgui.imgui import ctx
with ctx.PushFont(self.im_font) if self.im_font else contextlib.nullcontext():
for name, draw_func in self.draw_funcs.items():
try:
draw_func()
except Exception as e:
print(f'Error in draw_func {name}:')
traceback.print_exc()
def attach(self):
self.wnd.Attach()
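# Injected entry point: pick the overlay backend from whichever D3D runtime
# the host process already has loaded, attach the ImGui inbound hook, then
# serve a named-pipe RPC endpoint so the injector can push scripts in.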
def main():
print('Hello, world!')
print(f'python version: {sys.version}')
print('sys.executable:', sys.executable)
print('os.getcwd():', os.getcwd())
print('__file__:', __file__)
import ctypes.wintypes
if ctypes.windll.kernel32.GetModuleHandleW('d3d11.dll'):
from nylib.pyimgui import Dx11Inbound
setattr(sys, '_gui_', gui := Gui(Dx11Inbound))
elif ctypes.windll.kernel32.GetModuleHandleW('d3d12.dll'):
from nylib.pyimgui import Dx12Inbound
setattr(sys, '_gui_', gui := Gui(Dx12Inbound))
else:
raise RuntimeError('No supported graphics API found')
threading.Timer(.5, gui.attach).start()
def run_script(path):
with open(path, 'r', encoding='utf-8') as f:
code = compile(f.read(), path, 'exec')
try:
exec(code, namespace := {'__file__': path})
except Exception:
traceback.print_exc()
raise
return namespace.get('res')
    RpcServer(rf'\\.\pipe\GamePipe-pid-{os.getpid()}', {
'run_script': run_script,
}).serve()
main()
| 2,724 | Python | .py | 72 | 29.861111 | 118 | 0.618071 | nyaoouo/NyLib2 | 8 | 0 | 0 | GPL-3.0 | 9/5/2024, 10:48:51 PM (Europe/Amsterdam) |
2,290,087 | test_inject.py | nyaoouo_NyLib2/scripts/test_inject/test_inject.py | import contextlib
import os
import pathlib
import threading
import typing
from nylib.process import Process
from nylib.winutils import enable_privilege, iter_processes
from nylib.winutils.pipe_rpc import RpcClient
from nylib.winutils.python_loader import run_script
from nylib.pyimgui import Dx11Window, imgui
from nylib.pyimgui.imgui import ctx as imgui_ctx
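# Process picker window: lists running processes (name + PID) with a text
# filter and hands the chosen PID to `callback`; -1 signals that the window
# was closed without a selection.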
class SelectProcess:
def __init__(self, callback: typing.Callable[[int], ...]):
self.callback = callback
self.process_list = None
self.show_process_list = None
self.load_thread = None
self.filter_text = ""
self.refresh()
def load_process_list(self):
self.process_list = [(process.szExeFile.decode('utf-8', 'ignore'), process.th32ProcessID) for process in iter_processes()]
self.update_show_process_list()
def can_refresh(self):
return self.load_thread is None or not self.load_thread.is_alive()
def refresh(self):
if self.can_refresh():
self.show_process_list = None
self.process_list = None
self.load_thread = threading.Thread(target=self.load_process_list)
self.load_thread.start()
def update_show_process_list(self):
if self.filter_text:
self.show_process_list = [(name, pid) for name, pid in self.process_list if self.filter_text.lower() in name.lower()]
else:
self.show_process_list = self.process_list
def __call__(self):
imgui.SetNextWindowSize(imgui.ImVec2(400, 300), imgui.ImGuiCond_FirstUseEver)
with imgui_ctx.Begin("Select Process") as (show, window_open):
if not window_open:
self.callback(-1)
if show:
if self.show_process_list is None:
return imgui.Text("Loading...")
if self.can_refresh() and imgui.Button("Refresh"):
return self.refresh()
imgui.SameLine()
changed, self.filter_text = imgui.InputText("Filter", self.filter_text)
if changed:
self.update_show_process_list()
with imgui_ctx.BeginTable("ProcessTable", 3, imgui.ImGuiTableFlags_ScrollY):
imgui.TableSetupScrollFreeze(0, 1)
imgui.TableSetupColumn("Name")
imgui.TableSetupColumn("PID")
imgui.TableSetupColumn("-")
imgui.TableHeadersRow()
for name, pid in self.show_process_list:
imgui.TableNextRow()
imgui.TableSetColumnIndex(0)
imgui.Text(name)
imgui.TableSetColumnIndex(1)
imgui.Text(str(pid))
imgui.TableSetColumnIndex(2)
if imgui.Button(f"Select##{name}_{pid}"):
self.callback(pid)
class Gui:
instance: 'Gui'
target_process: Process = None
target_rpc: RpcClient = None
select_script_path: pathlib.Path = None
def __init__(self):
Gui.instance = self
self.wnd = Dx11Window(self.draw)
self.im_font = None
self.is_init = False
self.select_script_view_path = pathlib.Path.cwd()
self._select_process = None
def init_draw(self):
from nylib.pyimgui import imgui
io = imgui.GetIO()
io.IniFilename = None
font_dir = pathlib.Path(os.environ['WINDIR']) / 'fonts'
if (font_file := font_dir / 'msyh.ttc').is_file():
self.im_font = io.Fonts.AddFontFromFileTTF(str(font_file), 16, None, io.Fonts.GetGlyphRangesChineseFull())
io.Fonts.Build()
self.wnd.InvalidateDeviceObjects()
self.is_init = True
def on_select_process(self, pid):
print("Selected process:", pid)
if pid > 0:
self.target_process = Process(pid)
self.target_rpc = RpcClient(f"\\\\.\\pipe\\GamePipe-pid-{self.target_process.process_id}")
self._select_process = None
def render_main(self):
viewport = imgui.GetMainViewport()
cls = imgui.ImGuiWindowClass()
cls.DockNodeFlagsOverrideSet = imgui.ImGuiDockNodeFlags_NoDocking
imgui.SetNextWindowClass(cls)
imgui.SetNextWindowPos(viewport.Pos)
imgui.SetNextWindowSize(viewport.Size)
with imgui_ctx.Begin(
"##FullWindow",
flags=imgui.ImGuiWindowFlags_NoDecoration | imgui.ImGuiWindowFlags_NoMove | imgui.ImGuiWindowFlags_NoSavedSettings | imgui.ImGuiWindowFlags_NoBringToFrontOnFocus
) as (show, window_open):
if not window_open:
self.wnd.Close()
if show:
if self.target_process is None:
btn_text = "Select Process"
else:
btn_text = f"Selected: {self.target_process.process_id}"
if imgui.Button(btn_text) and self._select_process is None:
self._select_process = SelectProcess(self.on_select_process)
if self.target_process is None: return
try:
self.target_process.get_ldr_data('python_loader.dll')
                except KeyError:
if imgui.Button("Inject"):
threading.Thread(target=run_script, args=(self.target_process, "./inject_main.py")).start()
return
if self.select_script_path is not None:
if not self.select_script_path.is_file():
self.select_script_path = None
else:
imgui.Text(f"Selected script: {self.select_script_path}")
imgui.SameLine()
if imgui.Button("Run"):
threading.Thread(target=self.target_rpc.rpc.run_script, args=(str(self.select_script_path),)).start()
if not self.select_script_view_path.is_dir():
self.select_script_view_path = pathlib.Path.cwd()
_files = []
_dirs = []
for p in self.select_script_view_path.iterdir():
if p.is_dir():
_dirs.append(p)
else:
_files.append(p)
with imgui_ctx.BeginChild("SelectScriptView"):
if self.select_script_view_path.parent != self.select_script_view_path:
if imgui.Button(".."):
self.select_script_view_path = self.select_script_view_path.parent
for p in _dirs:
if imgui.Button(f"{p.name}/"):
self.select_script_view_path = p
for p in _files:
if imgui.Button(p.name):
self.select_script_path = p.resolve()
def draw(self):
if not self.is_init:
self.wnd.CallBeforeFrameOnce(self.init_draw)
with imgui_ctx.PushFont(self.im_font) if self.im_font else contextlib.nullcontext():
self.render_main()
if self._select_process:
self._select_process()
def serve(self):
self.wnd.Serve()
def main():
# process = Process.from_name("ChronoArk.exe")
# run_script(process, "./inject_main.py")
Gui().serve()
if __name__ == '__main__':
enable_privilege()
main()
| 7,548 | Python | .py | 162 | 33.08642 | 177 | 0.569059 | nyaoouo/NyLib2 | 8 | 0 | 0 | GPL-3.0 | 9/5/2024, 10:48:51 PM (Europe/Amsterdam) |
2,290,088 | client_script.py | nyaoouo_NyLib2/scripts/test_inject/client_script.py | import typing
def reload_all(prefix):
import sys
import importlib
modules = [module for name, module in sys.modules.items() if name == prefix or name.startswith(prefix + '.')]
for module in modules:
importlib.reload(module)
reload_all('nylib.imguiutils')
reload_all('nylib.mono')
from nylib.mono import *
from nylib.mono.imgui_inspect import MonoInspector
from nylib.pyimgui import imgui
from nylib.pyimgui.imgui import ctx as imgui_ctx
from nylib import imguiutils
mono = Mono()
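# Injected inspector script: registers a full-screen background window with
# two tabs - a Mono runtime inspector and a Python thread/stack viewer.
# Insert toggles visibility; closing the window unregisters the draw func.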
class PythonView:
def render_threads(self):
import threading
import traceback
for tid, frame in sys._current_frames().items():
_thread = threading._active.get(tid)
if imgui.CollapsingHeader(_thread.name if _thread else 'Thread-%d' % tid, imgui.ImGuiTreeNodeFlags_DefaultOpen):
imgui.Text(f'tid: {tid}')
# format stack
for filename, lineno, name, line in traceback.extract_stack(frame):
imgui.Text(f'{filename}:{lineno} {name} {line}')
def render(self):
with imgui_ctx.BeginTabBar("##tabs") as show_tabbar:
if show_tabbar:
with imgui_ctx.BeginTabItem("threads") as (show_tab, _):
if show_tab:
self.render_threads()
class MonoInspect:
def __init__(self):
self.inspector = MonoInspector(mono)
self.py_view = PythonView()
self.display = True
def __call__(self):
if imgui.IsKeyPressed(imgui.ImGuiKey_Insert):
self.display = not self.display
if not self.display: return
with imguiutils.BeginFullScreenBackGround("##BGWindow") as (show, window_open):
# with imgui_ctx.Begin("apis") as (show, window_open):
if not window_open:
to_remove = []
for k in draw_funcs:
if draw_funcs[k] is self:
to_remove.append(k)
for k in to_remove:
draw_funcs.pop(k, None)
return
if not show: return
with imgui_ctx.BeginTabBar("##tabs") as show_tabbar:
if show_tabbar:
with imgui_ctx.BeginTabItem("Mono") as (show_tab, _):
if show_tab:
self.inspector.render()
with imgui_ctx.BeginTabItem("Python") as (show_tab, _):
if show_tab:
self.py_view.render()
import sys
draw_funcs = sys._gui_.draw_funcs
draw_funcs['BGWindow'] = MonoInspect()
| 2,634 | Python | .py | 63 | 30.539683 | 124 | 0.580595 | nyaoouo/NyLib2 | 8 | 0 | 0 | GPL-3.0 | 9/5/2024, 10:48:51 PM (Europe/Amsterdam) |
2,290,089 | main.py | MNeMoNiCuZ_ImageSorting/main.py | # main.py
import tkinter as tk
from scripts.gui_setup import setup_gui
def main():
root = tk.Tk()
root.title("Image Sorting Tool")
root.geometry("1200x800")
config = {} # Initialize config dictionary here
setup_gui(root, config)
root.mainloop()
if __name__ == "__main__":
main()
| 310 | Python | .py | 12 | 22.333333 | 52 | 0.662162 | MNeMoNiCuZ/ImageSorting | 8 | 1 | 8 | GPL-3.0 | 9/5/2024, 10:48:51 PM (Europe/Amsterdam) |
2,290,090 | gui_setup.py | MNeMoNiCuZ_ImageSorting/scripts/gui_setup.py | # gui_setup.py
import tkinter as tk
from .menu import setup_menu
from .category_buttons import setup_category_buttons, sort_files # Import sort_files
from .logger import setup_logger, log_info
from .hotkey_utils import bind_hotkeys # Import from hotkey_utils
def setup_gui(root, config):
setup_logger() # Initialize logging
# Define colors for the GUI
bg_color = "#2e2e2e" # Main background color
button_bg_color = "#444444"
button_fg_color = "#ffffff"
root.configure(bg=bg_color)
# Main frame that holds all other frames
main_frame = tk.Frame(root, bg=bg_color, bd=0) # Remove border
main_frame.pack(fill=tk.BOTH, expand=True)
# Thumbnails frame on the left side
thumbnails_frame = tk.Frame(main_frame, bg=bg_color, width=200, bd=0) # Consistent background color, no border
thumbnails_frame.grid(row=0, column=0, rowspan=2, sticky="ns")
# Canvas for the thumbnails (without scrollbar)
canvas = tk.Canvas(thumbnails_frame, bg=bg_color, width=200, bd=0, highlightthickness=0) # Consistent background color, no border
canvas.pack(side=tk.LEFT, fill=tk.BOTH, expand=True)
# Frame inside the canvas for thumbnails
scrollable_frame = tk.Frame(canvas, bg=bg_color, bd=0) # Consistent background color, no border
scrollable_frame.bind(
"<Configure>",
lambda e: canvas.configure(
scrollregion=canvas.bbox("all")
)
)
# Create a window inside the canvas
canvas.create_window((0, 0), window=scrollable_frame, anchor="nw")
# Frame for displaying the main image
image_frame = tk.Frame(main_frame, bg=bg_color, bd=0) # No border
image_frame.grid(row=0, column=1, sticky="nsew")
# Frame for category buttons
buttons_frame = tk.Frame(main_frame, bg=bg_color, bd=0) # No border
buttons_frame.grid(row=1, column=1, sticky="ew")
# Frame for console output (extra details)
console_frame = tk.Frame(main_frame, bg=bg_color, bd=0) # No border
console_frame.grid(row=2, column=0, columnspan=2, sticky="ew")
# Configure the grid layout
main_frame.grid_rowconfigure(0, weight=3) # Image frame gets most space
main_frame.grid_rowconfigure(1, weight=1) # Buttons frame gets less space
main_frame.grid_rowconfigure(2, weight=0) # Console frame gets the least space
main_frame.grid_columnconfigure(1, weight=1) # Main content column gets all available space
# Set up the menu
setup_menu(root, config, image_frame, scrollable_frame, buttons_frame, console_frame, bg_color)
# Set up category buttons
setup_category_buttons(buttons_frame, config, button_bg_color, button_fg_color, image_frame, scrollable_frame)
# Print debug message before binding hotkeys
print("About to bind hotkeys...")
# Bind hotkeys
bind_hotkeys(root, config, image_frame, scrollable_frame)
# Print debug message after binding hotkeys
print("Hotkeys binding complete")
def handle_hotkey_press(event, path, config, image_frame, thumbnails_frame):
print(f"Hotkey '{event.keysym}' pressed for path '{path}'")
sort_files(path, config, image_frame, thumbnails_frame)
| 3,249 | Python | .py | 59 | 48.457627 | 135 | 0.703055 | MNeMoNiCuZ/ImageSorting | 8 | 1 | 8 | GPL-3.0 | 9/5/2024, 10:48:51 PM (Europe/Amsterdam) |
2,290,091 | file_manager.py | MNeMoNiCuZ_ImageSorting/scripts/file_manager.py | import os
def scan_folder(folder_path):
    duplicates = []
    file_dict = {}
for root, _, files in os.walk(folder_path):
for file in files:
if file.lower().endswith(('.jpg', '.jpeg', '.png', '.webp', '.gif')):
name, ext = os.path.splitext(file)
if name in file_dict:
duplicates.append(os.path.join(root, file))
else:
file_dict[name] = os.path.join(root, file)
images = list(file_dict.values())
return images, duplicates
def validate_folder(folder_path):
images, duplicates = scan_folder(folder_path)
if duplicates:
print(f"Duplicate image names found: {duplicates}")
return images, duplicates
| 753 | Python | .py | 20 | 28.95 | 81 | 0.584131 | MNeMoNiCuZ/ImageSorting | 8 | 1 | 8 | GPL-3.0 | 9/5/2024, 10:48:51 PM (Europe/Amsterdam) |
2,290,092 | image_display.py | MNeMoNiCuZ_ImageSorting/scripts/image_display.py | # image_display.py
from PIL import Image, ImageTk
import tkinter as tk
import os
def format_size(size):
    for unit in ['bytes', 'KB', 'MB', 'GB', 'TB']:
        if size < 1024.0:
            return f"{size:.2f} {unit}"
        size /= 1024.0
    return f"{size:.2f} PB"  # fall through for very large files instead of returning None
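# Show the first queued image scaled to fit the frame (with a name/size/
# resolution info bar) and render thumbnails for the first ten images in the
# queue, the current one included.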
def display_images(image_frame, thumbnails_frame, images):
if not images:
return
current_image_path = images[0]
image = Image.open(current_image_path)
image.thumbnail((image_frame.winfo_width(), image_frame.winfo_height()), Image.LANCZOS) # Use LANCZOS for high-quality downsampling
photo = ImageTk.PhotoImage(image)
for widget in image_frame.winfo_children():
widget.destroy()
file_info = get_file_info(current_image_path)
info_label = tk.Label(image_frame, text=file_info, bg="#2e2e2e", fg="white", anchor="center")
info_label.pack(fill=tk.X)
image_label = tk.Label(image_frame, image=photo, bg="#2e2e2e")
image_label.image = photo
image_label.pack(expand=True)
for widget in thumbnails_frame.winfo_children():
widget.destroy()
# Only load the next 10 images
next_images = images[:10]
for thumb_path in next_images:
thumb_image = Image.open(thumb_path)
thumb_image.thumbnail((300, 300), Image.LANCZOS)
thumb_photo = ImageTk.PhotoImage(thumb_image)
thumb_label = tk.Label(thumbnails_frame, image=thumb_photo, bg="#2e2e2e", width=200, anchor="center", bd=0, highlightthickness=0) # Consistent background color, no border
thumb_label.image = thumb_photo
thumb_label.pack(side=tk.TOP, padx=5, pady=5, fill=tk.X)
def get_file_info(image_path):
file_size = os.path.getsize(image_path)
readable_size = format_size(file_size)
image = Image.open(image_path)
width, height = image.size
supplementary_files = get_supplementary_files(image_path)
supplementary_extensions = [os.path.splitext(file)[1][1:] for file in supplementary_files] # Remove leading dot
file_name = os.path.basename(image_path)
info = f"{file_name}|{'|'.join(supplementary_extensions)} - {width}x{height}px, {readable_size}"
return info
def get_supplementary_files(image_path):
folder = os.path.dirname(image_path)
name, _ = os.path.splitext(os.path.basename(image_path))
return [f for f in os.listdir(folder) if f.startswith(name) and not f.endswith(('jpg', 'jpeg', 'png', 'webp', 'gif'))]
| 2,405 | Python | .py | 49 | 43.22449 | 179 | 0.686804 | MNeMoNiCuZ/ImageSorting | 8 | 1 | 8 | GPL-3.0 | 9/5/2024, 10:48:51 PM (Europe/Amsterdam) |
2,290,093 | hotkey_utils.py | MNeMoNiCuZ_ImageSorting/scripts/hotkey_utils.py | # hotkey_utils.py
from .category_buttons import sort_files
def bind_hotkeys(root, config, image_frame, thumbnails_frame):
print("Executing bind_hotkeys...")
for category in config.get("categories", []):
hotkey = category.get("hotkey")
if hotkey:
try:
# Use default arguments in lambda to capture the current values
print(f"Binding hotkey '{hotkey}' for category '{category['name']}'")
root.bind(f"<KeyPress-{hotkey}>", lambda event, path=category["path"], cfg=config, img_frame=image_frame, thumb_frame=thumbnails_frame: sort_files(path, cfg, img_frame, thumb_frame))
print(f"Successfully bound hotkey '{hotkey}' for category '{category['name']}'")
except Exception as e:
print(f"Failed to bind hotkey '{hotkey}' for category '{category['name']}': {e}")
| 884 | Python | .py | 14 | 52.857143 | 198 | 0.637097 | MNeMoNiCuZ/ImageSorting | 8 | 1 | 8 | GPL-3.0 | 9/5/2024, 10:48:51 PM (Europe/Amsterdam) |
2,290,094 | config_manager.py | MNeMoNiCuZ_ImageSorting/scripts/config_manager.py | import json
import os
def load_config_file(file_path):
if os.path.exists(file_path):
with open(file_path, 'r') as file:
return json.load(file)
return {}
def save_config_file(config, file_path):
with open(file_path, 'w') as file:
json.dump(config, file, indent=4)
| 305 | Python | .py | 10 | 25.3 | 42 | 0.651877 | MNeMoNiCuZ/ImageSorting | 8 | 1 | 8 | GPL-3.0 | 9/5/2024, 10:48:51 PM (Europe/Amsterdam) |
2,290,095 | file_operations.py | MNeMoNiCuZ_ImageSorting/scripts/file_operations.py | # file_operations.py
import tkinter as tk
from tkinter import filedialog, messagebox
from .image_display import display_images
from .file_manager import validate_folder
from .logger import log_error, log_info
from .config_manager import load_config_file, save_config_file
from .category_buttons import setup_category_buttons
from .ui_refresh import refresh_ui
from .hotkey_utils import bind_hotkeys
def select_folder(root, image_frame, thumbnails_frame, console_frame, config):
folder_path = filedialog.askdirectory()
if folder_path:
images, duplicates = validate_folder(folder_path)
if duplicates:
show_duplicates_warning(root, duplicates)
log_error(f"Duplicate image names found: {duplicates}")
update_console(console_frame, f"Duplicate image names found:\n" + "\n".join(duplicates))
else:
config['images'] = images
display_images(image_frame, thumbnails_frame, images)
config['chosen_folder'] = folder_path
log_info(f"Folder {folder_path} selected and validated successfully.")
config['current_image_index'] = 0
config['current_image_path'] = images[0] if images else ""
def show_duplicates_warning(root, duplicates):
duplicates_message = "Duplicate image names found:\n" + "\n".join(duplicates)
messagebox.showwarning("Duplicates Found", duplicates_message)
def load_project(root, image_frame, thumbnails_frame, buttons_frame, console_frame, config):
file_path = filedialog.askopenfilename(defaultextension=".json", filetypes=[("JSON files", "*.json")])
if file_path:
reload_configuration(root, image_frame, thumbnails_frame, buttons_frame, console_frame, config, file_path)
def reload_configuration(root, image_frame, thumbnails_frame, buttons_frame, console_frame, config, file_path):
new_config = load_config_file(file_path)
config.clear()
config.update(new_config)
for widget in buttons_frame.winfo_children():
widget.destroy()
setup_category_buttons(buttons_frame, config, "#444444", "#ffffff", image_frame, thumbnails_frame)
if 'chosen_folder' in config:
images, duplicates = validate_folder(config['chosen_folder'])
if not duplicates:
config['images'] = images
display_images(image_frame, thumbnails_frame, images)
config['current_image_index'] = 0
config['current_image_path'] = images[0] if images else ""
refresh_ui(config, image_frame, thumbnails_frame, buttons_frame, console_frame if console_frame else buttons_frame)
# Bind hotkeys after loading the configuration
bind_hotkeys(root, config, image_frame, thumbnails_frame)
def save_project_as(config):
file_path = filedialog.asksaveasfilename(defaultextension=".json", filetypes=[("JSON files", "*.json")])
if file_path:
relevant_config = {key: config[key] for key in ['categories', 'chosen_folder'] if key in config}
save_config_file(relevant_config, file_path)
def update_console(console_frame, message):
for widget in console_frame.winfo_children():
widget.destroy()
console_label = tk.Label(console_frame, text=message, fg="white", bg="black", anchor="w")
console_label.pack(fill=tk.BOTH)
| 3,353 | Python | .py | 59 | 49.169492 | 120 | 0.699023 | MNeMoNiCuZ/ImageSorting | 8 | 1 | 8 | GPL-3.0 | 9/5/2024, 10:48:51 PM (Europe/Amsterdam) |
2,290,096 | category_management.py | MNeMoNiCuZ_ImageSorting/scripts/category_management.py | # category_management.py
import tkinter as tk
from tkinter import simpledialog, filedialog, messagebox
from .config_manager import save_config_file
from .category_buttons import setup_category_buttons, add_sorting_button
from .hotkey_utils import bind_hotkeys
VALID_HOTKEYS = list("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890")
def add_category(buttons_frame, config, bg_color, fg_color, image_frame=None, thumbnails_frame=None):
name = simpledialog.askstring("Category Name", "Enter the name of the category:")
path = filedialog.askdirectory(title="Select Folder for Category")
if name and path:
while True:
hotkey = simpledialog.askstring("Hotkey", "Enter the hotkey for this category (optional):")
if not hotkey or (hotkey and hotkey in VALID_HOTKEYS):
break
else:
messagebox.showerror("Invalid Hotkey", "Please enter a valid single character hotkey (a-z, A-Z, 0-9).")
category = {"name": name, "path": path, "hotkey": hotkey}
if "categories" not in config:
config["categories"] = []
config["categories"].append(category)
# Refresh the category buttons and re-bind hotkeys
setup_category_buttons(buttons_frame, config, bg_color, fg_color, image_frame, thumbnails_frame)
bind_hotkeys(buttons_frame.master, config, image_frame, thumbnails_frame)
print("Successfully added and bound new category")
def edit_categories(buttons_frame, config, bg_color, fg_color, image_frame=None, thumbnails_frame=None):
for widget in buttons_frame.winfo_children():
widget.destroy()
categories = config.get("categories", [])
for category in categories:
add_sorting_button(buttons_frame, category["name"], category["path"], category.get("hotkey"), bg_color, fg_color, config, image_frame, thumbnails_frame)
edit_window = tk.Toplevel()
edit_window.title("Edit Categories")
edit_window.configure(bg="#2e2e2e")
edit_window.geometry("600x400")
listbox = tk.Listbox(edit_window, bg="#2e2e2e", fg="white")
listbox.pack(fill=tk.BOTH, expand=True)
for i, category in enumerate(categories):
hotkey_display = f" [{category.get('hotkey')}]" if category.get('hotkey') else ""
listbox.insert(tk.END, f"{category['name']} ({category['path']}){hotkey_display}")
def delete_category():
selected = listbox.curselection()
if selected:
index = selected[0]
del categories[index]
edit_window.destroy()
edit_categories(buttons_frame, config, bg_color, fg_color, image_frame, thumbnails_frame)
print("Successfully deleted category and reloaded UI")
def edit_category():
selected = listbox.curselection()
if selected:
index = selected[0]
category = categories[index]
new_name = simpledialog.askstring("Edit Category Name", "Enter the new name for the category:", initialvalue=category["name"])
new_path = filedialog.askdirectory(title="Select New Folder for Category", initialdir=category["path"])
if new_name and new_path:
while True:
new_hotkey = simpledialog.askstring("Edit Hotkey", "Enter the new hotkey for this category (optional):", initialvalue=category.get("hotkey", ""))
if not new_hotkey or (new_hotkey and new_hotkey in VALID_HOTKEYS):
break
else:
messagebox.showerror("Invalid Hotkey", "Please enter a valid single character hotkey (a-z, A-Z, 0-9).")
categories[index] = {"name": new_name, "path": new_path, "hotkey": new_hotkey}
edit_window.destroy()
edit_categories(buttons_frame, config, bg_color, fg_color, image_frame, thumbnails_frame)
bind_hotkeys(buttons_frame.master, config, image_frame, thumbnails_frame) # Rebind hotkeys
print("Successfully edited category and reloaded UI")
btn_frame = tk.Frame(edit_window, bg="#2e2e2e")
btn_frame.pack(fill=tk.X)
delete_button = tk.Button(btn_frame, text="Delete", command=delete_category, bg="#444444", fg="white")
delete_button.pack(side=tk.LEFT, fill=tk.X, expand=True)
edit_button = tk.Button(btn_frame, text="Edit", command=edit_category, bg="#444444", fg="white")
edit_button.pack(side=tk.RIGHT, fill=tk.X, expand=True)
| 4,595 | Python | .py | 73 | 51.958904 | 166 | 0.65618 | MNeMoNiCuZ/ImageSorting | 8 | 1 | 8 | GPL-3.0 | 9/5/2024, 10:48:51 PM (Europe/Amsterdam) |
2,290,097 | category_buttons.py | MNeMoNiCuZ_ImageSorting/scripts/category_buttons.py | # category_buttons.py
import tkinter as tk
import shutil
import os
from tkinter import messagebox
from .logger import log_error, log_info
from .image_display import display_images as show_images
# Define the number of buttons per row
BUTTONS_PER_ROW = 6 # You can adjust this number as needed
def setup_category_buttons(buttons_frame, config, button_bg_color, button_fg_color, image_frame, thumbnails_frame):
for widget in buttons_frame.winfo_children():
widget.destroy()
buttons_container = tk.Frame(buttons_frame, bg=buttons_frame.cget("bg"))
buttons_container.pack(expand=True)
row_frame = None
for index, category in enumerate(config.get("categories", [])):
if index % BUTTONS_PER_ROW == 0:
row_frame = tk.Frame(buttons_container, bg=buttons_frame.cget("bg"))
row_frame.pack(fill=tk.X)
add_sorting_button(row_frame, category["name"], category["path"], category.get("hotkey"), button_bg_color, button_fg_color, config, image_frame, thumbnails_frame)
def add_sorting_button(frame, category_name, category_path, hotkey=None, bg_color=None, fg_color=None, config=None, image_frame=None, thumbnails_frame=None):
button = tk.Button(frame, text=f"{category_name} ({hotkey if hotkey else ''})", command=lambda: sort_files(category_path, config, image_frame, thumbnails_frame),
bg=bg_color, fg=fg_color, font=("Helvetica", 16), padx=5, pady=10, width=20)
button.pack(side=tk.LEFT, padx=5, pady=5)
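# Move the current image (plus any supplementary files sharing its stem) into
# the chosen category folder, then advance to the next image or report that
# sorting is complete.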
def sort_files(destination_path, config, image_frame, thumbnails_frame):
current_image_path = config.get('current_image_path')
print(f"Button pressed for {destination_path}")
if current_image_path:
print(f"Current image path: {current_image_path}")
move_files(current_image_path, destination_path)
config['current_image_index'] += 1
if config['current_image_index'] < len(config.get('images', [])):
update_display(config, image_frame, thumbnails_frame)
else:
messagebox.showinfo("Sorting Complete", "All files have been sorted.")
clear_display(image_frame, thumbnails_frame)
else:
print("No current image path set.")
def move_files(image_path, destination_path):
if not os.path.exists(destination_path):
os.makedirs(destination_path)
try:
shutil.move(image_path, destination_path)
supplementary_files = get_supplementary_files(image_path)
for file in supplementary_files:
shutil.move(file, destination_path)
log_info(f"Moved {image_path} to {destination_path}")
except Exception as e:
log_error(f"Error moving files: {e}")
def get_supplementary_files(image_path):
folder = os.path.dirname(image_path)
name, _ = os.path.splitext(os.path.basename(image_path))
return [os.path.join(folder, f) for f in os.listdir(folder) if f.startswith(name) and not f.endswith(('jpg', 'jpeg', 'png', 'webp', 'gif'))]
def update_display(config, image_frame, thumbnails_frame):
images = config.get("images", [])
current_image_index = config.get('current_image_index', 0)
if current_image_index < len(images):
config['current_image_path'] = images[current_image_index]
display_images(config, image_frame, thumbnails_frame)
else:
config['current_image_path'] = ""
print("No more images to display.")
def display_images(config, image_frame, thumbnails_frame):
remaining_images = config.get('images', [])[config.get('current_image_index', 0):]
show_images(image_frame, thumbnails_frame, remaining_images)
def clear_display(image_frame, thumbnails_frame):
for widget in image_frame.winfo_children():
widget.destroy()
for widget in thumbnails_frame.winfo_children():
widget.destroy()
| 3,970 | Python | .py | 71 | 48.43662 | 171 | 0.684034 | MNeMoNiCuZ/ImageSorting | 8 | 1 | 8 | GPL-3.0 | 9/5/2024, 10:48:51 PM (Europe/Amsterdam) |
2,290,098 | logger.py | MNeMoNiCuZ_ImageSorting/scripts/logger.py | import logging
def setup_logger():
logging.basicConfig(filename='output.log', level=logging.INFO,
format='%(asctime)s %(levelname)s:%(message)s')
def log_error(message):
logging.error(message)
def log_info(message):
logging.info(message) | 287 | Python | .py | 8 | 28.75 | 72 | 0.664234 | MNeMoNiCuZ/ImageSorting | 8 | 1 | 8 | GPL-3.0 | 9/5/2024, 10:48:51 PM (Europe/Amsterdam) |
2,290,099 | menu.py | MNeMoNiCuZ_ImageSorting/scripts/menu.py | # menu.py
import tkinter as tk
from .file_operations import load_project, save_project_as
from .category_management import add_category, edit_categories
from .file_operations import select_folder
def setup_menu(root, config, image_frame, thumbnails_frame, buttons_frame, console_frame, menu_color):
menu_bar = tk.Menu(root, bg=menu_color, fg="#ffffff")
root.config(menu=menu_bar)
file_menu = tk.Menu(menu_bar, tearoff=0, bg=menu_color, fg="#ffffff")
menu_bar.add_cascade(label="File", menu=file_menu)
file_menu.add_command(label="Load Project", command=lambda: load_project(root, image_frame, thumbnails_frame, buttons_frame, console_frame, config))
file_menu.add_command(label="Save Project As", command=lambda: save_project_as(config))
file_menu.add_separator()
file_menu.add_command(label="Exit", command=root.quit)
    setup_menu_cascade = tk.Menu(menu_bar, tearoff=0, bg=menu_color, fg="#ffffff")  # renamed so it no longer shadows the enclosing setup_menu()
    menu_bar.add_cascade(label="Setup", menu=setup_menu_cascade)
    setup_menu_cascade.add_command(label="Select Image Folder", command=lambda: select_folder(root, image_frame, thumbnails_frame, console_frame, config))
    setup_menu_cascade.add_command(label="Add Category", command=lambda: add_category(buttons_frame, config, "#444444", "#ffffff", image_frame, thumbnails_frame))
    setup_menu_cascade.add_command(label="Edit Categories", command=lambda: edit_categories(buttons_frame, config, "#444444", "#ffffff", image_frame, thumbnails_frame))
| 1,471 | Python | .py | 19 | 72.368421 | 161 | 0.737206 | MNeMoNiCuZ/ImageSorting | 8 | 1 | 8 | GPL-3.0 | 9/5/2024, 10:48:51 PM (Europe/Amsterdam) |