import gradio as gr
from huggingface_hub import HfApi
import spaces
import shutil
import logging
import subprocess

# Configure logging once at import time rather than on every request
logging.basicConfig(level=logging.INFO)

def write_repo(base_model, model_to_merge):
    with open("repo.txt", "w") as repo:
        repo.write(base_model + "\n" + model_to_merge)
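    # For example, with base_model "mistralai/Mistral-7B-v0.1" and
    # model_to_merge "HuggingFaceH4/zephyr-7b-beta" (hypothetical model IDs),
    # repo.txt would contain:
    #   mistralai/Mistral-7B-v0.1
    #   HuggingFaceH4/zephyr-7b-beta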

@spaces.GPU
def merge_and_upload(base_model, model_to_merge, scaling_factor, weight_drop_prob, repo_name, token):
    # Define a fixed output path
    output_path = "/tmp/output"
    
    # Write the base model and model to merge to the repo.txt file
    write_repo(base_model, model_to_merge)
    
    # Construct the command to run hf_merge.py
    command = [
        "python3", "hf_merge.py",
        "-p", str(weight_drop_prob),
        "-lambda", str(scaling_factor),
        "repo.txt", output_path
    ]
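    # With the UI defaults below, this is equivalent to running, e.g.:
    #   python3 hf_merge.py -p 0.3 -lambda 3.0 repo.txt /tmp/output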
    
    # Run the command and capture the output
    result = subprocess.run(command, capture_output=True, text=True)
    
    # Log the output
    logging.info(result.stdout)
    logging.error(result.stderr)
    
    # Check if the merge was successful
    if result.returncode != 0:
        return f"Error in merging models: {result.stderr}"
    
    # Upload the result to Hugging Face Hub
    api = HfApi(token=token)
    try:
        # Get the username of the user who is logged in
        user = api.whoami(token=token)["name"]

        # Autofill the repo name if none is provided
        if not repo_name:
            repo_name = f"{user}/default-repo"

        # Create a new repo or update an existing one
        api.create_repo(repo_id=repo_name, token=token, exist_ok=True)
        
        # Upload the merged weights (hf_merge.py is assumed to write a
        # single safetensors file at output_path)
        api.upload_file(
            path_or_fileobj=output_path,
            path_in_repo="merged_model.safetensors",
            repo_id=repo_name,
            token=token
        )
        return f"Model merged and uploaded successfully to {repo_name}!"
    except Exception as e:
        return f"Error uploading to Hugging Face Hub: {str(e)}"
    finally:
        # Wipe the merge output after each use (runs even after a return above)
        shutil.rmtree(output_path, ignore_errors=True)

# Define the Gradio interface
with gr.Blocks() as demo:
    gr.Markdown("# Model Merger and Uploader")
    gr.Markdown("Combine capabilities from multiple models. Works with:")
    gr.Markdown("* Stable Diffusion (1.5, XL/XL Turbo)")
    gr.Markdown("* LLMs (Mistral, Llama, etc.)")
    gr.Markdown("* LoRAs (must be the same size)")
    gr.Markdown("* Any two homologous models")
    
    with gr.Column():
        token = gr.Textbox(label="Hugging Face Token", type="password")
        base_model = gr.Textbox(label="Base Model")
        model_to_merge = gr.Textbox(label="Model to Merge")
        scaling_factor = gr.Slider(minimum=0, maximum=10, value=3.0, label="Scaling Factor")
        weight_drop_prob = gr.Slider(minimum=0, maximum=1, value=0.3, label="Weight Drop Probability")
        repo_name = gr.Textbox(label="Repo Name (leave blank for default)")
        merge_button = gr.Button("Merge and Upload")

    with gr.Column():
        output = gr.Textbox(label="Output")

    # Wire the button to the merge function so clicking it actually runs the merge
    merge_button.click(
        fn=merge_and_upload,
        inputs=[base_model, model_to_merge, scaling_factor, weight_drop_prob, repo_name, token],
        outputs=output,
    )

demo.launch()
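
# A minimal smoke test of the merge step without the UI, assuming hf_merge.py
# sits in the working directory (model IDs and token below are hypothetical):
#   print(merge_and_upload(
#       "mistralai/Mistral-7B-v0.1",     # base_model
#       "HuggingFaceH4/zephyr-7b-beta",  # model_to_merge
#       3.0,                             # scaling_factor
#       0.3,                             # weight_drop_prob
#       "",                              # repo_name (autofilled)
#       "hf_...",                        # token placeholder
#   ))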